From: Graff Yang <graff.yang@xxxxxxxxx>
This patch supports dynamic alloc/free of the percpu area for nommu
arches like blackfin.
It allocates contiguous pages in the function pcpu_get_vm_areas()
instead of getting non-contiguous pages and then vmapping them, as is
done on MMU arches.
Since we cannot get the real page structure through vmalloc_to_page(),
it also modifies the nommu versions of vmalloc_to_page()/vmalloc_to_pfn().
Signed-off-by: Graff Yang <graff.yang@xxxxxxxxx>
diff --git a/mm/nommu.c b/mm/nommu.c
index 605ace8..98bbdf4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -255,13 +255,15 @@ EXPORT_SYMBOL(vmalloc_user);
struct page *vmalloc_to_page(const void *addr)
{
-	return virt_to_page(addr);
+	return (struct page *)
+		(virt_to_page(addr)->index) ? : virt_to_page(addr);
 }
+#ifdef CONFIG_SMP
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+ pgprot_t prot, struct page **pages)
+{
+	int i, nr_page = size >> PAGE_SHIFT;
+	for (i = 0; i < nr_page; i++, addr += PAGE_SIZE)
+		virt_to_page(addr)->index = (pgoff_t)pages[i];
+	return size >> PAGE_SHIFT;
+}
+
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+	int i, nr_page = size >> PAGE_SHIFT;
+	for (i = 0; i < nr_page; i++, addr += PAGE_SIZE)
+ virt_to_page(addr)->index = 0;
+}
+
+struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+ const size_t *sizes, int nr_vms,
+ size_t align, gfp_t gfp_mask)