[PATCH mm v5 24/39] kasan, vmalloc: add vmalloc tagging for SW_TAGS

From: andrey.konovalov
Date: Thu Dec 30 2021 - 14:15:23 EST


From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

Add vmalloc tagging support to SW_TAGS KASAN.

- __kasan_unpoison_vmalloc() now assigns a random pointer tag, marks
the virtual mapping's shadow memory with that tag, and embeds the tag
into the returned pointer.

- __get_vm_area_node() (used by vmalloc() and vmap()) and
pcpu_get_vm_areas() save the tagged pointer into vm_struct->addr
(note: not into vmap_area->va_start). This requires calling
kasan_unpoison_vmalloc() after setup_vmalloc_vm[_locked]();
otherwise the latter would overwrite the tagged pointer.
The tagged pointer is then naturally propagated to vmalloc()
and vmap().

- vm_map_ram() returns the tagged pointer directly.

As a result of this change, vm_struct->addr is now tagged.

Enabling KASAN_VMALLOC with SW_TAGS is not yet allowed.
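For illustration only (not part of the patch), a minimal sketch of the
caller-visible effect on arm64, assuming CONFIG_KASAN_SW_TAGS and
CONFIG_KASAN_VMALLOC are both enabled (the latter only becomes
selectable for SW_TAGS in a later patch of this series);
vmalloc_tag_demo() is a hypothetical helper:

	#include <linux/printk.h>
	#include <linux/vmalloc.h>

	/* Hypothetical helper, not part of this patch: shows the
	 * caller-visible effect of vmalloc tagging under SW_TAGS. */
	static void vmalloc_tag_demo(void)
	{
		void *p = vmalloc(PAGE_SIZE);

		if (!p)
			return;

		/*
		 * With this patch, the top byte of p carries a random
		 * KASAN tag instead of the native 0xff; vm_struct->addr
		 * holds the same tagged value.
		 */
		pr_info("vmalloc() returned %px, tag 0x%02x\n",
			p, (u8)((unsigned long)p >> 56));

		vfree(p);
	}

Since vm_struct->addr stores the same tagged value, freeing the tagged
pointer keeps working via the tag resets added earlier in the series.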

Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

---

Changes v2->v3:
- Drop accidentally added kasan_unpoison_vmalloc() argument for when
KASAN is off.
- Drop __must_check for kasan_unpoison_vmalloc(), as its result is
sometimes intentionally ignored.
- Move allowing enabling KASAN_VMALLOC with SW_TAGS into a separate
patch.
- Update patch description.

Changes v1->v2:
- Allow enabling KASAN_VMALLOC with SW_TAGS in this patch.
---
include/linux/kasan.h | 16 ++++++++++------
mm/kasan/shadow.c     |  6 ++++--
mm/vmalloc.c          | 14 ++++++++------
3 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index da320069e7cf..92c5dfa29a35 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -424,12 +424,13 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end);

-void __kasan_unpoison_vmalloc(const void *start, unsigned long size);
-static __always_inline void kasan_unpoison_vmalloc(const void *start,
- unsigned long size)
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size);
+static __always_inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size)
{
if (kasan_enabled())
- __kasan_unpoison_vmalloc(start, size);
+ return __kasan_unpoison_vmalloc(start, size);
+ return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
@@ -454,8 +455,11 @@ static inline void kasan_release_vmalloc(unsigned long start,
unsigned long free_region_start,
unsigned long free_region_end) { }

-static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
-{ }
+static inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size)
+{
+ return (void *)start;
+}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 39d0b32ebf70..5a866f6663fc 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -475,12 +475,14 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
}
}

-void __kasan_unpoison_vmalloc(const void *start, unsigned long size)
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
if (!is_vmalloc_or_module_addr(start))
- return;
+ return (void *)start;

+ start = set_tag(start, kasan_random_tag());
kasan_unpoison(start, size, false);
+ return (void *)start;
}

/*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 52336b034fbb..da419db620ba 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2210,7 +2210,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
mem = (void *)addr;
}

- kasan_unpoison_vmalloc(mem, size);
+ mem = kasan_unpoison_vmalloc(mem, size);

if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
pages, PAGE_SHIFT) < 0) {
@@ -2443,10 +2443,10 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
return NULL;
}

- kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
-
setup_vmalloc_vm(area, va, flags, caller);

+ area->addr = kasan_unpoison_vmalloc(area->addr, requested_size);
+
return area;
}

@@ -3802,9 +3802,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
for (area = 0; area < nr_vms; area++) {
if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
goto err_free_shadow;
-
- kasan_unpoison_vmalloc((void *)vas[area]->va_start,
- sizes[area]);
}

/* insert all vm's */
@@ -3817,6 +3814,11 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
}
spin_unlock(&vmap_area_lock);

+ /* mark allocated areas as accessible */
+ for (area = 0; area < nr_vms; area++)
+ vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
+ vms[area]->size);
+
kfree(vas);
return vms;

--
2.25.1