[PATCH 3/5] kasan, mm: integrate page_alloc init with HW_TAGS

From: Andrey Konovalov
Date: Fri Mar 05 2021 - 19:19:00 EST


This change uses the memory initialization feature of HW_TAGS KASAN
routines, added earlier in this series, to initialize page_alloc memory
when init_on_alloc/free is enabled.
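
For reference, the feature in question makes kasan_unpoison()/kasan_poison()
take an init argument. A simplified sketch of what this amounts to on the
HW_TAGS side, not literal upstream code:

  static inline void kasan_unpoison(const void *addr, size_t size, bool init)
  {
          u8 tag = get_tag(addr);

          /*
           * For HW_TAGS, setting the memory tags can zero the memory in
           * the same pass when init is true, so no separate memset() of
           * the freshly allocated pages is needed.
           */
          hw_set_mem_tag_range(kasan_reset_tag(addr), size, tag, init);
  }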

With this change, kernel_init_free_pages() is no longer called when
both HW_TAGS KASAN and init_on_alloc/free are enabled. Instead, page_alloc
memory is initialized by the KASAN runtime.
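
Condensed, the free path now makes the init decision once and hands it to
whichever side actually clears the memory (helpers as in mm/page_alloc.c
below):

  bool init = want_init_on_free();

  /* Non-HW_TAGS builds: clear the freed pages with memset() as before. */
  if (init && !IS_ENABLED(CONFIG_KASAN_HW_TAGS))
          kernel_init_free_pages(page, 1 << order);

  /* HW_TAGS builds: the KASAN runtime clears the memory while retagging. */
  kasan_free_nondeferred_pages(page, order, init, fpi_flags);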

To keep future changes from introducing discrepancies in which memory
gets initialized, the KASAN and kernel_init_free_pages() hooks are placed
next to each other, and a warning comment is added.

This patch changes the order in which the memory initialization and page
poisoning hooks are called. This has no side effects: whenever page
poisoning is enabled, memory initialization gets disabled.
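
That mutual exclusion is established at boot time; a trimmed-down sketch
of the existing logic in mm/page_alloc.c (exact placement may differ):

  /*
   * When page poisoning is requested, the auto-init static keys are
   * disabled, so poisoning and initialization never apply to the same
   * page.
   */
  if (page_poisoning_requested) {
          static_branch_disable(&init_on_alloc);
          static_branch_disable(&init_on_free);
  }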

Combining the setting of allocation tags with memory initialization into
a single pass improves HW_TAGS KASAN performance when init_on_alloc/free
is enabled.
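
The saving comes from the arm64 MTE store-tag instructions, which can zero
a 16-byte granule while tagging it. A rough sketch in the spirit of the
earlier patch in this series that taught mte_set_mem_tag_range() to init
memory (simplified, instruction selection shown for illustration only):

  u64 curr = (u64)__tag_set(addr, tag);
  u64 end = curr + size;

  do {
          if (init)
                  /* stzg sets the tag and zeroes the granule in one store. */
                  asm volatile("stzg %0, [%0]" : : "r" (curr) : "memory");
          else
                  /* stg sets the tag only. */
                  asm volatile("stg %0, [%0]" : : "r" (curr) : "memory");
          curr += MTE_GRANULE_SIZE;
  } while (curr != end);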

Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
---
 include/linux/kasan.h | 16 ++++++++--------
 mm/kasan/common.c     |  8 ++++----
 mm/mempool.c          |  4 ++--
 mm/page_alloc.c       | 37 ++++++++++++++++++++++++++-----------
4 files changed, 40 insertions(+), 25 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 1d89b8175027..4c0f414a893b 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -120,20 +120,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
__kasan_unpoison_range(addr, size);
}

-void __kasan_alloc_pages(struct page *page, unsigned int order);
+void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_alloc_pages(struct page *page,
- unsigned int order)
+ unsigned int order, bool init)
{
if (kasan_enabled())
- __kasan_alloc_pages(page, order);
+ __kasan_alloc_pages(page, order, init);
}

-void __kasan_free_pages(struct page *page, unsigned int order);
+void __kasan_free_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_free_pages(struct page *page,
- unsigned int order)
+ unsigned int order, bool init)
{
if (kasan_enabled())
- __kasan_free_pages(page, order);
+ __kasan_free_pages(page, order, init);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -282,8 +282,8 @@ static inline slab_flags_t kasan_never_merge(void)
return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 316f7f8cd8e6..6107c795611f 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
return 0;
}

-void __kasan_alloc_pages(struct page *page, unsigned int order)
+void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
{
u8 tag;
unsigned long i;
@@ -108,14 +108,14 @@ void __kasan_alloc_pages(struct page *page, unsigned int order)
tag = kasan_random_tag();
for (i = 0; i < (1 << order); i++)
page_kasan_tag_set(page + i, tag);
- kasan_unpoison(page_address(page), PAGE_SIZE << order, false);
+ kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}

-void __kasan_free_pages(struct page *page, unsigned int order)
+void __kasan_free_pages(struct page *page, unsigned int order, bool init)
{
if (likely(!PageHighMem(page)))
kasan_poison(page_address(page), PAGE_SIZE << order,
- KASAN_FREE_PAGE, false);
+ KASAN_FREE_PAGE, init);
}

/*
diff --git a/mm/mempool.c b/mm/mempool.c
index 79959fac27d7..fe19d290a301 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -106,7 +106,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
kasan_slab_free_mempool(element);
else if (pool->alloc == mempool_alloc_pages)
- kasan_free_pages(element, (unsigned long)pool->pool_data);
+ kasan_free_pages(element, (unsigned long)pool->pool_data, false);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
@@ -114,7 +114,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
kasan_unpoison_range(element, __ksize(element));
else if (pool->alloc == mempool_alloc_pages)
- kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+ kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
}

static __always_inline void add_element(mempool_t *pool, void *element)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0efb07b5907c..175bdb36d113 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -396,14 +396,14 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
* initialization is done, but this is not likely to happen.
*/
static inline void kasan_free_nondeferred_pages(struct page *page, int order,
- fpi_t fpi_flags)
+ bool init, fpi_t fpi_flags)
{
if (static_branch_unlikely(&deferred_pages))
return;
if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
(fpi_flags & FPI_SKIP_KASAN_POISON))
return;
- kasan_free_pages(page, order);
+ kasan_free_pages(page, order, init);
}

/* Returns true if the struct page for the pfn is uninitialised */
@@ -455,12 +455,12 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
}
#else
static inline void kasan_free_nondeferred_pages(struct page *page, int order,
- fpi_t fpi_flags)
+ bool init, fpi_t fpi_flags)
{
if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
(fpi_flags & FPI_SKIP_KASAN_POISON))
return;
- kasan_free_pages(page, order);
+ kasan_free_pages(page, order, init);
}

static inline bool early_page_uninitialised(unsigned long pfn)
@@ -1242,6 +1242,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
unsigned int order, bool check_free, fpi_t fpi_flags)
{
int bad = 0;
+ bool init;

VM_BUG_ON_PAGE(PageTail(page), page);

@@ -1299,16 +1300,21 @@ static __always_inline bool free_pages_prepare(struct page *page,
debug_check_no_obj_freed(page_address(page),
PAGE_SIZE << order);
}
- if (want_init_on_free())
- kernel_init_free_pages(page, 1 << order);

kernel_poison_pages(page, 1 << order);

/*
+ * As memory initialization is integrated with hardware tag-based
+ * KASAN, kasan_free_pages and kernel_init_free_pages must be
+ * kept together to avoid discrepancies in behavior.
+ *
* With hardware tag-based KASAN, memory tags must be set before the
* page becomes unavailable via debug_pagealloc or arch_free_page.
*/
- kasan_free_nondeferred_pages(page, order, fpi_flags);
+ init = want_init_on_free();
+ if (init && !IS_ENABLED(CONFIG_KASAN_HW_TAGS))
+ kernel_init_free_pages(page, 1 << order);
+ kasan_free_nondeferred_pages(page, order, init, fpi_flags);

/*
* arch_free_page() can make the page's contents inaccessible. s390
@@ -2315,17 +2321,26 @@ static bool check_new_pages(struct page *page, unsigned int order)
inline void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags)
{
+ bool init;
+
set_page_private(page, 0);
set_page_refcounted(page);

arch_alloc_page(page, order);
debug_pagealloc_map_pages(page, 1 << order);
- kasan_alloc_pages(page, order);
- kernel_unpoison_pages(page, 1 << order);
- set_page_owner(page, order, gfp_flags);

- if (!want_init_on_free() && want_init_on_alloc(gfp_flags))
+ /*
+ * As memory initialization is integrated with hardware tag-based
+ * KASAN, kasan_alloc_pages and kernel_init_free_pages must be
+ * kept together to avoid discrepancies in behavior.
+ */
+ init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+ kasan_alloc_pages(page, order, init);
+ if (init && !IS_ENABLED(CONFIG_KASAN_HW_TAGS))
kernel_init_free_pages(page, 1 << order);
+
+ kernel_unpoison_pages(page, 1 << order);
+ set_page_owner(page, order, gfp_flags);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
--
2.30.1.766.gb4fecdf3b7-goog