[PATCH 2/2] x86/mm: Populate KASAN shadow for per-CPU DS buffers in CPU entry area

From: Sean Christopherson
Date: Fri Nov 04 2022 - 18:01:27 EST


Bounce through cea_map_percpu_pages() when setting the initial
protections for per-CPU DS buffers so that KASAN populates a shadow for
said mapping. Failure to populate the shadow will result in a
not-present #PF during KASAN validation if DS buffers are activated
later on.
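
For background on why the missing shadow manifests as a fault: generic
KASAN validates every access by reading a shadow byte derived from the
target address, so the shadow page itself must be mapped before the
first access it covers. A rough, self-contained sketch of that check
(illustration only, not code from this series; the scale shift and the
shadow_offset parameter model the generic KASAN layout):

  #include <stdbool.h>
  #include <stdint.h>

  /* One shadow byte describes an 8-byte granule of address space. */
  #define KASAN_SHADOW_SCALE_SHIFT 3

  /*
   * Simplified model of a generic KASAN load/store check: the shadow
   * byte is dereferenced unconditionally, so if the page holding it
   * was never populated, the check itself takes a not-present #PF.
   */
  static bool kasan_check_access(uintptr_t addr, uintptr_t shadow_offset)
  {
          int8_t shadow = *(int8_t *)((addr >> KASAN_SHADOW_SCALE_SHIFT) +
                                      shadow_offset);

          return shadow == 0;     /* zero: whole granule is accessible */
  }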

Fixes: 9fd429c28073 ("x86/kasan: Map shadow for percpu pages on demand")
Cc: Andrey Ryabinin <ryabinin.a.a@xxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
arch/x86/mm/cpu_entry_area.c | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index d831aae94b41..64ae557ceb22 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -91,13 +91,12 @@ void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
 static void __init
 cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
 {
-	phys_addr_t pa = per_cpu_ptr_to_phys(ptr);
+	int nid = ptr ? early_pfn_to_nid(PFN_DOWN(per_cpu_ptr_to_phys(ptr))) : 0;
 
-	kasan_populate_shadow_for_vaddr(cea_vaddr, pages * PAGE_SIZE,
-					early_pfn_to_nid(PFN_DOWN(pa)));
+	kasan_populate_shadow_for_vaddr(cea_vaddr, pages * PAGE_SIZE, nid);
 
 	for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
-		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
+		cea_set_pte(cea_vaddr, ptr ? per_cpu_ptr_to_phys(ptr) : 0, prot);
 }

static void __init percpu_setup_debug_store(unsigned int cpu)
@@ -121,8 +120,7 @@ static void __init percpu_setup_debug_store(unsigned int cpu)
 	 * memory like debug store buffers.
 	 */
 	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
-	for (; npages; npages--, cea += PAGE_SIZE)
-		cea_set_pte(cea, 0, PAGE_NONE);
+	cea_map_percpu_pages(cea, NULL, npages, PAGE_NONE);
 #endif
 }

--
2.38.1.431.g37b22c650d-goog