[PATCH v2 mm-hotfixes 5/5] x86/mm: drop unnecessary calls to sync_global_pgds() and fold it into its sole user

From: Harry Yoo
Date: Sun Jul 20 2025 - 19:44:44 EST


Now that page table synchronization is handled by
p*d_populate_kernel{,_init}(), the explicit sync_global_pgds() calls in
__kernel_physical_mapping_init() and vmemmap_populate() are no longer
necessary. Remove these redundant calls.

Additionally, arch_sync_kernel_mappings() is now the only remaining
caller of sync_global_pgds(), so fold sync_global_pgds() into it.
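
For context, the populate helpers introduced earlier in this series pair
the populate with a kernel mapping sync, roughly as sketched below. This
is only an illustration of the idea, not the exact definition from the
series: the (addr, pgd, p4d) signature and the helper body are assumed
here, while pgd_populate(), ARCH_PAGE_TABLE_SYNC_MASK,
PGTBL_PGD_MODIFIED and arch_sync_kernel_mappings() are existing kernel
interfaces:

  /*
   * Rough sketch: populate a kernel PGD entry, then propagate it to
   * every page table that mirrors the kernel half of the address
   * space. This is what makes the explicit sync_global_pgds() calls
   * removed below redundant.
   */
  static inline void pgd_populate_kernel(unsigned long addr,
                                         pgd_t *pgd, p4d_t *p4d)
  {
          pgd_populate(&init_mm, pgd, p4d);
          if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_PGD_MODIFIED)
                  arch_sync_kernel_mappings(addr, addr);
  }

Since every caller that builds kernel page tables now goes through these
helpers, synchronization happens at population time, and the x86-64 code
only needs to provide arch_sync_kernel_mappings().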

Cc: stable@xxxxxxxxxxxxxxx
Suggested-by: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Signed-off-by: Harry Yoo <harry.yoo@xxxxxxxxxx>
---
arch/x86/mm/init_64.c | 17 ++---------------
1 file changed, 2 insertions(+), 15 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e4922b9c8403..f1507de3b7a3 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -228,7 +228,7 @@ static void sync_global_pgds_l4(unsigned long start, unsigned long end)
* When memory was added make sure all the processes MM have
* suitable PGD entries in the local PGD level page.
*/
-static void sync_global_pgds(unsigned long start, unsigned long end)
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
if (pgtable_l5_enabled())
sync_global_pgds_l5(start, end);
@@ -236,11 +236,6 @@ static void sync_global_pgds(unsigned long start, unsigned long end)
sync_global_pgds_l4(start, end);
}

-void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
-{
- sync_global_pgds(start, end);
-}
-
/*
* NOTE: This function is marked __ref because it calls __init function
* (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
@@ -746,13 +741,11 @@ __kernel_physical_mapping_init(unsigned long paddr_start,
unsigned long page_size_mask,
pgprot_t prot, bool init)
{
- bool pgd_changed = false;
- unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
+ unsigned long vaddr, vaddr_end, vaddr_next, paddr_last;

paddr_last = paddr_end;
vaddr = (unsigned long)__va(paddr_start);
vaddr_end = (unsigned long)__va(paddr_end);
- vaddr_start = vaddr;

for (; vaddr < vaddr_end; vaddr = vaddr_next) {
pgd_t *pgd = pgd_offset_k(vaddr);
@@ -781,12 +774,8 @@ __kernel_physical_mapping_init(unsigned long paddr_start,
(pud_t *) p4d, init);

spin_unlock(&init_mm.page_table_lock);
- pgd_changed = true;
}

- if (pgd_changed)
- sync_global_pgds(vaddr_start, vaddr_end - 1);
-
return paddr_last;
}

@@ -1580,8 +1569,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
err = -ENOMEM;
} else
err = vmemmap_populate_basepages(start, end, node, NULL);
- if (!err)
- sync_global_pgds(start, end - 1);
return err;
}

--
2.43.0