On Fri, Jun 13, 2025 at 07:13:51PM +0530, Dev Jain wrote:
-/*
- * This function assumes that the range is mapped with PAGE_SIZE pages.
- */
-static int __change_memory_common(unsigned long start, unsigned long size,
+static int ___change_memory_common(unsigned long start, unsigned long size,
 				pgprot_t set_mask, pgprot_t clear_mask)
 {
 	struct page_change_data data;
@@ -61,9 +140,28 @@ static int __change_memory_common(unsigned long start, unsigned long size,
 	data.set_mask = set_mask;
 	data.clear_mask = clear_mask;
-	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-				  &data);
+	arch_enter_lazy_mmu_mode();
+
+	/*
+	 * The caller must ensure that the range we are operating on does not
+	 * partially overlap a block mapping. Any such case should either not
+	 * exist, or must be eliminated by splitting the mapping - which for
+	 * kernel mappings can be done only on BBML2 systems.
+	 *
+	 */
+	ret = walk_kernel_page_table_range_lockless(start, start + size,
+						    &pageattr_ops, NULL, &data);

x86 has a cpa_lock for set_memory/set_direct_map to ensure that there's no
concurrency in kernel page table updates. I think arm64 has to have such a
lock as well; see the rough sketch after the quoted hunk below.

+	arch_leave_lazy_mmu_mode();
+
+	return ret;
+}
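
To illustrate what I mean, something along these lines. This is only a
rough, untested sketch: the lock name (pageattr_lock) is made up, and it
assumes __change_memory_common() ends up as a thin wrapper around the new
___change_memory_common(); adjust to however the final callers look.

	/* Hypothetical lock, playing the role cpa_lock has on x86. */
	static DEFINE_MUTEX(pageattr_lock);

	static int __change_memory_common(unsigned long start, unsigned long size,
					  pgprot_t set_mask, pgprot_t clear_mask)
	{
		int ret;

		/*
		 * Serialise kernel page table updates so that concurrent
		 * set_memory/set_direct_map callers cannot race while the
		 * lockless walk rewrites the kernel mappings.
		 */
		mutex_lock(&pageattr_lock);
		ret = ___change_memory_common(start, size, set_mask, clear_mask);
		mutex_unlock(&pageattr_lock);

		return ret;
	}

A mutex is probably the simpler choice if these paths may sleep; if the
lock ever has to be taken from non-sleeping context, a spinlock along the
lines of x86's cpa_lock would be needed instead.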