There is no need to modify the page table synchronization mask
while apply_to_pte_range() holds the user page table spinlock.
Signed-off-by: Alexander Gordeev <agordeev@xxxxxxxxxxxxx>
---
mm/memory.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mm/memory.c b/mm/memory.c
index 8eba595056fe..6849ab4e44bf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3035,12 +3035,13 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
}
} while (pte++, addr += PAGE_SIZE, addr != end);
}
- *mask |= PGTBL_PTE_MODIFIED;
arch_leave_lazy_mmu_mode();
if (mm != &init_mm)
pte_unmap_unlock(mapped_pte, ptl);
+ *mask |= PGTBL_PTE_MODIFIED;
+
return err;
}