[PATCH] mm/vma: correctly invoke late KSM check after mmap hook

From: Lorenzo Stoakes
Date: Fri Jun 20 2025 - 08:21:03 EST


Previously, in the case where we were unable to perform the KSM applicability
check early, we erroneously still performed it prior to invoking the
f_op->mmap() hook.

This is problematic, as filesystems such as hugetlbfs, which use anonymous
memory and might otherwise get KSM'd, only set VM_HUGETLB in the f_op->mmap()
hook, i.e. after the check has already been performed.

Correct this by performing the check at the appropriate time: after the
f_op->mmap() hook has been invoked, in the case where the check cannot be
performed early.
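
For illustration, the resulting ordering is sketched below in simplified form
(error handling and the VMA merge path are omitted, and the sketch assumes the
f_op->mmap() hook is reached via the file-backed VMA setup in
__mmap_new_file_vma(); see the diff for the actual change):

	/* __mmap_region(), simplified sketch: */
	map.check_ksm_early = can_set_ksm_flags_early(&map);
	error = __mmap_prepare(&map, uf);
	if (map.check_ksm_early)
		update_ksm_flags(&map);	/* no hook can change relevant flags */
	error = __mmap_new_vma(&map, &vma);

	/* __mmap_new_vma(), simplified sketch: */
	error = __mmap_new_file_vma(map, vma);	/* invokes f_op->mmap() */
	if (!map->check_ksm_early) {
		update_ksm_flags(map);	/* now sees e.g. VM_HUGETLB */
		vm_flags_init(vma, map->vm_flags);
	}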

Reported-by: syzbot+a74a028d848147bc5931@xxxxxxxxxxxxxxxxxxxxxxxxx
Closes: https://lore.kernel.org/all/6853fc57.a00a0220.137b3.0009.GAE@xxxxxxxxxx/
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx>
---
mm/vma.c | 27 +++++++++++++++++----------
1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/mm/vma.c b/mm/vma.c
index 4abed296d882..eccc4e0b4d32 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -32,6 +32,9 @@ struct mmap_state {
struct vma_munmap_struct vms;
struct ma_state mas_detach;
struct maple_tree mt_detach;
+
+ /* Determine if we can check KSM flags early in mmap() logic. */
+ bool check_ksm_early;
};

#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vm_flags_, file_) \
@@ -2334,6 +2337,11 @@ static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
vms_complete_munmap_vmas(vms, mas_detach);
}

+static void update_ksm_flags(struct mmap_state *map)
+{
+ map->vm_flags = ksm_vma_flags(map->mm, map->file, map->vm_flags);
+}
+
/*
* __mmap_prepare() - Prepare to gather any overlapping VMAs that need to be
* unmapped once the map operation is completed, check limits, account mapping
@@ -2438,6 +2446,7 @@ static int __mmap_new_file_vma(struct mmap_state *map,
!(map->vm_flags & VM_MAYWRITE) &&
(vma->vm_flags & VM_MAYWRITE));

+ map->file = vma->vm_file;
map->vm_flags = vma->vm_flags;

return 0;
@@ -2487,6 +2496,11 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
if (error)
goto free_iter_vma;

+ if (!map->check_ksm_early) {
+ update_ksm_flags(map);
+ vm_flags_init(vma, map->vm_flags);
+ }
+
#ifdef CONFIG_SPARC64
/* TODO: Fix SPARC ADI! */
WARN_ON_ONCE(!arch_validate_flags(map->vm_flags));
@@ -2606,11 +2620,6 @@ static void set_vma_user_defined_fields(struct vm_area_struct *vma,
vma->vm_private_data = map->vm_private_data;
}

-static void update_ksm_flags(struct mmap_state *map)
-{
- map->vm_flags = ksm_vma_flags(map->mm, map->file, map->vm_flags);
-}
-
/*
* Are we guaranteed no driver can change state such as to preclude KSM merging?
* If so, let's set the KSM mergeable flag early so we don't break VMA merging.
@@ -2650,7 +2659,8 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr,
bool have_mmap_prepare = file && file->f_op->mmap_prepare;
VMA_ITERATOR(vmi, mm, addr);
MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
- bool check_ksm_early = can_set_ksm_flags_early(&map);
+
+ map.check_ksm_early = can_set_ksm_flags_early(&map);

error = __mmap_prepare(&map, uf);
if (!error && have_mmap_prepare)
@@ -2658,7 +2668,7 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr,
if (error)
goto abort_munmap;

- if (check_ksm_early)
+ if (map.check_ksm_early)
update_ksm_flags(&map);

/* Attempt to merge with adjacent VMAs... */
@@ -2670,9 +2680,6 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr,

/* ...but if we can't, allocate a new VMA. */
if (!vma) {
- if (!check_ksm_early)
- update_ksm_flags(&map);
-
error = __mmap_new_vma(&map, &vma);
if (error)
goto unacct_error;
--
2.49.0