[PATCH v3] mm/ksm: introduce ksm_force for each process

From: cgel . zte
Date: Sat May 07 2022 - 01:47:14 EST


From: xu xin <xu.xin16@xxxxxxxxxx>

To use KSM, an application must explicitly call madvise() in its own
code, which means apps already installed on the OS have to be modified
at the source level and reinstalled. This is inconvenient.
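
For reference, opting a region into KSM today looks roughly like the
minimal userspace sketch below (the mapping size is illustrative and
error handling is mostly omitted):

  #define _GNU_SOURCE
  #include <sys/mman.h>

  int main(void)
  {
          size_t len = 4 * 1024 * 1024;
          /* KSM only considers anonymous mappings */
          void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          if (buf == MAP_FAILED)
                  return 1;
          /* each mergeable region has to be marked in the source */
          madvise(buf, len, MADV_MERGEABLE);
          /* ... use buf ... */
          return 0;
  }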

In order to change this situation, we add a new proc file 'ksm_force'
under /proc/<pid>/ to support turning KSM scanning of a process's mm
on and off dynamically.

If ksm_force is set to 1, all anonymous and otherwise 'qualified' VMAs
of this mm are forced to take part in KSM scanning without explicitly
calling madvise() to mark them MADV_MERGEABLE. This is effective only
when the knob '/sys/kernel/mm/ksm/run' is set to 1.

If ksm_force is set to 0, the forced scanning is cancelled for this
process and those merged pages which were not madvised as MERGEABLE
are unmerged, while MADV_MERGEABLE areas are left merged.
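
For illustration, a minimal usage sketch in C (pid 1234 is only a
placeholder, the root-owned files assume suitable privileges, and
error handling is omitted); the run knob is the existing ksmd switch:

  #include <fcntl.h>
  #include <unistd.h>

  int main(void)
  {
          int fd;

          /* ksmd itself must be running for ksm_force to take effect */
          fd = open("/sys/kernel/mm/ksm/run", O_WRONLY);
          write(fd, "1", 1);
          close(fd);

          /* turn forced KSM scanning on for the target process */
          fd = open("/proc/1234/ksm_force", O_WRONLY);
          write(fd, "1", 1);
          close(fd);

          /* writing "0" to the same file cancels forced scanning */
          return 0;
  }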

Signed-off-by: xu xin <xu.xin16@xxxxxxxxxx>
Reviewed-by: Yang Yang <yang.yang29@xxxxxxxxxx>
Reviewed-by: Ran Xiaokai <ran.xiaokai@xxxxxxxxxx>
Reviewed-by: wangyong <wang.yong12@xxxxxxxxxx>
Reviewed-by: Yunkai Zhang <zhang.yunkai@xxxxxxxxxx>
---
v3:
- fix a compile error in mm/ksm.c
v2:
- fix a spelling error in the commit log.
- remove a redundant condition check in ksm_force_write().
---
fs/proc/base.c | 99 ++++++++++++++++++++++++++++++++++++++++
include/linux/mm_types.h | 9 ++++
mm/ksm.c | 32 ++++++++++++-
3 files changed, 138 insertions(+), 2 deletions(-)

diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8dfa36a99c74..3115ffa4c9fb 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -96,6 +96,7 @@
#include <linux/time_namespace.h>
#include <linux/resctrl.h>
#include <linux/cn_proc.h>
+#include <linux/ksm.h>
#include <trace/events/oom.h>
#include "internal.h"
#include "fd.h"
@@ -3168,6 +3169,102 @@ static int proc_pid_ksm_merging_pages(struct seq_file *m, struct pid_namespace *

return 0;
}
+
+static ssize_t ksm_force_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct task_struct *task;
+ struct mm_struct *mm;
+ char buffer[PROC_NUMBUF];
+ ssize_t len;
+ int ret;
+
+ task = get_proc_task(file_inode(file));
+ if (!task)
+ return -ESRCH;
+
+ mm = get_task_mm(task);
+ ret = 0;
+ if (mm) {
+ len = snprintf(buffer, sizeof(buffer), "%d\n", mm->ksm_force);
+ ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
+ mmput(mm);
+ }
+
+ return ret;
+}
+
+static ssize_t ksm_force_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task;
+ struct mm_struct *mm;
+ char buffer[PROC_NUMBUF];
+ int force;
+ int err = 0;
+
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+ if (copy_from_user(buffer, buf, count)) {
+ err = -EFAULT;
+ goto out_return;
+ }
+
+ err = kstrtoint(strstrip(buffer), 0, &force);
+
+ if (err)
+ goto out_return;
+ if (force != 0 && force != 1) {
+ err = -EINVAL;
+ goto out_return;
+ }
+
+ task = get_proc_task(file_inode(file));
+ if (!task) {
+ err = -ESRCH;
+ goto out_return;
+ }
+
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out_put_task;
+
+ if (mm->ksm_force != force) {
+ if (mmap_write_lock_killable(mm)) {
+ err = -EINTR;
+ goto out_mmput;
+ }
+
+ if (force == 0) {
+ mm->ksm_force = force;
+ } else {
+ /*
+ * Force anonymous pages of this mm to be involved in KSM merging
+ * without explicitly calling madvise.
+ */
+ if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
+ err = __ksm_enter(mm);
+ if (!err)
+ mm->ksm_force = force;
+ }
+
+ mmap_write_unlock(mm);
+ }
+
+out_mmput:
+ mmput(mm);
+out_put_task:
+ put_task_struct(task);
+out_return:
+ return err < 0 ? err : count;
+}
+
+static const struct file_operations proc_pid_ksm_force_operations = {
+ .read = ksm_force_read,
+ .write = ksm_force_write,
+ .llseek = generic_file_llseek,
+};
#endif /* CONFIG_KSM */

#ifdef CONFIG_STACKLEAK_METRICS
@@ -3303,6 +3400,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#endif
#ifdef CONFIG_KSM
ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages),
+ REG("ksm_force", S_IRUSR|S_IWUSR, proc_pid_ksm_force_operations),
#endif
};

@@ -3639,6 +3737,7 @@ static const struct pid_entry tid_base_stuff[] = {
#endif
#ifdef CONFIG_KSM
ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages),
+ REG("ksm_force", S_IRUSR|S_IWUSR, proc_pid_ksm_force_operations),
#endif
};

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index b34ff2cdbc4f..1b1592c2f5cf 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -661,6 +661,15 @@ struct mm_struct {
* merging.
*/
unsigned long ksm_merging_pages;
+ /*
+ * If true, force anonymous pages of this mm to be involved in KSM
+ * merging without explicitly calling madvise. It is effective only
+ * when the knob '/sys/kernel/mm/ksm/run' is set to 1. If false,
+ * cancel the ksm_force feature of this process and unmerge those
+ * merged pages of this process which were not madvised as
+ * MERGEABLE, but leave MERGEABLE areas merged.
+ */
+ bool ksm_force;
#endif
} __randomize_layout;

diff --git a/mm/ksm.c b/mm/ksm.c
index 38360285497a..c9f672dcc72e 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -334,6 +334,34 @@ static void __init ksm_slab_free(void)
mm_slot_cache = NULL;
}

+/* Check if vma is qualified for ksmd scanning */
+static bool ksm_vma_check(struct vm_area_struct *vma)
+{
+ unsigned long vm_flags = vma->vm_flags;
+
+ if (!(vm_flags & VM_MERGEABLE) && !vma->vm_mm->ksm_force)
+ return false;
+
+ if (vm_flags & (VM_SHARED | VM_MAYSHARE |
+ VM_PFNMAP | VM_IO | VM_DONTEXPAND |
+ VM_HUGETLB | VM_MIXEDMAP))
+ return false; /* just ignore this vma */
+
+ if (vma_is_dax(vma))
+ return false;
+
+#ifdef VM_SAO
+ if (vm_flags & VM_SAO)
+ return false;
+#endif
+#ifdef VM_SPARC_ADI
+ if (vm_flags & VM_SPARC_ADI)
+ return false;
+#endif
+
+ return true;
+}
+
static __always_inline bool is_stable_node_chain(struct stable_node *chain)
{
return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
@@ -523,7 +551,7 @@ static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
if (ksm_test_exit(mm))
return NULL;
vma = vma_lookup(mm, addr);
- if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
+ if (!vma || !ksm_vma_check(vma) || !vma->anon_vma)
return NULL;
return vma;
}
@@ -2297,7 +2325,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
vma = find_vma(mm, ksm_scan.address);

for (; vma; vma = vma->vm_next) {
- if (!(vma->vm_flags & VM_MERGEABLE))
+ if (!ksm_vma_check(vma))
continue;
if (ksm_scan.address < vma->vm_start)
ksm_scan.address = vma->vm_start;
--
2.25.1