[RFC PATCH V1 1/1] sched/numa: Enhance vma scanning logic

From: Raghavendra K T
Date: Sun Jan 15 2023 - 20:36:53 EST


During NUMA scanning, make sure only the relevant VMAs of the
tasks are scanned.

Logic:
1) For the first two scan rounds (numa_scan_seq < 2), allow
unconditional scanning of VMAs.
2) Store the last 8 bits of the PIDs of the 4 most recent unique tasks
that accessed the VMA. False negatives in case of collisions should be
fine here.
3) If more than 4 PIDs exist, assume the task did indeed access the
VMA, to avoid false negatives (see the sketch after this list).
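
For illustration, here is a minimal standalone C sketch of the slot
scheme. This is illustration only: PID_SHIFT, PID_MASK, record_pid()
and is_accessed() are made-up stand-ins for LAST__PID_SHIFT,
LAST__PID_MASK and the kernel-side code, and the first-two-rounds
bypass is omitted.

#include <stdio.h>

#define PID_SHIFT 8				/* bits kept per PID (cf. LAST__PID_SHIFT) */
#define PID_MASK ((1U << PID_SHIFT) - 1)	/* cf. LAST__PID_MASK */
#define MAX_PIDS (sizeof(unsigned int) * 8 / PID_SHIFT)	/* 4 slots */

static unsigned int accessing_pids;	/* stand-in for vma->accessing_pids */
static unsigned int next_pid_slot;	/* stand-in for vma->next_pid_slot */

/* Store the low 8 bits of @pid in the next slot, skipping duplicates. */
static void record_pid(unsigned int pid)
{
	unsigned int i, hash = pid & PID_MASK;

	for (i = 0; i < MAX_PIDS; i++)
		if (((accessing_pids >> i * PID_SHIFT) & PID_MASK) == hash)
			return;		/* already recorded */

	accessing_pids &= ~(PID_MASK << (next_pid_slot * PID_SHIFT));
	accessing_pids |= hash << (next_pid_slot * PID_SHIFT);
	next_pid_slot = (next_pid_slot + 1) % MAX_PIDS;
}

/* True if @pid matches a slot, or if all four slots are occupied. */
static int is_accessed(unsigned int pid)
{
	unsigned int i, slot, hash = pid & PID_MASK;

	for (i = 0; i < MAX_PIDS; i++) {
		slot = (accessing_pids >> i * PID_SHIFT) & PID_MASK;
		if (slot == hash)
			return 1;
		if (slot == 0)
			return 0;	/* empty slot: pid was never seen */
	}
	return 1;	/* >= 4 PIDs seen: assume accessed */
}

int main(void)
{
	record_pid(101);
	record_pid(102);
	printf("101:%d 999:%d\n", is_accessed(101), is_accessed(999));	/* 1 0 */

	record_pid(103);
	record_pid(104);
	printf("999:%d\n", is_accessed(999));	/* 1: slots full, assumed */
	return 0;
}

Note that, as in the patch, a PID whose low 8 bits happen to be zero is
indistinguishable from an empty slot in this scheme.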

Suggested-by: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Co-developed-by: Bharata B Rao <bharata@xxxxxxx>
(initial patch to store PID information)
Signed-off-by: Bharata B Rao <bharata@xxxxxxx>
Signed-off-by: Raghavendra K T <raghavendra.kt@xxxxxxx>
---
 include/linux/mm_types.h |  2 ++
 kernel/sched/fair.c      | 32 ++++++++++++++++++++++++++++++++
 mm/memory.c              | 21 +++++++++++++++++++++
 3 files changed, 55 insertions(+)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 500e536796ca..07feae37b8e6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -506,6 +506,8 @@ struct vm_area_struct {
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
 #endif
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+	unsigned int accessing_pids;	/* last 8 bits of recently faulting PIDs */
+	int next_pid_slot;		/* round-robin slot for the next PID */
 } __randomize_layout;

struct kioctx_table;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e4a0b8bd941c..944d2e3b0b3c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2916,6 +2916,35 @@ static void reset_ptenuma_scan(struct task_struct *p)
 	p->mm->numa_scan_offset = 0;
 }
 
+static bool vma_is_accessed(struct vm_area_struct *vma)
+{
+	int i;
+	bool more_pids_exist;
+	unsigned long pid, max_pids;
+	unsigned long current_pid = current->pid & LAST__PID_MASK;
+
+	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
+
+	/* By default assume >= max_pids tasks have accessed this VMA */
+	more_pids_exist = true;
+
+	if (READ_ONCE(current->mm->numa_scan_seq) < 2)
+		return true;
+
+	for (i = 0; i < max_pids; i++) {
+		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
+			LAST__PID_MASK;
+		if (pid == current_pid)
+			return true;
+		if (pid == 0) {
+			more_pids_exist = false;
+			break;
+		}
+	}
+
+	return more_pids_exist;
+}
+
/*
* The expensive part of numa migration is done from task_work context.
* Triggered from task_tick_numa().
@@ -3015,6 +3044,9 @@ static void task_numa_work(struct callback_head *work)
 		if (!vma_is_accessible(vma))
 			continue;
 
+		if (!vma_is_accessed(vma))
+			continue;
+
 		do {
 			start = max(start, vma->vm_start);
 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
diff --git a/mm/memory.c b/mm/memory.c
index 8c8420934d60..fafd78d87a51 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4717,7 +4717,28 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	pte_t pte, old_pte;
 	bool was_writable = pte_savedwrite(vmf->orig_pte);
 	int flags = 0;
+	int i, pid_slot = vma->next_pid_slot;
+	unsigned long pid, max_pids;
+	unsigned long current_pid = current->pid & LAST__PID_MASK;
 
+	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
+
+	/* Avoid a duplicate PID update */
+	for (i = 0; i < max_pids; i++) {
+		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
+			LAST__PID_MASK;
+		if (pid == current_pid)
+			goto skip_update;
+	}
+
+	/* Save the PID in the current slot, then advance round-robin */
+	vma->accessing_pids &= ~(LAST__PID_MASK <<
+				 (pid_slot * LAST__PID_SHIFT));
+	vma->accessing_pids |= current_pid <<
+			       (pid_slot * LAST__PID_SHIFT);
+	vma->next_pid_slot = (pid_slot + 1) % max_pids;
+
+skip_update:
 	/*
 	 * The "pte" at this point cannot be used safely without
 	 * validation through pte_unmap_same(). It's of NUMA type but
--
2.34.1