[PATCH 19/19] mm, numa: retry failed page migrations

From: Peter Zijlstra
Date: Tue Jul 31 2012 - 15:48:40 EST


From: Rik van Riel <riel@xxxxxxxxxx>

Keep track, per process, of how many NUMA page migrations succeeded
and how many failed in a way that is worth retrying later (-ENOMEM
or -EBUSY from migrate_misplaced_page()).

If more than a quarter of a process's recent page migrations fail,
unmap the process pages at some point later, so the migration can be
tried again at the next fault.

Signed-off-by: Rik van Riel <riel@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
---
 include/linux/mm_types.h |    2 ++
 kernel/sched/core.c      |    2 ++
 kernel/sched/fair.c      |   19 ++++++++++++++++++-
 mm/memory.c              |   15 ++++++++++++---
 4 files changed, 34 insertions(+), 4 deletions(-)
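
The retry heuristic is simple enough to model in isolation. Below is
a minimal userspace sketch of the ratio check and the aging step;
struct numa_stats, should_retry_later() and age_stats() are
illustrative names only -- in the patch the counters live in
mm_struct, the check is many_migrate_failures() and the aging is
done from task_numa_work():

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the two counters added to mm_struct. */
struct numa_stats {
	unsigned int migrate_success;
	unsigned int migrate_failed;
};

/*
 * Mirrors many_migrate_failures(): true when more than 1/4 of the
 * attempted migrations failed. failed / (failed + success) > 1/4
 * rearranges to failed * 3 > success.
 */
static bool should_retry_later(const struct numa_stats *s)
{
	return s->migrate_failed * 3 > s->migrate_success;
}

/*
 * Mirrors the aging step in task_numa_work(): halving both counters
 * decays old history exponentially and keeps them from overflowing.
 */
static void age_stats(struct numa_stats *s)
{
	s->migrate_failed /= 2;
	s->migrate_success /= 2;
}

int main(void)
{
	struct numa_stats s = { .migrate_success = 9, .migrate_failed = 3 };

	/* 3 failures in 12 attempts is exactly 1/4: not enough. */
	printf("retry? %d\n", should_retry_later(&s));	/* 0 */

	s.migrate_failed = 4;	/* 4 in 13 is more than 1/4 */
	printf("retry? %d\n", should_retry_later(&s));	/* 1 */

	age_stats(&s);	/* failed 4 -> 2, success 9 -> 4 */
	printf("retry? %d\n", should_retry_later(&s));	/* 2*3 > 4: 1 */

	return 0;
}

Halving both counters on every scan interval turns them into an
exponentially decaying history, so old failures age out quickly and
the counters never get anywhere near overflow.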
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -397,6 +397,8 @@ struct mm_struct {
 #ifdef CONFIG_NUMA
 	unsigned int numa_big;
 	unsigned long numa_next_scan;
+	unsigned int numa_migrate_success;
+	unsigned int numa_migrate_failed;
 #endif
 	struct uprobes_state uprobes_state;
 };
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1727,6 +1727,8 @@ static void __sched_fork(struct task_str
 	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
 		p->mm->numa_big = 0;
 		p->mm->numa_next_scan = jiffies;
+		p->mm->numa_migrate_success = 0;
+		p->mm->numa_migrate_failed = 0;
 	}
 
 	p->node = -1;
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -857,6 +857,18 @@ static bool task_numa_big(struct task_st
 	return runtime > walltime * max(1, weight / 2);
 }
 
+static bool many_migrate_failures(struct task_struct *p)
+{
+	if (!p->mm)
+		return false;
+
+	/* More than 1/4 of the attempted NUMA page migrations failed. */
+	if (p->mm->numa_migrate_failed * 3 > p->mm->numa_migrate_success)
+		return true;
+
+	return false;
+}
+
 /*
  * The expensive part of numa migration is done from task_work context.
  */
@@ -909,6 +921,10 @@ void task_numa_work(struct task_work *wo
 		rcu_read_unlock();
 	}
 
+	/* Age the numa migrate statistics. */
+	p->mm->numa_migrate_failed /= 2;
+	p->mm->numa_migrate_success /= 2;
+
 	/*
 	 * Trigger fault driven migration, small processes do direct
 	 * lazy migration, big processes do gradual task<->page relations.
@@ -962,7 +978,8 @@ void task_tick_numa(struct rq *rq, struc
 	 * keep the task<->page map accurate.
 	 */
 	if (curr->node_last == node &&
-	    (curr->node != node || curr->mm->numa_big)) {
+	    (curr->node != node || curr->mm->numa_big ||
+	     many_migrate_failures(curr))) {
 		/*
 		 * We can re-use curr->rcu because we checked curr->mm
 		 * != NULL so release_task()->call_rcu() was not called
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3452,7 +3452,7 @@ static int do_prot_none(struct mm_struct
 {
 	struct page *page = NULL;
 	spinlock_t *ptl;
-	int node;
+	int node, ret;
 
 	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
@@ -3472,18 +3472,27 @@ static int do_prot_none(struct mm_struct
 	pte_unmap_unlock(ptep, ptl);
 
 	node = mpol_misplaced(page, vma, address, mm->numa_big);
-	if (node == -1)
+	if (node == -1) {
+		mm->numa_migrate_success++;
 		goto do_fixup;
+	}
 
 	/*
 	 * Page migration will install a new pte with vma->vm_page_prot,
 	 * otherwise fall-through to the fixup. Next time,.. perhaps.
 	 */
-	if (!migrate_misplaced_page(mm, page, node)) {
+	ret = migrate_misplaced_page(mm, page, node);
+	if (!ret) {
+		mm->numa_migrate_success++;
 		put_page(page);
 		return 0;
 	}
 
+	if (ret == -ENOMEM || ret == -EBUSY) {
+		/* This fault should be tried again later. */
+		mm->numa_migrate_failed++;
+	}
+
 do_fixup:
 	/*
 	 * OK, nothing to do,.. change the protection back to what it
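
For completeness, here is a condensed, self-contained model of the
accounting added to do_prot_none() above; account_numa_fault() and
the file-scope counters are illustrative stand-ins for the mm_struct
fields, migrate_err plays the role of the migrate_misplaced_page()
return value, and locking, refcounting and the pte fixup are all
elided:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the mm_struct counters. */
static unsigned int numa_migrate_success;
static unsigned int numa_migrate_failed;

static void account_numa_fault(int node, int migrate_err)
{
	if (node == -1) {
		/*
		 * mpol_misplaced() says the page is already well
		 * placed; counting it as a success keeps the failure
		 * ratio down when little actually needs moving.
		 */
		numa_migrate_success++;
		return;
	}

	if (!migrate_err) {
		numa_migrate_success++;	/* page actually migrated */
		return;
	}

	/* Only transient errors make a later retry worthwhile. */
	if (migrate_err == -ENOMEM || migrate_err == -EBUSY)
		numa_migrate_failed++;
}

int main(void)
{
	account_numa_fault(-1, 0);	/* already on the right node */
	account_numa_fault(1, -ENOMEM);	/* transient failure, retry */
	account_numa_fault(1, 0);	/* migrated successfully */

	printf("success=%u failed=%u\n",
	       numa_migrate_success, numa_migrate_failed);
	return 0;
}

Note that only -ENOMEM and -EBUSY bump the failure count: the patch
treats those as transient conditions where a later retry can
plausibly succeed, while other migration errors are left uncounted.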

