[RFC][PATCH v2 11/11] context_tracking,x86: Fix text_poke_sync() vs NOHZ_FULL

From: Peter Zijlstra
Date: Wed Sep 29 2021 - 11:58:19 EST


Use the new context_tracking infrastructure to avoid disturbing
userspace tasks when we rewrite kernel code.

XXX: re-audit the entry code to make sure nothing but the
context_tracking static_branch is evaluated before hitting this code.
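
Roughly: text_poke_sync() first tries to post CT_WORK_SYNC to every CPU
that is currently executing in userspace; only CPUs for which that
fails still get the IPI. A CPU in userspace cannot be running the
kernel text being patched, and it will execute sync_core() on its next
kernel entry before it can. A minimal, compilable C11 model of that
handshake follows; the state layout and helper names are illustrative
assumptions, not the kernel's implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CT_SEQ_USER	0x1	/* CPU is executing in userspace */
#define CT_SEQ_WORK	0x2	/* deferred work is pending */
#define CT_WORK_SYNC	0x2

struct ct_state {
	atomic_uint seq;	/* state/sequence word */
	atomic_uint work;	/* pending CT_WORK_n bits */
};

static struct ct_state cpu_ct[4];

/*
 * Post deferred work iff the target CPU is (still) in userspace: set
 * the work bit, then publish it by flipping CT_SEQ_WORK on the same
 * snapshot of ->seq. If the CPU entered the kernel meanwhile, the
 * cmpxchg fails and the caller falls back to the IPI.
 */
static bool set_cpu_work(int cpu, unsigned int work)
{
	struct ct_state *ct = &cpu_ct[cpu];
	unsigned int old = atomic_load(&ct->seq);

	if (!(old & CT_SEQ_USER))
		return false;

	atomic_fetch_or(&ct->work, work);
	return atomic_compare_exchange_strong(&ct->seq, &old,
					      old | CT_SEQ_WORK);
}

int main(void)
{
	atomic_store(&cpu_ct[0].seq, CT_SEQ_USER);	/* cpu0 in user */
	atomic_store(&cpu_ct[1].seq, 0);		/* cpu1 in kernel */

	for (int cpu = 0; cpu < 2; cpu++)
		printf("cpu%d: %s\n", cpu,
		       set_cpu_work(cpu, CT_WORK_SYNC) ?
		       "deferred to kernel entry" : "IPI + sync_core()");
	return 0;
}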

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 arch/x86/include/asm/sync_core.h |  2 ++
 arch/x86/kernel/alternative.c    |  8 +++++++-
 include/linux/context_tracking.h |  1 +
 kernel/context_tracking.c        | 12 ++++++++++++
 4 files changed, 22 insertions(+), 1 deletion(-)

--- a/arch/x86/include/asm/sync_core.h
+++ b/arch/x86/include/asm/sync_core.h
@@ -87,6 +87,8 @@ static inline void sync_core(void)
 	 */
 	iret_to_self();
 }
+#define sync_core sync_core
+
 
 /*
  * Ensure that a core serializing instruction is issued before returning
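
(Aside, not part of the patch: the define-to-itself exists so generic
code can detect the arch override with the preprocessor. A stand-alone
sketch of the idiom, with a puts() standing in for the serializing
instruction:)

#include <stdio.h>

/* "arch" header: provide the real implementation and define the name
 * to itself so its presence is visible to #ifndef */
static inline void sync_core(void) { puts("serialize"); }
#define sync_core sync_core

/* "generic" code: no-op fallback, compiled out here because the
 * override above is defined -- mirrors the context_tracking.c hunk */
#ifndef sync_core
static inline void sync_core(void) { }
#endif

int main(void)
{
	sync_core();	/* resolves to the arch version */
	return 0;
}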
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -18,6 +18,7 @@
 #include <linux/mmu_context.h>
 #include <linux/bsearch.h>
 #include <linux/sync_core.h>
+#include <linux/context_tracking.h>
 #include <asm/text-patching.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
@@ -924,9 +925,14 @@ static void do_sync_core(void *info)
 	sync_core();
 }
 
+static bool do_sync_core_cond(int cpu, void *info)
+{
+	return !context_tracking_set_cpu_work(cpu, CT_WORK_SYNC);
+}
+
 void text_poke_sync(void)
 {
-	on_each_cpu(do_sync_core, NULL, 1);
+	on_each_cpu_cond(do_sync_core_cond, do_sync_core, NULL, 1);
 }
 
 struct text_poke_loc {
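
(Note the inversion in do_sync_core_cond(): on_each_cpu_cond() IPIs
exactly those CPUs for which the callback returns true, i.e. only where
posting the deferred work failed. A toy model of the resulting
decision, with a made-up try_set_cpu_work() standing in for
context_tracking_set_cpu_work():)

#include <stdbool.h>
#include <stdio.h>

/* pretend CPUs 0 and 2 are in userspace, 1 and 3 in the kernel */
static bool cpu_in_user[4] = { true, false, true, false };

/* only succeeds for CPUs currently in userspace */
static bool try_set_cpu_work(int cpu)
{
	return cpu_in_user[cpu];
}

/* mirrors do_sync_core_cond(): IPI only when deferring failed */
static bool sync_core_cond(int cpu)
{
	return !try_set_cpu_work(cpu);
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu%d: %s\n", cpu, sync_core_cond(cpu) ?
		       "IPI, sync_core() now" : "sync_core() on entry");
	return 0;
}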
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -11,6 +11,7 @@
 
 enum ct_work {
 	CT_WORK_KLP = 1,
+	CT_WORK_SYNC = 2,
 };
 
 /*
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -51,6 +51,10 @@ static __always_inline void context_tracking_recursion_exit(void)
 	__this_cpu_dec(context_tracking.recursion);
 }
 
+#ifndef sync_core
+static inline void sync_core(void) { }
+#endif
+
 /* CT_WORK_n, must be noinstr, non-blocking, NMI safe and deal with spurious calls */
 static noinstr void ct_exit_user_work(struct context_tracking *ct)
 {
@@ -64,6 +68,14 @@ static noinstr void ct_exit_user_work(struct context_tracking *ct)
 		arch_atomic_andnot(CT_WORK_KLP, &ct->work);
 	}
 
+	if (work & CT_WORK_SYNC) {
+		/* NMI happens here and must still do/finish CT_WORK_n */
+		sync_core();
+
+		smp_mb__before_atomic();
+		arch_atomic_andnot(CT_WORK_SYNC, &ct->work);
+	}
+
 	smp_mb__before_atomic();
 	arch_atomic_andnot(CT_SEQ_WORK, &ct->seq);
 }
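
(On the ordering: the sync must be complete before its work bit is
cleared, so that whoever observes the bit clear may assume the core has
serialized; the kernel spells that smp_mb__before_atomic() +
arch_atomic_andnot(). A portable C11 approximation of the same
clear-after-complete pattern -- note the kernel's full barrier is
stronger than the release used here, and the atomic flag merely stands
in for sync_core()'s effect:)

#include <assert.h>
#include <stdatomic.h>

#define CT_WORK_SYNC	0x2

static atomic_uint work;
static atomic_int synced;	/* stand-in for sync_core()'s effect */

static void exit_user_work(void)
{
	unsigned int w = atomic_load_explicit(&work, memory_order_acquire);

	if (w & CT_WORK_SYNC) {
		atomic_store_explicit(&synced, 1, memory_order_relaxed);
		/* release: the "sync" above happens before the clear */
		atomic_fetch_and_explicit(&work, ~CT_WORK_SYNC,
					  memory_order_release);
	}
}

int main(void)
{
	atomic_store(&work, CT_WORK_SYNC);
	exit_user_work();
	assert(atomic_load(&work) == 0 && atomic_load(&synced) == 1);
	return 0;
}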