[RFC 01/10] cpu/hotplug: Make __cpuhp_kick_ap() ready for async

From: Pingfan Liu
Date: Sun Aug 21 2022 - 22:15:55 EST


At present, during a kexec reboot, the teardown of CPUs cannot run in
parallel. As a first step towards parallelism, the initiator needs to
kick the AP threads one by one instead of waiting for each AP thread to
complete before kicking the next.

Change the prototype of __cpuhp_kick_ap() to cope with this demand.
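
To illustrate the intended use, below is a minimal sketch (not part of
this patch) of how an initiator could drive the teardown with the async
kick added here. The function name kexec_teardown_cpus_parallel is
hypothetical, and the second loop calling wait_for_ap_thread() directly
is only illustrative; the actual collection of results is introduced
later in this series.

/*
 * Illustrative only: kick every AP hotplug thread without blocking,
 * then collect the completions in a second pass.
 */
static void kexec_teardown_cpus_parallel(void)
{
	unsigned int cpu;

	/* Phase 1: kick each AP thread, do not wait (sync == false) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		cpuhp_kick_ap_work_async(cpu);
	}

	/* Phase 2: wait for each AP thread to finish its teardown work */
	for_each_online_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

		if (cpu == smp_processor_id())
			continue;
		wait_for_ap_thread(st, st->bringup);
	}
}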

Signed-off-by: Pingfan Liu <kernelfans@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Steven Price <steven.price@xxxxxxx>
Cc: "Peter Zijlstra
Cc: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Cc: Frederic Weisbecker <frederic@xxxxxxxxxx>
Cc: "Jason A. Donenfeld" <Jason@xxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
To: linux-kernel@xxxxxxxxxxxxxxx
---
kernel/cpu.c | 41 ++++++++++++++++++++++++++++++-----------
1 file changed, 30 insertions(+), 11 deletions(-)

diff --git a/kernel/cpu.c b/kernel/cpu.c
index bbad5e375d3b..338e1d426c7e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -526,7 +526,7 @@ cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
}

/* Regular hotplug invocation of the AP hotplug thread */
-static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
+static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st, bool sync)
{
if (!st->single && st->state == st->target)
return;
@@ -539,20 +539,22 @@ static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
smp_mb();
st->should_run = true;
wake_up_process(st->thread);
- wait_for_ap_thread(st, st->bringup);
+ if (sync)
+ wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
- enum cpuhp_state target)
+ enum cpuhp_state target, bool sync)
{
enum cpuhp_state prev_state;
int ret;

prev_state = cpuhp_set_state(cpu, st, target);
- __cpuhp_kick_ap(st);
- if ((ret = st->result)) {
+ __cpuhp_kick_ap(st, sync);
+ ret = st->result;
+ if (sync && ret) {
cpuhp_reset_state(cpu, st, prev_state);
- __cpuhp_kick_ap(st);
+ __cpuhp_kick_ap(st, true);
}

return ret;
@@ -583,7 +585,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
if (st->target <= CPUHP_AP_ONLINE_IDLE)
return 0;

- return cpuhp_kick_ap(cpu, st, st->target);
+ return cpuhp_kick_ap(cpu, st, st->target, true);
}

static int bringup_cpu(unsigned int cpu)
@@ -835,7 +837,7 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
st->cb_state = state;
st->single = true;

- __cpuhp_kick_ap(st);
+ __cpuhp_kick_ap(st, true);

/*
* If we failed and did a partial, do a rollback.
@@ -844,7 +846,7 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
st->rollback = true;
st->bringup = !bringup;

- __cpuhp_kick_ap(st);
+ __cpuhp_kick_ap(st, true);
}

/*
@@ -868,12 +870,29 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
cpuhp_lock_release(true);

trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
- ret = cpuhp_kick_ap(cpu, st, st->target);
+ ret = cpuhp_kick_ap(cpu, st, st->target, true);
trace_cpuhp_exit(cpu, st->state, prev_state, ret);

return ret;
}

+/* In the async case, tracing is pointless since the return value is not yet available */
+static int cpuhp_kick_ap_work_async(unsigned int cpu)
+{
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ int ret;
+
+ cpuhp_lock_acquire(false);
+ cpuhp_lock_release(false);
+
+ cpuhp_lock_acquire(true);
+ cpuhp_lock_release(true);
+
+ ret = cpuhp_kick_ap(cpu, st, st->target, false);
+
+ return ret;
+}
+
static struct smp_hotplug_thread cpuhp_threads = {
.store = &cpuhp_state.thread,
.thread_should_run = cpuhp_should_run,
@@ -1171,7 +1190,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
if (ret && st->state < prev_state) {
if (st->state == CPUHP_TEARDOWN_CPU) {
cpuhp_reset_state(cpu, st, prev_state);
- __cpuhp_kick_ap(st);
+ __cpuhp_kick_ap(st, true);
} else {
WARN(1, "DEAD callback error for CPU%d", cpu);
}
--
2.31.1