[PATCH 09/11] smp: replace smp_call_function_single_async with smp_xcall_private

From: Donghai Qiao
Date: Thu Apr 14 2022 - 22:47:53 EST


Replace smp_call_function_single_async() with smp_xcall_private() and
convert all of its callers. Since no users remain, also drop the
smp_call_function_single_async() wrapper macro from include/linux/smp.h.

Signed-off-by: Donghai Qiao <dqiao@xxxxxxxxxx>
---
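
Note (not intended for the changelog): the conversion is mechanical;
each call site keeps its csd and simply passes XCALL_TYPE_ASYNC to the
new interface. A minimal sketch of the resulting caller pattern is
below, assuming the smp_xcall_private() interface introduced earlier in
this series. my_handler(), my_csd, my_data and kick_remote_cpu() are
hypothetical placeholders for illustration only, not code from any of
the files touched here.

    #include <linux/smp.h>

    /* Placeholder handler; runs on the target CPU in IPI context. */
    static void my_handler(void *info)
    {
            /* ... act on *info ... */
    }

    static call_single_data_t my_csd;
    static int my_data;

    /* Hypothetical caller, for a valid target 'cpu'. */
    static void kick_remote_cpu(int cpu)
    {
            int ret;

            /* Bind the handler and its argument to the csd. */
            INIT_CSD(&my_csd, my_handler, &my_data);

            /* Asynchronous cross call; the caller does not wait. */
            ret = smp_xcall_private(cpu, &my_csd, XCALL_TYPE_ASYNC);
            if (ret)
                    pr_warn("xcall to CPU %d failed: %d\n", cpu, ret);
    }

As in the diffs below, callers that previously checked the return value
of smp_call_function_single_async() keep the same error handling.
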
arch/mips/kernel/process.c | 2 +-
arch/mips/kernel/smp.c | 2 +-
arch/s390/pci/pci_irq.c | 2 +-
arch/x86/kernel/cpuid.c | 2 +-
arch/x86/lib/msr-smp.c | 2 +-
block/blk-mq.c | 2 +-
drivers/clocksource/ingenic-timer.c | 2 +-
drivers/cpuidle/coupled.c | 2 +-
drivers/net/ethernet/cavium/liquidio/lio_core.c | 2 +-
include/linux/smp.h | 3 ---
kernel/debug/debug_core.c | 2 +-
kernel/sched/core.c | 2 +-
kernel/sched/fair.c | 2 +-
net/core/dev.c | 2 +-
14 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c2d5f4bfe1f3..5a63adccdcaf 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -745,7 +745,7 @@ static void raise_backtrace(cpumask_t *mask)
}

csd = &per_cpu(backtrace_csd, cpu);
- smp_call_function_single_async(cpu, csd);
+ smp_xcall_private(cpu, csd, XCALL_TYPE_ASYNC);
}
}

diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d5bb38bfaef5..6202e9c1ca0c 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -701,7 +701,7 @@ void tick_broadcast(const struct cpumask *mask)

for_each_cpu(cpu, mask) {
csd = &per_cpu(tick_broadcast_csd, cpu);
- smp_call_function_single_async(cpu, csd);
+ smp_xcall_private(cpu, csd, XCALL_TYPE_ASYNC);
}
}

diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index 325c42c6ddb4..37724c600d51 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -212,7 +212,7 @@ static void zpci_handle_fallback_irq(void)
continue;

INIT_CSD(&cpu_data->csd, zpci_handle_remote_irq, &cpu_data->scheduled);
- smp_call_function_single_async(cpu, &cpu_data->csd);
+ smp_xcall_private(cpu, &cpu_data->csd, XCALL_TYPE_ASYNC);
}
}

diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 6f7b8cc1bc9f..3e75dfe07314 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -81,7 +81,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
cmd.regs.eax = pos;
cmd.regs.ecx = pos >> 32;

- err = smp_call_function_single_async(cpu, &csd);
+ err = smp_xcall_private(cpu, &csd, XCALL_TYPE_ASYNC);
if (err)
break;
wait_for_completion(&cmd.done);
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index 68170a28270f..8c6b85bdc2d3 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -178,7 +178,7 @@ int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
init_completion(&rv.done);
rv.msr.msr_no = msr_no;

- err = smp_call_function_single_async(cpu, &csd);
+ err = smp_xcall_private(cpu, &csd, XCALL_TYPE_ASYNC);
if (!err) {
wait_for_completion(&rv.done);
err = rv.msr.err;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ed3ed86f7dd2..548960494d79 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1062,7 +1062,7 @@ static void blk_mq_complete_send_ipi(struct request *rq)
list = &per_cpu(blk_cpu_done, cpu);
if (llist_add(&rq->ipi_list, list)) {
INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
- smp_call_function_single_async(cpu, &rq->csd);
+ smp_xcall_private(cpu, &rq->csd, XCALL_TYPE_ASYNC);
}
}

diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c
index 24ed0f1f089b..30e437679ca9 100644
--- a/drivers/clocksource/ingenic-timer.c
+++ b/drivers/clocksource/ingenic-timer.c
@@ -121,7 +121,7 @@ static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
csd->info = (void *) &timer->cevt;
csd->func = ingenic_per_cpu_event_handler;
- smp_call_function_single_async(timer->cpu, csd);
+ smp_xcall_private(timer->cpu, csd, XCALL_TYPE_ASYNC);
}

return IRQ_HANDLED;
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 74068742cef3..bec03fdc6edf 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -334,7 +334,7 @@ static void cpuidle_coupled_poke(int cpu)
call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
- smp_call_function_single_async(cpu, csd);
+ smp_xcall_private(cpu, csd, XCALL_TYPE_ASYNC);
}

/**
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 73cb03266549..ae97533c7f8b 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -729,7 +729,7 @@ static void liquidio_napi_drv_callback(void *arg)
napi_schedule_irqoff(&droq->napi);
} else {
INIT_CSD(&droq->csd, napi_schedule_wrapper, &droq->napi);
- smp_call_function_single_async(droq->cpu_id, &droq->csd);
+ smp_xcall_private(droq->cpu_id, &droq->csd, XCALL_TYPE_ASYNC);
}
}

diff --git a/include/linux/smp.h b/include/linux/smp.h
index 673192e2192e..de9b850722b3 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -206,9 +206,6 @@ extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);

-#define smp_call_function_single_async(cpu, csd) \
- smp_xcall_private(cpu, csd, XCALL_TYPE_ASYNC)
-
/*
* Cpus stopping functions in panic. All have default weak definitions.
* Architecture-dependent code may override them.
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index da06a5553835..cb69113251c9 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -264,7 +264,7 @@ void __weak kgdb_roundup_cpus(void)
continue;
kgdb_info[cpu].rounding_up = true;

- ret = smp_call_function_single_async(cpu, csd);
+ ret = smp_xcall_private(cpu, csd, XCALL_TYPE_ASYNC);
if (ret)
kgdb_info[cpu].rounding_up = false;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 417355fbe32d..610e02b4c598 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -836,7 +836,7 @@ void hrtick_start(struct rq *rq, u64 delay)
if (rq == this_rq())
__hrtick_restart(rq);
else
- smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
+ smp_xcall_private(cpu_of(rq), &rq->hrtick_csd, XCALL_TYPE_ASYNC);
}

#else
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d4bd299d67ab..1b060a64cb93 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10461,7 +10461,7 @@ static void kick_ilb(unsigned int flags)
* is idle. And the softirq performing nohz idle load balance
* will be run before returning from the IPI.
*/
- smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
+ smp_xcall_private(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd, XCALL_TYPE_ASYNC);
}

/*
diff --git a/net/core/dev.c b/net/core/dev.c
index 8c6c08446556..0c8e18f6f53c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5788,7 +5788,7 @@ static void net_rps_send_ipi(struct softnet_data *remsd)
struct softnet_data *next = remsd->rps_ipi_next;

if (cpu_online(remsd->cpu))
- smp_call_function_single_async(remsd->cpu, &remsd->csd);
+ smp_xcall_private(remsd->cpu, &remsd->csd, XCALL_TYPE_ASYNC);
remsd = next;
}
#endif
--
2.27.0