[PATCH 07/11] smp: change smp_call_function_any() to smp_xcall_any()

From: Donghai Qiao
Date: Thu Apr 14 2022 - 22:47:43 EST


Rename smp_call_function_any() to smp_xcall_any() and make the
changes this rename requires.

Replace all invocations of smp_call_function_any() with
smp_xcall_any().

In principle, kernel consumers could call smp_xcall() directly
instead of smp_call_function_any(): the CPU-selection logic handled
by smp_call_function_any() could be moved out of the core code and
each consumer could choose its preferred CPU itself. However, quite
a few cross call consumers need to run their function on just one
CPU of a given CPU set, so there is some advantage to adding
smp_xcall_any() to the interface.
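
For illustration only (this sketch is not part of the patch;
xcall_probe_one() and xcall_probe_on_any() are hypothetical names),
a typical conversion just replaces the trailing wait argument with
an xcall type:

	/* xcall_probe_one() is a hypothetical smp_call_func_t callback. */
	static void xcall_probe_one(void *info);

	/* Run xcall_probe_one() synchronously on one CPU of @mask. */
	static int xcall_probe_on_any(const struct cpumask *mask, void *info)
	{
		/* old: return smp_call_function_any(mask, xcall_probe_one, info, 1); */
		return smp_xcall_any(mask, xcall_probe_one, info, XCALL_TYPE_SYNC);
	}

Consumers that prefer to pick the target CPU themselves can instead
call smp_xcall() directly on a CPU chosen from the mask.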

Signed-off-by: Donghai Qiao <dqiao@xxxxxxxxxx>
---
 arch/arm/kernel/perf_event_v7.c           |  6 +-
 arch/arm64/kernel/perf_event.c            |  6 +-
 arch/x86/kernel/cpu/resctrl/ctrlmondata.c |  2 +-
 drivers/cpufreq/acpi-cpufreq.c            |  4 +-
 drivers/cpufreq/powernv-cpufreq.c         | 12 ++--
 drivers/perf/arm_spe_pmu.c                |  2 +-
 include/linux/smp.h                       | 12 +---
 kernel/smp.c                              | 78 ++++++++++-------------
 8 files changed, 53 insertions(+), 69 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index eb2190477da1..f07e9221019a 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1192,9 +1192,9 @@ static void armv7_read_num_pmnc_events(void *info)

 static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
 {
-	return smp_call_function_any(&arm_pmu->supported_cpus,
-				     armv7_read_num_pmnc_events,
-				     &arm_pmu->num_events, 1);
+	return smp_xcall_any(&arm_pmu->supported_cpus,
+			     armv7_read_num_pmnc_events,
+			     &arm_pmu->num_events, XCALL_TYPE_SYNC);
 }
 
 static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cb69ff1e6138..7e847044492b 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1186,9 +1186,9 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
 	};
 	int ret;
 
-	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
-				    __armv8pmu_probe_pmu,
-				    &probe, 1);
+	ret = smp_xcall_any(&cpu_pmu->supported_cpus,
+			    __armv8pmu_probe_pmu,
+			    &probe, XCALL_TYPE_SYNC);
 	if (ret)
 		return ret;

diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 87666275eed9..7e45da5f3c8b 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -512,7 +512,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
 	rr->val = 0;
 	rr->first = first;
 
-	smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
+	(void) smp_xcall_any(&d->cpu_mask, mon_event_count, rr, XCALL_TYPE_SYNC);
 }
 
 int rdtgroup_mondata_show(struct seq_file *m, void *arg)
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 3d514b82d055..fd595c1cdd2f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -312,8 +312,8 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
 	};
 	int err;
 
-	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
-	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
+	err = smp_xcall_any(mask, do_drv_read, &cmd, XCALL_TYPE_SYNC);
+	WARN_ON_ONCE(err);	/* smp_xcall_any() was buggy? */
 	return cmd.val;
 }

diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index fddbd1ea1635..aa7a02e1c647 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -507,8 +507,8 @@ static unsigned int powernv_cpufreq_get(unsigned int cpu)
 {
 	struct powernv_smp_call_data freq_data;
 
-	smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
-			&freq_data, 1);
+	(void) smp_xcall_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
+			&freq_data, XCALL_TYPE_SYNC);
 
 	return freq_data.freq;
 }
@@ -820,8 +820,10 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
 	 * Use smp_call_function to send IPI and execute the
 	 * mtspr on target CPU. We could do that without IPI
 	 * if current CPU is within policy->cpus (core)
+	 *
+	 * Shouldn't this return the value of smp_xcall_any()?
 	 */
-	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+	(void) smp_xcall_any(policy->cpus, set_pstate, &freq_data, XCALL_TYPE_SYNC);
 	return 0;
 }

@@ -921,8 +923,8 @@ static void powernv_cpufreq_work_fn(struct work_struct *work)

 	cpus_read_lock();
 	cpumask_and(&mask, &chip->mask, cpu_online_mask);
-	smp_call_function_any(&mask,
-			      powernv_cpufreq_throttle_check, NULL, 0);
+	(void) smp_xcall_any(&mask, powernv_cpufreq_throttle_check,
+			     NULL, XCALL_TYPE_ASYNC);
 
 	if (!chip->restore)
 		goto out;
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index d44bcc29d99c..f81fa4a496a6 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1108,7 +1108,7 @@ static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
 	cpumask_t *mask = &spe_pmu->supported_cpus;
 
 	/* Make sure we probe the hardware on a relevant CPU */
-	ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1);
+	ret = smp_xcall_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, XCALL_TYPE_SYNC);
 	if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
 		return -ENXIO;

diff --git a/include/linux/smp.h b/include/linux/smp.h
index 8a234e707f10..3ddd4c6107e1 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -161,6 +161,8 @@ do { \
 	*(_csd) = CSD_INIT((_func), (_info));	\
 } while (0)
 
+extern int smp_xcall_any(const struct cpumask *mask, smp_call_func_t func,
+			 void *info, unsigned int flags);
 
 /*
  * smp_xcall Interface.
@@ -304,9 +306,6 @@ void smp_call_function(smp_call_func_t func, void *info, int wait);
 void smp_call_function_many(const struct cpumask *mask,
 			    smp_call_func_t func, void *info, bool wait);
 
-int smp_call_function_any(const struct cpumask *mask,
-			  smp_call_func_t func, void *info, int wait);
-
 void kick_all_cpus_sync(void);
 void wake_up_all_idle_cpus(void);

@@ -355,13 +354,6 @@ static inline void smp_send_reschedule(int cpu) { }
 			(up_smp_call_function(func, info))
 static inline void call_function_init(void) { }
 
-static inline int
-smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
-		      void *info, int wait)
-{
-	return smp_call_function_single(0, func, info, wait);
-}
-
 static inline void kick_all_cpus_sync(void) { }
 static inline void wake_up_all_idle_cpus(void) { }

diff --git a/kernel/smp.c b/kernel/smp.c
index aef913b54f81..94df3b3a38cf 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -626,49 +626,6 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
-/*
- * smp_call_function_any - Run a function on any of the given cpus
- * @mask: The mask of cpus it can run on.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait until function has completed.
- *
- * Returns 0 on success, else a negative status code (if no cpus were online).
- *
- * Selection preference:
- *	1) current cpu if in @mask
- *	2) any cpu of current node if in @mask
- *	3) any other online cpu in @mask
- */
-int smp_call_function_any(const struct cpumask *mask,
-			  smp_call_func_t func, void *info, int wait)
-{
-	unsigned int cpu;
-	const struct cpumask *nodemask;
-	int ret;
-
-	/* Try for same CPU (cheapest) */
-	cpu = get_cpu();
-	if (cpumask_test_cpu(cpu, mask))
-		goto call;
-
-	/* Try for same node. */
-	nodemask = cpumask_of_node(cpu_to_node(cpu));
-	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
-	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
-		if (cpu_online(cpu))
-			goto call;
-	}
-
-	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
-	cpu = cpumask_any_and(mask, cpu_online_mask);
-call:
-	ret = smp_call_function_single(cpu, func, info, wait);
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL_GPL(smp_call_function_any);
-
 static void smp_call_function_many_cond(const struct cpumask *mask,
 					smp_call_func_t func, void *info,
 					bool wait,
@@ -1276,6 +1233,39 @@ EXPORT_SYMBOL(smp_xcall_private);
 int smp_xcall_any(const struct cpumask *mask, smp_call_func_t func,
 		  void *info, unsigned int flags)
 {
-	return 0;
+	int cpu, ret;
+	const struct cpumask *nodemask;
+
+	if (mask == NULL || func == NULL ||
+	    (flags != XCALL_TYPE_SYNC && flags != XCALL_TYPE_ASYNC))
+		return -EINVAL;
+
+	/* Try for same CPU (cheapest) */
+	preempt_disable();
+	cpu = smp_processor_id();
+
+	if (cpumask_test_cpu(cpu, mask))
+		goto call;
+
+	/* Try for same node. */
+	nodemask = cpumask_of_node(cpu_to_node(cpu));
+	for (cpu = cpumask_first_and(nodemask, mask); (unsigned int)cpu < nr_cpu_ids;
+	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
+		if (cpu_online(cpu))
+			goto call;
+	}
+
+	/* Any online CPU in @mask will do. */
+	cpu = cpumask_any_and(mask, cpu_online_mask);
+	if ((unsigned int)cpu >= nr_cpu_ids) {
+		preempt_enable();
+		return -ENXIO;
+	}
+
+call:
+	ret = smp_xcall(cpu, func, info, flags);
+
+	preempt_enable();
+	return ret;
 }
 EXPORT_SYMBOL(smp_xcall_any);
--
2.27.0