[PATCH] smp: Function call tracepoints

From: Wojciech Kudla
Date: Thu May 21 2020 - 04:38:38 EST


Following up on the feedback received on the first approach:
https://lkml.kernel.org/r/20200520135156.GO317569@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

This patch introduces generic SMP function call trace points:
- smp:function_call_issue (captures target cpumask and function pointer)
- smp:function_call_execute (captures executing cpu and function pointer)

Events resolve function pointers to symbol names when printed (via the %pS printk format specifier).

Signed-off-by: Wojciech Kudla <wk.kernel@xxxxxxxxx>
---
include/trace/events/smp.h | 67 ++++++++++++++++++++++++++++++++++++++
kernel/smp.c | 18 +++++++++-
2 files changed, 84 insertions(+), 1 deletion(-)
create mode 100644 include/trace/events/smp.h

diff --git a/include/trace/events/smp.h b/include/trace/events/smp.h
new file mode 100644
index 000000000000..ecbd2bb7613b
--- /dev/null
+++ b/include/trace/events/smp.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM smp
+
+#if !defined(_TRACE_SMP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SMP_H
+
+#include <linux/tracepoint.h>
+
+/**
+ * function_call_issue - traced when an SMP function-call IPI is issued
+ *
+ * @mask: mask of recipient CPUs for the SMP function call
+ * @func: pointer to the function to be executed on the target CPUs
+ *
+ */
+TRACE_EVENT(function_call_issue,
+
+ TP_PROTO(const struct cpumask *mask, smp_call_func_t func),
+
+ TP_ARGS(mask, func),
+
+ TP_STRUCT__entry(
+ __bitmask(target_cpus, nr_cpumask_bits)
+ __field(smp_call_func_t, func)
+ ),
+
+ TP_fast_assign(
+ __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits);
+ __entry->func = func;
+ ),
+
+ TP_printk("target_mask=%s, function=%pS",
+ __get_bitmask(target_cpus), __entry->func)
+);
+
+
+/**
+ * function_call_execute - traced when an SMP function call runs on the target cpu
+ *
+ * @cpu: cpu the SMP function call is being executed on
+ * @func: pointer to the function being executed
+ *
+ */
+TRACE_EVENT(function_call_execute,
+
+ TP_PROTO(int cpu, smp_call_func_t func),
+
+ TP_ARGS(cpu, func),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(smp_call_func_t, func)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->func = func;
+ ),
+
+ TP_printk("cpu=%d, function=%pS", __entry->cpu, __entry->func)
+);
+
+#endif /* _TRACE_SMP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/smp.c b/kernel/smp.c
index 7dbcb402c2fc..acb20bd118e0 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -21,6 +21,9 @@
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

+#define CREATE_TRACE_POINTS
+#include <trace/events/smp.h>
+
#include "smpboot.h"

enum {
@@ -176,8 +179,12 @@ static int generic_exec_single(int cpu, call_single_data_t *csd,
* locking and barrier primitives. Generic code isn't really
* equipped to do the right thing...
*/
- if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+ if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) {
+ if (trace_function_call_issue_enabled())
+ trace_function_call_issue(cpumask_of(cpu), func);
+
arch_send_call_function_single_ipi(cpu);
+ }

return 0;
}
@@ -241,10 +248,17 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)

/* Do we wait until *after* callback? */
if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
+ if (trace_function_call_execute_enabled())
+ trace_function_call_execute(smp_processor_id(), func);
+
func(info);
csd_unlock(csd);
} else {
csd_unlock(csd);
+
+ if (trace_function_call_execute_enabled())
+ trace_function_call_execute(smp_processor_id(), func);
+
func(info);
}
}
@@ -474,6 +488,8 @@ void smp_call_function_many(const struct cpumask *mask,
__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
}

+ trace_function_call_issue(cfd->cpumask_ipi, func);
+
/* Send a message to all CPUs in the map */
arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

--
2.17.1