[PATCH v6 bpf-next 2/3] bpf: introduce helper bpf_get_branch_snapshot

From: Song Liu
Date: Tue Sep 07 2021 - 16:28:58 EST


Introduce bpf_get_branch_snapshot(), which allows a tracing program to get
branch trace from hardware (e.g. Intel LBR). To use the feature, the
user needs to create a perf_event with proper branch_record filtering
on each cpu, and then call bpf_get_branch_snapshot in the BPF program.
On Intel CPUs, the VLBR event (raw event 0x1b00) can be used for this.
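
A rough sketch of the intended usage (not part of this patch; the raw
event config comes from the paragraph above, while the attach point,
buffer size and branch filter below are only placeholders):

  /* user space: open the VLBR event on each CPU; error handling omitted */
  struct perf_event_attr attr = {
          .type = PERF_TYPE_RAW,
          .config = 0x1b00,               /* Intel VLBR */
          .size = sizeof(attr),
          .sample_type = PERF_SAMPLE_BRANCH_STACK,
          .branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
                                PERF_SAMPLE_BRANCH_ANY_RETURN,
  };
  int fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, cpu,
                   -1 /* group_fd */, 0);

  /* BPF side: call the helper as early as possible so the snapshot
   * still contains branches from before the trigger point
   */
  struct perf_branch_entry entries[32] = {};

  SEC("fexit/kernel_clone")
  int BPF_PROG(snapshot_branches)
  {
          long size;

          size = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
          /* size is the number of bytes written, or a negative error */
          return 0;
  }

Since the helper is gpl_only, the BPF program must declare a
GPL-compatible license.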

Acked-by: John Fastabend <john.fastabend@xxxxxxxxx>
Acked-by: Andrii Nakryiko <andrii@xxxxxxxxxx>
Signed-off-by: Song Liu <songliubraving@xxxxxx>
---
include/uapi/linux/bpf.h | 22 ++++++++++++++++++++++
kernel/bpf/trampoline.c | 3 ++-
kernel/trace/bpf_trace.c | 30 ++++++++++++++++++++++++++++++
tools/include/uapi/linux/bpf.h | 22 ++++++++++++++++++++++
4 files changed, 76 insertions(+), 1 deletion(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 791f31dd0abee..b695ef151001e 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4877,6 +4877,27 @@ union bpf_attr {
* Get the struct pt_regs associated with **task**.
* Return
* A pointer to struct pt_regs.
+ *
+ * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
+ * Description
+ * Get branch trace from hardware engines like Intel LBR. The
+ * hardware engine is stopped shortly after the helper is
+ * called. Therefore, the user needs to filter branch entries
+ * based on the actual use case. To capture branch trace
+ * before the trigger point of the BPF program, the helper
+ * should be called at the beginning of the BPF program.
+ *
+ * The data is stored as struct perf_branch_entry into output
+ * buffer *entries*. *size* is the size of *entries* in bytes.
+ * *flags* is reserved for now and must be zero.
+ *
+ * Return
+ * On success, number of bytes written to *entries*. On error, a
+ * negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-ENOENT** if architecture does not support branch records.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5055,6 +5076,7 @@ union bpf_attr {
FN(get_func_ip), \
FN(get_attach_cookie), \
FN(task_pt_regs), \
+ FN(get_branch_snapshot), \
/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index fe1e857324e66..39eaaff81953d 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -10,6 +10,7 @@
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
+#include <linux/static_call.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
@@ -526,7 +527,7 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
}

#define NO_START_TIME 1
-static u64 notrace bpf_prog_start_time(void)
+static __always_inline u64 notrace bpf_prog_start_time(void)
{
u64 start = NO_START_TIME;

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 8e2eb950aa829..067e88c3d2ee5 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1017,6 +1017,34 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
.arg1_type = ARG_PTR_TO_CTX,
};

+BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
+{
+#ifndef CONFIG_X86
+ return -ENOENT;
+#else
+ static const u32 br_entry_size = sizeof(struct perf_branch_entry);
+ u32 entry_cnt = size / br_entry_size;
+
+ entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
+
+ if (unlikely(flags))
+ return -EINVAL;
+
+ if (!entry_cnt)
+ return -ENOENT;
+
+ return entry_cnt * br_entry_size;
+#endif
+}
+
+static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
+ .func = bpf_get_branch_snapshot,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+};
+
static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -1132,6 +1160,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_snprintf_proto;
case BPF_FUNC_get_func_ip:
return &bpf_get_func_ip_proto_tracing;
+ case BPF_FUNC_get_branch_snapshot:
+ return &bpf_get_branch_snapshot_proto;
default:
return bpf_base_func_proto(func_id);
}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 791f31dd0abee..b695ef151001e 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4877,6 +4877,27 @@ union bpf_attr {
* Get the struct pt_regs associated with **task**.
* Return
* A pointer to struct pt_regs.
+ *
+ * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
+ * Description
+ * Get branch trace from hardware engines like Intel LBR. The
+ * hardware engine is stopped shortly after the helper is
+ * called. Therefore, the user needs to filter branch entries
+ * based on the actual use case. To capture branch trace
+ * before the trigger point of the BPF program, the helper
+ * should be called at the beginning of the BPF program.
+ *
+ * The data is stored as struct perf_branch_entry into output
+ * buffer *entries*. *size* is the size of *entries* in bytes.
+ * *flags* is reserved for now and must be zero.
+ *
+ * Return
+ * On success, number of bytes written to *entries*. On error, a
+ * negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-ENOENT** if architecture does not support branch records.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5055,6 +5076,7 @@ union bpf_attr {
FN(get_func_ip), \
FN(get_attach_cookie), \
FN(task_pt_regs), \
+ FN(get_branch_snapshot), \
/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
--
2.30.2