[PATCH v3 bpf-next 1/3] bpf: Add bpf_perf_prog_read_branches() helper

From: Daniel Xu
Date: Thu Jan 23 2020 - 16:23:45 EST


Branch records are a CPU feature that can be configured to record
certain branches taken during code execution. This data is particularly
interesting for profile-guided optimizations. perf has had branch
record support for a while, but the data collection can be a bit
coarse-grained.
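
For context, branch records only exist if the perf event was opened
with branch sampling enabled. A rough userspace sketch of opening such
an event (error handling omitted; the event type and sample period here
are placeholders, not a recommendation):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_branch_event(pid_t pid, int cpu)
    {
            struct perf_event_attr attr = {
                    .type = PERF_TYPE_HARDWARE,
                    .config = PERF_COUNT_HW_CPU_CYCLES,
                    .size = sizeof(attr),
                    .sample_period = 10000,
                    /* ask the PMU to record branch stacks */
                    .sample_type = PERF_SAMPLE_BRANCH_STACK,
                    .branch_sample_type = PERF_SAMPLE_BRANCH_ANY,
            };

            return syscall(__NR_perf_event_open, &attr, pid, cpu,
                           -1 /* group_fd */, 0 /* flags */);
    }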

We (Facebook) have seen in experiments that associating metadata with
branch records can improve results (after postprocessing). We generally
use bpf_probe_read_*() to pull that metadata out of userspace, which is
why bpf support for branch records is useful.

Aside from this particular use case, having branch data available to
bpf progs can be useful for getting stack traces out of userspace
applications that omit frame pointers.
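
For illustration, a prog attached to such an event could consume the
records roughly as follows (a minimal sketch; the program name, section
name, and fixed 16-entry buffer are arbitrary choices, not part of this
patch):

    #include <linux/bpf.h>
    #include <linux/perf_event.h>
    #include <linux/bpf_perf_event.h>
    #include <bpf/bpf_helpers.h>

    SEC("perf_event")
    int read_branches(struct bpf_perf_event_data *ctx)
    {
            struct perf_branch_entry entries[16] = {};
            int written, nr;

            /* unused tail of entries[] is zero-filled by the helper */
            written = bpf_perf_prog_read_branches(ctx, entries,
                                                  sizeof(entries));
            if (written < 0)
                    return 0;

            nr = written / sizeof(entries[0]);
            /* entries[0..nr-1].from/.to now hold branch source/target */
            return 0;
    }

    /* helper is gpl_only, so the prog must carry a GPL license */
    char _license[] SEC("license") = "GPL";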

Signed-off-by: Daniel Xu <dxu@xxxxxxxxx>
---
include/uapi/linux/bpf.h | 15 ++++++++++++++-
kernel/trace/bpf_trace.c | 31 +++++++++++++++++++++++++++++++
2 files changed, 45 insertions(+), 1 deletion(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f1d74a2bd234..50c580c8a201 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -2892,6 +2892,18 @@ union bpf_attr {
* Obtain the 64bit jiffies
* Return
* The 64 bit jiffies
+ *
+ * int bpf_perf_prog_read_branches(struct bpf_perf_event_data *ctx, void *buf, u32 buf_size)
+ * Description
+ * For an eBPF program attached to a perf event, retrieve the
+ * branch records (struct perf_branch_entry) associated with *ctx*
+ * and store them in the buffer pointed to by *buf*, up to
+ * *buf_size* bytes.
+ *
+ * Any unused part of *buf* will be filled with zeros.
+ * Return
+ * On success, number of bytes written to *buf*. On error, a
+ * negative value.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3012,7 +3024,8 @@ union bpf_attr {
FN(probe_read_kernel_str), \
FN(tcp_send_ack), \
FN(send_signal_thread), \
- FN(jiffies64),
+ FN(jiffies64), \
+ FN(perf_prog_read_branches),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 19e793aa441a..24c51272a1f7 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1028,6 +1028,35 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
.arg3_type = ARG_CONST_SIZE,
};

+BPF_CALL_3(bpf_perf_prog_read_branches, struct bpf_perf_event_data_kern *, ctx,
+ void *, buf, u32, size)
+{
+ struct perf_branch_stack *br_stack = ctx->data->br_stack;
+ u32 to_copy = 0, to_clear = size;
+ int err = -EINVAL;
+
+ if (unlikely(!br_stack))
+ goto clear;
+
+ to_copy = min_t(u32, br_stack->nr * sizeof(struct perf_branch_entry), size);
+ to_clear -= to_copy;
+
+ memcpy(buf, br_stack->entries, to_copy);
+ err = to_copy;
+clear:
+ memset(buf + to_copy, 0, to_clear);
+ return err;
+}
+
+static const struct bpf_func_proto bpf_perf_prog_read_branches_proto = {
+ .func = bpf_perf_prog_read_branches,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+};
+
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -1040,6 +1069,8 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_stack_proto_tp;
case BPF_FUNC_perf_prog_read_value:
return &bpf_perf_prog_read_value_proto;
+ case BPF_FUNC_perf_prog_read_branches:
+ return &bpf_perf_prog_read_branches_proto;
default:
return tracing_func_proto(func_id, prog);
}
--
2.21.1