Re: [PATCH v3 08/10] perf: cache perf_event_groups_first for cgroups

From: Peter Zijlstra
Date: Thu Nov 14 2019 - 05:26:06 EST


On Wed, Nov 13, 2019 at 04:30:40PM -0800, Ian Rogers wrote:
> Add a per-CPU cache of the pinned and flexible perf_event_groups_first
> value for a cgroup, avoiding an O(log(#perf events)) search during
> sched_in.
>
> Based-on-work-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
> Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>
> ---
> include/linux/perf_event.h | 6 +++
> kernel/events/core.c | 79 +++++++++++++++++++++++++++-----------
> 2 files changed, 62 insertions(+), 23 deletions(-)
>
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index b3580afbf358..cfd0b320418c 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -877,6 +877,12 @@ struct perf_cgroup_info {
> struct perf_cgroup {
> struct cgroup_subsys_state css;
> struct perf_cgroup_info __percpu *info;
> + /* A cache of the first event with the perf_cpu_context's
> + * perf_event_context for the first event in pinned_groups or
> + * flexible_groups. Avoids an rbtree search during sched_in.
> + */

Broken comment style.
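
That is, multi-line comments want the opening /* on a line of its own,
something like:

	/*
	 * Cache of the first cgroup event on this CPU in the
	 * pinned_groups/flexible_groups trees; avoids an rbtree
	 * search during sched_in.
	 */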

> + struct perf_event * __percpu *pinned_event;
> + struct perf_event * __percpu *flexible_event;

Where is the actual storage allocated? There is a conspicuous lack of
alloc_percpu() in this patch, see for example perf_cgroup_css_alloc()
which fills out the above @info field.
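
That is, I'd expect perf_cgroup_css_alloc() to grow something like
(untested):

	jc->pinned_event = alloc_percpu(struct perf_event *);
	jc->flexible_event = alloc_percpu(struct perf_event *);

with the error handling to match, and free_percpu() calls in
perf_cgroup_css_free().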

> };
>
> /*
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index 11594d8bbb2e..9f0febf51d97 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -1638,6 +1638,25 @@ perf_event_groups_insert(struct perf_event_groups *groups,
>
> rb_link_node(&event->group_node, parent, node);
> rb_insert_color(&event->group_node, &groups->tree);
> +#ifdef CONFIG_CGROUP_PERF
> + if (is_cgroup_event(event)) {
> + struct perf_event **cgrp_event;
> +
> + if (event->attr.pinned)
> + cgrp_event = per_cpu_ptr(event->cgrp->pinned_event,
> + event->cpu);
> + else
> + cgrp_event = per_cpu_ptr(event->cgrp->flexible_event,
> + event->cpu);

Codingstyle requires { } here (or just bust the line length a little).
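
That is:

	if (event->attr.pinned) {
		cgrp_event = per_cpu_ptr(event->cgrp->pinned_event,
					 event->cpu);
	} else {
		cgrp_event = per_cpu_ptr(event->cgrp->flexible_event,
					 event->cpu);
	}

or put each per_cpu_ptr() on a single line and live with the longer
lines.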

> + /*
> + * Cgroup events for the same cgroup on the same CPU will
> + * always be inserted at the right because of bigger
> + * @groups->index. Only need to set *cgrp_event when it's NULL.
> + */
> + if (!*cgrp_event)
> + *cgrp_event = event;

I would feel much better if you had some actual leftmost logic in the
insertion iteration.
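
That is, something like (untested):

	if (!*cgrp_event || perf_event_groups_less(event, *cgrp_event))
		*cgrp_event = event;

which keeps the cache correct without leaning on the @groups->index
argument in the comment above.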

> + }
> +#endif
> }
>
> /*
> @@ -1652,6 +1671,9 @@ add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
> perf_event_groups_insert(groups, event);
> }
>
> +static struct perf_event *
> +perf_event_groups_next(struct perf_event *event);
> +
> /*
> * Delete a group from a tree.
> */
> @@ -1662,6 +1684,22 @@ perf_event_groups_delete(struct perf_event_groups *groups,
> WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
> RB_EMPTY_ROOT(&groups->tree));
>
> +#ifdef CONFIG_CGROUP_PERF
> + if (is_cgroup_event(event)) {
> + struct perf_event **cgrp_event;
> +
> + if (event->attr.pinned)
> + cgrp_event = per_cpu_ptr(event->cgrp->pinned_event,
> + event->cpu);
> + else
> + cgrp_event = per_cpu_ptr(event->cgrp->flexible_event,
> + event->cpu);

Codingstyle again.

> +
> + if (*cgrp_event == event)
> + *cgrp_event = perf_event_groups_next(event);
> + }
> +#endif
> +
> rb_erase(&event->group_node, &groups->tree);
> init_event_group(event);
> }
> @@ -1679,20 +1717,14 @@ del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
> }
>
> /*
> - * Get the leftmost event in the cpu/cgroup subtree.
> + * Get the leftmost event in the cpu subtree without a cgroup (ie task or
> + * system-wide).
> */
> static struct perf_event *
> -perf_event_groups_first(struct perf_event_groups *groups, int cpu,
> - struct cgroup *cgrp)
> +perf_event_groups_first_no_cgroup(struct perf_event_groups *groups, int cpu)

I'm going to impose a function name length limit soon :/ That's insane
(again).

> {
> struct perf_event *node_event = NULL, *match = NULL;
> struct rb_node *node = groups->tree.rb_node;
> -#ifdef CONFIG_CGROUP_PERF
> - int node_cgrp_id, cgrp_id = 0;
> -
> - if (cgrp)
> - cgrp_id = cgrp->id;
> -#endif
>
> while (node) {
> node_event = container_of(node, struct perf_event, group_node);
> @@ -1706,18 +1738,10 @@ perf_event_groups_first(struct perf_event_groups *groups, int cpu,
> continue;
> }
> #ifdef CONFIG_CGROUP_PERF
> - node_cgrp_id = 0;
> - if (node_event->cgrp && node_event->cgrp->css.cgroup)
> - node_cgrp_id = node_event->cgrp->css.cgroup->id;
> -
> - if (cgrp_id < node_cgrp_id) {
> + if (node_event->cgrp) {
> node = node->rb_left;
> continue;
> }
> - if (cgrp_id > node_cgrp_id) {
> - node = node->rb_right;
> - continue;
> - }
> #endif
> match = node_event;
> node = node->rb_left;

Also, just leave that in and let callers pass cgrp = NULL. Then you
can forgo that monstrous name.
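
That is:

	event = perf_event_groups_first(groups, cpu, NULL);

reads just fine for the !cgroup case.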