Re: [PATCH 1/3] perf record: Fix per-thread option.

From: Alexey Bayduraev
Date: Tue Apr 12 2022 - 21:42:14 EST


On 12.04.2022 09:21, Ian Rogers wrote:

> From: Alexey Bayduraev <alexey.bayduraev@xxxxxxxxx>

Thanks,

The tag
Signed-off-by: Alexey Bayduraev <alexey.bayduraev@xxxxxxxxx>
can be added to this patch.

Regards,
Alexey

>
> Per-thread mode doesn't have specific CPUs for events; add checks for
> this case.
>
> Minor fix to a pr_debug by Ian Rogers <irogers@xxxxxxxxxx> to avoid an
> out-of-bounds array access.
>
> Reported-by: Ian Rogers <irogers@xxxxxxxxxx>
> Fixes: 7954f71689f9 ("perf record: Introduce thread affinity and mmap masks")
> Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>
> ---
> tools/perf/builtin-record.c | 22 +++++++++++++++++-----
> 1 file changed, 17 insertions(+), 5 deletions(-)
>
> diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
> index ba74fab02e62..069825c48d40 100644
> --- a/tools/perf/builtin-record.c
> +++ b/tools/perf/builtin-record.c
> @@ -989,8 +989,11 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
> struct mmap *overwrite_mmap = evlist->overwrite_mmap;
> struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
>
> - thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
> - thread_data->mask->maps.nbits);
> + if (cpu_map__is_dummy(cpus))
> + thread_data->nr_mmaps = nr_mmaps;
> + else
> + thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
> + thread_data->mask->maps.nbits);
> if (mmap) {
> thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
> if (!thread_data->maps)
> @@ -1007,16 +1010,17 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
> thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
>
> for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
> - if (test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
> + if (cpu_map__is_dummy(cpus) ||
> + test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
> if (thread_data->maps) {
> thread_data->maps[tm] = &mmap[m];
> pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
> - thread_data, cpus->map[m].cpu, tm, m);
> + thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
> }
> if (thread_data->overwrite_maps) {
> thread_data->overwrite_maps[tm] = &overwrite_mmap[m];
> pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n",
> - thread_data, cpus->map[m].cpu, tm, m);
> + thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
> }
> tm++;
> }
> @@ -3329,6 +3333,9 @@ static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_c
> {
> int c;
>
> + if (cpu_map__is_dummy(cpus))
> + return;
> +
> for (c = 0; c < cpus->nr; c++)
> set_bit(cpus->map[c].cpu, mask->bits);
> }
> @@ -3680,6 +3687,11 @@ static int record__init_thread_masks(struct record *rec)
> if (!record__threads_enabled(rec))
> return record__init_thread_default_masks(rec, cpus);
>
> + if (cpu_map__is_dummy(cpus)) {
> + pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
> + return -EINVAL;
> + }
> +
> switch (rec->opts.threads_spec) {
> case THREAD_SPEC__CPU:
> ret = record__init_thread_cpu_masks(rec, cpus);
>