Re: [PATCH v13 01/16] perf record: Introduce thread affinity and mmap masks

From: Ian Rogers
Date: Mon Apr 04 2022 - 19:03:14 EST


On Mon, Jan 17, 2022 at 10:38 AM Alexey Bayduraev
<alexey.v.bayduraev@xxxxxxxxxxxxxxx> wrote:
>
> Introduce affinity and mmap thread masks. Thread affinity mask
> defines CPUs that a thread is allowed to run on. Thread maps
> mask defines mmap data buffers the thread serves to stream
> profiling data from.
>
> Acked-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
> Acked-by: Namhyung Kim <namhyung@xxxxxxxxx>
> Reviewed-by: Riccardo Mancini <rickyman7@xxxxxxxxx>
> Tested-by: Riccardo Mancini <rickyman7@xxxxxxxxx>
> Signed-off-by: Alexey Bayduraev <alexey.v.bayduraev@xxxxxxxxxxxxxxx>
> ---
> tools/perf/builtin-record.c | 123 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 123 insertions(+)
>
> diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
> index bb716c953d02..41998f2140cd 100644
> --- a/tools/perf/builtin-record.c
> +++ b/tools/perf/builtin-record.c
> @@ -87,6 +87,11 @@ struct switch_output {
> int cur_file;
> };
>
> +struct thread_mask {
> + struct mmap_cpu_mask maps;
> + struct mmap_cpu_mask affinity;
> +};
> +
> struct record {
> struct perf_tool tool;
> struct record_opts opts;
> @@ -112,6 +117,8 @@ struct record {
> struct mmap_cpu_mask affinity_mask;
> unsigned long output_max_size; /* = 0: unlimited */
> struct perf_debuginfod debuginfod;
> + int nr_threads;
> + struct thread_mask *thread_masks;
> };
>
> static volatile int done;
> @@ -2204,6 +2211,47 @@ static int record__parse_affinity(const struct option *opt, const char *str, int
> return 0;
> }
>
> +static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
> +{
> + mask->nbits = nr_bits;
> + mask->bits = bitmap_zalloc(mask->nbits);
> + if (!mask->bits)
> + return -ENOMEM;
> +
> + return 0;
> +}
> +
> +static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
> +{
> + bitmap_free(mask->bits);
> + mask->nbits = 0;
> +}
> +
> +static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
> +{
> + int ret;
> +
> + ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
> + if (ret) {
> + mask->affinity.bits = NULL;
> + return ret;
> + }
> +
> + ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
> + if (ret) {
> + record__mmap_cpu_mask_free(&mask->maps);
> + mask->maps.bits = NULL;
> + }
> +
> + return ret;
> +}
> +
> +static void record__thread_mask_free(struct thread_mask *mask)
> +{
> + record__mmap_cpu_mask_free(&mask->maps);
> + record__mmap_cpu_mask_free(&mask->affinity);
> +}
> +
> static int parse_output_max_size(const struct option *opt,
> const char *str, int unset)
> {
> @@ -2683,6 +2731,73 @@ static struct option __record_options[] = {
>
> struct option *record_options = __record_options;
>
> +static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
> +{
> + int c;
> + for (c = 0; c < cpus->nr; c++)
> + set_bit(cpus->map[c].cpu, mask->bits);
> +}
> +

In per-thread mode it is possible that cpus is the dummy CPU map here.
This means that the cpu below has the value -1 and setting bit -1
actually has the effect of setting bit 63. Here is a reproduction
based on the acme/perf/core branch:

```
$ make STATIC=1 DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer
-fsanitize=undefined -fno-sanitize-recover'
$ perf record -o /tmp/perf.data --per-thread true
tools/include/asm-generic/bitops/atomic.h:10:36: runtime error: shift
exponent -1 is negative
$ UBSAN_OPTIONS=abort_on_error=1 gdb --args perf record -o
/tmp/perf.data --per-thread true
(gdb) r
tools/include/asm-generic/bitops/atomic.h:10:36: runtime error: shift
exponent -1 is negative
(gdb) bt
#0 __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:49
#1 0x00007ffff71d2546 in __GI_abort () at abort.c:79
#2 0x00007ffff640db9f in __sanitizer::Abort () at
../../../../src/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp:151
#3 0x00007ffff6418efc in __sanitizer::Die () at
../../../../src/libsanitizer/sanitizer_common/sanitizer_termination.cpp:58
#4 0x00007ffff63fd99e in
__ubsan::__ubsan_handle_shift_out_of_bounds_abort (Data=<optimized
out>, LHS=<optimized out>,
RHS=<optimized out>) at
../../../../src/libsanitizer/ubsan/ubsan_handlers.cpp:378
#5 0x0000555555c54405 in set_bit (nr=-1, addr=0x555556ecd0a0)
at tools/include/asm-generic/bitops/atomic.h:10
#6 0x0000555555c6ddaf in record__mmap_cpu_mask_init
(mask=0x555556ecd070, cpus=0x555556ecd050) at builtin-record.c:3333
#7 0x0000555555c7044c in record__init_thread_default_masks
(rec=0x55555681b100 <record>, cpus=0x555556ecd050) at
builtin-record.c:3668
#8 0x0000555555c705b3 in record__init_thread_masks
(rec=0x55555681b100 <record>) at builtin-record.c:3681
#9 0x0000555555c7297a in cmd_record (argc=1, argv=0x7fffffffdcc0) at
builtin-record.c:3976
#10 0x0000555555e06d41 in run_builtin (p=0x555556827538
<commands+216>, argc=5, argv=0x7fffffffdcc0) at perf.c:313
#11 0x0000555555e07253 in handle_internal_command (argc=5,
argv=0x7fffffffdcc0) at perf.c:365
#12 0x0000555555e07508 in run_argv (argcp=0x7fffffffdb0c,
argv=0x7fffffffdb00) at perf.c:409
#13 0x0000555555e07b32 in main (argc=5, argv=0x7fffffffdcc0) at perf.c:539
```

If the cpu map is dummy and no bits are set in mask->bits, no data is
written out. Setting bit 0 in mask->bits causes a segv. Setting bit 63
works, but it feels like more invariants are broken elsewhere in the code.

Here is a workaround patch (not a proper fix):

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index ba74fab02e62..62727b676f98 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -3329,6 +3329,11 @@ static void record__mmap_cpu_mask_init(struct
mmap_cpu_mask *mask, struct perf_c
{
int c;

+ if (cpu_map__is_dummy(cpus)) {
+ set_bit(63, mask->bits);
+ return;
+ }
+
for (c = 0; c < cpus->nr; c++)
set_bit(cpus->map[c].cpu, mask->bits);
}

Alexey, what should the expected behavior be with per-thread mmaps?

Thanks,
Ian

> +static void record__free_thread_masks(struct record *rec, int nr_threads)
> +{
> + int t;
> +
> + if (rec->thread_masks)
> + for (t = 0; t < nr_threads; t++)
> + record__thread_mask_free(&rec->thread_masks[t]);
> +
> + zfree(&rec->thread_masks);
> +}
> +
> +static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
> +{
> + int t, ret;
> +
> + rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
> + if (!rec->thread_masks) {
> + pr_err("Failed to allocate thread masks\n");
> + return -ENOMEM;
> + }
> +
> + for (t = 0; t < nr_threads; t++) {
> + ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
> + if (ret) {
> + pr_err("Failed to allocate thread masks[%d]\n", t);
> + goto out_free;
> + }
> + }
> +
> + return 0;
> +
> +out_free:
> + record__free_thread_masks(rec, nr_threads);
> +
> + return ret;
> +}
> +
> +static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
> +{
> + int ret;
> +
> + ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
> + if (ret)
> + return ret;
> +
> + record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
> +
> + rec->nr_threads = 1;
> +
> + return 0;
> +}
> +
> +static int record__init_thread_masks(struct record *rec)
> +{
> + struct perf_cpu_map *cpus = rec->evlist->core.cpus;
> +
> + return record__init_thread_default_masks(rec, cpus);
> +}
> +
> int cmd_record(int argc, const char **argv)
> {
> int err;
> @@ -2948,6 +3063,12 @@ int cmd_record(int argc, const char **argv)
> goto out;
> }
>
> + err = record__init_thread_masks(rec);
> + if (err) {
> + pr_err("Failed to initialize parallel data streaming masks\n");
> + goto out;
> + }
> +
> if (rec->opts.nr_cblocks > nr_cblocks_max)
> rec->opts.nr_cblocks = nr_cblocks_max;
> pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
> @@ -2966,6 +3087,8 @@ int cmd_record(int argc, const char **argv)
> symbol__exit();
> auxtrace_record__free(rec->itr);
> out_opts:
> + record__free_thread_masks(rec, rec->nr_threads);
> + rec->nr_threads = 0;
> evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
> return err;
> }
> --
> 2.19.0
>