Re: [PATCH v10 1/4] trace: Add trace any kernel object

From: Jeff Xie
Date: Sat May 21 2022 - 13:26:35 EST


Hi Masami,

Thanks for your detailed review.

On Sat, May 21, 2022 at 11:25 PM Masami Hiramatsu <mhiramat@xxxxxxxxxx> wrote:
>
> Hi Jeff,
>
> On Fri, 13 May 2022 01:00:05 +0800
> Jeff Xie <xiehuan09@xxxxxxxxx> wrote:
>
> > Introduce objtrace trigger to get the call flow by tracing any kernel
> > object in the function parameter.
> >
> > The objtrace trigger makes a list of the target object address from
> > the given event parameter, and records all kernel function calls
> > which has the object address in its parameter.
>
> Thank you for updating this. Please read my comments below
>
> [...]
> > +
> > +static bool object_exist(void *obj, struct trace_array *tr)
> > +{
> > + int i, max;
> > + struct objtrace_data *obj_data;
> > +
> > + obj_data = get_obj_data(tr);
> > + if (!obj_data)
> > + return false;
> > +
> > + max = atomic_read(&obj_data->num_traced_obj);
>
> max = READ_ONCE(&obj_data->num_traced_obj);
> (see below)

Thanks, it is indeed more appropriate to use READ_ONCE in places like this ;-)
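
Something like this (untested), assuming num_traced_obj is also changed
from atomic_t to a plain int as you suggest below, so that READ_ONCE()
takes the variable itself rather than its address:

static bool object_exist(void *obj, struct trace_array *tr)
{
        int i, max;
        struct objtrace_data *obj_data;

        obj_data = get_obj_data(tr);
        if (!obj_data)
                return false;

        max = READ_ONCE(obj_data->num_traced_obj);
        /* pairs with the smp_wmb() in set_trace_object() */
        smp_rmb();
        for (i = 0; i < max; i++) {
                if (obj_data->traced_obj[i].obj == obj)
                        return true;
        }
        return false;
}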

>
> > + smp_rmb();
> > + for (i = 0; i < max; i++) {
> > + if (obj_data->traced_obj[i].obj == obj)
> > + return true;
> > + }
> > + return false;
> > +}
> > +
> > +static bool object_empty(struct trace_array *tr)
> > +{
> > + struct objtrace_data *obj_data;
> > +
> > + obj_data = get_obj_data(tr);
> > + if (!obj_data)
> > + return false;
> > +
> > + return !atomic_read(&obj_data->num_traced_obj);
>
> return !READ_ONCE(&obj_data->num_traced_obj);
> (see below)
>
> > +}
> > +
> > +static void set_trace_object(void *obj, struct trace_array *tr)
> > +{
> > + unsigned long flags;
> > + struct object_instance *obj_ins;
> > + struct objtrace_data *obj_data;
> > +
> > + if (in_nmi())
> > + return;
> > +
> > + if (!obj && object_exist(obj, tr))
> > + return;
> > +
> > + obj_data = get_obj_data(tr);
> > + if (!obj_data)
> > + return;
> > +
> > + /* only this place has write operations */
> > + raw_spin_lock_irqsave(&obj_data->obj_data_lock, flags);
> > + if (atomic_read(&obj_data->num_traced_obj) == MAX_TRACED_OBJECT) {
>
> Since obj_data->num_traced_obj update is protected by obj_data->obj_data_lock,
> you don't need atomic operation. What you need is using READ_ONCE() to
> access the num_traced_obj outside of this protected area.

Thank you for the reminder; this atomic operation is indeed redundant.
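
With num_traced_obj changed to a plain int, the writer side under
obj_data_lock could then look like this (untested sketch):

        raw_spin_lock_irqsave(&obj_data->obj_data_lock, flags);
        if (obj_data->num_traced_obj == MAX_TRACED_OBJECT) {
                trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
                        "object_pool is full, can't trace object:0x%px\n", obj);
                goto out;
        }
        obj_ins = &obj_data->traced_obj[obj_data->num_traced_obj];
        obj_ins->obj = obj;
        obj_ins->tr = tr;
        /* make sure the num_traced_obj update always appears after traced_obj update */
        smp_wmb();
        obj_data->num_traced_obj++;
out:
        raw_spin_unlock_irqrestore(&obj_data->obj_data_lock, flags);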

> > + trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
> > + "object_pool is full, can't trace object:0x%px\n", obj);
> > + goto out;
> > + }
> > + obj_ins = &obj_data->traced_obj[atomic_read(&obj_data->num_traced_obj)];
> > + obj_ins->obj = obj;
> > + obj_ins->tr = tr;
> > + /* make sure the num_traced_obj update always appears after traced_obj update */
> > + smp_wmb();
> > + atomic_inc(&obj_data->num_traced_obj);
> > +out:
> > + raw_spin_unlock_irqrestore(&obj_data->obj_data_lock, flags);
> > +}
> > +
> [...]
> > +
> > +static int
> > +event_object_trigger_parse(struct event_command *cmd_ops,
> > + struct trace_event_file *file,
> > + char *glob, char *cmd, char *param_and_filter)
> > +{
> > + struct event_trigger_data *trigger_data;
> > + struct objtrace_trigger_data *obj_data;
> > + struct ftrace_event_field *field;
> > + char *objtrace_cmd, *arg;
> > + char *param, *filter;
> > + int ret;
> > + bool remove;
> > +
> > + remove = event_trigger_check_remove(glob);
> > +
> > + /*
> > + * separate the param and the filter:
> > + * objtrace:add:OBJ[:COUNT] [if filter]
> > + */
> > + ret = event_trigger_separate_filter(param_and_filter, &param, &filter, true);
> > + if (ret)
> > + return ret;
> > +
> > + objtrace_cmd = strsep(&param, ":");
> > + if (!objtrace_cmd || strcmp(objtrace_cmd, "add")) {
> > + pr_err("error objtrace command\n");
> > + return -EINVAL;
> > + }
> > +
> > + arg = strsep(&param, ":");
> > + if (!arg)
> > + return -EINVAL;
> > +
> > + field = trace_find_event_field(file->event_call, arg);
> > + if (!field)
> > + return -EINVAL;
> > +
> > + if (field->size != sizeof(void *)) {
> > + pr_err("the size of the %s should be:%ld\n", field->name, sizeof(void *));
> > + return -EINVAL;
> > + }
> > +
> > + if (remove && !field_exist(file, cmd_ops, field->name))
> > + return -EINVAL;
>
> This may return -ENOENT.

Indeed, it would be better to return -ENOENT ;-)

> > +
> > + obj_data = kzalloc(sizeof(*obj_data), GFP_KERNEL);
> > + if (!obj_data)
> > + return -ENOMEM;
> > +
> > + obj_data->field = field;
> > + obj_data->tr = file->tr;
> > + snprintf(obj_data->objtrace_cmd, OBJTRACE_CMD_LEN, objtrace_cmd);
>
> If objtrace_cmd is fixed command string, can you make this a flag, like
> OBJTRACE_CMD_ADD.

Yes, I can use a macro to define the "add" command:
#define OBJTRACE_CMD_ADD "add"
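
It can then be used for both the command check and the command copy in
event_object_trigger_parse(), e.g. (untested):

        objtrace_cmd = strsep(&param, ":");
        if (!objtrace_cmd || strcmp(objtrace_cmd, OBJTRACE_CMD_ADD)) {
                pr_err("error objtrace command\n");
                return -EINVAL;
        }
        [...]
        snprintf(obj_data->objtrace_cmd, OBJTRACE_CMD_LEN, OBJTRACE_CMD_ADD);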

> > +
> > + trigger_data = event_trigger_alloc(cmd_ops, cmd, param, obj_data);
> > + if (!trigger_data) {
> > + kfree(obj_data);
> > + return -ENOMEM;
> > + }
> > + if (remove) {
> > + event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
> > + kfree(obj_data);
> > + kfree(trigger_data);
> > + return 0;
> > + }
> > +
> > + ret = event_trigger_parse_num(param, trigger_data);
> > + if (ret)
> > + goto out_free;
> > +
> > + ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
> > + if (ret < 0)
> > + goto out_free;
> > +
> > + ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
> > + if (ret)
> > + goto out_free;
> > +
> > + return ret;
> > +
> > + out_free:
> > + event_trigger_reset_filter(cmd_ops, trigger_data);
> > + kfree(obj_data);
> > + kfree(trigger_data);
> > + return ret;
> > +}
> > +
> > +static struct event_command trigger_object_cmd = {
> > + .name = "objtrace",
> > + .trigger_type = ETT_TRACE_OBJECT,
> > + .flags = EVENT_CMD_FL_NEEDS_REC,
> > + .parse = event_object_trigger_parse,
> > + .reg = register_trigger,
> > + .unreg = unregister_trigger,
> > + .get_trigger_ops = objecttrace_get_trigger_ops,
> > + .set_filter = set_trigger_filter,
> > +};
> > +
> > +__init int register_trigger_object_cmd(void)
> > +{
> > + int ret;
> > +
> > + ret = register_event_command(&trigger_object_cmd);
> > + WARN_ON(ret < 0);
> > +
> > + return ret;
> > +}
> > +
> > +int allocate_objtrace_data(struct trace_array *tr)
> > +{
> > + struct objtrace_data *obj_data;
> > + struct ftrace_ops *fops;
> > +
> > + obj_data = kzalloc(sizeof(*obj_data), GFP_KERNEL);
> > + if (!obj_data)
> > + return -ENOMEM;
> > +
> > + tr->obj_data = obj_data;
>
> I suggest to move this line after initializing all field in
> the obj_data.

Thanks, indeed it is better to move this line after all the fields in
obj_data have been initialized ;-)

> > + obj_data->tr = tr;
> > + fops = &obj_data->fops;
> > + fops->func = trace_object_events_call;
> > + fops->flags = FTRACE_OPS_FL_SAVE_REGS;
> > + fops->private = tr;
> > +
> > + raw_spin_lock(&obj_data->obj_data_lock);
>
> You don't need to lock this spinlock becuase this data structure
> is not used yet. Also, you need to initialize the lock with
> __RAW_SPIN_LOCK_UNLOCKED() macro.

Thanks, I will remove the spinlock from here. For initializing the lock,
it seems better to use raw_spin_lock_init(&obj_data->obj_data_lock)
instead of __RAW_SPIN_LOCK_UNLOCKED(obj_data->obj_data_lock), since
obj_data is allocated dynamically and __RAW_SPIN_LOCK_UNLOCKED() expands
to an initializer value; used as a standalone statement it only computes
a value that is never assigned, so the compiler warns:

<compile warning>
kernel/trace/trace_object.c: In function ‘allocate_objtrace_data’:
./include/linux/spinlock_types_raw.h:69:2: warning: value computed is
not used [-Wunused-value]
69 | (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
| ^
kernel/trace/trace_object.c:422:2: note: in expansion of macro
‘__RAW_SPIN_LOCK_UNLOCKED’
422 | __RAW_SPIN_LOCK_UNLOCKED(obj_data->obj_data_lock);
</compile warning>
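
With those changes, allocate_objtrace_data() could look like this
(untested), assuming the addition to obj_data_head is serialized by the
caller:

int allocate_objtrace_data(struct trace_array *tr)
{
        struct objtrace_data *obj_data;
        struct ftrace_ops *fops;

        obj_data = kzalloc(sizeof(*obj_data), GFP_KERNEL);
        if (!obj_data)
                return -ENOMEM;

        obj_data->tr = tr;
        fops = &obj_data->fops;
        fops->func = trace_object_events_call;
        fops->flags = FTRACE_OPS_FL_SAVE_REGS;
        fops->private = tr;
        raw_spin_lock_init(&obj_data->obj_data_lock);

        /* publish tr->obj_data only after all fields are initialized */
        tr->obj_data = obj_data;
        list_add(&obj_data->head, &obj_data_head);
        return 0;
}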

> > + list_add(&obj_data->head, &obj_data_head);
> > + raw_spin_unlock(&obj_data->obj_data_lock);
> > + return 0;
> > +}
>
>
> Thank you,
>
>
> --
> Masami Hiramatsu <mhiramat@xxxxxxxxxx>

Thanks,
JeffXie