Re: [PATCH 3/4] kprobes: move kprobe_ftrace_handler() from x86 and make it weak
From: Jisheng Zhang
Date: Mon Aug 19 2019 - 21:58:08 EST
On Tue, 20 Aug 2019 09:07:35 +0900 Masami Hiramatsu wrote:
>
>
> Hi Jisheng,
Hi,
>
> On Mon, 19 Aug 2019 11:37:32 +0000
> Jisheng Zhang <Jisheng.Zhang@xxxxxxxxxxxxx> wrote:
>
> > This code could be reused. So move it from x86 to common code.
>
> Yes, it can be among some arch, but at first, please make your
> architecture implementation. After making sure that is enough
> stable, we will optimize (consolidate) the code.
Got it. I will duplicate the function first, then make the consolidation
a TODO.
>
> For example,
> > - /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
> > - instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
>
> This may depend on arch implementation of kprobes.
Indeed, for arm64, it would be as simple as s/ip/pc.
Thanks
>
> Could you make a copy and update comments on arm64?
>
> Thank you,
>
> >
> > Signed-off-by: Jisheng Zhang <Jisheng.Zhang@xxxxxxxxxxxxx>
> > ---
> > arch/x86/kernel/kprobes/ftrace.c | 44 --------------------------------
> > kernel/kprobes.c | 44 ++++++++++++++++++++++++++++++++
> > 2 files changed, 44 insertions(+), 44 deletions(-)
> >
> > diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
> > index c2ad0b9259ca..91ae1e3e65f7 100644
> > --- a/arch/x86/kernel/kprobes/ftrace.c
> > +++ b/arch/x86/kernel/kprobes/ftrace.c
> > @@ -12,50 +12,6 @@
> >
> > #include "common.h"
> >
> > -/* Ftrace callback handler for kprobes -- called under preepmt disabed */
> > -void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
> > - struct ftrace_ops *ops, struct pt_regs *regs)
> > -{
> > - struct kprobe *p;
> > - struct kprobe_ctlblk *kcb;
> > -
> > - /* Preempt is disabled by ftrace */
> > - p = get_kprobe((kprobe_opcode_t *)ip);
> > - if (unlikely(!p) || kprobe_disabled(p))
> > - return;
> > -
> > - kcb = get_kprobe_ctlblk();
> > - if (kprobe_running()) {
> > - kprobes_inc_nmissed_count(p);
> > - } else {
> > - unsigned long orig_ip = instruction_pointer(regs);
> > - /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
> > - instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
> > -
> > - __this_cpu_write(current_kprobe, p);
> > - kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> > - if (!p->pre_handler || !p->pre_handler(p, regs)) {
> > - /*
> > - * Emulate singlestep (and also recover regs->ip)
> > - * as if there is a 5byte nop
> > - */
> > - instruction_pointer_set(regs,
> > - (unsigned long)p->addr + MCOUNT_INSN_SIZE);
> > - if (unlikely(p->post_handler)) {
> > - kcb->kprobe_status = KPROBE_HIT_SSDONE;
> > - p->post_handler(p, regs, 0);
> > - }
> > - instruction_pointer_set(regs, orig_ip);
> > - }
> > - /*
> > - * If pre_handler returns !0, it changes regs->ip. We have to
> > - * skip emulating post_handler.
> > - */
> > - __this_cpu_write(current_kprobe, NULL);
> > - }
> > -}
> > -NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> > -
> > int arch_prepare_kprobe_ftrace(struct kprobe *p)
> > {
> > p->ainsn.insn = NULL;
> > diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> > index f8400753a8a9..479148ee1822 100644
> > --- a/kernel/kprobes.c
> > +++ b/kernel/kprobes.c
> > @@ -960,6 +960,50 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
> > #endif /* CONFIG_OPTPROBES */
> >
> > #ifdef CONFIG_KPROBES_ON_FTRACE
> > +/* Ftrace callback handler for kprobes -- called under preepmt disabed */
> > +void __weak kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
> > + struct ftrace_ops *ops, struct pt_regs *regs)
> > +{
> > + struct kprobe *p;
> > + struct kprobe_ctlblk *kcb;
> > +
> > + /* Preempt is disabled by ftrace */
> > + p = get_kprobe((kprobe_opcode_t *)ip);
> > + if (unlikely(!p) || kprobe_disabled(p))
> > + return;
> > +
> > + kcb = get_kprobe_ctlblk();
> > + if (kprobe_running()) {
> > + kprobes_inc_nmissed_count(p);
> > + } else {
> > + unsigned long orig_ip = instruction_pointer(regs);
> > + /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
> > + instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
> > +
> > + __this_cpu_write(current_kprobe, p);
> > + kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> > + if (!p->pre_handler || !p->pre_handler(p, regs)) {
> > + /*
> > + * Emulate singlestep (and also recover regs->ip)
> > + * as if there is a 5byte nop
> > + */
> > + instruction_pointer_set(regs,
> > + (unsigned long)p->addr + MCOUNT_INSN_SIZE);
> > + if (unlikely(p->post_handler)) {
> > + kcb->kprobe_status = KPROBE_HIT_SSDONE;
> > + p->post_handler(p, regs, 0);
> > + }
> > + instruction_pointer_set(regs, orig_ip);
> > + }
> > + /*
> > + * If pre_handler returns !0, it changes regs->ip. We have to
> > + * skip emulating post_handler.
> > + */
> > + __this_cpu_write(current_kprobe, NULL);
> > + }
> > +}
> > +NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> > +
> > static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
> > .func = kprobe_ftrace_handler,
> > .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
> > --
> > 2.23.0.rc1
> >
>
>
> --
> Masami Hiramatsu <mhiramat@xxxxxxxxxx>