]> git.baikalelectronics.ru Git - kernel.git/commitdiff
powerpc/kprobes: Disable preemption before invoking probe handler for optprobes
authorNaveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Mon, 23 Oct 2017 16:37:38 +0000 (22:07 +0530)
committerMichael Ellerman <mpe@ellerman.id.au>
Sun, 12 Nov 2017 12:51:40 +0000 (23:51 +1100)
Per Documentation/kprobes.txt, probe handlers need to be invoked with
preemption disabled. Update optimized_callback() to do so. Also move
get_kprobe_ctlblk() invocation post preemption disable, since it
accesses per-cpu data.

This was not an issue so far since optprobes wasn't selected if
CONFIG_PREEMPT was enabled. Commit a30b85df7d599f ("kprobes: Use
synchronize_rcu_tasks() for optprobe with CONFIG_PREEMPT=y") changes
this.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/kernel/optprobes.c

index 91e037ab20a197de398161c057d86bffabc3a68a..60ba7f1370a80c545375d56b500a7ea538768208 100644 (file)
@@ -115,7 +115,6 @@ static unsigned long can_optimize(struct kprobe *p)
 static void optimized_callback(struct optimized_kprobe *op,
                               struct pt_regs *regs)
 {
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long flags;
 
        /* This is possible if op is under delayed unoptimizing */
@@ -124,13 +123,14 @@ static void optimized_callback(struct optimized_kprobe *op,
 
        local_irq_save(flags);
        hard_irq_disable();
+       preempt_disable();
 
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
        } else {
                __this_cpu_write(current_kprobe, &op->kp);
                regs->nip = (unsigned long)op->kp.addr;
-               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+               get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                opt_pre_handler(&op->kp, regs);
                __this_cpu_write(current_kprobe, NULL);
        }
@@ -140,6 +140,7 @@ static void optimized_callback(struct optimized_kprobe *op,
         * local_irq_restore() will re-enable interrupts,
         * if they were hard disabled.
         */
+       preempt_enable_no_resched();
        local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(optimized_callback);