]> git.baikalelectronics.ru Git - kernel.git/commitdiff
x86/kprobes: Fix __recover_optprobed_insn check optimizing logic
authorYang Jihong <yangjihong1@huawei.com>
Mon, 20 Feb 2023 23:49:16 +0000 (08:49 +0900)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 11 Mar 2023 15:44:01 +0000 (16:44 +0100)
commit 868a6fc0ca2407622d2833adefe1c4d284766c4c upstream.

Since the following commit:

  commit 339f6eb5b9f2 ("kprobes: Set unoptimized flag after unoptimizing code")

modified the update timing of the KPROBE_FLAG_OPTIMIZED, an optimized_kprobe
may be in the optimizing or unoptimizing state when op.kp->flags
has KPROBE_FLAG_OPTIMIZED and op->list is not empty.

The __recover_optprobed_insn check logic is incorrect: a kprobe in the
unoptimizing state may be incorrectly determined as being in the optimizing
state (its jump-optimized instruction is still in place, yet recovery is
skipped). As a result, incorrect instructions are copied.

The optprobe_queued_unopt function needs to be exported so that it can be
called from the arch directory.

Link: https://lore.kernel.org/all/20230216034247.32348-2-yangjihong1@huawei.com/
Fixes: 339f6eb5b9f2 ("kprobes: Set unoptimized flag after unoptimizing code")
Cc: stable@vger.kernel.org
Signed-off-by: Yang Jihong <yangjihong1@huawei.com>
Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kernel/kprobes/opt.c
include/linux/kprobes.h
kernel/kprobes.c

index b348dd506d58acc58feacbebe1a10a1f0d739949..0673bf4d58e65aed1ed9a9c9733feec4875dfd70 100644 (file)
@@ -43,8 +43,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
                /* This function only handles jump-optimized kprobe */
                if (kp && kprobe_optimized(kp)) {
                        op = container_of(kp, struct optimized_kprobe, kp);
-                       /* If op->list is not empty, op is under optimizing */
-                       if (list_empty(&op->list))
+                       /* If op is optimized or under unoptimizing */
+                       if (list_empty(&op->list) || optprobe_queued_unopt(op))
                                goto found;
                }
        }
index c7764d9e6f39719ce28fe2a64cf0552779075082..08c2f96c8d5bd7022723554f218c1471d8d5017d 100644 (file)
@@ -318,6 +318,7 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
                                             size_t *length, loff_t *ppos);
 #endif
 extern void wait_for_kprobe_optimizer(void);
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
 #else
 static inline void wait_for_kprobe_optimizer(void) { }
 #endif /* CONFIG_OPTPROBES */
index 3de56ca2801713ccd4fea9df7d7e6f1b2ef98e0e..c1a83d64d24a309382ecad73e39e4f5835f6cd2e 100644 (file)
@@ -614,7 +614,7 @@ void wait_for_kprobe_optimizer(void)
        mutex_unlock(&kprobe_mutex);
 }
 
-static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+bool optprobe_queued_unopt(struct optimized_kprobe *op)
 {
        struct optimized_kprobe *_op;