bpf: Assign bpf_tramp_run_ctx::saved_run_ctx before recursion check.
author Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Wed, 30 Aug 2023 08:04:05 +0000 (10:04 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 19 Sep 2023 10:28:03 +0000 (12:28 +0200)
[ Upstream commit 6764e767f4af1e35f87f3497e1182d945de37f93 ]

__bpf_prog_enter_recur() assigns bpf_tramp_run_ctx::saved_run_ctx before
performing the recursion check, which means that in case of a recursion
__bpf_prog_exit_recur() uses the previously set
bpf_tramp_run_ctx::saved_run_ctx value.
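
As a reference point, this is roughly the shape of the non-sleepable
enter helper (a simplified sketch based on my reading of
kernel/bpf/trampoline.c, not the verbatim source; locking annotations
and storage class are omitted):

    u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog,
                                       struct bpf_tramp_run_ctx *run_ctx)
    {
            rcu_read_lock();
            migrate_disable();

            /* Save the outer run_ctx before the recursion check can bail out. */
            run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

            if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
                    /* Recursion: count the miss; the prog itself is skipped. */
                    bpf_prog_inc_misses_counter(prog);
                    return 0;
            }
            return bpf_prog_start_time();
    }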

__bpf_prog_enter_sleepable_recur() assigns bpf_tramp_run_ctx::saved_run_ctx
only after the recursion check, which means that in case of a recursion
__bpf_prog_exit_sleepable_recur() uses an uninitialized value. This does
not look right. If I read the entry trampoline code right, then
bpf_tramp_run_ctx isn't initialized upfront.
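
The exit helper restores the saved context unconditionally, so on the
recursion path it would hand an uninitialized saved_run_ctx straight to
bpf_reset_run_ctx(). Roughly (again a simplified sketch, not the
verbatim source):

    void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
                                                 struct bpf_tramp_run_ctx *run_ctx)
    {
            /* Called even when the enter helper returned 0 due to recursion. */
            bpf_reset_run_ctx(run_ctx->saved_run_ctx);

            update_prog_stats(prog, start);
            this_cpu_dec(*(prog->active));
            migrate_enable();
            rcu_read_unlock_trace();
    }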

Align __bpf_prog_enter_sleepable_recur() with __bpf_prog_enter_recur() and
set bpf_tramp_run_ctx::saved_run_ctx before the recursion check is made.
Remove the explicit assignment of saved_run_ctx in kern_sys_bpf() since
__bpf_prog_enter_sleepable_recur() now performs it a few cycles later.
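
With the enter helper owning the assignment, the BPF_PROG_RUN path in
kern_sys_bpf() reduces to roughly the following pairing (condensed, not
the verbatim source):

    run_ctx.bpf_cookie = 0;
    if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
            /* recursion detected */
            __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
            bpf_prog_put(prog);
            return -EBUSY;
    }
    attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
    __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
                                    &run_ctx);
    bpf_prog_put(prog);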

Fixes: e384c7b7b46d0 ("bpf, x86: Create bpf_tramp_run_ctx on the caller thread's stack")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/bpf/20230830080405.251926-3-bigeasy@linutronix.de
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/bpf/syscall.c
kernel/bpf/trampoline.c

index 76484137233a322f61edb4a3b1b29046dd58fbe2..0c8b7733573ee59570ac4cfbe5ab23fe23e980dd 100644 (file)
@@ -5135,7 +5135,6 @@ int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
                }
 
                run_ctx.bpf_cookie = 0;
-               run_ctx.saved_run_ctx = NULL;
                if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
                        /* recursion detected */
                        __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
index 88841e352dcdfc3122e1ac6de7910d798ffb54d1..c4381dfcd6b099d55c015b528839a419685a1a16 100644 (file)
@@ -955,13 +955,12 @@ u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
        migrate_disable();
        might_fault();
 
+       run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
+
        if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
                bpf_prog_inc_misses_counter(prog);
                return 0;
        }
-
-       run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
-
        return bpf_prog_start_time();
 }