bpf: Remove prog->active check for bpf_lsm and bpf_iter
author    Martin KaFai Lau <martin.lau@kernel.org>
          Tue, 25 Oct 2022 18:45:16 +0000 (11:45 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 19 Sep 2023 10:28:03 +0000 (12:28 +0200)
[ Upstream commit 271de525e1d7f564e88a9d212c50998b49a54476 ]

Commit 64696c40d03c ("bpf: Add __bpf_prog_{enter,exit}_struct_ops for struct_ops trampoline")
removed the prog->active check for struct_ops progs.  The bpf_lsm
and bpf_iter progs also run through a trampoline.  Like struct_ops,
bpf_lsm and bpf_iter have fixed hooks for the prog to attach to, and
the kernel does not call the same hook recursively.  This patch
removes the prog->active check for bpf_lsm and bpf_iter as well.
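
For context, the prog->active guard that stays in the '_recur'
variants is a per-CPU counter checked on entry.  A simplified sketch
(not the exact upstream source) of the non-sleepable enter side:

	static u64 notrace prog_enter_recur_sketch(struct bpf_prog *prog,
						   struct bpf_tramp_run_ctx *run_ctx)
	{
		rcu_read_lock();
		migrate_disable();

		run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

		/* prog->active is a per-CPU counter; a nested entry on the
		 * same CPU bumps it past 1, the run is counted as a miss
		 * and 0 is returned so the trampoline skips the prog.
		 */
		if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
			bpf_prog_inc_misses_counter(prog);
			return 0;
		}
		return bpf_prog_start_time();
	}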

A later patch adds a test that reproduces the recursion issue for a
sleepable bpf_lsm prog.

This patch appends a '_recur' suffix to the existing enter and exit
functions that track the prog->active counter.  New
__bpf_prog_{enter,exit}[_sleepable] functions are added that skip the
prog->active tracking.  The '_struct_ops' variants are removed.
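
Summarized from the diff below, the renames amount to:

	__bpf_prog_enter             -> __bpf_prog_enter_recur
	__bpf_prog_exit              -> __bpf_prog_exit_recur
	__bpf_prog_enter_sleepable   -> __bpf_prog_enter_sleepable_recur
	__bpf_prog_exit_sleepable    -> __bpf_prog_exit_sleepable_recur
	__bpf_prog_{enter,exit}_struct_ops -> dropped; replaced by the new
	                                      non-tracking __bpf_prog_{enter,exit}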

The decision on which enter and exit functions to use is moved into
the new bpf_trampoline_{enter,exit}() helpers.  They return the
'_recur' variants for all tracing progs.  For bpf_lsm, bpf_iter,
struct_ops (no prog->active tracking after 64696c40d03c), and
bpf_lsm_cgroup (no prog->active tracking after 69fd337a975c7), they
return the functions that do not track prog->active.
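
At a high level, the arm64 and x86 changes below just ask for this
pair when building the trampoline.  A rough C-level view of the
sequence the generated trampoline performs around one attached prog
(illustrative sketch only; the real JIT resolves the two pointers at
generation time and emits them as direct calls):

	static void run_prog_sketch(struct bpf_prog *p, void *ctx)
	{
		struct bpf_tramp_run_ctx run_ctx = {};
		bpf_trampoline_enter_t enter = bpf_trampoline_enter(p);
		bpf_trampoline_exit_t exit = bpf_trampoline_exit(p);
		u64 start;

		start = enter(p, &run_ctx);
		if (start)	/* 0 means recursion detected: skip the prog */
			p->bpf_func(ctx, p->insnsi);
		exit(p, start, &run_ctx);
	}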

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20221025184524.3526117-2-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Stable-dep-of: 7645629f7dc8 ("bpf: Invoke __bpf_prog_exit_sleepable_recur() on recursion in kern_sys_bpf().")
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/arm64/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp.c
include/linux/bpf.h
include/linux/bpf_verifier.h
kernel/bpf/syscall.c
kernel/bpf/trampoline.c

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 14134fd34ff79c84589ee423c5f07a4c75f42c0c..0ce5f13eabb1bb972a24c116cce56bea4a1aa56c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1655,13 +1655,8 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
        struct bpf_prog *p = l->link.prog;
        int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
 
-       if (p->aux->sleepable) {
-               enter_prog = (u64)__bpf_prog_enter_sleepable;
-               exit_prog = (u64)__bpf_prog_exit_sleepable;
-       } else {
-               enter_prog = (u64)__bpf_prog_enter;
-               exit_prog = (u64)__bpf_prog_exit;
-       }
+       enter_prog = (u64)bpf_trampoline_enter(p);
+       exit_prog = (u64)bpf_trampoline_exit(p);
 
        if (l->cookie == 0) {
                /* if cookie is zero, one instruction is enough to store it */
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index db6053a22e866ce8219be0b5fd0d6579830990c1..5e680e039d0e11560be3eb78b99efb97e403d130 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1813,10 +1813,6 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
                           struct bpf_tramp_link *l, int stack_size,
                           int run_ctx_off, bool save_ret)
 {
-       void (*exit)(struct bpf_prog *prog, u64 start,
-                    struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
-       u64 (*enter)(struct bpf_prog *prog,
-                    struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
        u8 *prog = *pprog;
        u8 *jmp_insn;
        int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
@@ -1835,23 +1831,12 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
         */
        emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
 
-       if (p->aux->sleepable) {
-               enter = __bpf_prog_enter_sleepable;
-               exit = __bpf_prog_exit_sleepable;
-       } else if (p->type == BPF_PROG_TYPE_STRUCT_OPS) {
-               enter = __bpf_prog_enter_struct_ops;
-               exit = __bpf_prog_exit_struct_ops;
-       } else if (p->expected_attach_type == BPF_LSM_CGROUP) {
-               enter = __bpf_prog_enter_lsm_cgroup;
-               exit = __bpf_prog_exit_lsm_cgroup;
-       }
-
        /* arg1: mov rdi, progs[i] */
        emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
        /* arg2: lea rsi, [rbp - ctx_cookie_off] */
        EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
 
-       if (emit_call(&prog, enter, prog))
+       if (emit_call(&prog, bpf_trampoline_enter(p), prog))
                return -EINVAL;
        /* remember prog start time returned by __bpf_prog_enter */
        emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -1896,7 +1881,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
        emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
        /* arg3: lea rdx, [rbp - run_ctx_off] */
        EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
-       if (emit_call(&prog, exit, prog))
+       if (emit_call(&prog, bpf_trampoline_exit(p), prog))
                return -EINVAL;
 
        *pprog = prog;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8cef9ec3a89c2a332bfa3cc03ba6093671222536..b3d3aa8437dce295d7c39cb7a6f99fb6e6cc0d04 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -862,22 +862,18 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *i
                                const struct btf_func_model *m, u32 flags,
                                struct bpf_tramp_links *tlinks,
                                void *orig_call);
-/* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
-                                      struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
-                                       struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
-                                       struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
-                                       struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
-                                       struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+                                            struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
+                                            struct bpf_tramp_run_ctx *run_ctx);
 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
 void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
+typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
+                                     struct bpf_tramp_run_ctx *run_ctx);
+typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
+                                     struct bpf_tramp_run_ctx *run_ctx);
+bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
+bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
 
 struct bpf_ksym {
        unsigned long            start;
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 0eb8f035b3d9f2c3c999bc5124776ac9988c88b9..1a32baa78ce26f8e1082c20dd0c7573813af5d1b 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -648,4 +648,17 @@ static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
                prog->aux->dst_prog->type : prog->type;
 }
 
+static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+{
+       switch (resolve_prog_type(prog)) {
+       case BPF_PROG_TYPE_TRACING:
+               return prog->expected_attach_type != BPF_TRACE_ITER;
+       case BPF_PROG_TYPE_STRUCT_OPS:
+       case BPF_PROG_TYPE_LSM:
+               return false;
+       default:
+               return true;
+       }
+}
+
 #endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0c44a716f0a24b31b2f1d4926e82ace6a4bfa3b4..7afec961c5728363fe813780bf50cee374d5c697 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -5136,13 +5136,14 @@ int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
 
                run_ctx.bpf_cookie = 0;
                run_ctx.saved_run_ctx = NULL;
-               if (!__bpf_prog_enter_sleepable(prog, &run_ctx)) {
+               if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
                        /* recursion detected */
                        bpf_prog_put(prog);
                        return -EBUSY;
                }
                attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
-               __bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */, &run_ctx);
+               __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
+                                               &run_ctx);
                bpf_prog_put(prog);
                return 0;
 #endif
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 30af8f66e17b4b0a532e48c7f83ff726a2aa0466..88841e352dcdfc3122e1ac6de7910d798ffb54d1 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -874,7 +874,7 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
  * [2..MAX_U64] - execute bpf prog and record execution time.
  *     This is start time.
  */
-u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
+static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
        __acquires(RCU)
 {
        rcu_read_lock();
@@ -911,7 +911,8 @@ static void notrace update_prog_stats(struct bpf_prog *prog,
        }
 }
 
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx)
+static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
+                                         struct bpf_tramp_run_ctx *run_ctx)
        __releases(RCU)
 {
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
@@ -922,8 +923,8 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_
        rcu_read_unlock();
 }
 
-u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
-                                       struct bpf_tramp_run_ctx *run_ctx)
+static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
+                                              struct bpf_tramp_run_ctx *run_ctx)
        __acquires(RCU)
 {
        /* Runtime stats are exported via actual BPF_LSM_CGROUP
@@ -937,8 +938,8 @@ u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
        return NO_START_TIME;
 }
 
-void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
-                                       struct bpf_tramp_run_ctx *run_ctx)
+static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
+                                              struct bpf_tramp_run_ctx *run_ctx)
        __releases(RCU)
 {
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
@@ -947,7 +948,8 @@ void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
        rcu_read_unlock();
 }
 
-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
+u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+                                            struct bpf_tramp_run_ctx *run_ctx)
 {
        rcu_read_lock_trace();
        migrate_disable();
@@ -963,8 +965,8 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_r
        return bpf_prog_start_time();
 }
 
-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
-                                      struct bpf_tramp_run_ctx *run_ctx)
+void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
+                                            struct bpf_tramp_run_ctx *run_ctx)
 {
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
@@ -974,8 +976,30 @@ void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
        rcu_read_unlock_trace();
 }
 
-u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
-                                       struct bpf_tramp_run_ctx *run_ctx)
+static u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog,
+                                             struct bpf_tramp_run_ctx *run_ctx)
+{
+       rcu_read_lock_trace();
+       migrate_disable();
+       might_fault();
+
+       run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
+
+       return bpf_prog_start_time();
+}
+
+static void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
+                                             struct bpf_tramp_run_ctx *run_ctx)
+{
+       bpf_reset_run_ctx(run_ctx->saved_run_ctx);
+
+       update_prog_stats(prog, start);
+       migrate_enable();
+       rcu_read_unlock_trace();
+}
+
+static u64 notrace __bpf_prog_enter(struct bpf_prog *prog,
+                                   struct bpf_tramp_run_ctx *run_ctx)
        __acquires(RCU)
 {
        rcu_read_lock();
@@ -986,8 +1010,8 @@ u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
        return bpf_prog_start_time();
 }
 
-void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
-                                       struct bpf_tramp_run_ctx *run_ctx)
+static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
+                                   struct bpf_tramp_run_ctx *run_ctx)
        __releases(RCU)
 {
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
@@ -1007,6 +1031,36 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
        percpu_ref_put(&tr->pcref);
 }
 
+bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
+{
+       bool sleepable = prog->aux->sleepable;
+
+       if (bpf_prog_check_recur(prog))
+               return sleepable ? __bpf_prog_enter_sleepable_recur :
+                       __bpf_prog_enter_recur;
+
+       if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
+           prog->expected_attach_type == BPF_LSM_CGROUP)
+               return __bpf_prog_enter_lsm_cgroup;
+
+       return sleepable ? __bpf_prog_enter_sleepable : __bpf_prog_enter;
+}
+
+bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog)
+{
+       bool sleepable = prog->aux->sleepable;
+
+       if (bpf_prog_check_recur(prog))
+               return sleepable ? __bpf_prog_exit_sleepable_recur :
+                       __bpf_prog_exit_recur;
+
+       if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
+           prog->expected_attach_type == BPF_LSM_CGROUP)
+               return __bpf_prog_exit_lsm_cgroup;
+
+       return sleepable ? __bpf_prog_exit_sleepable : __bpf_prog_exit;
+}
+
 int __weak
 arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
                            const struct btf_func_model *m, u32 flags,