bpf: Prevent bpf program recursion for raw tracepoint probes
author    Jiri Olsa <jolsa@kernel.org>
          Fri, 16 Sep 2022 07:19:14 +0000 (09:19 +0200)
committer Alexei Starovoitov <ast@kernel.org>
          Thu, 22 Sep 2022 01:05:44 +0000 (18:05 -0700)
We got a report from syzbot [1] about warnings caused by a bpf program
attached to the contention_begin raw tracepoint triggering the same
tracepoint by using the bpf_trace_printk helper, which takes the
trace_printk_lock lock.

 Call Trace:
  <TASK>
  ? trace_event_raw_event_bpf_trace_printk+0x5f/0x90
  bpf_trace_printk+0x2b/0xe0
  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
  bpf_trace_run2+0x26/0x90
  native_queued_spin_lock_slowpath+0x1c6/0x2b0
  _raw_spin_lock_irqsave+0x44/0x50
  bpf_trace_printk+0x3f/0xe0
  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
  bpf_trace_run2+0x26/0x90
  native_queued_spin_lock_slowpath+0x1c6/0x2b0
  _raw_spin_lock_irqsave+0x44/0x50
  bpf_trace_printk+0x3f/0xe0
  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
  bpf_trace_run2+0x26/0x90
  native_queued_spin_lock_slowpath+0x1c6/0x2b0
  _raw_spin_lock_irqsave+0x44/0x50
  bpf_trace_printk+0x3f/0xe0
  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
  bpf_trace_run2+0x26/0x90
  native_queued_spin_lock_slowpath+0x1c6/0x2b0
  _raw_spin_lock_irqsave+0x44/0x50
  __unfreeze_partials+0x5b/0x160
  ...

This can be reproduced by attaching a bpf program as a raw tracepoint
on the contention_begin tracepoint. The bpf prog calls the
bpf_trace_printk helper. Then running perf bench forces the spin lock
code to take the slow path and call the contention_begin tracepoint.
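
For illustration, a minimal sketch of such a reproducer program is shown
below. The file name, section spelling and printed string are illustrative
assumptions rather than details from the syzbot report; it assumes a
libbpf-style build (clang -target bpf):

  // repro.bpf.c (hypothetical name): attach to the contention_begin raw
  // tracepoint and call bpf_trace_printk from it.
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("raw_tracepoint/contention_begin")
  int repro_prog(struct bpf_raw_tracepoint_args *ctx)
  {
          /* bpf_printk() expands to bpf_trace_printk(), which takes
           * trace_printk_lock; contending on that lock enters the spin
           * lock slow path, fires contention_begin again and re-enters
           * this program. */
          bpf_printk("contention_begin hit");
          return 0;
  }

  char LICENSE[] SEC("license") = "GPL";

Once loaded and attached (for example with bpftool or a small libbpf
loader), a lock-heavy workload such as perf bench pushes spin locks into
the slow path and triggers the recursion.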

Fix this by skipping execution of the bpf program if it is already
running, using the bpf prog 'active' field, which trampoline programs
currently use for the same reason.
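
For readability, the guard pattern the patch applies is sketched below
with extra comments; the actual change is the kernel/trace/bpf_trace.c
hunk at the end of this diff:

  /* prog->active is a per-CPU counter, so the check only blocks
   * nested invocations on the same CPU. */
  if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
          /* nested invocation: skip the program and record a miss */
          bpf_prog_inc_misses_counter(prog);
          goto out;
  }
  rcu_read_lock();
  (void) bpf_prog_run(prog, args);
  rcu_read_unlock();
  out:
  /* decrement on both paths so the counter stays balanced */
  this_cpu_dec(*(prog->active));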

Move bpf_prog_inc_misses_counter to syscall.c because trampoline.c is
compiled only when the CONFIG_BPF_JIT option is enabled.

Reviewed-by: Stanislav Fomichev <sdf@google.com>
Reported-by: syzbot+2251879aa068ad9c960d@syzkaller.appspotmail.com
[1] https://lore.kernel.org/bpf/YxhFe3EwqchC%2FfYf@krava/T/#t
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/r/20220916071914.7156-1-jolsa@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf.h
kernel/bpf/syscall.c
kernel/bpf/trampoline.c
kernel/trace/bpf_trace.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a1435b019acaefc7cd8afdd684508caa1091553e..edd43edb27d6c1695e6a0d08534128fa1608ec15 100644
@@ -2042,6 +2042,8 @@ static inline bool has_current_bpf_ctx(void)
 {
        return !!current->bpf_ctx;
 }
+
+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
@@ -2264,6 +2266,10 @@ static inline bool has_current_bpf_ctx(void)
 {
        return false;
 }
+
+static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
+{
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index dab156f09f8de7d642087dcf0a1ee090400af3a5..372fad5ef3d3548a6a4b2b46d4f5042b666bbb66 100644
@@ -2093,6 +2093,17 @@ struct bpf_prog_kstats {
        u64 misses;
 };
 
+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
+{
+       struct bpf_prog_stats *stats;
+       unsigned int flags;
+
+       stats = this_cpu_ptr(prog->stats);
+       flags = u64_stats_update_begin_irqsave(&stats->syncp);
+       u64_stats_inc(&stats->misses);
+       u64_stats_update_end_irqrestore(&stats->syncp, flags);
+}
+
 static void bpf_prog_get_stats(const struct bpf_prog *prog,
                               struct bpf_prog_kstats *stats)
 {
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index ad76940b02ccf81e970442137e64b655b2ecb04f..41b67eb83ab3f3a78b842d54a2dfa27861e0b964 100644
@@ -863,17 +863,6 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
        return start;
 }
 
-static void notrace inc_misses_counter(struct bpf_prog *prog)
-{
-       struct bpf_prog_stats *stats;
-       unsigned int flags;
-
-       stats = this_cpu_ptr(prog->stats);
-       flags = u64_stats_update_begin_irqsave(&stats->syncp);
-       u64_stats_inc(&stats->misses);
-       u64_stats_update_end_irqrestore(&stats->syncp, flags);
-}
-
 /* The logic is similar to bpf_prog_run(), but with an explicit
  * rcu_read_lock() and migrate_disable() which are required
  * for the trampoline. The macro is split into
@@ -896,7 +885,7 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *ru
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
        if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
-               inc_misses_counter(prog);
+               bpf_prog_inc_misses_counter(prog);
                return 0;
        }
        return bpf_prog_start_time();
@@ -967,7 +956,7 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_r
        might_fault();
 
        if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
-               inc_misses_counter(prog);
+               bpf_prog_inc_misses_counter(prog);
                return 0;
        }
 
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9df53c40cffde6f13ed609399545c09d1a0a314b..b05f0310dbd3eb01b18b7562b4711792786f4bf5 100644
@@ -2222,9 +2222,15 @@ static __always_inline
 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
 {
        cant_sleep();
+       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+               bpf_prog_inc_misses_counter(prog);
+               goto out;
+       }
        rcu_read_lock();
        (void) bpf_prog_run(prog, args);
        rcu_read_unlock();
+out:
+       this_cpu_dec(*(prog->active));
 }
 
 #define UNPACK(...)                    __VA_ARGS__