bpf: Consolidate shared test timing code
author Lorenz Bauer <lmb@cloudflare.com>
Wed, 3 Mar 2021 10:18:12 +0000 (10:18 +0000)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 5 Mar 2021 03:11:29 +0000 (19:11 -0800)
Share the timing / signal interruption logic between different
implementations of PROG_TEST_RUN. There is a change in behaviour
as well. We check the loop exit condition before checking for
pending signals. This resolves an edge case where a signal
arrives during the last iteration. Instead of aborting with
EINTR, we return the successful result to user space.

Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210303101816.36774-2-lmb@cloudflare.com
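
For context, here is a hedged user-space sketch of the PROG_TEST_RUN
interface this patch affects, using the raw bpf() syscall. The helper name
bpf_prog_test_run_raw and its error handling are illustrative, not part of
this patch; only the BPF_PROG_TEST_RUN command and the union bpf_attr test
fields come from the kernel UAPI:

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <sys/syscall.h>

/* Illustrative helper, not part of this patch. Runs prog_fd against
 * data[] 'repeat' times via BPF_PROG_TEST_RUN and reports the mean
 * per-run duration in nanoseconds (capped at U32_MAX in the kernel).
 */
static int bpf_prog_test_run_raw(int prog_fd, void *data, __u32 size,
				 __u32 repeat, __u32 *retval,
				 __u32 *duration)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = (__u64)(unsigned long)data;
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)))
		return -errno;	/* after this patch, -EINTR only mid-run */

	*retval = attr.test.retval;
	*duration = attr.test.duration;
	return 0;
}
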
net/bpf/test_run.c

index 58bcb8c849d5492ba5694ed0ac7b8f0246c089c6..eb3c78cd4d7c47a1690c400d1783b0d59b2d4e4f 100644
 #define CREATE_TRACE_POINTS
 #include <trace/events/bpf_test_run.h>
 
+struct bpf_test_timer {
+       enum { NO_PREEMPT, NO_MIGRATE } mode;
+       u32 i;
+       u64 time_start, time_spent;
+};
+
+static void bpf_test_timer_enter(struct bpf_test_timer *t)
+       __acquires(rcu)
+{
+       rcu_read_lock();
+       if (t->mode == NO_PREEMPT)
+               preempt_disable();
+       else
+               migrate_disable();
+
+       t->time_start = ktime_get_ns();
+}
+
+static void bpf_test_timer_leave(struct bpf_test_timer *t)
+       __releases(rcu)
+{
+       t->time_start = 0;
+
+       if (t->mode == NO_PREEMPT)
+               preempt_enable();
+       else
+               migrate_enable();
+       rcu_read_unlock();
+}
+
+static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
+       __must_hold(rcu)
+{
+       t->i++;
+       if (t->i >= repeat) {
+               /* We're done. */
+               t->time_spent += ktime_get_ns() - t->time_start;
+               do_div(t->time_spent, t->i);
+               *duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
+               *err = 0;
+               goto reset;
+       }
+
+       if (signal_pending(current)) {
+               /* During iteration: we've been cancelled, abort. */
+               *err = -EINTR;
+               goto reset;
+       }
+
+       if (need_resched()) {
+               /* During iteration: we need to reschedule between runs. */
+               t->time_spent += ktime_get_ns() - t->time_start;
+               bpf_test_timer_leave(t);
+               cond_resched();
+               bpf_test_timer_enter(t);
+       }
+
+       /* Do another round. */
+       return true;
+
+reset:
+       t->i = 0;
+       return false;
+}
+
 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
                        u32 *retval, u32 *time, bool xdp)
 {
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
+       struct bpf_test_timer t = { NO_MIGRATE };
        enum bpf_cgroup_storage_type stype;
-       u64 time_start, time_spent = 0;
-       int ret = 0;
-       u32 i;
+       int ret;
 
        for_each_cgroup_storage_type(stype) {
                storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
@@ -38,40 +102,16 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
        if (!repeat)
                repeat = 1;
 
-       rcu_read_lock();
-       migrate_disable();
-       time_start = ktime_get_ns();
-       for (i = 0; i < repeat; i++) {
+       bpf_test_timer_enter(&t);
+       do {
                bpf_cgroup_storage_set(storage);
 
                if (xdp)
                        *retval = bpf_prog_run_xdp(prog, ctx);
                else
                        *retval = BPF_PROG_RUN(prog, ctx);
-
-               if (signal_pending(current)) {
-                       ret = -EINTR;
-                       break;
-               }
-
-               if (need_resched()) {
-                       time_spent += ktime_get_ns() - time_start;
-                       migrate_enable();
-                       rcu_read_unlock();
-
-                       cond_resched();
-
-                       rcu_read_lock();
-                       migrate_disable();
-                       time_start = ktime_get_ns();
-               }
-       }
-       time_spent += ktime_get_ns() - time_start;
-       migrate_enable();
-       rcu_read_unlock();
-
-       do_div(time_spent, repeat);
-       *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+       } while (bpf_test_timer_continue(&t, repeat, &ret, time));
+       bpf_test_timer_leave(&t);
 
        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(storage[stype]);
@@ -674,18 +714,17 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                     const union bpf_attr *kattr,
                                     union bpf_attr __user *uattr)
 {
+       struct bpf_test_timer t = { NO_PREEMPT };
        u32 size = kattr->test.data_size_in;
        struct bpf_flow_dissector ctx = {};
        u32 repeat = kattr->test.repeat;
        struct bpf_flow_keys *user_ctx;
        struct bpf_flow_keys flow_keys;
-       u64 time_start, time_spent = 0;
        const struct ethhdr *eth;
        unsigned int flags = 0;
        u32 retval, duration;
        void *data;
        int ret;
-       u32 i;
 
        if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
                return -EINVAL;
@@ -721,39 +760,15 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
        ctx.data = data;
        ctx.data_end = (__u8 *)data + size;
 
-       rcu_read_lock();
-       preempt_disable();
-       time_start = ktime_get_ns();
-       for (i = 0; i < repeat; i++) {
+       bpf_test_timer_enter(&t);
+       do {
                retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
                                          size, flags);
+       } while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
+       bpf_test_timer_leave(&t);
 
-               if (signal_pending(current)) {
-                       preempt_enable();
-                       rcu_read_unlock();
-
-                       ret = -EINTR;
-                       goto out;
-               }
-
-               if (need_resched()) {
-                       time_spent += ktime_get_ns() - time_start;
-                       preempt_enable();
-                       rcu_read_unlock();
-
-                       cond_resched();
-
-                       rcu_read_lock();
-                       preempt_disable();
-                       time_start = ktime_get_ns();
-               }
-       }
-       time_spent += ktime_get_ns() - time_start;
-       preempt_enable();
-       rcu_read_unlock();
-
-       do_div(time_spent, repeat);
-       duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+       if (ret < 0)
+               goto out;
 
        ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
                              retval, duration);
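
With the helpers in place, a future PROG_TEST_RUN implementation can reuse
them wholesale. A minimal sketch under that assumption (example_test_run is
hypothetical; bpf_test_timer, NO_MIGRATE, and the enter/continue/leave
helpers are the ones added by this patch):

/* Hypothetical caller following the pattern this patch establishes. */
static int example_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			    u32 *retval, u32 *duration)
{
	struct bpf_test_timer t = { NO_MIGRATE };
	int ret;

	/* Callers normalize repeat == 0 to 1 before this point. */
	bpf_test_timer_enter(&t);
	do {
		*retval = BPF_PROG_RUN(prog, ctx);
	} while (bpf_test_timer_continue(&t, repeat, &ret, duration));
	bpf_test_timer_leave(&t);

	return ret;
}

Note the ordering inside bpf_test_timer_continue(): the loop exit condition
is checked before signal_pending(), so a signal that lands during the final
iteration no longer discards an otherwise complete result.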