xdp: Add xdp_do_redirect_frame() for pre-computed xdp_frames
author     Toke Høiland-Jørgensen <toke@redhat.com>
           Mon, 3 Jan 2022 15:08:10 +0000 (16:08 +0100)
committer  Alexei Starovoitov <ast@kernel.org>
           Thu, 6 Jan 2022 03:46:32 +0000 (19:46 -0800)
Add an xdp_do_redirect_frame() variant which supports pre-computed
xdp_frame structures. This will be used in bpf_prog_run() to avoid having
to write to the xdp_frame structure when the XDP program doesn't modify the
frame boundaries.
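
For illustration only (not part of this patch), a caller that already holds a
pre-computed xdp_frame might use the new helper roughly as in the sketch below.
The function example_redirect_precomputed() and its parameters are hypothetical
names; only bpf_prog_run_xdp(), xdp_do_redirect_frame() and XDP_REDIRECT are
real kernel symbols:

    /* Hypothetical caller sketch: xdpf was prepared ahead of time (e.g. once
     * per batch), and the XDP program is known not to have moved the frame
     * boundaries, so the buff->frame conversion does not have to be redone.
     */
    static int example_redirect_precomputed(struct net_device *dev,
                                            struct bpf_prog *prog,
                                            struct xdp_buff *xdp,
                                            struct xdp_frame *xdpf)
    {
            u32 act = bpf_prog_run_xdp(prog, xdp);

            switch (act) {
            case XDP_REDIRECT:
                    /* Reuse the pre-computed frame instead of having
                     * xdp_do_redirect() call xdp_convert_buff_to_frame().
                     */
                    return xdp_do_redirect_frame(dev, xdp, xdpf, prog);
            default:
                    /* Other verdicts would be handled by the caller as usual. */
                    return -EOPNOTSUPP;
            }
    }

The sketch only shows the shape of the call; the real user is the bpf_prog_run()
test infrastructure mentioned above.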

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20220103150812.87914-6-toke@redhat.com
include/linux/filter.h
net/core/filter.c

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 60eec80fa1d460cb8bc5a6cd765e0d47892973e1..71fa57b88bfc0d42db2d812858e599c93185a785 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1019,6 +1019,10 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 int xdp_do_redirect(struct net_device *dev,
                    struct xdp_buff *xdp,
                    struct bpf_prog *prog);
+int xdp_do_redirect_frame(struct net_device *dev,
+                         struct xdp_buff *xdp,
+                         struct xdp_frame *xdpf,
+                         struct bpf_prog *prog);
 void xdp_do_flush(void);
 
 /* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
diff --git a/net/core/filter.c b/net/core/filter.c
index e2b83056246c9ad14c4da21a177f251e59d01151..4603b7cd3cd17e5f39d9d0e047529d07af98bc25 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3957,26 +3957,44 @@ u32 xdp_master_redirect(struct xdp_buff *xdp)
 }
 EXPORT_SYMBOL_GPL(xdp_master_redirect);
 
-int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
-                   struct bpf_prog *xdp_prog)
+static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri,
+                                       struct net_device *dev,
+                                       struct xdp_buff *xdp,
+                                       struct bpf_prog *xdp_prog)
 {
-       struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
        enum bpf_map_type map_type = ri->map_type;
        void *fwd = ri->tgt_value;
        u32 map_id = ri->map_id;
-       struct xdp_frame *xdpf;
-       struct bpf_map *map;
        int err;
 
        ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
        ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
-       if (map_type == BPF_MAP_TYPE_XSKMAP) {
-               err = __xsk_map_redirect(fwd, xdp);
-               goto out;
-       }
+       err = __xsk_map_redirect(fwd, xdp);
+       if (unlikely(err))
+               goto err;
+
+       _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
+       return 0;
+err:
+       _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
+       return err;
+}
+
+static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
+                                                  struct net_device *dev,
+                                                  struct xdp_frame *xdpf,
+                                                  struct bpf_prog *xdp_prog)
+{
+       enum bpf_map_type map_type = ri->map_type;
+       void *fwd = ri->tgt_value;
+       u32 map_id = ri->map_id;
+       struct bpf_map *map;
+       int err;
+
+       ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+       ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
-       xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf)) {
                err = -EOVERFLOW;
                goto err;
@@ -4013,7 +4031,6 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
                err = -EBADRQC;
        }
 
-out:
        if (unlikely(err))
                goto err;
 
@@ -4023,8 +4040,34 @@ err:
        _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
        return err;
 }
+
+int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+                   struct bpf_prog *xdp_prog)
+{
+       struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+       enum bpf_map_type map_type = ri->map_type;
+
+       if (map_type == BPF_MAP_TYPE_XSKMAP)
+               return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
+
+       return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp),
+                                      xdp_prog);
+}
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
 
+int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
+                         struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
+{
+       struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+       enum bpf_map_type map_type = ri->map_type;
+
+       if (map_type == BPF_MAP_TYPE_XSKMAP)
+               return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
+
+       return __xdp_do_redirect_frame(ri, dev, xdpf, xdp_prog);
+}
+EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
+
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct xdp_buff *xdp,