net/mlx5e: xsk: Support XDP metadata on XSK RQs
author Maxim Mikityanskiy <maximmi@nvidia.com>
Fri, 30 Sep 2022 16:29:01 +0000 (09:29 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Sat, 1 Oct 2022 20:30:21 +0000 (13:30 -0700)
Add support for XDP metadata on XSK RQs for cross-program
communication. The driver no longer calls xdp_set_data_meta_invalid;
instead, on XDP_PASS it copies the metadata to the newly allocated SKB.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
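
A minimal sketch of the producer side this change enables (not part of this
commit; program and section names are illustrative): an XDP program attached
to an XSK queue reserves a metadata area with bpf_xdp_adjust_meta() and
writes into it. With this patch, mlx5e preserves that area when it builds an
SKB on XDP_PASS instead of marking it invalid.

    /* Illustrative XDP program: prepend 4 bytes of metadata. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_mark_pkt(struct xdp_md *ctx)
    {
            __u32 *meta;

            /* Grow the metadata area by 4 bytes in front of the packet. */
            if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
                    return XDP_PASS;

            meta = (void *)(long)ctx->data_meta;
            if ((void *)(meta + 1) > (void *)(long)ctx->data)
                    return XDP_PASS;

            *meta = 0xcafe; /* value for a later program to consume */
            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
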
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c

index 661d2d5748f4b816cd850095acd0bcee3982e197..aebc1d5a900455e6d91097dcf507f871ca407123 100644 (file)
@@ -158,18 +158,24 @@ int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
        return wqe_bulk;
 }
 
-static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
-                                              u32 cqe_bcnt)
+static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_buff *xdp)
 {
+       u32 totallen = xdp->data_end - xdp->data_meta;
+       u32 metalen = xdp->data - xdp->data_meta;
        struct sk_buff *skb;
 
-       skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
+       skb = napi_alloc_skb(rq->cq.napi, totallen);
        if (unlikely(!skb)) {
                rq->stats->buff_alloc_err++;
                return NULL;
        }
 
-       skb_put_data(skb, data, cqe_bcnt);
+       skb_put_data(skb, xdp->data_meta, totallen);
+
+       if (metalen) {
+               skb_metadata_set(skb, metalen);
+               __skb_pull(skb, metalen);
+       }
 
        return skb;
 }
@@ -197,7 +203,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
        WARN_ON_ONCE(head_offset);
 
        xsk_buff_set_size(xdp, cqe_bcnt);
-       xdp_set_data_meta_invalid(xdp);
        xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
        net_prefetch(xdp->data);
 
@@ -226,7 +231,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
        /* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
         * frame. On SKB allocation failure, NULL is returned.
         */
-       return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
+       return mlx5e_xsk_construct_skb(rq, xdp);
 }
 
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
@@ -244,7 +249,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
        WARN_ON_ONCE(wi->offset);
 
        xsk_buff_set_size(xdp, cqe_bcnt);
-       xdp_set_data_meta_invalid(xdp);
        xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
        net_prefetch(xdp->data);
 
@@ -256,5 +260,5 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
         * will be handled by mlx5e_free_rx_wqe.
         * On SKB allocation failure, NULL is returned.
         */
-       return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
+       return mlx5e_xsk_construct_skb(rq, xdp);
 }
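
For context, a hedged sketch of a consumer (also not part of this commit;
names are illustrative): because mlx5e_xsk_construct_skb() now calls
skb_metadata_set(), a TC ingress BPF program can read the metadata that the
XDP program wrote, via the SKB's data_meta area.

    /* Illustrative TC ingress program reading the preserved metadata. */
    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int tc_read_meta(struct __sk_buff *skb)
    {
            __u32 *meta = (void *)(long)skb->data_meta;
            void *data = (void *)(long)skb->data;

            if ((void *)(meta + 1) > data)
                    return TC_ACT_OK; /* no metadata present */

            bpf_printk("xdp meta: 0x%x", *meta);
            return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";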