/* RX data path */
+int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
+{
+	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+	int i;
+
+	for (i = 0; i < wqe_bulk; i++) {
+		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+		struct mlx5e_wqe_frag_info *frag;
+		struct mlx5e_rx_wqe_cyc *wqe;
+		dma_addr_t addr;
+
+		wqe = mlx5_wq_cyc_get_wqe(wq, j);
+		/* Assumes log_num_frags == 0. */
+		frag = &rq->wqe.frags[j];
+
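+		/* Take one frame from the XSK buffer pool; if none is
+		 * available, return how many WQEs were filled so the caller
+		 * can post a partial batch.
+		 */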
+		frag->au->xsk = xsk_buff_alloc(rq->xsk_pool);
+		if (unlikely(!frag->au->xsk))
+			return i;
+
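+		/* xsk_buff_xdp_get_frame_dma() gives the DMA address of the
+		 * UMEM frame; the headroom offset points the HW at the start
+		 * of the packet data.
+		 */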
+		addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+		wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
+	}
+
+	return wqe_bulk;
+}
+
static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
					       u32 cqe_bcnt)
{
/* RX data path */
+int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
						     struct mlx5e_mpw_info *wi,
						     u16 cqe_bcnt,
		 * offset) should just use the new one without replenishing again
		 * by themselves.
		 */
-		err = mlx5e_page_alloc(rq, frag->au);
+		err = mlx5e_page_alloc_pool(rq, frag->au);
	return err;
}
			goto free_frags;
		headroom = i == 0 ? rq->buff.headroom : 0;
-		addr = rq->xsk_pool ? xsk_buff_xdp_get_frame_dma(frag->au->xsk) :
-				      page_pool_get_dma_addr(frag->au->page);
+		addr = page_pool_get_dma_addr(frag->au->page);
		wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
	}
	 */
	wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
-	count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
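+	/* XSK RQs take their buffers from the XSK pool, regular RQs from the
+	 * page pool.
+	 */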
+	if (!rq->xsk_pool)
+		count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
+	else
+		count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
+
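+	/* Push only the WQEs that actually received a buffer; a shortfall is
+	 * accounted in buff_alloc_err below.
+	 */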
	mlx5_wq_cyc_push_n(wq, count);
	if (unlikely(count != wqe_bulk)) {
		rq->stats->buff_alloc_err++;