*/
int ice_setup_rx_ctx(struct ice_ring *ring)
{
+ struct device *dev = ice_pf_to_dev(ring->vsi->back);
int chain_len = ICE_MAX_CHAINED_RX_BUFS;
+ u16 num_bufs = ICE_DESC_UNUSED(ring);
struct ice_vsi *vsi = ring->vsi;
u32 rxdid = ICE_RXDID_FLEX_NIC;
struct ice_rlan_ctx rlan_ctx;
if (err)
return err;
xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
- dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
if (err) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
pf_q, err);
return -EIO;
}
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
- err = ring->xsk_umem ?
- ice_alloc_rx_bufs_zc(ring, ICE_DESC_UNUSED(ring)) :
- ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
- if (err)
- dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
- ring->xsk_umem ? "UMEM enabled " : "",
- ring->q_index, pf_q);
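+ /* AF_XDP zero-copy path: check up front that the UMEM can supply
+ * num_bufs buffers before trying to fill the ring from it
+ */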
+ if (ring->xsk_umem) {
+ if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
+ dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+ num_bufs, ring->q_index);
+ dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
+
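+ /* not a hard error: the queue still comes up, just with an
+ * unfilled ring, so report success and let the user resize
+ */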
+ return 0;
+ }
+
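+ /* a partial fill is logged below but does not fail queue setup */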
+ err = ice_alloc_rx_bufs_zc(ring, num_bufs);
+ if (err)
+ dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+ ring->q_index, pf_q);
+ return 0;
+ }
+
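+ /* regular (skb) path: the return value is deliberately ignored;
+ * the ring is refilled from the Rx hot path as buffers are consumed
+ */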
+ ice_alloc_rx_bufs(ring, num_bufs);
return 0;
}