}

static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
- size_t size)
+ size_t size, gfp_t gfp_mask)
{
- req->skb = __dev_alloc_skb(size, GFP_KERNEL);
+ req->skb = __dev_alloc_skb(size, gfp_mask);
if (!req->skb)
return -ENOMEM;
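
Threading a gfp_t through the allocator lets each call site match its locking context instead of hard-coding GFP_KERNEL. Below is a minimal sketch of the full updated helper, assuming (as is common for Rx buffer setup) that the skb's data area is DMA-mapped right after allocation; the mapped_buff field, md_ctrl->dev, and the exact error handling are assumptions, not taken verbatim from the source:

static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
					size_t size, gfp_t gfp_mask)
{
	/* gfp_mask is chosen by the caller: GFP_KERNEL where sleeping is
	 * allowed, GFP_ATOMIC under spinlocks or in IRQ context.
	 */
	req->skb = __dev_alloc_skb(size, gfp_mask);
	if (!req->skb)
		return -ENOMEM;

	/* Assumed mapping step: hand the buffer to the device for Rx and
	 * undo the allocation if the DMA API refuses the mapping.
	 */
	req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
		req->mapped_buff = 0;
		return -ENOMEM;
	}

	return 0;
}
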
spin_unlock_irqrestore(&queue->ring_lock, flags);
req = queue->rx_refill;
- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
if (ret)
return ret;
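
This call site can pass GFP_KERNEL precisely because it gives up queue->ring_lock first: once the irq-safe spinlock is dropped, the allocation is free to sleep, and the lock is retaken before the ring is touched again. A condensed sketch of that unlock-allocate-relock pattern (the republish step is paraphrased; only the names visible in the hunk come from the source):

/* Refill from process context: never allocate with GFP_KERNEL while
 * holding an irq-safe spinlock such as ring_lock.
 */
spin_lock_irqsave(&queue->ring_lock, flags);
req = queue->rx_refill;			/* slot that needs a new buffer */
spin_unlock_irqrestore(&queue->ring_lock, flags);

/* Safe to sleep here: lock dropped, interrupts restored. */
ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
if (ret)
	return ret;

spin_lock_irqsave(&queue->ring_lock, flags);
/* ... point the GPD at the new buffer and advance rx_refill ... */
spin_unlock_irqrestore(&queue->ring_lock, flags);
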
if (!req->gpd)
goto err_free_req;
- val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
+ val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
if (val)
goto err_free_pool;
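
This hunk sits in the ring setup path, which runs in process context, so GFP_KERNEL is the natural choice. A sketch of the loop shape around it, assuming each iteration allocates a request plus its GPD before attaching an skb (the t7xx_alloc_rx_request() helper and the ring fields are illustrative, not from the source):

/* Init-time ring population: process context, sleeping allowed. */
for (i = 0; i < ring->length; i++) {
	req = t7xx_alloc_rx_request(md_ctrl);	/* hypothetical helper */
	if (!req)
		goto err_free_pool;
	if (!req->gpd)
		goto err_free_req;

	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
	if (val)
		goto err_free_pool;

	list_add_tail(&req->entry, &ring->gpd_ring);
}
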
if (req->skb)
continue;
- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
if (ret)
break;
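
Here the refill runs in atomic context, so the allocation must not sleep and GFP_ATOMIC is required; it draws on reserves and fails more readily, which is why the loop breaks and leaves the remaining slots empty instead of treating failure as fatal. A sketch of that context, assuming the loop walks the GPD ring while holding the queue's ring_lock (the gpd_ring list name is an assumption):

spin_lock_irqsave(&rxq->ring_lock, flags);
list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
	if (req->skb)
		continue;	/* slot already has a buffer */

	/* No sleeping under the spinlock: GFP_ATOMIC only. */
	ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
	if (ret)
		break;		/* leave remaining slots empty; retry later */
}
spin_unlock_irqrestore(&rxq->ring_lock, flags);
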