From 838d0250468fb99df38d023ea2d7dbc12ea46726 Mon Sep 17 00:00:00 2001
From: Jose Abreu
Date: Tue, 30 Jul 2019 15:57:16 +0200
Subject: [PATCH] net: stmmac: Sync RX Buffer upon allocation

With recent changes that introduced support for Page Pool in stmmac,
Jon reported that NFS boot was no longer working on an ARM64 based
platform that had the IP behind an IOMMU.

As the Page Pool API does not guarantee DMA syncing, because of the use
of the DMA_ATTR_SKIP_CPU_SYNC flag, we have to explicitly sync the
whole buffer upon re-allocation, because we are always re-using the
same pages.

In fact, the ARM64 code invalidates the DMA area in two situations [1]:
	- sync_single_for_cpu(): Invalidates if direction != DMA_TO_DEVICE
	- sync_single_for_device(): Invalidates if direction == DMA_FROM_DEVICE

So, as we must invalidate both the current RX buffer and the newly
allocated buffer, we propose this fix.

[1] arch/arm64/mm/cache.S

Reported-by: Jon Hunter
Tested-by: Jon Hunter
Fixes: 232086e65d14 ("net: stmmac: Introducing support for Page Pool")
Signed-off-by: Jose Abreu
Tested-by: Ezequiel Garcia
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 98b1a5c6d5376..9a4a56ad35cd3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3271,9 +3271,11 @@ static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-	int dirty = stmmac_rx_dirty(priv, queue);
+	int len, dirty = stmmac_rx_dirty(priv, queue);
 	unsigned int entry = rx_q->dirty_rx;
 
+	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+
 	while (dirty-- > 0) {
 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
 		struct dma_desc *p;
@@ -3291,6 +3293,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 		}
 
 		buf->addr = page_pool_get_dma_addr(buf->page);
+
+		/* Sync whole allocation to device. This will invalidate old
+		 * data.
+		 */
+		dma_sync_single_for_device(priv->device, buf->addr, len,
+					   DMA_FROM_DEVICE);
+
 		stmmac_set_desc_addr(priv, p, buf->addr);
 		stmmac_refill_desc3(priv, rx_q, p);
 
@@ -3425,8 +3434,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			skb_copy_to_linear_data(skb, page_address(buf->page),
 						frame_len);
 			skb_put(skb, frame_len);
-			dma_sync_single_for_device(priv->device, buf->addr,
-						   frame_len, DMA_FROM_DEVICE);
 
 			if (netif_msg_pktdata(priv)) {
 				netdev_dbg(priv->dev, "frame received (%dbytes)",
-- 
2.39.5
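
As context for the fix above, here is a minimal, self-contained
userspace sketch of the sync-length computation the patch introduces.
PAGE_SIZE and the dma_buf_sz value below are stand-ins for the kernel
definitions; in the driver this length feeds dma_sync_single_for_device()
so the sync covers the whole page-pool allocation, not just the last
received frame.

#include <stdio.h>

/* Stand-ins for the kernel's PAGE_SIZE and DIV_ROUND_UP(). */
#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical MTU-sized buffer; the driver uses priv->dma_buf_sz. */
	unsigned long dma_buf_sz = 1536;

	/* Round up to whole pages, as stmmac_rx_refill() now does, so the
	 * device sync spans the entire re-used page rather than only the
	 * length of the last frame written into it.
	 */
	unsigned long len = DIV_ROUND_UP(dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;

	printf("sync length: %lu bytes\n", len);	/* prints 4096 */
	return 0;
}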