amd-xgbe: Add support for new DMA interrupt mode
author     Lendacky, Thomas <Thomas.Lendacky@amd.com>
           Thu, 10 Nov 2016 23:10:17 +0000 (17:10 -0600)
committer  David S. Miller <davem@davemloft.net>
           Sun, 13 Nov 2016 05:56:26 +0000 (00:56 -0500)
The current per channel DMA interrupt support is based on an edge
triggered interrupt that is not maskable. As a result, the driver has to
call disable_irq/enable_irq to hold off interrupts during napi
processing. The hardware now provides a per channel DMA interrupt mode
in which the interrupt can be masked, eliminating the need for the
disable_irq/enable_irq calls. This patch makes use of that support.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
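
A minimal, standalone C sketch (not driver code) contrasting the old edge
triggered handling with the new maskable mode in the per channel DMA ISR.
The stub_* helpers, struct channel layout and IRQ numbers are hypothetical
stand-ins; only the register names (DMA_CH_SR, DMA_CH_IER) and the
edge/level distinction mirror the driver.

/* Build with: cc -Wall sketch.c -o sketch */
#include <stdbool.h>
#include <stdio.h>

#define IRQ_MODE_EDGE   0       /* platform DT/ACPI: line cannot be masked */
#define IRQ_MODE_LEVEL  1       /* PCI MSI/MSI-X: maskable via DMA_CH_IER  */

struct channel {
        int  dma_irq;
        int  irq_mode;
        bool tx_ring;
        bool rx_ring;
};

/* Stand-in for disable_irq_nosync(): the whole IRQ line is turned off */
static void stub_disable_irq_line(int irq)
{
        printf("edge mode: disable_irq_nosync(%d)\n", irq);
}

/* Stand-in for hw_if->disable_int(): clear TIE/RIE in DMA_CH_IER */
static void stub_mask_ti_ri(const struct channel *ch)
{
        printf("level mode: mask TIE/RIE in DMA_CH_IER (tx=%d rx=%d)\n",
               ch->tx_ring, ch->rx_ring);
}

/* Stand-in for writing TI/RI back to DMA_CH_SR to acknowledge them */
static void stub_ack_ti_ri(void)
{
        printf("ack TI/RI in DMA_CH_SR\n");
}

/* Rough shape of the per channel DMA ISR after this patch */
static void dma_isr(const struct channel *ch)
{
        if (ch->irq_mode == IRQ_MODE_LEVEL)
                stub_mask_ti_ri(ch);                 /* mask at the device */
        else
                stub_disable_irq_line(ch->dma_irq);  /* or kill the line   */

        /* __napi_schedule_irqoff() would run here */

        stub_ack_ti_ri();       /* ack so a level interrupt deasserts */
}

int main(void)
{
        const struct channel pci  = { 40, IRQ_MODE_LEVEL, true, true };
        const struct channel plat = { 41, IRQ_MODE_EDGE,  true, true };

        dma_isr(&pci);
        dma_isr(&plat);
        return 0;
}

In either mode NAPI polling then runs; xgbe_one_poll() re-enables the
interrupt source the same way it was disabled (unmask TIE/RIE vs. enable_irq).
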
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-pci.c
drivers/net/ethernet/amd/xgbe/xgbe-platform.c
drivers/net/ethernet/amd/xgbe/xgbe.h

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index b54862bb7fb41d7de00d07646b39505e7490056a..516345cfe8de165f60fad22ce89513744ffb52ef 100644
 #define DMA_ISR_MACIS_WIDTH            1
 #define DMA_ISR_MTLIS_INDEX            16
 #define DMA_ISR_MTLIS_WIDTH            1
+#define DMA_MR_INTM_INDEX              12
+#define DMA_MR_INTM_WIDTH              2
 #define DMA_MR_SWR_INDEX               0
 #define DMA_MR_SWR_WIDTH               1
 #define DMA_SBMR_EAME_INDEX            11
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 81d47807c3ccd7b2fad9d11aad595c3c4758d1ec..ff7f5ab4d5fb6193022b54fcf4e4a5625912c7fa 100644
@@ -646,6 +646,11 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
        unsigned int dma_ch_isr, dma_ch_ier;
        unsigned int i;
 
+       /* Set the interrupt mode if supported */
+       if (pdata->channel_irq_mode)
+               XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
+                                  pdata->channel_irq_mode);
+
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                /* Clear all the interrupts which are set */
@@ -667,19 +672,21 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
                if (channel->tx_ring) {
                        /* Enable the following Tx interrupts
                         *   TIE  - Transmit Interrupt Enable (unless using
-                        *          per channel interrupts)
+                        *          per channel interrupts in edge triggered
+                        *          mode)
                         */
-                       if (!pdata->per_channel_irq)
+                       if (!pdata->per_channel_irq || pdata->channel_irq_mode)
                                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
                }
                if (channel->rx_ring) {
                        /* Enable following Rx interrupts
                         *   RBUE - Receive Buffer Unavailable Enable
                         *   RIE  - Receive Interrupt Enable (unless using
-                        *          per channel interrupts)
+                        *          per channel interrupts in edge triggered
+                        *          mode)
                         */
                        XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
-                       if (!pdata->per_channel_irq)
+                       if (!pdata->per_channel_irq || pdata->channel_irq_mode)
                                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                }
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index efa01a5a81b66647b92cbe68a79bb6f38a795498..490fdb5cb63a8e6dafaa5460712b93de83cca969 100644
@@ -252,48 +252,60 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
        return rx_buf_size;
 }
 
-static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
+                                 struct xgbe_channel *channel)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       struct xgbe_channel *channel;
        enum xgbe_int int_id;
+
+       if (channel->tx_ring && channel->rx_ring)
+               int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+       else if (channel->tx_ring)
+               int_id = XGMAC_INT_DMA_CH_SR_TI;
+       else if (channel->rx_ring)
+               int_id = XGMAC_INT_DMA_CH_SR_RI;
+       else
+               return;
+
+       hw_if->enable_int(channel, int_id);
+}
+
+static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+       struct xgbe_channel *channel;
        unsigned int i;
 
        channel = pdata->channel;
-       for (i = 0; i < pdata->channel_count; i++, channel++) {
-               if (channel->tx_ring && channel->rx_ring)
-                       int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
-               else if (channel->tx_ring)
-                       int_id = XGMAC_INT_DMA_CH_SR_TI;
-               else if (channel->rx_ring)
-                       int_id = XGMAC_INT_DMA_CH_SR_RI;
-               else
-                       continue;
+       for (i = 0; i < pdata->channel_count; i++, channel++)
+               xgbe_enable_rx_tx_int(pdata, channel);
+}
 
-               hw_if->enable_int(channel, int_id);
-       }
+static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
+                                  struct xgbe_channel *channel)
+{
+       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       enum xgbe_int int_id;
+
+       if (channel->tx_ring && channel->rx_ring)
+               int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+       else if (channel->tx_ring)
+               int_id = XGMAC_INT_DMA_CH_SR_TI;
+       else if (channel->rx_ring)
+               int_id = XGMAC_INT_DMA_CH_SR_RI;
+       else
+               return;
+
+       hw_if->disable_int(channel, int_id);
 }
 
 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
-       enum xgbe_int int_id;
        unsigned int i;
 
        channel = pdata->channel;
-       for (i = 0; i < pdata->channel_count; i++, channel++) {
-               if (channel->tx_ring && channel->rx_ring)
-                       int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
-               else if (channel->tx_ring)
-                       int_id = XGMAC_INT_DMA_CH_SR_TI;
-               else if (channel->rx_ring)
-                       int_id = XGMAC_INT_DMA_CH_SR_RI;
-               else
-                       continue;
-
-               hw_if->disable_int(channel, int_id);
-       }
+       for (i = 0; i < pdata->channel_count; i++, channel++)
+               xgbe_disable_rx_tx_int(pdata, channel);
 }
 
 static irqreturn_t xgbe_isr(int irq, void *data)
@@ -339,6 +351,13 @@ static irqreturn_t xgbe_isr(int irq, void *data)
                                /* Turn on polling */
                                __napi_schedule_irqoff(&pdata->napi);
                        }
+               } else {
+                       /* Don't clear Rx/Tx status if doing per channel DMA
+                        * interrupts, these will be cleared by the ISR for
+                        * per channel DMA interrupts.
+                        */
+                       XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
+                       XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
                }
 
                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
@@ -348,7 +367,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
                        schedule_work(&pdata->restart_work);
 
-               /* Clear all interrupt signals */
+               /* Clear interrupt signals */
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
        }
 
@@ -385,18 +404,29 @@ isr_done:
 static irqreturn_t xgbe_dma_isr(int irq, void *data)
 {
        struct xgbe_channel *channel = data;
+       struct xgbe_prv_data *pdata = channel->pdata;
+       unsigned int dma_status;
 
        /* Per channel DMA interrupts are enabled, so we use the per
         * channel napi structure and not the private data napi structure
         */
        if (napi_schedule_prep(&channel->napi)) {
                /* Disable Tx and Rx interrupts */
-               disable_irq_nosync(channel->dma_irq);
+               if (pdata->channel_irq_mode)
+                       xgbe_disable_rx_tx_int(pdata, channel);
+               else
+                       disable_irq_nosync(channel->dma_irq);
 
                /* Turn on polling */
                __napi_schedule_irqoff(&channel->napi);
        }
 
+       /* Clear Tx/Rx signals */
+       dma_status = 0;
+       XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
+       XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
+       XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);
+
        return IRQ_HANDLED;
 }
 
@@ -413,7 +443,10 @@ static void xgbe_tx_timer(unsigned long data)
        if (napi_schedule_prep(napi)) {
                /* Disable Tx and Rx interrupts */
                if (pdata->per_channel_irq)
-                       disable_irq_nosync(channel->dma_irq);
+                       if (pdata->channel_irq_mode)
+                               xgbe_disable_rx_tx_int(pdata, channel);
+                       else
+                               disable_irq_nosync(channel->dma_irq);
                else
                        xgbe_disable_rx_tx_ints(pdata);
 
@@ -2030,6 +2063,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
 {
        struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
                                                    napi);
+       struct xgbe_prv_data *pdata = channel->pdata;
        int processed = 0;
 
        DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
@@ -2046,7 +2080,10 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
                napi_complete_done(napi, processed);
 
                /* Enable Tx and Rx interrupts */
-               enable_irq(channel->dma_irq);
+               if (pdata->channel_irq_mode)
+                       xgbe_enable_rx_tx_int(pdata, channel);
+               else
+                       enable_irq(channel->dma_irq);
        }
 
        DBGPR("<--xgbe_one_poll: received = %d\n", processed);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index fe77945bc49e52b9775e6754cd763125fa45a610..61eb6613b732be87f02b4475cd68eb168da51492 100644
@@ -163,6 +163,7 @@ static int xgbe_config_msi(struct xgbe_prv_data *pdata)
                pdata->channel_irq_count = j;
 
                pdata->per_channel_irq = 1;
+               pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
        } else {
                pdata->ecc_irq = pdata->pcidev->irq;
                pdata->i2c_irq = pdata->pcidev->irq;
@@ -215,6 +216,7 @@ static int xgbe_config_msix(struct xgbe_prv_data *pdata)
        pdata->channel_irq_count = j;
 
        pdata->per_channel_irq = 1;
+       pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
 
        if (netif_msg_probe(pdata))
                dev_dbg(pdata->dev, "MSI-X interrupts enabled\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 8d438c3f01e8d93cfdacb5832cc769366b764366..8c530dccb4470a31a58cab1a82fe17973a6d9df7 100644
@@ -426,8 +426,10 @@ static int xgbe_platform_probe(struct platform_device *pdev)
        pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
 
        /* Check for per channel interrupt support */
-       if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
+       if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) {
                pdata->per_channel_irq = 1;
+               pdata->channel_irq_mode = XGBE_IRQ_MODE_EDGE;
+       }
 
        /* Obtain device settings unique to ACPI/OF */
        if (pdata->use_acpi)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index e22156e6088b746c946e52d00c3c8fae3e96ea76..381144b8394738d8e5c38bb396e6ef42e89a61f6 100644
 #define XGBE_DMA_SYS_ARCACHE   0x0
 #define XGBE_DMA_SYS_AWCACHE   0x0
 
+/* DMA channel interrupt modes */
+#define XGBE_IRQ_MODE_EDGE     0
+#define XGBE_IRQ_MODE_LEVEL    1
+
 #define XGBE_DMA_INTERRUPT_MASK        0x31c7
 
 #define XGMAC_MIN_PACKET       60
@@ -874,6 +878,7 @@ struct xgbe_prv_data {
        unsigned int irq_shared;
        unsigned int irq_count;
        unsigned int channel_irq_count;
+       unsigned int channel_irq_mode;
 
        struct xgbe_hw_if hw_if;
        struct xgbe_phy_if phy_if;