[PATCH 5.16 139/186] net: stmmac: only enable DMA interrupts when ready

From: Greg Kroah-Hartman
Date: Mon Mar 07 2022 - 05:33:10 EST


From: Vincent Whitchurch <vincent.whitchurch@xxxxxxxx>

[ Upstream commit 087a7b944c5db409f7c1a68bf4896c56ba54eaff ]

This driver's ->ndo_open() callback enables DMA interrupts, starts the
DMA channels, then requests interrupts with request_irq(), and finally
enables napi.

If RX DMA interrupts are received before napi is enabled, no processing
is done because napi_schedule_prep() will return false. If the network
has a lot of broadcast/multicast traffic, then the RX ring could fill up
completely before napi is enabled. When this happens, no further RX
interrupts will be delivered, and the driver will fail to receive any
packets.

Fix this by only enabling DMA interrupts after all other initialization
is complete.

Fixes: 523f11b5d4fd72efb ("net: stmmac: move hardware setup for stmmac_open to new function")
Reported-by: Lars Persson <larper@xxxxxxxx>
Signed-off-by: Vincent Whitchurch <vincent.whitchurch@xxxxxxxx>
Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
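(Not part of the change itself, but for anyone reviewing the backport, the
failure mode is easy to see in isolation. The snippet below is a deliberately
simplified userspace sketch, not driver code: napi_schedule_prep() is reduced
to a stub, and rx_dma_interrupt(), open_broken_order() and open_fixed_order()
are invented names that only model the stmmac ISR and the two possible init
orderings. It illustrates why an RX DMA interrupt taken before napi is enabled
is silently dropped, and why unmasking the DMA interrupts last avoids the
permanently-full RX ring.)

/* Simplified model of the race; none of this is real stmmac code. */
#include <stdbool.h>
#include <stdio.h>

static bool napi_enabled;
static bool dma_irq_enabled;
static bool rx_ring_full;

/* Stand-in for napi_schedule_prep(): refuses to schedule until napi is enabled. */
static bool napi_schedule_prep(void)
{
	return napi_enabled;
}

/* Stand-in for the RX DMA interrupt handler. */
static void rx_dma_interrupt(void)
{
	if (!dma_irq_enabled)
		return;
	if (!napi_schedule_prep()) {
		/* Interrupt acked but no poll runs; enough traffic fills the ring. */
		rx_ring_full = true;
		return;
	}
	printf("napi scheduled, RX ring will be drained\n");
}

static void open_broken_order(void)
{
	dma_irq_enabled = true;		/* IRQs unmasked before napi is ready */
	rx_dma_interrupt();		/* broadcast/multicast burst arrives here */
	napi_enabled = true;		/* too late: the ring may already be full */
	printf("broken order: ring full = %d\n", rx_ring_full);
}

static void open_fixed_order(void)
{
	napi_enabled = true;		/* everything else is set up first */
	dma_irq_enabled = true;		/* ...then the DMA interrupts are unmasked */
	rx_dma_interrupt();
	printf("fixed order:  ring full = %d\n", rx_ring_full);
}

int main(void)
{
	open_broken_order();

	napi_enabled = dma_irq_enabled = rx_ring_full = false;

	open_fixed_order();
	return 0;
}

Run as an ordinary C program, the broken ordering ends with the ring marked
full and no napi run, while the fixed ordering handles the same interrupt
normally.
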
.../net/ethernet/stmicro/stmmac/stmmac_main.c | 28 +++++++++++++++++--
1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2b3752bd1ac9..8610e4d28e85 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2272,6 +2272,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
 	stmmac_stop_tx(priv, priv->ioaddr, chan);
 }
 
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+	u32 chan;
+
+	for (chan = 0; chan < dma_csr_ch; chan++) {
+		struct stmmac_channel *ch = &priv->channel[chan];
+		unsigned long flags;
+
+		spin_lock_irqsave(&ch->lock, flags);
+		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+}
+
 /**
  * stmmac_start_all_dma - start all RX and TX DMA channels
  * @priv: driver private structure
@@ -2911,8 +2928,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
 
 	/* DMA CSR Channel configuration */
-	for (chan = 0; chan < dma_csr_ch; chan++)
+	for (chan = 0; chan < dma_csr_ch; chan++) {
 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+	}
 
 	/* DMA RX Channel Configuration */
 	for (chan = 0; chan < rx_channels_count; chan++) {
@@ -3768,6 +3787,7 @@ static int stmmac_open(struct net_device *dev)
 
 	stmmac_enable_all_queues(priv);
 	netif_tx_start_all_queues(priv->dev);
+	stmmac_enable_all_dma_irq(priv);
 
 	return 0;
 
@@ -6531,8 +6551,10 @@ int stmmac_xdp_open(struct net_device *dev)
 	}
 
 	/* DMA CSR Channel configuration */
-	for (chan = 0; chan < dma_csr_ch; chan++)
+	for (chan = 0; chan < dma_csr_ch; chan++) {
 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+	}
 
 	/* Adjust Split header */
 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
@@ -6592,6 +6614,7 @@ int stmmac_xdp_open(struct net_device *dev)
 	stmmac_enable_all_queues(priv);
 	netif_carrier_on(dev);
 	netif_tx_start_all_queues(dev);
+	stmmac_enable_all_dma_irq(priv);
 
 	return 0;
 
@@ -7470,6 +7493,7 @@ int stmmac_resume(struct device *dev)
 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
 
 	stmmac_enable_all_queues(priv);
+	stmmac_enable_all_dma_irq(priv);
 
 	mutex_unlock(&priv->lock);
 	rtnl_unlock();
--
2.34.1