openwrt/target/linux/ipq806x/patches-6.1/700-02-net-stmmac-move-TX-timer-arm-after-DMA-enable.patch
Commit cafa8804a4 by Christian Marangi
ipq806x: add patch fixing regression from stmmac TX timer
Add a patch fixing a regression from the stmmac TX timer.

Refer to the patch itself for extensive details on the problem.

This should restore the original performance from before the 4.19 kernel.

Fixes: #11676
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
2023-09-30 16:55:09 +02:00

From fb04db35447d1e8ff557c8e57139164cecab7de5 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Wed, 27 Sep 2023 15:38:31 +0200
Subject: [PATCH 2/4] net: stmmac: move TX timer arm after DMA enable

Move TX timer arm call after DMA interrupt is enabled again.

The TX timer arm function changed logic and now is skipped if a napi is
already scheduled. By moving the TX timer arm call after DMA is enabled,
we permit to correctly skip if a DMA interrupt has been fired and a napi
has been scheduled again.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
.../net/ethernet/stmicro/stmmac/stmmac_main.c | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)

--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2529,7 +2529,8 @@ static void stmmac_bump_dma_threshold(st
  * @queue: TX queue index
  * Description: it reclaims the transmit resources after transmission completes.
  */
-static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
+			   bool *pending_packets)
 {
 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
 	unsigned int bytes_compl = 0, pkts_compl = 0;
@@ -2692,7 +2693,7 @@ static int stmmac_tx_clean(struct stmmac
 
 	/* We still have pending packets, let's call for a new scheduling */
 	if (tx_q->dirty_tx != tx_q->cur_tx)
-		stmmac_tx_timer_arm(priv, queue);
+		*pending_packets = true;
 
 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
 
@@ -5474,12 +5475,13 @@ static int stmmac_napi_poll_tx(struct na
 	struct stmmac_channel *ch =
 		container_of(napi, struct stmmac_channel, tx_napi);
 	struct stmmac_priv *priv = ch->priv_data;
+	bool pending_packets = false;
 	u32 chan = ch->index;
 	int work_done;
 
 	priv->xstats.napi_poll++;
 
-	work_done = stmmac_tx_clean(priv, budget, chan);
+	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
 	work_done = min(work_done, budget);
 
 	if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5490,6 +5492,10 @@ static int stmmac_napi_poll_tx(struct na
 		spin_unlock_irqrestore(&ch->lock, flags);
 	}
 
+	/* TX still have packet to handle, check if we need to arm tx timer */
+	if (pending_packets)
+		stmmac_tx_timer_arm(priv, chan);
+
 	return work_done;
 }
 
@@ -5499,11 +5505,12 @@ static int stmmac_napi_poll_rxtx(struct
 		container_of(napi, struct stmmac_channel, rxtx_napi);
 	struct stmmac_priv *priv = ch->priv_data;
 	int rx_done, tx_done, rxtx_done;
+	bool tx_pending_packets = false;
 	u32 chan = ch->index;
 
 	priv->xstats.napi_poll++;
 
-	tx_done = stmmac_tx_clean(priv, budget, chan);
+	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
 	tx_done = min(tx_done, budget);
 
 	rx_done = stmmac_rx_zc(priv, budget, chan);
@@ -5528,6 +5535,10 @@ static int stmmac_napi_poll_rxtx(struct
 		spin_unlock_irqrestore(&ch->lock, flags);
 	}
 
+	/* TX still have packet to handle, check if we need to arm tx timer */
+	if (tx_pending_packets)
+		stmmac_tx_timer_arm(priv, chan);
+
 	return min(rxtx_done, budget - 1);
 }
 
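
For reference, the following is a minimal, self-contained sketch of the pattern the
patch applies; it is not the stmmac driver code, and names such as tx_clean,
tx_timer_arm and napi_poll_tx are purely illustrative. The cleanup path only reports
leftover work through a pending_packets flag, and the poll function decides whether
to arm the coalescing timer only after the point where interrupts would have been
re-enabled, so a freshly scheduled NAPI run can turn the arm call into a no-op.

/* Illustrative sketch only, not kernel code. Compile with: cc -o demo demo.c */
#include <stdbool.h>
#include <stdio.h>

struct tx_queue {
	unsigned int dirty_tx;		/* next descriptor to reclaim */
	unsigned int cur_tx;		/* next descriptor to be used by the HW */
	bool napi_scheduled;		/* set when a new NAPI run is already queued */
	bool timer_armed;
};

/* Reclaim descriptors; report leftover work instead of arming the timer here. */
static int tx_clean(struct tx_queue *q, int budget, bool *pending_packets)
{
	int done = 0;

	while (q->dirty_tx != q->cur_tx && done < budget) {
		q->dirty_tx++;
		done++;
	}

	if (q->dirty_tx != q->cur_tx)
		*pending_packets = true;	/* defer the decision to the caller */

	return done;
}

/* Skip (re)arming when a NAPI run is already queued to handle the pending work. */
static void tx_timer_arm(struct tx_queue *q)
{
	if (q->napi_scheduled)
		return;
	q->timer_armed = true;
}

static int napi_poll_tx(struct tx_queue *q, int budget)
{
	bool pending_packets = false;
	int work_done = tx_clean(q, budget, &pending_packets);

	if (work_done < budget) {
		/* napi_complete_done() and DMA interrupt re-enable would go here;
		 * an interrupt firing at this point sets q->napi_scheduled. */
	}

	/* Only now decide whether the coalescing timer is still needed. */
	if (pending_packets)
		tx_timer_arm(q);

	return work_done;
}

int main(void)
{
	struct tx_queue q = { .dirty_tx = 0, .cur_tx = 100 };

	printf("cleaned %d, timer_armed=%d\n",
	       napi_poll_tx(&q, 64), q.timer_armed);
	return 0;
}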