airoha: an7581: backport upstream ethernet fixes

Backport patches, merged upstream, that fix some ethernet problems.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Christian Marangi 2024-11-12 13:55:30 +01:00
parent 92eb867873
commit d83ae23d08
2 changed files with 222 additions and 0 deletions

@@ -0,0 +1,92 @@
From 3affa310de523d63e52ea8e2efb3c476df29e414 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Tue, 29 Oct 2024 13:17:09 +0100
Subject: [PATCH 1/2] net: airoha: Read completion queue data in
airoha_qdma_tx_napi_poll()

In order to avoid any possible race, read the completion queue head and
pending entries in the airoha_qdma_tx_napi_poll routine instead of doing
it in airoha_irq_handler. Remove the unused airoha_tx_irq_queue fields.

This is a preliminary patch to add Qdisc offload for the airoha_eth driver.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://patch.msgid.link/20241029-airoha-en7581-tx-napi-work-v1-1-96ad1686b946@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/ethernet/mediatek/airoha_eth.c | 31 +++++++++-------------
1 file changed, 13 insertions(+), 18 deletions(-)
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -752,11 +752,9 @@ struct airoha_tx_irq_queue {
struct airoha_qdma *qdma;
struct napi_struct napi;
- u32 *q;
int size;
- int queued;
- u16 head;
+ u32 *q;
};
struct airoha_hw_stats {
@@ -1656,25 +1654,31 @@ static int airoha_qdma_init_rx(struct ai
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
struct airoha_tx_irq_queue *irq_q;
+ int id, done = 0, irq_queued;
struct airoha_qdma *qdma;
struct airoha_eth *eth;
- int id, done = 0;
+ u32 status, head;
irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
qdma = irq_q->qdma;
id = irq_q - &qdma->q_tx_irq[0];
eth = qdma->eth;
- while (irq_q->queued > 0 && done < budget) {
- u32 qid, last, val = irq_q->q[irq_q->head];
+ status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
+ head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
+ head = head % irq_q->size;
+ irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
+
+ while (irq_queued > 0 && done < budget) {
+ u32 qid, last, val = irq_q->q[head];
struct airoha_queue *q;
if (val == 0xff)
break;
- irq_q->q[irq_q->head] = 0xff; /* mark as done */
- irq_q->head = (irq_q->head + 1) % irq_q->size;
- irq_q->queued--;
+ irq_q->q[head] = 0xff; /* mark as done */
+ head = (head + 1) % irq_q->size;
+ irq_queued--;
done++;
last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
@@ -2026,20 +2030,11 @@ static irqreturn_t airoha_irq_handler(in
if (intr[0] & INT_TX_MASK) {
for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
- struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
- u32 status, head;
-
if (!(intr[0] & TX_DONE_INT_MASK(i)))
continue;
airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(i));
-
- status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
- head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
- irq_q->head = head % irq_q->size;
- irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
-
napi_schedule(&qdma->q_tx_irq[i].napi);
}
}

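The first patch moves the completion-queue bookkeeping out of the hard-IRQ
handler: the NAPI poll routine itself reads REG_IRQ_STATUS and decodes the
head index and pending-entry count from that single status word. Below is a
minimal stand-alone C sketch of that decode step, not driver code: the bit
layouts, the read_irq_status() stub and the field_get() helper are
illustrative assumptions; only the REG_IRQ_STATUS / IRQ_HEAD_IDX_MASK /
IRQ_ENTRY_LEN_MASK names come from the diff above.

/*
 * User-space sketch of the idea in patch 1: decode the completion-queue
 * head and pending count inside the poll path from one status read.
 * Mask values and helpers are hypothetical, not the driver's layout.
 */
#include <stdio.h>
#include <stdint.h>

#define IRQ_HEAD_IDX_MASK	0x000001ffu	/* assumed bit layout */
#define IRQ_ENTRY_LEN_MASK	0x0fff0000u	/* assumed bit layout */

/* stand-in for airoha_qdma_rr(qdma, REG_IRQ_STATUS(id)) */
static uint32_t read_irq_status(void)
{
	return (5u << 16) | 42u;	/* 5 pending entries, head index 42 */
}

/* minimal FIELD_GET()-style extraction */
static unsigned int field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	const unsigned int ring_size = 256;
	uint32_t status = read_irq_status();	/* read in the poll routine */
	unsigned int head = field_get(IRQ_HEAD_IDX_MASK, status) % ring_size;
	unsigned int pending = field_get(IRQ_ENTRY_LEN_MASK, status);

	while (pending > 0) {
		printf("complete entry at index %u\n", head);
		head = (head + 1) % ring_size;
		pending--;
	}
	return 0;
}

Reading the status register in the poll routine means the head/pending pair
always matches what the loop is about to consume, which is the race the
patch description refers to.
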
@@ -0,0 +1,130 @@
From 0c729f53b8c33b9e5eadc2d5e673759e3510501e Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Tue, 29 Oct 2024 13:17:10 +0100
Subject: [PATCH 2/2] net: airoha: Simplify Tx napi logic

Simplify the Tx NAPI logic by relying just on the packet index provided
by the completion queue, which indicates the completed packet that can
be removed from the Tx DMA ring.

This is a preliminary patch to add Qdisc offload for the airoha_eth driver.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://patch.msgid.link/20241029-airoha-en7581-tx-napi-work-v1-2-96ad1686b946@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/ethernet/mediatek/airoha_eth.c | 73 ++++++++++++----------
1 file changed, 41 insertions(+), 32 deletions(-)
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -1670,8 +1670,12 @@ static int airoha_qdma_tx_napi_poll(stru
irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
while (irq_queued > 0 && done < budget) {
- u32 qid, last, val = irq_q->q[head];
+ u32 qid, val = irq_q->q[head];
+ struct airoha_qdma_desc *desc;
+ struct airoha_queue_entry *e;
struct airoha_queue *q;
+ u32 index, desc_ctrl;
+ struct sk_buff *skb;
if (val == 0xff)
break;
@@ -1681,9 +1685,7 @@ static int airoha_qdma_tx_napi_poll(stru
irq_queued--;
done++;
- last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
-
if (qid >= ARRAY_SIZE(qdma->q_tx))
continue;
@@ -1691,46 +1693,53 @@ static int airoha_qdma_tx_napi_poll(stru
if (!q->ndesc)
continue;
+ index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
+ if (index >= q->ndesc)
+ continue;
+
spin_lock_bh(&q->lock);
- while (q->queued > 0) {
- struct airoha_qdma_desc *desc = &q->desc[q->tail];
- struct airoha_queue_entry *e = &q->entry[q->tail];
- u32 desc_ctrl = le32_to_cpu(desc->ctrl);
- struct sk_buff *skb = e->skb;
- u16 index = q->tail;
-
- if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
- !(desc_ctrl & QDMA_DESC_DROP_MASK))
- break;
+ if (!q->queued)
+ goto unlock;
- q->tail = (q->tail + 1) % q->ndesc;
- q->queued--;
+ desc = &q->desc[index];
+ desc_ctrl = le32_to_cpu(desc->ctrl);
- dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
- DMA_TO_DEVICE);
-
- WRITE_ONCE(desc->msg0, 0);
- WRITE_ONCE(desc->msg1, 0);
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
+ !(desc_ctrl & QDMA_DESC_DROP_MASK))
+ goto unlock;
+
+ e = &q->entry[index];
+ skb = e->skb;
+
+ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ memset(e, 0, sizeof(*e));
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+ q->queued--;
+
+ /* completion ring can report out-of-order indexes if hw QoS
+ * is enabled and packets with different priority are queued
+ * to same DMA ring. Take into account possible out-of-order
+ * reports incrementing DMA ring tail pointer
+ */
+ while (q->tail != q->head && !q->entry[q->tail].dma_addr)
+ q->tail = (q->tail + 1) % q->ndesc;
- if (skb) {
- u16 queue = skb_get_queue_mapping(skb);
- struct netdev_queue *txq;
-
- txq = netdev_get_tx_queue(skb->dev, queue);
- netdev_tx_completed_queue(txq, 1, skb->len);
- if (netif_tx_queue_stopped(txq) &&
- q->ndesc - q->queued >= q->free_thr)
- netif_tx_wake_queue(txq);
-
- dev_kfree_skb_any(skb);
- e->skb = NULL;
- }
+ if (skb) {
+ u16 queue = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(skb->dev, queue);
+ netdev_tx_completed_queue(txq, 1, skb->len);
+ if (netif_tx_queue_stopped(txq) &&
+ q->ndesc - q->queued >= q->free_thr)
+ netif_tx_wake_queue(txq);
- if (index == last)
- break;
+ dev_kfree_skb_any(skb);
}
-
+unlock:
spin_unlock_bh(&q->lock);
}
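
The core of the second patch is the out-of-order completion handling
described in the comment inside the diff: each completion frees its own
descriptor slot by index, and the ring tail only advances across slots that
are already free. The stand-alone sketch below mimics that loop with a
hypothetical 8-slot ring; struct entry, ring[], complete() and the index
values are illustrative, not the driver's structures.

/*
 * User-space sketch of the tail handling in patch 2: completions can
 * arrive out of order when hardware QoS maps several priorities onto one
 * DMA ring, so each completion clears its own slot and the tail is only
 * advanced over slots that have already been freed.
 */
#include <stdio.h>
#include <string.h>

#define NDESC 8

struct entry {
	unsigned long dma_addr;	/* non-zero while the slot is in flight */
};

static struct entry ring[NDESC];
static unsigned int head, tail, queued;

static void complete(unsigned int index)
{
	memset(&ring[index], 0, sizeof(ring[index]));	/* free this slot only */
	queued--;

	/* advance the tail past every already-freed slot, as in the patch */
	while (tail != head && !ring[tail].dma_addr)
		tail = (tail + 1) % NDESC;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++) {	/* queue four buffers */
		ring[head].dma_addr = 0x1000 + i;
		head = (head + 1) % NDESC;
		queued++;
	}

	complete(2);	/* out-of-order completion: tail stays at 0 */
	printf("tail=%u queued=%u\n", tail, queued);

	complete(0);	/* slot 0 and the earlier slot 2 are now free */
	complete(1);	/* ...and once slot 1 completes, tail jumps to 3 */
	printf("tail=%u queued=%u\n", tail, queued);
	return 0;
}

This is also why the patch drops the old "drain until the last reported
index" loop: with out-of-order reports that index no longer bounds the range
of finished descriptors, so per-index cleanup plus the tail-advance loop is
what keeps q->tail consistent.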