From f7f705a63cd4bd1e3463db7662011717c5149e8a Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 19 Oct 2019 10:13:26 +0200
Subject: [PATCH 6/7] net: mvneta: make tx buffer array agnostic

Allow tx buffer array to contain both skb and xdp buffers in order to
enable xdp frame recycling adding XDP_TX verdict support

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/marvell/mvneta.c | 66 +++++++++++++++++----------
1 file changed, 43 insertions(+), 23 deletions(-)
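
A note on the approach, with a stand-alone sketch (user-space C, not
kernel code) of the tagged-union bookkeeping this patch introduces:
each TX slot now records what it holds, so a single completion loop can
free an skb or recycle an XDP frame out of the same array. In the
sketch, skb_complete() and xdp_tx_complete() are hypothetical stand-ins
for dev_kfree_skb_any() and xdp_return_frame() (frame recycling itself
only arrives with the follow-up XDP_TX patch), and the structs are
trimmed to the single field the sketch needs.

#include <stdio.h>
#include <stdlib.h>

enum mvneta_tx_buf_type {
	MVNETA_TYPE_SKB,
	MVNETA_TYPE_XDP_TX,
	MVNETA_TYPE_XDP_NDO,
};

/* Trimmed stand-ins for the kernel structures. */
struct sk_buff { unsigned int len; };
struct xdp_frame { unsigned int len; };

/* Mirrors the struct added to mvneta.c: a type tag plus one pointer. */
struct mvneta_tx_buf {
	enum mvneta_tx_buf_type type;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
	};
};

/* Hypothetical stand-in for dev_kfree_skb_any(). */
static void skb_complete(struct sk_buff *skb)
{
	printf("completed skb, %u bytes\n", skb->len);
	free(skb);
}

/* Hypothetical stand-in for xdp_return_frame(). */
static void xdp_tx_complete(struct xdp_frame *xdpf)
{
	printf("recycled xdp_frame, %u bytes\n", xdpf->len);
	free(xdpf);
}

/* The shape a type-aware mvneta_txq_bufs_free() loop body can take. */
static void tx_buf_complete(struct mvneta_tx_buf *buf)
{
	if (buf->type == MVNETA_TYPE_SKB) {
		if (buf->skb)	/* NULL for all but the last descriptor */
			skb_complete(buf->skb);
	} else if (buf->xdpf) {
		xdp_tx_complete(buf->xdpf);
	}
}

int main(void)
{
	struct mvneta_tx_buf slots[2];

	slots[0].type = MVNETA_TYPE_SKB;
	slots[0].skb = malloc(sizeof(*slots[0].skb));
	slots[0].skb->len = 64;

	slots[1].type = MVNETA_TYPE_XDP_TX;
	slots[1].xdpf = malloc(sizeof(*slots[1].xdpf));
	slots[1].xdpf->len = 128;

	for (int i = 0; i < 2; i++)
		tx_buf_complete(&slots[i]);
	return 0;
}

The union keeps the bookkeeping at one array whose slots carry a type
tag next to the pointer, rather than two parallel arrays, which is why
mvneta_txq_sw_init() below still needs only a single kmalloc_array()
call.
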
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -565,6 +565,20 @@ struct mvneta_rx_desc {
 };
 #endif
 
+enum mvneta_tx_buf_type {
+	MVNETA_TYPE_SKB,
+	MVNETA_TYPE_XDP_TX,
+	MVNETA_TYPE_XDP_NDO,
+};
+
+struct mvneta_tx_buf {
+	enum mvneta_tx_buf_type type;
+	union {
+		struct xdp_frame *xdpf;
+		struct sk_buff *skb;
+	};
+};
+
 struct mvneta_tx_queue {
 	/* Number of this TX queue, in the range 0-7 */
 	u8 id;
@@ -580,8 +594,8 @@ struct mvneta_tx_queue {
 	int tx_stop_threshold;
 	int tx_wake_threshold;
 
-	/* Array of transmitted skb */
-	struct sk_buff **tx_skb;
+	/* Array of transmitted buffers */
+	struct mvneta_tx_buf *buf;
 
 	/* Index of last TX DMA descriptor that was inserted */
 	int txq_put_index;
@@ -1793,14 +1807,9 @@ static void mvneta_txq_bufs_free(struct
 	int i;
 
 	for (i = 0; i < num; i++) {
+		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
 		struct mvneta_tx_desc *tx_desc = txq->descs +
 						 txq->txq_get_index;
-		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
-
-		if (skb) {
-			bytes_compl += skb->len;
-			pkts_compl++;
-		}
 
 		mvneta_txq_inc_get(txq);
 
@@ -1808,9 +1817,12 @@ static void mvneta_txq_bufs_free(struct
 			dma_unmap_single(pp->dev->dev.parent,
 					 tx_desc->buf_phys_addr,
 					 tx_desc->data_size, DMA_TO_DEVICE);
-		if (!skb)
+		if (!buf->skb)
 			continue;
-		dev_kfree_skb_any(skb);
+
+		bytes_compl += buf->skb->len;
+		pkts_compl++;
+		dev_kfree_skb_any(buf->skb);
 	}
 
 	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -2335,16 +2347,19 @@ static inline void
 mvneta_tso_put_hdr(struct sk_buff *skb,
 		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
 {
-	struct mvneta_tx_desc *tx_desc;
 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+	struct mvneta_tx_desc *tx_desc;
 
-	txq->tx_skb[txq->txq_put_index] = NULL;
 	tx_desc = mvneta_txq_next_desc_get(txq);
 	tx_desc->data_size = hdr_len;
 	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
 	tx_desc->command |= MVNETA_TXD_F_DESC;
 	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
 				 txq->txq_put_index * TSO_HEADER_SIZE;
+	buf->type = MVNETA_TYPE_SKB;
+	buf->skb = NULL;
+
 	mvneta_txq_inc_put(txq);
 }
 
@@ -2353,6 +2368,7 @@ mvneta_tso_put_data(struct net_device *d
 		    struct sk_buff *skb, char *data, int size,
 		    bool last_tcp, bool is_last)
 {
+	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
 	struct mvneta_tx_desc *tx_desc;
 
 	tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2366,7 +2382,8 @@ mvneta_tso_put_data(struct net_device *d
 	}
 
 	tx_desc->command = 0;
-	txq->tx_skb[txq->txq_put_index] = NULL;
+	buf->type = MVNETA_TYPE_SKB;
+	buf->skb = NULL;
 
 	if (last_tcp) {
 		/* last descriptor in the TCP packet */
@@ -2374,7 +2391,7 @@ mvneta_tso_put_data(struct net_device *d
 
 		/* last descriptor in SKB */
 		if (is_last)
-			txq->tx_skb[txq->txq_put_index] = skb;
+			buf->skb = skb;
 	}
 	mvneta_txq_inc_put(txq);
 	return 0;
@@ -2459,6 +2476,7 @@ static int mvneta_tx_frag_process(struct
 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
 
 	for (i = 0; i < nr_frags; i++) {
+		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		void *addr = skb_frag_address(frag);
 
@@ -2478,12 +2496,13 @@ static int mvneta_tx_frag_process(struct
 		if (i == nr_frags - 1) {
 			/* Last descriptor */
 			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
-			txq->tx_skb[txq->txq_put_index] = skb;
+			buf->skb = skb;
 		} else {
 			/* Descriptor in the middle: Not First, Not Last */
 			tx_desc->command = 0;
-			txq->tx_skb[txq->txq_put_index] = NULL;
+			buf->skb = NULL;
 		}
+		buf->type = MVNETA_TYPE_SKB;
 		mvneta_txq_inc_put(txq);
 	}
 
@@ -2511,6 +2530,7 @@ static netdev_tx_t mvneta_tx(struct sk_b
 	struct mvneta_port *pp = netdev_priv(dev);
 	u16 txq_id = skb_get_queue_mapping(skb);
 	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
 	struct mvneta_tx_desc *tx_desc;
 	int len = skb->len;
 	int frags = 0;
@@ -2543,16 +2563,17 @@ static netdev_tx_t mvneta_tx(struct sk_b
 		goto out;
 	}
 
+	buf->type = MVNETA_TYPE_SKB;
 	if (frags == 1) {
 		/* First and Last descriptor */
 		tx_cmd |= MVNETA_TXD_FLZ_DESC;
 		tx_desc->command = tx_cmd;
-		txq->tx_skb[txq->txq_put_index] = skb;
+		buf->skb = skb;
 		mvneta_txq_inc_put(txq);
 	} else {
 		/* First but not Last */
 		tx_cmd |= MVNETA_TXD_F_DESC;
-		txq->tx_skb[txq->txq_put_index] = NULL;
+		buf->skb = NULL;
 		mvneta_txq_inc_put(txq);
 		tx_desc->command = tx_cmd;
 		/* Continue with other skb fragments */
@@ -3138,9 +3159,8 @@ static int mvneta_txq_sw_init(struct mvn
 
 	txq->last_desc = txq->size - 1;
 
-	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
-				    GFP_KERNEL);
-	if (!txq->tx_skb) {
+	txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
+	if (!txq->buf) {
 		dma_free_coherent(pp->dev->dev.parent,
 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
 				  txq->descs, txq->descs_phys);
@@ -3152,7 +3172,7 @@ static int mvneta_txq_sw_init(struct mvn
 					   txq->size * TSO_HEADER_SIZE,
 					   &txq->tso_hdrs_phys, GFP_KERNEL);
 	if (!txq->tso_hdrs) {
-		kfree(txq->tx_skb);
+		kfree(txq->buf);
 		dma_free_coherent(pp->dev->dev.parent,
 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
 				  txq->descs, txq->descs_phys);
@@ -3205,7 +3225,7 @@ static void mvneta_txq_sw_deinit(struct
 {
 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
 
-	kfree(txq->tx_skb);
+	kfree(txq->buf);
 
 	if (txq->tso_hdrs)
 		dma_free_coherent(pp->dev->dev.parent,