openwrt/target/linux/mvebu/patches-5.4/016-net-mvneta-add-XDP_TX-support.patch

From 2ca7bbf394e6d98741f19566b82b3d5a56e9cc2c Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 19 Oct 2019 10:13:27 +0200
Subject: [PATCH 7/7] net: mvneta: add XDP_TX support

Implement XDP_TX verdict and ndo_xdp_xmit net_device_ops function
pointer

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/marvell/mvneta.c | 128 ++++++++++++++++++++++++--
 1 file changed, 121 insertions(+), 7 deletions(-)

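Note: the snippet below is illustrative commentary, not part of the
upstream commit. The XDP_TX verdict added by this patch bounces the
received frame back out the port it arrived on, so a minimal BPF
program exercising the new path could look like this sketch (the file,
program, and section names are arbitrary):

/* xdp_tx_mirror.c - illustrative sketch, not from the upstream commit.
 * Reflect every received frame back out the ingress port; mvneta now
 * handles this verdict by queueing the frame on a TX queue.
 * Build with: clang -O2 -target bpf -c xdp_tx_mirror.c -o xdp_tx_mirror.o
 */
#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

SEC("xdp")
int xdp_tx_mirror(struct xdp_md *ctx)
{
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";

Attaching it with something like "ip link set dev eth0 xdp obj
xdp_tx_mirror.o sec xdp" should drive the new XDP_TX case in
mvneta_run_xdp() on a mvneta port.
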
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1811,16 +1811,19 @@ static void mvneta_txq_bufs_free(struct
 
 		mvneta_txq_inc_get(txq);
 
-		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
+		    buf->type != MVNETA_TYPE_XDP_TX)
 			dma_unmap_single(pp->dev->dev.parent,
 					 tx_desc->buf_phys_addr,
 					 tx_desc->data_size, DMA_TO_DEVICE);
-		if (!buf->skb)
-			continue;
-
-		bytes_compl += buf->skb->len;
-		pkts_compl++;
-		dev_kfree_skb_any(buf->skb);
+		if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
+			bytes_compl += buf->skb->len;
+			pkts_compl++;
+			dev_kfree_skb_any(buf->skb);
+		} else if (buf->type == MVNETA_TYPE_XDP_TX ||
+			   buf->type == MVNETA_TYPE_XDP_NDO) {
+			xdp_return_frame(buf->xdpf);
+		}
 	}
 
 	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -1985,6 +1988,111 @@ int mvneta_rx_refill_queue(struct mvneta
 }
 
 static int
+mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
+			struct xdp_frame *xdpf, bool dma_map)
+{
+	struct mvneta_tx_desc *tx_desc;
+	struct mvneta_tx_buf *buf;
+	dma_addr_t dma_addr;
+
+	if (txq->count >= txq->tx_stop_threshold)
+		return MVNETA_XDP_DROPPED;
+
+	tx_desc = mvneta_txq_next_desc_get(txq);
+
+	buf = &txq->buf[txq->txq_put_index];
+	if (dma_map) {
+		/* ndo_xdp_xmit */
+		dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
+					  xdpf->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
+			mvneta_txq_desc_put(txq);
+			return MVNETA_XDP_DROPPED;
+		}
+		buf->type = MVNETA_TYPE_XDP_NDO;
+	} else {
+		struct page *page = virt_to_page(xdpf->data);
+
+		dma_addr = page_pool_get_dma_addr(page) +
+			   sizeof(*xdpf) + xdpf->headroom;
+		dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
+					   xdpf->len, DMA_BIDIRECTIONAL);
+		buf->type = MVNETA_TYPE_XDP_TX;
+	}
+	buf->xdpf = xdpf;
+
+	tx_desc->command = MVNETA_TXD_FLZ_DESC;
+	tx_desc->buf_phys_addr = dma_addr;
+	tx_desc->data_size = xdpf->len;
+
+	mvneta_update_stats(pp, 1, xdpf->len, true);
+	mvneta_txq_inc_put(txq);
+	txq->pending++;
+	txq->count++;
+
+	return MVNETA_XDP_TX;
+}
+
+static int
+mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
+{
+	struct mvneta_tx_queue *txq;
+	struct netdev_queue *nq;
+	struct xdp_frame *xdpf;
+	int cpu;
+	u32 ret;
+
+	xdpf = convert_to_xdp_frame(xdp);
+	if (unlikely(!xdpf))
+		return MVNETA_XDP_DROPPED;
+
+	cpu = smp_processor_id();
+	txq = &pp->txqs[cpu % txq_number];
+	nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+	__netif_tx_lock(nq, cpu);
+	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+	if (ret == MVNETA_XDP_TX)
+		mvneta_txq_pend_desc_add(pp, txq, 0);
+	__netif_tx_unlock(nq);
+
+	return ret;
+}
+
+static int
+mvneta_xdp_xmit(struct net_device *dev, int num_frame,
+		struct xdp_frame **frames, u32 flags)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	int cpu = smp_processor_id();
+	struct mvneta_tx_queue *txq;
+	struct netdev_queue *nq;
+	int i, drops = 0;
+	u32 ret;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	txq = &pp->txqs[cpu % txq_number];
+	nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+	__netif_tx_lock(nq, cpu);
+	for (i = 0; i < num_frame; i++) {
+		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+		if (ret != MVNETA_XDP_TX) {
+			xdp_return_frame_rx_napi(frames[i]);
+			drops++;
+		}
+	}
+
+	if (unlikely(flags & XDP_XMIT_FLUSH))
+		mvneta_txq_pend_desc_add(pp, txq, 0);
+	__netif_tx_unlock(nq);
+
+	return num_frame - drops;
+}
+
+static int
 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	       struct bpf_prog *prog, struct xdp_buff *xdp)
 {
@@ -2006,6 +2114,11 @@ mvneta_run_xdp(struct mvneta_port *pp, s
 		}
 		break;
 	}
+	case XDP_TX:
+		ret = mvneta_xdp_xmit_back(pp, xdp);
+		if (ret != MVNETA_XDP_TX)
+			xdp_return_buff(xdp);
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		/* fall through */
@@ -4542,6 +4655,7 @@ static const struct net_device_ops mvnet
 	.ndo_get_stats64     = mvneta_get_stats64,
 	.ndo_do_ioctl        = mvneta_ioctl,
 	.ndo_bpf             = mvneta_xdp,
+	.ndo_xdp_xmit        = mvneta_xdp_xmit,
 };
 
 static const struct ethtool_ops mvneta_eth_tool_ops = {
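
Note: the snippet below is illustrative commentary, not part of the
upstream commit. The new ndo_xdp_xmit hook is driven from the other
direction: an XDP program on a different interface returns XDP_REDIRECT
toward the mvneta port, and the kernel then batches the frames into
mvneta_xdp_xmit(). A sketch using a devmap, assuming a libbpf-style
loader and the classic bpf_map_def layout of this era (all names
arbitrary):

/* xdp_redirect_mvneta.c - illustrative sketch, not from the upstream
 * commit. Redirect every received frame to the ifindex stored in slot
 * 0 of the devmap; userspace populates that slot with the mvneta port.
 * Build with: clang -O2 -target bpf -c xdp_redirect_mvneta.c
 */
#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

/* minimal stand-ins for libbpf's bpf_helpers.h */
struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};

static int (*bpf_redirect_map)(void *map, __u32 key, __u64 flags) =
	(void *)BPF_FUNC_redirect_map;

struct bpf_map_def SEC("maps") tx_port = {
	.type		= BPF_MAP_TYPE_DEVMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 1,
};

SEC("xdp")
int xdp_redirect_to_mvneta(struct xdp_md *ctx)
{
	/* returns XDP_REDIRECT on success, XDP_ABORTED (flags) otherwise */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";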