Mirror of https://github.com/openwrt/openwrt.git (synced 2024-12-27 09:12:39 +00:00, commit 255268ce1a)
xrx200 max MTU is reduced so that it works correctly when set to the max, and the max MTU of the switch is increased to match. In 5.10, the switch driver now enables non-standard MTUs on a per-port basis, with the overall frame size set based on the cpu port. When the MTU is not used, this should have no effect. The maximum packet size is limited as large packets cause the switch to lock up. 0702-net-lantiq-add-support-for-jumbo-frames.patch comes from net-next commit 998ac358019e491217e752bc6dcbb3afb2a6fa3e. In 5.4, all switch ports are configured to accept the max MTU, as 5.4 does not have port_max_mtu/port_change_mtu callbacks. Signed-off-by: Thomas Nixon <tom@tomn.co.uk>
123 lines | 4.0 KiB | Diff
From 1488fc204568f707fe2a42a913788c00a95af30e Mon Sep 17 00:00:00 2001
From: Aleksander Jan Bajkowski <olek2@wp.pl>
Date: Fri, 17 Dec 2021 01:07:40 +0100
Subject: [PATCH] net: lantiq_xrx200: increase buffer reservation

If the user sets a lower mtu on the CPU port than on the switch,
then DMA inserts a few more bytes into the buffer than expected.
In the worst case, it may exceed the size of the buffer. The
experiments showed that the buffer should be a multiple of the
burst length value. This patch rounds the length of the rx buffer
upwards and fixes this bug. The reservation of FCS space in the
buffer has been removed as PMAC strips the FCS.

Fixes: 998ac358019e ("net: lantiq: add support for jumbo frames")
Reported-by: Thomas Nixon <tom@tomn.co.uk>
Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/lantiq_xrx200.c | 34 ++++++++++++++++++++--------
 1 file changed, 24 insertions(+), 10 deletions(-)

--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -70,6 +70,8 @@ struct xrx200_priv {
 	struct xrx200_chan chan_tx;
 	struct xrx200_chan chan_rx;
 
+	u16 rx_buf_size;
+
 	struct net_device *net_dev;
 	struct device *dev;
 
@@ -96,6 +98,16 @@ static void xrx200_pmac_mask(struct xrx2
 	xrx200_pmac_w32(priv, val, offset);
 }
 
+static int xrx200_max_frame_len(int mtu)
+{
+	return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
+
 /* drop all the packets from the DMA ring */
 static void xrx200_flush_dma(struct xrx200_chan *ch)
 {
@@ -108,8 +120,7 @@ static void xrx200_flush_dma(struct xrx2
 			break;
 
 		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
-			    (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
-			     ETH_FCS_LEN);
+			    ch->priv->rx_buf_size;
 		ch->dma.desc++;
 		ch->dma.desc %= LTQ_DESC_NUM;
 	}
@@ -157,21 +168,21 @@ static int xrx200_close(struct net_devic
 
 static int xrx200_alloc_skb(struct xrx200_chan *ch)
 {
-	int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
 	struct sk_buff *skb = ch->skb[ch->dma.desc];
+	struct xrx200_priv *priv = ch->priv;
 	dma_addr_t mapping;
 	int ret = 0;
 
-	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
-							  len);
+	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
+							  priv->rx_buf_size);
 	if (!ch->skb[ch->dma.desc]) {
 		ret = -ENOMEM;
 		goto skip;
 	}
 
-	mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
-				 len, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+	mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+				 priv->rx_buf_size, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
 		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
 		ch->skb[ch->dma.desc] = skb;
 		ret = -ENOMEM;
@@ -183,7 +194,7 @@ static int xrx200_alloc_skb(struct xrx20
 	wmb();
 skip:
 	ch->dma.desc_base[ch->dma.desc].ctl =
-		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;
 
 	return ret;
 }
@@ -355,6 +366,7 @@ xrx200_change_mtu(struct net_device *net
 	int ret = 0;
 
 	net_dev->mtu = new_mtu;
+	priv->rx_buf_size = xrx200_buffer_size(new_mtu);
 
 	if (new_mtu <= old_mtu)
 		return ret;
@@ -374,6 +386,7 @@ xrx200_change_mtu(struct net_device *net
 		ret = xrx200_alloc_skb(ch_rx);
 		if (ret) {
 			net_dev->mtu = old_mtu;
+			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
 			break;
 		}
 		dev_kfree_skb_any(skb);
@@ -504,7 +517,8 @@ static int xrx200_probe(struct platform_
 	net_dev->netdev_ops = &xrx200_netdev_ops;
 	SET_NETDEV_DEV(net_dev, dev);
 	net_dev->min_mtu = ETH_ZLEN;
-	net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
 
 	/* load the memory ranges */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);