76cabb95da
Backport Russell King's series [1]
  "net: mvneta: reduce size of TSO header allocation"
to pending-5.15 to fix random crashes on Turris Omnia.
This also backports two patches that are dependencies of this series:
  "net: mvneta: Delete unused variable"
  "net: mvneta: fix potential double-frees in mvneta_txq_sw_deinit()"
[1] https://lore.kernel.org/netdev/ZCsbJ4nG+So%2Fn9qY@shell.armlinux.org.uk/
Signed-off-by: Marek Behún <kabel@kernel.org>
Signed-off-by: Christian Lamparter <chunkeey@gmail.com> (squashed)
(cherry picked from commit 7b31c2e9ed)
From e9f7099d0730341b24c057acbf545dd019581db6 Mon Sep 17 00:00:00 2001
From: Maxime Chevallier <maxime.chevallier@bootlin.com>
Date: Fri, 26 Nov 2021 12:20:55 +0100
Subject: net: mvneta: Allow having more than one queue per TC

The current mqprio implementation assumed that we are only using one
queue per TC. Use the offset and count parameters to allow using
multiple queues per TC. In that case, the controller will use a standard
round-robin algorithm to pick queues assigned to the same TC, with the
same priority.

This only applies to VLAN priorities in ingress traffic, each TC
corresponding to a vlan priority.

Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/marvell/mvneta.c | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -498,7 +498,6 @@ struct mvneta_port {
 	u8 mcast_count[256];
 	u16 tx_ring_size;
 	u16 rx_ring_size;
-	u8 prio_tc_map[8];
 
 	phy_interface_t phy_interface;
 	struct device_node *dn;
@@ -4955,13 +4954,12 @@ static void mvneta_clear_rx_prio_map(str
 	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
 }
 
-static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
+static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
 {
-	u32 val = 0;
-	int i;
+	u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
 
-	for (i = 0; i < rxq_number; i++)
-		val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
+	val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
+	val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
 
 	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
 }
@@ -4970,8 +4968,8 @@ static int mvneta_setup_mqprio(struct ne
 			       struct tc_mqprio_qopt_offload *mqprio)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
+	int rxq, tc;
 	u8 num_tc;
-	int i;
 
 	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
 		return 0;
@@ -4981,21 +4979,28 @@ static int mvneta_setup_mqprio(struct ne
 	if (num_tc > rxq_number)
 		return -EINVAL;
 
+	mvneta_clear_rx_prio_map(pp);
+
 	if (!num_tc) {
-		mvneta_clear_rx_prio_map(pp);
 		netdev_reset_tc(dev);
 		return 0;
 	}
 
-	memcpy(pp->prio_tc_map, mqprio->qopt.prio_tc_map,
-	       sizeof(pp->prio_tc_map));
+	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
 
-	mvneta_setup_rx_prio_map(pp);
+	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+		netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
+				    mqprio->qopt.offset[tc]);
+
+		for (rxq = mqprio->qopt.offset[tc];
+		     rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+		     rxq++) {
+			if (rxq >= rxq_number)
+				return -EINVAL;
 
-	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
-	for (i = 0; i < mqprio->qopt.num_tc; i++)
-		netdev_set_tc_queue(dev, i, mqprio->qopt.count[i],
-				    mqprio->qopt.offset[i]);
+			mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
+		}
+	}
 
 	return 0;
 }
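
For readers less familiar with the mqprio offload used in the patch above: each ingress VLAN priority maps to a traffic class (TC), and with this change a TC may own a contiguous range of RX queues described by the offload's per-TC offset and count, with the controller spreading that TC's traffic round-robin across the range. The standalone sketch below only illustrates that offset/count range semantics and the round-robin idea in userspace; it is not driver code, and the struct name tc_queue_range plus all the values in it are made up for the demo.

/*
 * Illustrative sketch only (not mvneta driver code): shows how a
 * per-TC offset/count pair, as carried in the mqprio offload, describes
 * a range of RX queues, and how traffic of one TC could be spread
 * round-robin over that range. All names and values are hypothetical.
 */
#include <stdio.h>

struct tc_queue_range {		/* hypothetical stand-in for qopt.offset/count */
	unsigned int offset;	/* first RX queue owned by this TC */
	unsigned int count;	/* number of RX queues owned by this TC */
};

int main(void)
{
	/* Two TCs, each backed by two RX queues: TC0 -> rxq 0-1, TC1 -> rxq 2-3. */
	struct tc_queue_range tcs[2] = {
		{ .offset = 0, .count = 2 },
		{ .offset = 2, .count = 2 },
	};
	/* VLAN priority (0-7) -> TC map, as an mqprio configuration might set it. */
	unsigned int prio_tc_map[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	unsigned int rr[2] = { 0, 0 };	/* per-TC round-robin position */
	unsigned int prio;

	for (prio = 0; prio < 8; prio++) {
		unsigned int tc = prio_tc_map[prio];
		unsigned int rxq = tcs[tc].offset + (rr[tc]++ % tcs[tc].count);

		printf("VLAN prio %u -> TC %u -> rxq %u\n", prio, tc, rxq);
	}
	return 0;
}

Built with any C compiler, the sketch prints which RX queue each VLAN priority would land on under that hypothetical configuration. In the driver itself, mvneta_setup_mqprio() walks the same offset/count ranges and programs the MVNETA_VLAN_PRIO_TO_RXQ register via mvneta_map_vlan_prio_to_rxq(); the round-robin selection among the queues of one TC is, per the commit message, performed by the controller.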