From 0c7cb8b132f28cd150eb578a73c959de736364a2 Mon Sep 17 00:00:00 2001
From: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Date: Mon, 16 Sep 2019 13:15:02 +0300
Subject: [PATCH] dpaa2-eth: Update FQ taildrop threshold and buffer pool count

Now that we have congestion group taildrop configured at all
times, we can afford to increase the frame queue taildrop
threshold; this will ensure a better response when receiving
bursts of large-sized frames.

Also decouple the buffer pool count from the Rx FQ taildrop
threshold, as the above change would increase it too much. Instead,
keep the old count as a hardcoded value.

With the new limits, we try to ensure that:
* we allow enough leeway for large frame bursts (buffering enough
of them in queues to avoid heavy dropping in case of bursty
traffic, as long as overall ingress bandwidth is manageable)
* we allow pending frames to be evenly spread between ingress FQs,
regardless of frame size
* we avoid dropping frames due to the buffer pool being empty; this
is not bad behaviour per se, but overall system response is more
linear and predictable when frames are dropped at frame
queue/group level.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -35,24 +35,24 @@
 /* Convert L3 MTU to L2 MFL */
 #define DPAA2_ETH_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN)
 
-/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
- * frames in the Rx queues (length of the current frame is not
- * taken into account when making the taildrop decision)
+/* Set the taildrop threshold (in bytes) to allow the enqueue of a large
+ * enough number of jumbo frames in the Rx queues (length of the current
+ * frame is not taken into account when making the taildrop decision)
  */
-#define DPAA2_ETH_FQ_TAILDROP_THRESH	(64 * 1024)
+#define DPAA2_ETH_FQ_TAILDROP_THRESH	(1024 * 1024)
 
 /* Maximum number of Tx confirmation frames to be processed
  * in a single NAPI call
  */
 #define DPAA2_ETH_TXCONF_PER_NAPI	256
 
-/* Buffer quota per queue. Must be large enough such that for minimum sized
- * frames taildrop kicks in before the bpool gets depleted, so we compute
- * how many 64B frames fit inside the taildrop threshold and add a margin
- * to accommodate the buffer refill delay.
+/* Buffer quota per channel. We want to keep in check the number of ingress
+ * frames in flight: for small-sized frames, congestion group taildrop may
+ * kick in first; for large sizes, the Rx FQ taildrop threshold will ensure
+ * only a reasonable number of frames is pending at any given time.
+ * Ingress frame drops due to buffer pool depletion should be a corner case only
  */
-#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_FQ_TAILDROP_THRESH / 64)
-#define DPAA2_ETH_NUM_BUFS		(DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+#define DPAA2_ETH_NUM_BUFS		1280
 #define DPAA2_ETH_REFILL_THRESH \
 	(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
 
@@ -62,8 +62,7 @@
  * taildrop kicks in
  */
 #define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
-	(DPAA2_ETH_MAX_FRAMES_PER_QUEUE * dpaa2_eth_queue_count(priv) / \
-	 dpaa2_eth_tc_count(priv))
+	(1024 * dpaa2_eth_queue_count(priv) / dpaa2_eth_tc_count(priv))
 
 /* Maximum number of buffers that can be acquired/released through a single
  * QBMan command
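
To see how the new limits interact, the arithmetic can be reproduced outside
the driver. The standalone sketch below mirrors the updated macros; it is an
illustration only, not driver code, and queue_count/tc_count are hypothetical
stand-ins for dpaa2_eth_queue_count() and dpaa2_eth_tc_count(), whose actual
values depend on the DPNI configuration.

/* Standalone sketch of the updated threshold arithmetic (illustration only,
 * not part of the patch). All names below are local stand-ins.
 */
#include <stdio.h>

#define FQ_TAILDROP_THRESH	(1024 * 1024)	/* bytes, per Rx frame queue */
#define NUM_BUFS		1280		/* buffer quota per channel */

/* Mirrors DPAA2_ETH_CG_TAILDROP_THRESH: frames admitted per congestion
 * group, i.e. 1024 frames per queue spread across traffic classes.
 */
static unsigned int cg_taildrop_thresh(unsigned int queue_count,
				       unsigned int tc_count)
{
	return 1024 * queue_count / tc_count;
}

int main(void)
{
	unsigned int queues = 8, tcs = 1;	/* hypothetical example values */

	printf("FQ taildrop : %u KiB per Rx queue\n", FQ_TAILDROP_THRESH / 1024);
	printf("CG taildrop : %u frames\n", cg_taildrop_thresh(queues, tcs));
	printf("Buffer pool : %u buffers per channel\n", NUM_BUFS);
	return 0;
}

With 8 queues and a single traffic class, the congestion group would admit
8192 frames before taildrop, while each individual FQ taildrops at 1 MiB,
matching the commit's goal of dropping at frame queue/group level rather
than on buffer pool depletion.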