openwrt/target/linux/layerscape/patches-5.4/701-net-0192-dpaa2-eth-Add-support-for-Rx-traffic-classes.patch
Yangbo Lu cddd459140 layerscape: add patches-5.4
Add patches for linux-5.4. The patches are from the NXP LSDK-20.04
release, which was tagged LSDK-20.04-V5.4.
https://source.codeaurora.org/external/qoriq/qoriq-components/linux/

For boards LS1021A-IOT and Traverse-LS1043, which are not part of
LSDK, port the dts patches from 4.14.

The patches are sorted into the following categories:
  301-arch-xxxx
  302-dts-xxxx
  303-core-xxxx
  701-net-xxxx
  801-audio-xxxx
  802-can-xxxx
  803-clock-xxxx
  804-crypto-xxxx
  805-display-xxxx
  806-dma-xxxx
  807-gpio-xxxx
  808-i2c-xxxx
  809-jailhouse-xxxx
  810-keys-xxxx
  811-kvm-xxxx
  812-pcie-xxxx
  813-pm-xxxx
  814-qe-xxxx
  815-sata-xxxx
  816-sdhc-xxxx
  817-spi-xxxx
  818-thermal-xxxx
  819-uart-xxxx
  820-usb-xxxx
  821-vfio-xxxx

Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
2020-05-07 12:53:06 +02:00

From 936ce2452068cb0f6d48ca7d77d6b975802c19ae Mon Sep 17 00:00:00 2001
From: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Date: Tue, 3 Sep 2019 14:13:32 +0300
Subject: [PATCH] dpaa2-eth: Add support for Rx traffic classes

The firmware reserves for each DPNI a number of RX frame queues
equal to the number of configured flows x number of configured
traffic classes.

Current driver configuration directs all incoming traffic to
FQs corresponding to TC0, leaving all other priority levels unused.

Start adding support for multiple ingress traffic classes, by
configuring the FQs associated with all priority levels, not just
TC0. All settings that are per-TC, such as those related to
hashing and flow steering, are also updated.
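
[Editor's note: the following is an illustrative standalone sketch, not part
of the patch. The constants mirror DPAA2_ETH_MAX_TCS and the new
DPAA2_ETH_MAX_RX_QUEUES_PER_TC from dpaa2-eth.h, and the nested loop mirrors
the enumeration added to setup_fqs(): one Rx FQ per (traffic class, flow)
pair, i.e. queues x TCs frame queues in total.]

  #include <stdio.h>

  #define MAX_TCS              8   /* mirrors DPAA2_ETH_MAX_TCS */
  #define MAX_RX_QUEUES_PER_TC 16  /* mirrors DPAA2_ETH_MAX_RX_QUEUES_PER_TC */

  int main(void)
  {
          int tc, flow, idx = 0;

          /* One Rx frame queue is reserved per (TC, flow) pair */
          for (tc = 0; tc < MAX_TCS; tc++)
                  for (flow = 0; flow < MAX_RX_QUEUES_PER_TC; flow++)
                          printf("fq[%3d]: tc=%d flowid=%d\n", idx++, tc, flow);

          /* 16 flows x 8 TCs = 128 Rx FQs, vs. 16 usable before this change */
          printf("total Rx FQs: %d\n", idx);
          return 0;
  }
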
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
.../ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c | 7 ++-
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 70 +++++++++++++++-------
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 4 +-
.../net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 19 ++++--
4 files changed, 68 insertions(+), 32 deletions(-)
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -81,8 +81,8 @@ static int dpaa2_dbg_fqs_show(struct seq
int i, err;
seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
- seq_printf(file, "%s%16s%16s%16s%16s\n",
- "VFQID", "CPU", "Type", "Frames", "Pending frames");
+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
+ "VFQID", "CPU", "TC", "Type", "Frames", "Pending frames");
for (i = 0; i < priv->num_fqs; i++) {
fq = &priv->fq[i];
@@ -90,9 +90,10 @@ static int dpaa2_dbg_fqs_show(struct seq
if (err)
fcnt = 0;
- seq_printf(file, "%5d%16d%16s%16llu%16u\n",
+ seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
fq->fqid,
fq->target_cpu,
+ fq->tc,
fq_type_to_str(fq),
fq->stats.frames,
fcnt);
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1231,6 +1231,7 @@ static void disable_ch_napi(struct dpaa2
static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
{
struct dpni_taildrop td = {0};
+ struct dpaa2_eth_fq *fq;
int i, err;
if (priv->rx_td_enabled == enable)
@@ -1240,11 +1241,12 @@ static void dpaa2_eth_set_rx_taildrop(st
td.threshold = DPAA2_ETH_TAILDROP_THRESH;
for (i = 0; i < priv->num_fqs; i++) {
- if (priv->fq[i].type != DPAA2_RX_FQ)
+ fq = &priv->fq[i];
+ if (fq->type != DPAA2_RX_FQ)
continue;
err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
- DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
- priv->fq[i].flowid, &td);
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+ fq->tc, fq->flowid, &td);
if (err) {
netdev_err(priv->net_dev,
"dpni_set_taildrop() failed\n");
@@ -2338,7 +2340,7 @@ static void set_fq_affinity(struct dpaa2
static void setup_fqs(struct dpaa2_eth_priv *priv)
{
- int i;
+ int i, j;
/* We have one TxConf FQ per Tx flow.
* The number of Tx and Rx queues is the same.
@@ -2350,10 +2352,13 @@ static void setup_fqs(struct dpaa2_eth_p
priv->fq[priv->num_fqs++].flowid = (u16)i;
}
- for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
- priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
- priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
- priv->fq[priv->num_fqs++].flowid = (u16)i;
+ for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+ priv->fq[priv->num_fqs].tc = (u8)j;
+ priv->fq[priv->num_fqs++].flowid = (u16)i;
+ }
}
/* For each FQ, decide on which core to process incoming frames */
@@ -2694,7 +2699,7 @@ static int setup_rx_flow(struct dpaa2_et
int err;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
+ DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
if (err) {
dev_err(dev, "dpni_get_queue(RX) failed\n");
return err;
@@ -2707,7 +2712,7 @@ static int setup_rx_flow(struct dpaa2_et
queue.destination.priority = 1;
queue.user_context = (u64)(uintptr_t)fq;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_RX, 0, fq->flowid,
+ DPNI_QUEUE_RX, fq->tc, fq->flowid,
DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
&queue);
if (err) {
@@ -2716,6 +2721,10 @@ static int setup_rx_flow(struct dpaa2_et
}
/* xdp_rxq setup */
+ /* only once for each channel */
+ if (fq->tc > 0)
+ return 0;
+
err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
fq->flowid);
if (err) {
@@ -2853,7 +2862,7 @@ static int config_legacy_hash_key(struct
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_tc_dist_cfg dist_cfg;
- int err;
+ int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
@@ -2861,9 +2870,14 @@ static int config_legacy_hash_key(struct
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
- if (err)
- dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
+ i, &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+ break;
+ }
+ }
return err;
}
@@ -2873,7 +2887,7 @@ static int config_hash_key(struct dpaa2_
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
- int err;
+ int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
@@ -2881,9 +2895,15 @@ static int config_hash_key(struct dpaa2_
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.enable = 1;
- err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
- if (err)
- dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
+ err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
+ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+ break;
+ }
+ }
return err;
}
@@ -2893,7 +2913,7 @@ static int config_cls_key(struct dpaa2_e
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
- int err;
+ int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
@@ -2901,9 +2921,15 @@ static int config_cls_key(struct dpaa2_e
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.enable = 1;
- err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
- if (err)
- dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
+ err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
+ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+ break;
+ }
+ }
return err;
}
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -291,7 +291,9 @@ struct dpaa2_eth_ch_stats {
/* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS 8
-#define DPAA2_ETH_MAX_RX_QUEUES 16
+#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
+#define DPAA2_ETH_MAX_RX_QUEUES \
+ (DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
#define DPAA2_ETH_MAX_TX_QUEUES 16
#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
DPAA2_ETH_MAX_TX_QUEUES)
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -502,7 +502,7 @@ static int do_cls_rule(struct net_device
dma_addr_t key_iova;
u64 fields = 0;
void *key_buf;
- int err;
+ int i, err;
if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
fs->ring_cookie >= dpaa2_eth_queue_count(priv))
@@ -562,11 +562,18 @@ static int do_cls_rule(struct net_device
fs_act.options |= DPNI_FS_OPT_DISCARD;
else
fs_act.flow_id = fs->ring_cookie;
- err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
- fs->location, &rule_cfg, &fs_act);
- } else {
- err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
- &rule_cfg);
+ }
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (add)
+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
+ i, fs->location, &rule_cfg,
+ &fs_act);
+ else
+ err = dpni_remove_fs_entry(priv->mc_io, 0,
+ priv->mc_token, i,
+ &rule_cfg);
+ if (err)
+ break;
}
dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);