From 73b0aa73b401810424afa90bf58663a56ad9d51a Mon Sep 17 00:00:00 2001
From: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Date: Fri, 5 May 2017 19:07:50 +0300
Subject: [PATCH] dpaa2-eth: Add Rx error queue

Until now, all error frames on the ingress path were discarded
in hardware. For debugging purposes, add an option to have these
frames delivered to the CPU on a dedicated queue.

TODO: Remove the Kconfig option; find another way to enable
Rx error queue support.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa2/Kconfig     | 10 +++
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 97 ++++++++++++++++++++++++
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h |  5 +-
 3 files changed, 111 insertions(+), 1 deletion(-)

--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -15,6 +15,16 @@ config FSL_DPAA2_ETH_DCB
 	depends on DCB
 	help
 	  Enable Priority-Based Flow Control (PFC) support in the driver
+
+config FSL_DPAA2_ETH_USE_ERR_QUEUE
+	bool "Enable Rx error queue"
+	default n
+	help
+	  Allow Rx error frames to be enqueued on an error queue
+	  and processed by the driver (by default they are dropped
+	  in hardware).
+	  This may impact performance; recommended for debugging
+	  purposes only.
 endif
 
 config FSL_DPAA2_PTP_CLOCK
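Note that this is strictly a build-time switch: the error-queue code is
compiled in or out via #ifdef, with no runtime toggle, which is what the
TODO above wants to improve. As a usage sketch (CONFIG_FSL_DPAA2_ETH is
the driver's existing top-level option; the fragment below is
illustrative, not part of the patch), enabling the queue means rebuilding
with:

    CONFIG_FSL_DPAA2_ETH=y
    CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE=y
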
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -449,6 +449,53 @@ err_frame_format:
 	percpu_stats->rx_dropped++;
 }
 
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+/* Processing of Rx frames received on the error FQ.
+ * We check and print the error bits, then free the frame.
+ */
+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
+			     struct dpaa2_eth_channel *ch,
+			     const struct dpaa2_fd *fd,
+			     struct dpaa2_eth_fq *fq __always_unused)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	dma_addr_t addr = dpaa2_fd_get_addr(fd);
+	void *vaddr;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_fas *fas;
+	u32 status = 0;
+	u32 fd_errors;
+	bool has_fas_errors = false;
+
+	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+	dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+
+	/* Check frame errors in the FD field */
+	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
+	if (likely(fd_errors)) {
+		has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
+				 !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
+		if (net_ratelimit())
+			netdev_dbg(priv->net_dev, "Rx frame FD err: 0x%08x\n",
+				   fd_errors);
+	}
+
+	/* Check frame errors in the FAS field */
+	if (has_fas_errors) {
+		fas = dpaa2_get_fas(vaddr, false);
+		status = le32_to_cpu(fas->status);
+		if (net_ratelimit())
+			netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
+				   status & DPAA2_FAS_RX_ERR_MASK);
+	}
+	free_rx_fd(priv, fd, vaddr);
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	percpu_stats->rx_errors++;
+	ch->buf_count--;
+}
+#endif
+
 /* Consume all frames pull-dequeued into the store. This is the simplest way to
  * make sure we don't accidentally issue another volatile dequeue which would
  * overwrite (leak) frames already in the store.
@@ -2351,6 +2398,7 @@ static void set_fq_affinity(struct dpaa2
 		fq = &priv->fq[i];
 		switch (fq->type) {
 		case DPAA2_RX_FQ:
+		case DPAA2_RX_ERR_FQ:
 			fq->target_cpu = rx_cpu;
 			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
 			if (rx_cpu >= nr_cpu_ids)
@@ -2394,6 +2442,12 @@ static void setup_fqs(struct dpaa2_eth_p
 		}
 	}
 
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+	/* We have exactly one Rx error queue per DPNI */
+	priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
+	priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
+#endif
+
 	/* For each FQ, decide on which core to process incoming frames */
 	set_fq_affinity(priv);
 }
@@ -2944,6 +2998,40 @@ static int setup_tx_flow(struct dpaa2_et
 	return 0;
 }
 
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
+			     struct dpaa2_eth_fq *fq)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_queue q = { { 0 } };
+	struct dpni_queue_id qid;
+	u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+	int err;
+
+	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
+	if (err) {
+		dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
+		return err;
+	}
+
+	fq->fqid = qid.fqid;
+
+	q.destination.id = fq->channel->dpcon_id;
+	q.destination.type = DPNI_DEST_DPCON;
+	q.destination.priority = 1;
+	q.user_context = (u64)(uintptr_t)fq;
+	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
+	if (err) {
+		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
+		return err;
+	}
+
+	return 0;
+}
+#endif
+
 /* Supported header fields for Rx hash distribution key */
 static const struct dpaa2_eth_dist_fields dist_fields[] = {
 	{
@@ -3313,7 +3401,11 @@ static int bind_dpni(struct dpaa2_eth_pr
 	/* Configure handling of error frames */
 	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
 	err_cfg.set_frame_annotation = 1;
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+	err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
+#else
 	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+#endif
 	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
 				       &err_cfg);
 	if (err) {
@@ -3330,6 +3422,11 @@ static int bind_dpni(struct dpaa2_eth_pr
 		case DPAA2_TX_CONF_FQ:
 			err = setup_tx_flow(priv, &priv->fq[i]);
 			break;
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+		case DPAA2_RX_ERR_FQ:
+			err = setup_rx_err_flow(priv, &priv->fq[i]);
+			break;
+#endif
 		default:
 			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
 			return -EINVAL;
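How the new handler gets invoked: there is no error-specific plumbing on
the dequeue side. setup_rx_err_flow() stores the fq pointer in the
queue's user_context, and the driver's generic consume path reads that
context back from each dequeue entry and calls fq->consume, which for
DPAA2_RX_ERR_FQ points at dpaa2_eth_rx_err(). A minimal sketch of that
dispatch step (illustrative only, not part of the patch; it mirrors the
existing consume logic using the dpaa2_dq_fqd_ctx() and dpaa2_dq_fd()
accessors from include/soc/fsl/dpaa2-global.h):

	/* Dispatch one dequeue entry to the handler registered for its FQ */
	static void consume_one_frame(struct dpaa2_eth_priv *priv,
				      struct dpaa2_eth_channel *ch,
				      const struct dpaa2_dq *dq)
	{
		struct dpaa2_eth_fq *fq;

		/* Recover the fq pointer stashed in user_context at setup */
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		/* Rx, Tx-conf and Rx-err frames all take this same path */
		fq->consume(priv, ch, dpaa2_dq_fd(dq), fq);
	}
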
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -318,8 +318,10 @@ struct dpaa2_eth_ch_stats {
 #define DPAA2_ETH_MAX_RX_QUEUES \
 	(DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
 #define DPAA2_ETH_MAX_TX_QUEUES	16
+#define DPAA2_ETH_MAX_RX_ERR_QUEUES	1
 #define DPAA2_ETH_MAX_QUEUES	(DPAA2_ETH_MAX_RX_QUEUES + \
-				DPAA2_ETH_MAX_TX_QUEUES)
+				DPAA2_ETH_MAX_TX_QUEUES + \
+				DPAA2_ETH_MAX_RX_ERR_QUEUES)
 #define DPAA2_ETH_MAX_NETDEV_QUEUES \
 	(DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)
 
@@ -328,6 +330,7 @@ struct dpaa2_eth_ch_stats {
 enum dpaa2_eth_fq_type {
 	DPAA2_RX_FQ = 0,
 	DPAA2_TX_CONF_FQ,
+	DPAA2_RX_ERR_FQ
 };
 
 struct dpaa2_eth_priv;