From: Felix Fietkau <nbd@nbd.name>
Date: Mon, 20 Mar 2023 11:44:30 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: add code for offloading flows
 from wlan devices

WED version 2 (on MT7986 and later) can offload flows originating from wireless
devices. In order to make that work, ndo_setup_tc needs to be implemented on
the netdevs. This adds the required code to offload flows coming in from WED,
while keeping track of the incoming wed index used for selecting the correct
PPE device.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
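Note for reviewers: the hunks below only add the eth/WED side plumbing; a WLAN
driver still has to call into it from its own ndo_setup_tc. A minimal sketch of
that glue, assuming a hypothetical driver private struct (example_wlan_priv)
that embeds the mtk_wed_device it registered with WED:

	#include <linux/netdevice.h>
	#include <linux/soc/mediatek/mtk_wed.h>

	/* Hypothetical WLAN driver private data embedding a WED device. */
	struct example_wlan_priv {
		struct mtk_wed_device wed;
	};

	static int example_wlan_setup_tc(struct net_device *dev,
					 enum tc_setup_type type,
					 void *type_data)
	{
		struct example_wlan_priv *priv = netdev_priv(dev);

		/* Only forward to WED when a WED instance is actually
		 * attached; mtk_wed_setup_tc() itself rejects WED v1
		 * hardware with -EOPNOTSUPP.
		 */
		if (!mtk_wed_device_active(&priv->wed))
			return -EOPNOTSUPP;

		/* TC_SETUP_BLOCK/TC_SETUP_FT get routed to the PPE selected
		 * by the WED hw index (see mtk_wed_setup_tc_block_cb below).
		 */
		return mtk_wed_device_setup_tc(&priv->wed, dev, type,
					       type_data);
	}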
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1364,6 +1364,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
 int mtk_eth_offload_init(struct mtk_eth *eth);
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 		     void *type_data);
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+			 int ppe_index);
+void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -235,7 +235,8 @@ out:
 }
 
 static int
-mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
+			 int ppe_index)
 {
 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 	struct flow_action_entry *act;
@@ -452,6 +453,7 @@ mtk_flow_offload_replace(struct mtk_eth
 	entry->cookie = f->cookie;
 	memcpy(&entry->data, &foe, sizeof(entry->data));
 	entry->wed_index = wed_index;
+	entry->ppe_index = ppe_index;
 
 	err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
 	if (err < 0)
@@ -520,25 +522,15 @@ mtk_flow_offload_stats(struct mtk_eth *e
 
 static DEFINE_MUTEX(mtk_flow_offload_mutex);
 
-static int
-mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+			 int ppe_index)
 {
-	struct flow_cls_offload *cls = type_data;
-	struct net_device *dev = cb_priv;
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
 	int err;
 
-	if (!tc_can_offload(dev))
-		return -EOPNOTSUPP;
-
-	if (type != TC_SETUP_CLSFLOWER)
-		return -EOPNOTSUPP;
-
 	mutex_lock(&mtk_flow_offload_mutex);
 	switch (cls->command) {
 	case FLOW_CLS_REPLACE:
-		err = mtk_flow_offload_replace(eth, cls);
+		err = mtk_flow_offload_replace(eth, cls, ppe_index);
 		break;
 	case FLOW_CLS_DESTROY:
 		err = mtk_flow_offload_destroy(eth, cls);
@@ -556,6 +548,23 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_
 }
 
 static int
+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+	struct flow_cls_offload *cls = type_data;
+	struct net_device *dev = cb_priv;
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	if (!tc_can_offload(dev))
+		return -EOPNOTSUPP;
+
+	if (type != TC_SETUP_CLSFLOWER)
+		return -EOPNOTSUPP;
+
+	return mtk_flow_offload_cmd(eth, cls, 0);
+}
+
+static int
 mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 {
 	struct mtk_mac *mac = netdev_priv(dev);
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -13,6 +13,8 @@
 #include <linux/mfd/syscon.h>
 #include <linux/debugfs.h>
 #include <linux/soc/mediatek/mtk_wed.h>
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
 #include "mtk_eth_soc.h"
 #include "mtk_wed_regs.h"
 #include "mtk_wed.h"
@@ -41,6 +43,11 @@
 static struct mtk_wed_hw *hw_list[2];
 static DEFINE_MUTEX(hw_lock);
 
+struct mtk_wed_flow_block_priv {
+	struct mtk_wed_hw *hw;
+	struct net_device *dev;
+};
+
 static void
 wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
 {
@@ -1752,6 +1759,99 @@ out:
 	mutex_unlock(&hw_lock);
 }
 
+static int
+mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+	struct mtk_wed_flow_block_priv *priv = cb_priv;
+	struct flow_cls_offload *cls = type_data;
+	struct mtk_wed_hw *hw = priv->hw;
+
+	if (!tc_can_offload(priv->dev))
+		return -EOPNOTSUPP;
+
+	if (type != TC_SETUP_CLSFLOWER)
+		return -EOPNOTSUPP;
+
+	return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
+}
+
+static int
+mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
+		       struct flow_block_offload *f)
+{
+	struct mtk_wed_flow_block_priv *priv;
+	static LIST_HEAD(block_cb_list);
+	struct flow_block_cb *block_cb;
+	struct mtk_eth *eth = hw->eth;
+	flow_setup_cb_t *cb;
+
+	if (!eth->soc->offload_version)
+		return -EOPNOTSUPP;
+
+	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	cb = mtk_wed_setup_tc_block_cb;
+	f->driver_block_list = &block_cb_list;
+
+	switch (f->command) {
+	case FLOW_BLOCK_BIND:
+		block_cb = flow_block_cb_lookup(f->block, cb, dev);
+		if (block_cb) {
+			flow_block_cb_incref(block_cb);
+			return 0;
+		}
+
+		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+		if (!priv)
+			return -ENOMEM;
+
+		priv->hw = hw;
+		priv->dev = dev;
+		block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
+		if (IS_ERR(block_cb)) {
+			kfree(priv);
+			return PTR_ERR(block_cb);
+		}
+
+		flow_block_cb_incref(block_cb);
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &block_cb_list);
+		return 0;
+	case FLOW_BLOCK_UNBIND:
+		block_cb = flow_block_cb_lookup(f->block, cb, dev);
+		if (!block_cb)
+			return -ENOENT;
+
+		if (!flow_block_cb_decref(block_cb)) {
+			flow_block_cb_remove(block_cb, f);
+			list_del(&block_cb->driver_list);
+			kfree(block_cb->cb_priv);
+		}
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
+		 enum tc_setup_type type, void *type_data)
+{
+	struct mtk_wed_hw *hw = wed->hw;
+
+	if (hw->version < 2)
+		return -EOPNOTSUPP;
+
+	switch (type) {
+	case TC_SETUP_BLOCK:
+	case TC_SETUP_FT:
+		return mtk_wed_setup_tc_block(hw, dev, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
 		    void __iomem *wdma, phys_addr_t wdma_phy,
 		    int index)
@@ -1771,6 +1871,7 @@ void mtk_wed_add_hw(struct device_node *
 		.irq_set_mask = mtk_wed_irq_set_mask,
 		.detach = mtk_wed_detach,
 		.ppe_check = mtk_wed_ppe_check,
+		.setup_tc = mtk_wed_setup_tc,
 	};
 	struct device_node *eth_np = eth->dev->of_node;
 	struct platform_device *pdev;
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -6,6 +6,7 @@
 #include <linux/regmap.h>
 #include <linux/pci.h>
 #include <linux/skbuff.h>
+#include <linux/netdevice.h>
 
 #define MTK_WED_TX_QUEUES		2
 #define MTK_WED_RX_QUEUES		2
@@ -180,6 +181,8 @@ struct mtk_wed_ops {
 
 	u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
 	void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+	int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
+			enum tc_setup_type type, void *type_data);
 };
 
 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
@@ -238,6 +241,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic
 	(_dev)->ops->msg_update(_dev, _id, _msg, _len)
 #define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev)
 #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
+	(_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
 #else
 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
 {
@@ -256,6 +261,7 @@ static inline bool mtk_wed_device_active
 #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
 #define mtk_wed_device_stop(_dev) do {} while (0)
 #define mtk_wed_device_dma_reset(_dev) do {} while (0)
+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
 #endif
 
 #endif