mirror of
https://github.com/openwrt/openwrt.git
synced 2024-12-23 23:42:43 +00:00
kernel: backport upstream mtk_eth_soc patches
Includes MT7986 ethernet support Signed-off-by: Felix Fietkau <nbd@nbd.name>
This commit is contained in:
parent
0e0058a870
commit
7676808434
@ -0,0 +1,56 @@
|
||||
From: Felix Fietkau <nbd@nbd.name>
|
||||
Date: Fri, 8 Apr 2022 10:59:45 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc/wed: fix sparse endian warnings
|
||||
|
||||
Descriptor fields are little-endian
|
||||
|
||||
Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)")
|
||||
Reported-by: kernel test robot <lkp@intel.com>
|
||||
Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
|
||||
@@ -144,16 +144,17 @@ mtk_wed_buffer_alloc(struct mtk_wed_devi
|
||||
|
||||
for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
|
||||
u32 txd_size;
|
||||
+ u32 ctrl;
|
||||
|
||||
txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
|
||||
|
||||
- desc->buf0 = buf_phys;
|
||||
- desc->buf1 = buf_phys + txd_size;
|
||||
- desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
|
||||
- txd_size) |
|
||||
- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
|
||||
- MTK_WED_BUF_SIZE - txd_size) |
|
||||
- MTK_WDMA_DESC_CTRL_LAST_SEG1;
|
||||
+ desc->buf0 = cpu_to_le32(buf_phys);
|
||||
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
|
||||
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
|
||||
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
|
||||
+ MTK_WED_BUF_SIZE - txd_size) |
|
||||
+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
|
||||
+ desc->ctrl = cpu_to_le32(ctrl);
|
||||
desc->info = 0;
|
||||
desc++;
|
||||
|
||||
@@ -184,12 +185,14 @@ mtk_wed_free_buffer(struct mtk_wed_devic
|
||||
|
||||
for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
|
||||
void *page = page_list[page_idx++];
|
||||
+ dma_addr_t buf_addr;
|
||||
|
||||
if (!page)
|
||||
break;
|
||||
|
||||
- dma_unmap_page(dev->hw->dev, desc[i].buf0,
|
||||
- PAGE_SIZE, DMA_BIDIRECTIONAL);
|
||||
+ buf_addr = le32_to_cpu(desc[i].buf0);
|
||||
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
|
||||
+ DMA_BIDIRECTIONAL);
|
||||
__free_page(page);
|
||||
}
|
||||
|
@ -0,0 +1,25 @@
|
||||
From: Yang Yingliang <yangyingliang@huawei.com>
|
||||
Date: Fri, 8 Apr 2022 11:22:46 +0800
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: fix return value check in
|
||||
mtk_wed_add_hw()
|
||||
|
||||
If syscon_regmap_lookup_by_phandle() fails, it never return NULL pointer,
|
||||
change the check to IS_ERR().
|
||||
|
||||
Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)")
|
||||
Reported-by: Hulk Robot <hulkci@huawei.com>
|
||||
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
|
||||
@@ -816,7 +816,7 @@ void mtk_wed_add_hw(struct device_node *
|
||||
return;
|
||||
|
||||
regs = syscon_regmap_lookup_by_phandle(np, NULL);
|
||||
- if (!regs)
|
||||
+ if (IS_ERR(regs))
|
||||
return;
|
||||
|
||||
rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
|
@ -0,0 +1,35 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Mon, 11 Apr 2022 12:13:25 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: use standard property for
|
||||
cci-control-port
|
||||
|
||||
Rely on standard cci-control-port property to identify CCI port
|
||||
reference.
|
||||
Update mt7622 dts binding.
|
||||
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
|
||||
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
|
||||
@@ -962,7 +962,7 @@
|
||||
power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
|
||||
mediatek,ethsys = <ðsys>;
|
||||
mediatek,sgmiisys = <&sgmiisys>;
|
||||
- mediatek,cci-control = <&cci_control2>;
|
||||
+ cci-control-port = <&cci_control2>;
|
||||
mediatek,wed = <&wed0>, <&wed1>;
|
||||
mediatek,pcie-mirror = <&pcie_mirror>;
|
||||
mediatek,hifsys = <&hifsys>;
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -3165,7 +3165,7 @@ static int mtk_probe(struct platform_dev
|
||||
struct regmap *cci;
|
||||
|
||||
cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
|
||||
- "mediatek,cci-control");
|
||||
+ "cci-control-port");
|
||||
/* enable CPU/bus coherency */
|
||||
if (!IS_ERR(cci))
|
||||
regmap_write(cci, 0, 3);
|
@ -0,0 +1,33 @@
|
||||
From: Dan Carpenter <dan.carpenter@oracle.com>
|
||||
Date: Tue, 12 Apr 2022 12:24:19 +0300
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: use after free in
|
||||
__mtk_ppe_check_skb()
|
||||
|
||||
The __mtk_foe_entry_clear() function frees "entry" so we have to use
|
||||
the _safe() version of hlist_for_each_entry() to prevent a use after
|
||||
free.
|
||||
|
||||
Fixes: 33fc42de3327 ("net: ethernet: mtk_eth_soc: support creating mac address based offload entries")
|
||||
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
|
||||
@@ -600,6 +600,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe
|
||||
struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
|
||||
struct mtk_flow_entry *entry;
|
||||
struct mtk_foe_bridge key = {};
|
||||
+ struct hlist_node *n;
|
||||
struct ethhdr *eh;
|
||||
bool found = false;
|
||||
u8 *tag;
|
||||
@@ -609,7 +610,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe
|
||||
if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
|
||||
goto out;
|
||||
|
||||
- hlist_for_each_entry(entry, head, list) {
|
||||
+ hlist_for_each_entry_safe(entry, n, head, list) {
|
||||
if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
|
||||
if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
|
||||
MTK_FOE_STATE_BIND))
|
@ -0,0 +1,22 @@
|
||||
From: Dan Carpenter <dan.carpenter@oracle.com>
|
||||
Date: Thu, 21 Apr 2022 18:49:02 +0300
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: add check for allocation failure
|
||||
|
||||
Check if the kzalloc() failed.
|
||||
|
||||
Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)")
|
||||
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
|
||||
@@ -827,6 +827,8 @@ void mtk_wed_add_hw(struct device_node *
|
||||
goto unlock;
|
||||
|
||||
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
|
||||
+ if (!hw)
|
||||
+ goto unlock;
|
||||
hw->node = np;
|
||||
hw->regs = regs;
|
||||
hw->eth = eth;
|
@ -0,0 +1,26 @@
|
||||
From: Jakub Kicinski <kuba@kernel.org>
|
||||
Date: Fri, 20 May 2022 12:56:03 -0700
|
||||
Subject: [PATCH] eth: mtk_eth_soc: silence the GCC 12 array-bounds warning
|
||||
|
||||
GCC 12 gets upset because in mtk_foe_entry_commit_subflow()
|
||||
this driver allocates a partial structure. The writes are
|
||||
within bounds.
|
||||
|
||||
Silence these warnings for now, our build bot runs GCC 12
|
||||
so we won't allow any new instances.
|
||||
|
||||
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/Makefile
|
||||
+++ b/drivers/net/ethernet/mediatek/Makefile
|
||||
@@ -11,3 +11,8 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) +
|
||||
endif
|
||||
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
|
||||
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
|
||||
+
|
||||
+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
|
||||
+ifndef KBUILD_EXTRA_WARN
|
||||
+CFLAGS_mtk_ppe.o += -Wno-array-bounds
|
||||
+endif
|
@ -0,0 +1,52 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:26 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on GFP_KERNEL for
|
||||
dma_alloc_coherent whenever possible
|
||||
|
||||
Rely on GFP_KERNEL for dma descriptors mappings in mtk_tx_alloc(),
|
||||
mtk_rx_alloc() and mtk_init_fq_dma() since they are run in non-irq
|
||||
context.
|
||||
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -834,7 +834,7 @@ static int mtk_init_fq_dma(struct mtk_et
|
||||
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
|
||||
cnt * sizeof(struct mtk_tx_dma),
|
||||
ð->phy_scratch_ring,
|
||||
- GFP_ATOMIC);
|
||||
+ GFP_KERNEL);
|
||||
if (unlikely(!eth->scratch_ring))
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -1609,7 +1609,7 @@ static int mtk_tx_alloc(struct mtk_eth *
|
||||
goto no_tx_mem;
|
||||
|
||||
ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
|
||||
- &ring->phys, GFP_ATOMIC);
|
||||
+ &ring->phys, GFP_KERNEL);
|
||||
if (!ring->dma)
|
||||
goto no_tx_mem;
|
||||
|
||||
@@ -1627,8 +1627,7 @@ static int mtk_tx_alloc(struct mtk_eth *
|
||||
*/
|
||||
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
|
||||
ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
|
||||
- &ring->phys_pdma,
|
||||
- GFP_ATOMIC);
|
||||
+ &ring->phys_pdma, GFP_KERNEL);
|
||||
if (!ring->dma_pdma)
|
||||
goto no_tx_mem;
|
||||
|
||||
@@ -1740,7 +1739,7 @@ static int mtk_rx_alloc(struct mtk_eth *
|
||||
|
||||
ring->dma = dma_alloc_coherent(eth->dma_dev,
|
||||
rx_dma_size * sizeof(*ring->dma),
|
||||
- &ring->phys, GFP_ATOMIC);
|
||||
+ &ring->phys, GFP_KERNEL);
|
||||
if (!ring->dma)
|
||||
return -ENOMEM;
|
||||
|
@ -0,0 +1,206 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:27 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: move tx dma desc configuration in
|
||||
mtk_tx_set_dma_desc
|
||||
|
||||
Move tx dma descriptor configuration in mtk_tx_set_dma_desc routine.
|
||||
This is a preliminary patch to introduce mt7986 ethernet support since
|
||||
it relies on a different tx dma descriptor layout.
|
||||
|
||||
Tested-by: Sam Shih <sam.shih@mediatek.com>
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -960,18 +960,51 @@ static void setup_tx_buf(struct mtk_eth
|
||||
}
|
||||
}
|
||||
|
||||
+static void mtk_tx_set_dma_desc(struct net_device *dev, struct mtk_tx_dma *desc,
|
||||
+ struct mtk_tx_dma_desc_info *info)
|
||||
+{
|
||||
+ struct mtk_mac *mac = netdev_priv(dev);
|
||||
+ u32 data;
|
||||
+
|
||||
+ WRITE_ONCE(desc->txd1, info->addr);
|
||||
+
|
||||
+ data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
|
||||
+ if (info->last)
|
||||
+ data |= TX_DMA_LS0;
|
||||
+ WRITE_ONCE(desc->txd3, data);
|
||||
+
|
||||
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
|
||||
+ if (info->first) {
|
||||
+ if (info->gso)
|
||||
+ data |= TX_DMA_TSO;
|
||||
+ /* tx checksum offload */
|
||||
+ if (info->csum)
|
||||
+ data |= TX_DMA_CHKSUM;
|
||||
+ /* vlan header offload */
|
||||
+ if (info->vlan)
|
||||
+ data |= TX_DMA_INS_VLAN | info->vlan_tci;
|
||||
+ }
|
||||
+ WRITE_ONCE(desc->txd4, data);
|
||||
+}
|
||||
+
|
||||
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
|
||||
int tx_num, struct mtk_tx_ring *ring, bool gso)
|
||||
{
|
||||
+ struct mtk_tx_dma_desc_info txd_info = {
|
||||
+ .size = skb_headlen(skb),
|
||||
+ .gso = gso,
|
||||
+ .csum = skb->ip_summed == CHECKSUM_PARTIAL,
|
||||
+ .vlan = skb_vlan_tag_present(skb),
|
||||
+ .vlan_tci = skb_vlan_tag_get(skb),
|
||||
+ .first = true,
|
||||
+ .last = !skb_is_nonlinear(skb),
|
||||
+ };
|
||||
struct mtk_mac *mac = netdev_priv(dev);
|
||||
struct mtk_eth *eth = mac->hw;
|
||||
struct mtk_tx_dma *itxd, *txd;
|
||||
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
|
||||
struct mtk_tx_buf *itx_buf, *tx_buf;
|
||||
- dma_addr_t mapped_addr;
|
||||
- unsigned int nr_frags;
|
||||
int i, n_desc = 1;
|
||||
- u32 txd4 = 0, fport;
|
||||
int k = 0;
|
||||
|
||||
itxd = ring->next_free;
|
||||
@@ -979,49 +1012,32 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
if (itxd == ring->last_free)
|
||||
return -ENOMEM;
|
||||
|
||||
- /* set the forward port */
|
||||
- fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
|
||||
- txd4 |= fport;
|
||||
-
|
||||
itx_buf = mtk_desc_to_tx_buf(ring, itxd);
|
||||
memset(itx_buf, 0, sizeof(*itx_buf));
|
||||
|
||||
- if (gso)
|
||||
- txd4 |= TX_DMA_TSO;
|
||||
-
|
||||
- /* TX Checksum offload */
|
||||
- if (skb->ip_summed == CHECKSUM_PARTIAL)
|
||||
- txd4 |= TX_DMA_CHKSUM;
|
||||
-
|
||||
- /* VLAN header offload */
|
||||
- if (skb_vlan_tag_present(skb))
|
||||
- txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
|
||||
-
|
||||
- mapped_addr = dma_map_single(eth->dma_dev, skb->data,
|
||||
- skb_headlen(skb), DMA_TO_DEVICE);
|
||||
- if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
|
||||
+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
|
||||
+ DMA_TO_DEVICE);
|
||||
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
|
||||
return -ENOMEM;
|
||||
|
||||
- WRITE_ONCE(itxd->txd1, mapped_addr);
|
||||
+ mtk_tx_set_dma_desc(dev, itxd, &txd_info);
|
||||
+
|
||||
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
|
||||
itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
|
||||
MTK_TX_FLAGS_FPORT1;
|
||||
- setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
|
||||
+ setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
|
||||
k++);
|
||||
|
||||
/* TX SG offload */
|
||||
txd = itxd;
|
||||
txd_pdma = qdma_to_pdma(ring, txd);
|
||||
- nr_frags = skb_shinfo(skb)->nr_frags;
|
||||
|
||||
- for (i = 0; i < nr_frags; i++) {
|
||||
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||
unsigned int offset = 0;
|
||||
int frag_size = skb_frag_size(frag);
|
||||
|
||||
while (frag_size) {
|
||||
- bool last_frag = false;
|
||||
- unsigned int frag_map_size;
|
||||
bool new_desc = true;
|
||||
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
|
||||
@@ -1036,23 +1052,17 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
new_desc = false;
|
||||
}
|
||||
|
||||
-
|
||||
- frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
|
||||
- mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
|
||||
- frag_map_size,
|
||||
- DMA_TO_DEVICE);
|
||||
- if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
|
||||
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
|
||||
+ txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
|
||||
+ txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
|
||||
+ !(frag_size - txd_info.size);
|
||||
+ txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
|
||||
+ offset, txd_info.size,
|
||||
+ DMA_TO_DEVICE);
|
||||
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
|
||||
goto err_dma;
|
||||
|
||||
- if (i == nr_frags - 1 &&
|
||||
- (frag_size - frag_map_size) == 0)
|
||||
- last_frag = true;
|
||||
-
|
||||
- WRITE_ONCE(txd->txd1, mapped_addr);
|
||||
- WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
|
||||
- TX_DMA_PLEN0(frag_map_size) |
|
||||
- last_frag * TX_DMA_LS0));
|
||||
- WRITE_ONCE(txd->txd4, fport);
|
||||
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
|
||||
|
||||
tx_buf = mtk_desc_to_tx_buf(ring, txd);
|
||||
if (new_desc)
|
||||
@@ -1062,20 +1072,17 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
|
||||
MTK_TX_FLAGS_FPORT1;
|
||||
|
||||
- setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
|
||||
- frag_map_size, k++);
|
||||
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
|
||||
+ txd_info.size, k++);
|
||||
|
||||
- frag_size -= frag_map_size;
|
||||
- offset += frag_map_size;
|
||||
+ frag_size -= txd_info.size;
|
||||
+ offset += txd_info.size;
|
||||
}
|
||||
}
|
||||
|
||||
/* store skb to cleanup */
|
||||
itx_buf->skb = skb;
|
||||
|
||||
- WRITE_ONCE(itxd->txd4, txd4);
|
||||
- WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
|
||||
- (!nr_frags * TX_DMA_LS0)));
|
||||
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
|
||||
if (k & 0x1)
|
||||
txd_pdma->txd2 |= TX_DMA_LS0;
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
@@ -842,6 +842,17 @@ enum mkt_eth_capabilities {
|
||||
MTK_MUX_U3_GMAC2_TO_QPHY | \
|
||||
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
|
||||
|
||||
+struct mtk_tx_dma_desc_info {
|
||||
+ dma_addr_t addr;
|
||||
+ u32 size;
|
||||
+ u16 vlan_tci;
|
||||
+ u8 gso:1;
|
||||
+ u8 csum:1;
|
||||
+ u8 vlan:1;
|
||||
+ u8 first:1;
|
||||
+ u8 last:1;
|
||||
+};
|
||||
+
|
||||
/* struct mtk_eth_data - This is the structure holding all differences
|
||||
* among various plaforms
|
||||
* @ana_rgc3: The offset for register ANA_RGC3 related to
|
@ -0,0 +1,167 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:28 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: add txd_size to mtk_soc_data
|
||||
|
||||
In order to remove mtk_tx_dma size dependency, introduce txd_size in
|
||||
mtk_soc_data data structure. Rely on txd_size in mtk_init_fq_dma() and
|
||||
mtk_dma_free() routines.
|
||||
This is a preliminary patch to add mt7986 ethernet support.
|
||||
|
||||
Tested-by: Sam Shih <sam.shih@mediatek.com>
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -826,20 +826,20 @@ static inline bool mtk_rx_get_desc(struc
|
||||
/* the qdma core needs scratch memory to be setup */
|
||||
static int mtk_init_fq_dma(struct mtk_eth *eth)
|
||||
{
|
||||
+ const struct mtk_soc_data *soc = eth->soc;
|
||||
dma_addr_t phy_ring_tail;
|
||||
int cnt = MTK_DMA_SIZE;
|
||||
dma_addr_t dma_addr;
|
||||
int i;
|
||||
|
||||
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
|
||||
- cnt * sizeof(struct mtk_tx_dma),
|
||||
+ cnt * soc->txrx.txd_size,
|
||||
ð->phy_scratch_ring,
|
||||
GFP_KERNEL);
|
||||
if (unlikely(!eth->scratch_ring))
|
||||
return -ENOMEM;
|
||||
|
||||
- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
|
||||
- GFP_KERNEL);
|
||||
+ eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
|
||||
if (unlikely(!eth->scratch_head))
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -849,16 +849,19 @@ static int mtk_init_fq_dma(struct mtk_et
|
||||
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
|
||||
return -ENOMEM;
|
||||
|
||||
- phy_ring_tail = eth->phy_scratch_ring +
|
||||
- (sizeof(struct mtk_tx_dma) * (cnt - 1));
|
||||
+ phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
- eth->scratch_ring[i].txd1 =
|
||||
- (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
|
||||
+ struct mtk_tx_dma *txd;
|
||||
+
|
||||
+ txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size;
|
||||
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
|
||||
if (i < cnt - 1)
|
||||
- eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
|
||||
- ((i + 1) * sizeof(struct mtk_tx_dma)));
|
||||
- eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
|
||||
+ txd->txd2 = eth->phy_scratch_ring +
|
||||
+ (i + 1) * soc->txrx.txd_size;
|
||||
+
|
||||
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
|
||||
+ txd->txd4 = 0;
|
||||
}
|
||||
|
||||
mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
|
||||
@@ -2149,6 +2152,7 @@ static int mtk_dma_init(struct mtk_eth *
|
||||
|
||||
static void mtk_dma_free(struct mtk_eth *eth)
|
||||
{
|
||||
+ const struct mtk_soc_data *soc = eth->soc;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MTK_MAC_COUNT; i++)
|
||||
@@ -2156,9 +2160,8 @@ static void mtk_dma_free(struct mtk_eth
|
||||
netdev_reset_queue(eth->netdev[i]);
|
||||
if (eth->scratch_ring) {
|
||||
dma_free_coherent(eth->dma_dev,
|
||||
- MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
|
||||
- eth->scratch_ring,
|
||||
- eth->phy_scratch_ring);
|
||||
+ MTK_DMA_SIZE * soc->txrx.txd_size,
|
||||
+ eth->scratch_ring, eth->phy_scratch_ring);
|
||||
eth->scratch_ring = NULL;
|
||||
eth->phy_scratch_ring = 0;
|
||||
}
|
||||
@@ -3368,6 +3371,9 @@ static const struct mtk_soc_data mt2701_
|
||||
.hw_features = MTK_HW_FEATURES,
|
||||
.required_clks = MT7623_CLKS_BITMAP,
|
||||
.required_pctl = true,
|
||||
+ .txrx = {
|
||||
+ .txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ },
|
||||
};
|
||||
|
||||
static const struct mtk_soc_data mt7621_data = {
|
||||
@@ -3376,6 +3382,9 @@ static const struct mtk_soc_data mt7621_
|
||||
.required_clks = MT7621_CLKS_BITMAP,
|
||||
.required_pctl = false,
|
||||
.offload_version = 2,
|
||||
+ .txrx = {
|
||||
+ .txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ },
|
||||
};
|
||||
|
||||
static const struct mtk_soc_data mt7622_data = {
|
||||
@@ -3385,6 +3394,9 @@ static const struct mtk_soc_data mt7622_
|
||||
.required_clks = MT7622_CLKS_BITMAP,
|
||||
.required_pctl = false,
|
||||
.offload_version = 2,
|
||||
+ .txrx = {
|
||||
+ .txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ },
|
||||
};
|
||||
|
||||
static const struct mtk_soc_data mt7623_data = {
|
||||
@@ -3393,6 +3405,9 @@ static const struct mtk_soc_data mt7623_
|
||||
.required_clks = MT7623_CLKS_BITMAP,
|
||||
.required_pctl = true,
|
||||
.offload_version = 2,
|
||||
+ .txrx = {
|
||||
+ .txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ },
|
||||
};
|
||||
|
||||
static const struct mtk_soc_data mt7629_data = {
|
||||
@@ -3401,6 +3416,9 @@ static const struct mtk_soc_data mt7629_
|
||||
.hw_features = MTK_HW_FEATURES,
|
||||
.required_clks = MT7629_CLKS_BITMAP,
|
||||
.required_pctl = false,
|
||||
+ .txrx = {
|
||||
+ .txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ },
|
||||
};
|
||||
|
||||
static const struct mtk_soc_data rt5350_data = {
|
||||
@@ -3408,6 +3426,9 @@ static const struct mtk_soc_data rt5350_
|
||||
.hw_features = MTK_HW_FEATURES_MT7628,
|
||||
.required_clks = MT7628_CLKS_BITMAP,
|
||||
.required_pctl = false,
|
||||
+ .txrx = {
|
||||
+ .txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ },
|
||||
};
|
||||
|
||||
const struct of_device_id of_mtk_match[] = {
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
@@ -863,6 +863,7 @@ struct mtk_tx_dma_desc_info {
|
||||
* the target SoC
|
||||
* @required_pctl A bool value to show whether the SoC requires
|
||||
* the extra setup for those pins used by GMAC.
|
||||
+ * @txd_size Tx DMA descriptor size.
|
||||
*/
|
||||
struct mtk_soc_data {
|
||||
u32 ana_rgc3;
|
||||
@@ -871,6 +872,9 @@ struct mtk_soc_data {
|
||||
bool required_pctl;
|
||||
u8 offload_version;
|
||||
netdev_features_t hw_features;
|
||||
+ struct {
|
||||
+ u32 txd_size;
|
||||
+ } txrx;
|
||||
};
|
||||
|
||||
/* currently no SoC has more than 2 macs */
|
@ -0,0 +1,78 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:29 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size in
|
||||
mtk_tx_alloc/mtk_tx_clean
|
||||
|
||||
This is a preliminary patch to add mt7986 ethernet support.
|
||||
|
||||
Tested-by: Sam Shih <sam.shih@mediatek.com>
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -1610,8 +1610,10 @@ static int mtk_napi_rx(struct napi_struc
|
||||
|
||||
static int mtk_tx_alloc(struct mtk_eth *eth)
|
||||
{
|
||||
+ const struct mtk_soc_data *soc = eth->soc;
|
||||
struct mtk_tx_ring *ring = ð->tx_ring;
|
||||
- int i, sz = sizeof(*ring->dma);
|
||||
+ int i, sz = soc->txrx.txd_size;
|
||||
+ struct mtk_tx_dma *txd;
|
||||
|
||||
ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
|
||||
GFP_KERNEL);
|
||||
@@ -1627,8 +1629,10 @@ static int mtk_tx_alloc(struct mtk_eth *
|
||||
int next = (i + 1) % MTK_DMA_SIZE;
|
||||
u32 next_ptr = ring->phys + next * sz;
|
||||
|
||||
- ring->dma[i].txd2 = next_ptr;
|
||||
- ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
|
||||
+ txd = (void *)ring->dma + i * sz;
|
||||
+ txd->txd2 = next_ptr;
|
||||
+ txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
|
||||
+ txd->txd4 = 0;
|
||||
}
|
||||
|
||||
/* On MT7688 (PDMA only) this driver uses the ring->dma structs
|
||||
@@ -1650,7 +1654,7 @@ static int mtk_tx_alloc(struct mtk_eth *
|
||||
ring->dma_size = MTK_DMA_SIZE;
|
||||
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
|
||||
ring->next_free = &ring->dma[0];
|
||||
- ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
|
||||
+ ring->last_free = (void *)txd;
|
||||
ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
|
||||
ring->thresh = MAX_SKB_FRAGS;
|
||||
|
||||
@@ -1683,6 +1687,7 @@ no_tx_mem:
|
||||
|
||||
static void mtk_tx_clean(struct mtk_eth *eth)
|
||||
{
|
||||
+ const struct mtk_soc_data *soc = eth->soc;
|
||||
struct mtk_tx_ring *ring = ð->tx_ring;
|
||||
int i;
|
||||
|
||||
@@ -1695,17 +1700,15 @@ static void mtk_tx_clean(struct mtk_eth
|
||||
|
||||
if (ring->dma) {
|
||||
dma_free_coherent(eth->dma_dev,
|
||||
- MTK_DMA_SIZE * sizeof(*ring->dma),
|
||||
- ring->dma,
|
||||
- ring->phys);
|
||||
+ MTK_DMA_SIZE * soc->txrx.txd_size,
|
||||
+ ring->dma, ring->phys);
|
||||
ring->dma = NULL;
|
||||
}
|
||||
|
||||
if (ring->dma_pdma) {
|
||||
dma_free_coherent(eth->dma_dev,
|
||||
- MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
|
||||
- ring->dma_pdma,
|
||||
- ring->phys_pdma);
|
||||
+ MTK_DMA_SIZE * soc->txrx.txd_size,
|
||||
+ ring->dma_pdma, ring->phys_pdma);
|
||||
ring->dma_pdma = NULL;
|
||||
}
|
||||
}
|
@ -0,0 +1,109 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:30 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size in
|
||||
mtk_desc_to_tx_buf
|
||||
|
||||
This is a preliminary patch to add mt7986 ethernet support.
|
||||
|
||||
Tested-by: Sam Shih <sam.shih@mediatek.com>
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -879,10 +879,11 @@ static inline void *mtk_qdma_phys_to_vir
|
||||
return ret + (desc - ring->phys);
|
||||
}
|
||||
|
||||
-static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
|
||||
- struct mtk_tx_dma *txd)
|
||||
+static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
|
||||
+ struct mtk_tx_dma *txd,
|
||||
+ u32 txd_size)
|
||||
{
|
||||
- int idx = txd - ring->dma;
|
||||
+ int idx = ((void *)txd - (void *)ring->dma) / txd_size;
|
||||
|
||||
return &ring->buf[idx];
|
||||
}
|
||||
@@ -1004,6 +1005,7 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
};
|
||||
struct mtk_mac *mac = netdev_priv(dev);
|
||||
struct mtk_eth *eth = mac->hw;
|
||||
+ const struct mtk_soc_data *soc = eth->soc;
|
||||
struct mtk_tx_dma *itxd, *txd;
|
||||
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
|
||||
struct mtk_tx_buf *itx_buf, *tx_buf;
|
||||
@@ -1015,7 +1017,7 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
if (itxd == ring->last_free)
|
||||
return -ENOMEM;
|
||||
|
||||
- itx_buf = mtk_desc_to_tx_buf(ring, itxd);
|
||||
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
|
||||
memset(itx_buf, 0, sizeof(*itx_buf));
|
||||
|
||||
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
|
||||
@@ -1043,7 +1045,7 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
while (frag_size) {
|
||||
bool new_desc = true;
|
||||
|
||||
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
|
||||
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
|
||||
(i & 0x1)) {
|
||||
txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
|
||||
txd_pdma = qdma_to_pdma(ring, txd);
|
||||
@@ -1067,7 +1069,8 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
|
||||
mtk_tx_set_dma_desc(dev, txd, &txd_info);
|
||||
|
||||
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
|
||||
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
|
||||
+ soc->txrx.txd_size);
|
||||
if (new_desc)
|
||||
memset(tx_buf, 0, sizeof(*tx_buf));
|
||||
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
|
||||
@@ -1086,7 +1089,7 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
/* store skb to cleanup */
|
||||
itx_buf->skb = skb;
|
||||
|
||||
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
|
||||
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
|
||||
if (k & 0x1)
|
||||
txd_pdma->txd2 |= TX_DMA_LS0;
|
||||
else
|
||||
@@ -1104,7 +1107,7 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
*/
|
||||
wmb();
|
||||
|
||||
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
|
||||
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
|
||||
if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
|
||||
!netdev_xmit_more())
|
||||
mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
|
||||
@@ -1118,13 +1121,13 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
|
||||
err_dma:
|
||||
do {
|
||||
- tx_buf = mtk_desc_to_tx_buf(ring, itxd);
|
||||
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
|
||||
|
||||
/* unmap dma */
|
||||
mtk_tx_unmap(eth, tx_buf, false);
|
||||
|
||||
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
|
||||
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
|
||||
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
|
||||
itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
|
||||
|
||||
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
|
||||
@@ -1435,7 +1438,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
|
||||
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
|
||||
break;
|
||||
|
||||
- tx_buf = mtk_desc_to_tx_buf(ring, desc);
|
||||
+ tx_buf = mtk_desc_to_tx_buf(ring, desc,
|
||||
+ eth->soc->txrx.txd_size);
|
||||
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
|
||||
mac = 1;
|
||||
|
@ -0,0 +1,39 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:31 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size in txd_to_idx
|
||||
|
||||
This is a preliminary patch to add mt7986 ethernet support.
|
||||
|
||||
Tested-by: Sam Shih <sam.shih@mediatek.com>
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -894,9 +894,10 @@ static struct mtk_tx_dma *qdma_to_pdma(s
|
||||
return ring->dma_pdma - ring->dma + dma;
|
||||
}
|
||||
|
||||
-static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
|
||||
+static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma,
|
||||
+ u32 txd_size)
|
||||
{
|
||||
- return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
|
||||
+ return ((void *)dma - (void *)ring->dma) / txd_size;
|
||||
}
|
||||
|
||||
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
|
||||
@@ -1112,8 +1113,10 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
!netdev_xmit_more())
|
||||
mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
|
||||
} else {
|
||||
- int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
|
||||
- ring->dma_size);
|
||||
+ int next_idx;
|
||||
+
|
||||
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
|
||||
+ ring->dma_size);
|
||||
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
|
||||
}
|
||||
|
@ -0,0 +1,102 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:32 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: add rxd_size to mtk_soc_data
|
||||
|
||||
Similar to tx counterpart, introduce rxd_size in mtk_soc_data data
|
||||
structure.
|
||||
This is a preliminary patch to add mt7986 ethernet support.
|
||||
|
||||
Tested-by: Sam Shih <sam.shih@mediatek.com>
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -1758,7 +1758,7 @@ static int mtk_rx_alloc(struct mtk_eth *
|
||||
}
|
||||
|
||||
ring->dma = dma_alloc_coherent(eth->dma_dev,
|
||||
- rx_dma_size * sizeof(*ring->dma),
|
||||
+ rx_dma_size * eth->soc->txrx.rxd_size,
|
||||
&ring->phys, GFP_KERNEL);
|
||||
if (!ring->dma)
|
||||
return -ENOMEM;
|
||||
@@ -1816,9 +1816,8 @@ static void mtk_rx_clean(struct mtk_eth
|
||||
|
||||
if (ring->dma) {
|
||||
dma_free_coherent(eth->dma_dev,
|
||||
- ring->dma_size * sizeof(*ring->dma),
|
||||
- ring->dma,
|
||||
- ring->phys);
|
||||
+ ring->dma_size * eth->soc->txrx.rxd_size,
|
||||
+ ring->dma, ring->phys);
|
||||
ring->dma = NULL;
|
||||
}
|
||||
}
|
||||
@@ -3383,6 +3382,7 @@ static const struct mtk_soc_data mt2701_
|
||||
.required_pctl = true,
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ .rxd_size = sizeof(struct mtk_rx_dma),
|
||||
},
|
||||
};
|
||||
|
||||
@@ -3394,6 +3394,7 @@ static const struct mtk_soc_data mt7621_
|
||||
.offload_version = 2,
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ .rxd_size = sizeof(struct mtk_rx_dma),
|
||||
},
|
||||
};
|
||||
|
||||
@@ -3406,6 +3407,7 @@ static const struct mtk_soc_data mt7622_
|
||||
.offload_version = 2,
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ .rxd_size = sizeof(struct mtk_rx_dma),
|
||||
},
|
||||
};
|
||||
|
||||
@@ -3417,6 +3419,7 @@ static const struct mtk_soc_data mt7623_
|
||||
.offload_version = 2,
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ .rxd_size = sizeof(struct mtk_rx_dma),
|
||||
},
|
||||
};
|
||||
|
||||
@@ -3428,6 +3431,7 @@ static const struct mtk_soc_data mt7629_
|
||||
.required_pctl = false,
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ .rxd_size = sizeof(struct mtk_rx_dma),
|
||||
},
|
||||
};
|
||||
|
||||
@@ -3438,6 +3442,7 @@ static const struct mtk_soc_data rt5350_
|
||||
.required_pctl = false,
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
+ .rxd_size = sizeof(struct mtk_rx_dma),
|
||||
},
|
||||
};
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
@@ -864,6 +864,7 @@ struct mtk_tx_dma_desc_info {
|
||||
* @required_pctl A bool value to show whether the SoC requires
|
||||
* the extra setup for those pins used by GMAC.
|
||||
* @txd_size Tx DMA descriptor size.
|
||||
+ * @rxd_size Rx DMA descriptor size.
|
||||
*/
|
||||
struct mtk_soc_data {
|
||||
u32 ana_rgc3;
|
||||
@@ -874,6 +875,7 @@ struct mtk_soc_data {
|
||||
netdev_features_t hw_features;
|
||||
struct {
|
||||
u32 txd_size;
|
||||
+ u32 rxd_size;
|
||||
} txrx;
|
||||
};
|
||||
|
@ -0,0 +1,46 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:33 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size field in
|
||||
mtk_poll_tx/mtk_poll_rx
|
||||
|
||||
This is a preliminary to ad mt7986 ethernet support.
|
||||
|
||||
Tested-by: Sam Shih <sam.shih@mediatek.com>
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -1253,9 +1253,12 @@ static struct mtk_rx_ring *mtk_get_rx_ri
|
||||
return ð->rx_ring[0];
|
||||
|
||||
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
|
||||
+ struct mtk_rx_dma *rxd;
|
||||
+
|
||||
ring = ð->rx_ring[i];
|
||||
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
|
||||
- if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
|
||||
+ rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size;
|
||||
+ if (rxd->rxd2 & RX_DMA_DONE) {
|
||||
ring->calc_idx_update = true;
|
||||
return ring;
|
||||
}
|
||||
@@ -1306,7 +1309,7 @@ static int mtk_poll_rx(struct napi_struc
|
||||
goto rx_done;
|
||||
|
||||
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
|
||||
- rxd = &ring->dma[idx];
|
||||
+ rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size;
|
||||
data = ring->data[idx];
|
||||
|
||||
if (!mtk_rx_get_desc(&trxd, rxd))
|
||||
@@ -1495,7 +1498,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
|
||||
|
||||
mtk_tx_unmap(eth, tx_buf, true);
|
||||
|
||||
- desc = &ring->dma[cpu];
|
||||
+ desc = (void *)ring->dma + cpu * eth->soc->txrx.txd_size;
|
||||
ring->last_free = desc;
|
||||
atomic_inc(&ring->free_count);
|
||||
|
@ -0,0 +1,68 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:34 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on rxd_size field in
|
||||
mtk_rx_alloc/mtk_rx_clean
|
||||
|
||||
Remove mtk_rx_dma structure layout dependency in mtk_rx_alloc/mtk_rx_clean.
|
||||
Initialize to 0 rxd3 and rxd4 in mtk_rx_alloc.
|
||||
This is a preliminary patch to add mt7986 ethernet support.
|
||||
|
||||
Tested-by: Sam Shih <sam.shih@mediatek.com>
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -1767,18 +1767,25 @@ static int mtk_rx_alloc(struct mtk_eth *
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < rx_dma_size; i++) {
|
||||
+ struct mtk_rx_dma *rxd;
|
||||
+
|
||||
dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
|
||||
ring->data[i] + NET_SKB_PAD + eth->ip_align,
|
||||
ring->buf_size,
|
||||
DMA_FROM_DEVICE);
|
||||
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
|
||||
return -ENOMEM;
|
||||
- ring->dma[i].rxd1 = (unsigned int)dma_addr;
|
||||
+
|
||||
+ rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size;
|
||||
+ rxd->rxd1 = (unsigned int)dma_addr;
|
||||
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
|
||||
- ring->dma[i].rxd2 = RX_DMA_LSO;
|
||||
+ rxd->rxd2 = RX_DMA_LSO;
|
||||
else
|
||||
- ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
|
||||
+ rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
|
||||
+
|
||||
+ rxd->rxd3 = 0;
|
||||
+ rxd->rxd4 = 0;
|
||||
}
|
||||
ring->dma_size = rx_dma_size;
|
||||
ring->calc_idx_update = false;
|
||||
@@ -1803,14 +1810,17 @@ static void mtk_rx_clean(struct mtk_eth
|
||||
|
||||
if (ring->data && ring->dma) {
|
||||
for (i = 0; i < ring->dma_size; i++) {
|
||||
+ struct mtk_rx_dma *rxd;
|
||||
+
|
||||
if (!ring->data[i])
|
||||
continue;
|
||||
- if (!ring->dma[i].rxd1)
|
||||
+
|
||||
+ rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size;
|
||||
+ if (!rxd->rxd1)
|
||||
continue;
|
||||
- dma_unmap_single(eth->dma_dev,
|
||||
- ring->dma[i].rxd1,
|
||||
- ring->buf_size,
|
||||
- DMA_FROM_DEVICE);
|
||||
+
|
||||
+ dma_unmap_single(eth->dma_dev, rxd->rxd1,
|
||||
+ ring->buf_size, DMA_FROM_DEVICE);
|
||||
skb_free_frag(ring->data[i]);
|
||||
}
|
||||
kfree(ring->data);
|
@ -0,0 +1,814 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:35 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce device register map
|
||||
|
||||
Introduce reg_map structure to add the capability to support different
|
||||
register definitions. Move register definitions in mtk_regmap structure.
|
||||
This is a preliminary patch to introduce mt7986 ethernet support.
|
||||
|
||||
Tested-by: Sam Shih <sam.shih@mediatek.com>
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -34,6 +34,59 @@ MODULE_PARM_DESC(msg_level, "Message lev
|
||||
#define MTK_ETHTOOL_STAT(x) { #x, \
|
||||
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
|
||||
|
||||
+static const struct mtk_reg_map mtk_reg_map = {
|
||||
+ .tx_irq_mask = 0x1a1c,
|
||||
+ .tx_irq_status = 0x1a18,
|
||||
+ .pdma = {
|
||||
+ .rx_ptr = 0x0900,
|
||||
+ .rx_cnt_cfg = 0x0904,
|
||||
+ .pcrx_ptr = 0x0908,
|
||||
+ .glo_cfg = 0x0a04,
|
||||
+ .rst_idx = 0x0a08,
|
||||
+ .delay_irq = 0x0a0c,
|
||||
+ .irq_status = 0x0a20,
|
||||
+ .irq_mask = 0x0a28,
|
||||
+ .int_grp = 0x0a50,
|
||||
+ },
|
||||
+ .qdma = {
|
||||
+ .qtx_cfg = 0x1800,
|
||||
+ .rx_ptr = 0x1900,
|
||||
+ .rx_cnt_cfg = 0x1904,
|
||||
+ .qcrx_ptr = 0x1908,
|
||||
+ .glo_cfg = 0x1a04,
|
||||
+ .rst_idx = 0x1a08,
|
||||
+ .delay_irq = 0x1a0c,
|
||||
+ .fc_th = 0x1a10,
|
||||
+ .int_grp = 0x1a20,
|
||||
+ .hred = 0x1a44,
|
||||
+ .ctx_ptr = 0x1b00,
|
||||
+ .dtx_ptr = 0x1b04,
|
||||
+ .crx_ptr = 0x1b10,
|
||||
+ .drx_ptr = 0x1b14,
|
||||
+ .fq_head = 0x1b20,
|
||||
+ .fq_tail = 0x1b24,
|
||||
+ .fq_count = 0x1b28,
|
||||
+ .fq_blen = 0x1b2c,
|
||||
+ },
|
||||
+ .gdm1_cnt = 0x2400,
|
||||
+};
|
||||
+
|
||||
+static const struct mtk_reg_map mt7628_reg_map = {
|
||||
+ .tx_irq_mask = 0x0a28,
|
||||
+ .tx_irq_status = 0x0a20,
|
||||
+ .pdma = {
|
||||
+ .rx_ptr = 0x0900,
|
||||
+ .rx_cnt_cfg = 0x0904,
|
||||
+ .pcrx_ptr = 0x0908,
|
||||
+ .glo_cfg = 0x0a04,
|
||||
+ .rst_idx = 0x0a08,
|
||||
+ .delay_irq = 0x0a0c,
|
||||
+ .irq_status = 0x0a20,
|
||||
+ .irq_mask = 0x0a28,
|
||||
+ .int_grp = 0x0a50,
|
||||
+ },
|
||||
+};
|
||||
+
|
||||
/* strings used by ethtool */
|
||||
static const struct mtk_ethtool_stats {
|
||||
char str[ETH_GSTRING_LEN];
|
||||
@@ -618,8 +671,8 @@ static inline void mtk_tx_irq_disable(st
|
||||
u32 val;
|
||||
|
||||
spin_lock_irqsave(ð->tx_irq_lock, flags);
|
||||
- val = mtk_r32(eth, eth->tx_int_mask_reg);
|
||||
- mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
|
||||
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
|
||||
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
|
||||
spin_unlock_irqrestore(ð->tx_irq_lock, flags);
|
||||
}
|
||||
|
||||
@@ -629,8 +682,8 @@ static inline void mtk_tx_irq_enable(str
|
||||
u32 val;
|
||||
|
||||
spin_lock_irqsave(ð->tx_irq_lock, flags);
|
||||
- val = mtk_r32(eth, eth->tx_int_mask_reg);
|
||||
- mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
|
||||
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
|
||||
+ mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
|
||||
spin_unlock_irqrestore(ð->tx_irq_lock, flags);
|
||||
}
|
||||
|
||||
@@ -640,8 +693,8 @@ static inline void mtk_rx_irq_disable(st
|
||||
u32 val;
|
||||
|
||||
spin_lock_irqsave(ð->rx_irq_lock, flags);
|
||||
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
|
||||
- mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
|
||||
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
|
||||
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
|
||||
spin_unlock_irqrestore(ð->rx_irq_lock, flags);
|
||||
}
|
||||
|
||||
@@ -651,8 +704,8 @@ static inline void mtk_rx_irq_enable(str
|
||||
u32 val;
|
||||
|
||||
spin_lock_irqsave(ð->rx_irq_lock, flags);
|
||||
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
|
||||
- mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
|
||||
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
|
||||
+ mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
|
||||
spin_unlock_irqrestore(ð->rx_irq_lock, flags);
|
||||
}
|
||||
|
||||
@@ -703,39 +756,39 @@ void mtk_stats_update_mac(struct mtk_mac
|
||||
hw_stats->rx_checksum_errors +=
|
||||
mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
|
||||
} else {
|
||||
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
unsigned int offs = hw_stats->reg_offset;
|
||||
u64 stats;
|
||||
|
||||
- hw_stats->rx_bytes += mtk_r32(mac->hw,
|
||||
- MTK_GDM1_RX_GBCNT_L + offs);
|
||||
- stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
|
||||
+ hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
|
||||
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
|
||||
if (stats)
|
||||
hw_stats->rx_bytes += (stats << 32);
|
||||
hw_stats->rx_packets +=
|
||||
- mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
|
||||
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
|
||||
hw_stats->rx_overflow +=
|
||||
- mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
|
||||
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
|
||||
hw_stats->rx_fcs_errors +=
|
||||
- mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
|
||||
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
|
||||
hw_stats->rx_short_errors +=
|
||||
- mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
|
||||
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
|
||||
hw_stats->rx_long_errors +=
|
||||
- mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
|
||||
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
|
||||
hw_stats->rx_checksum_errors +=
|
||||
- mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
|
||||
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
|
||||
hw_stats->rx_flow_control_packets +=
|
||||
- mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
|
||||
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
|
||||
hw_stats->tx_skip +=
|
||||
- mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
|
||||
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
|
||||
hw_stats->tx_collisions +=
|
||||
- mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
|
||||
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
|
||||
hw_stats->tx_bytes +=
|
||||
- mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
|
||||
- stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
|
||||
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
|
||||
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
|
||||
if (stats)
|
||||
hw_stats->tx_bytes += (stats << 32);
|
||||
hw_stats->tx_packets +=
|
||||
- mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
|
||||
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
|
||||
}
|
||||
|
||||
u64_stats_update_end(&hw_stats->syncp);
|
||||
@@ -864,10 +917,10 @@ static int mtk_init_fq_dma(struct mtk_et
|
||||
txd->txd4 = 0;
|
||||
}
|
||||
|
||||
- mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
|
||||
- mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
|
||||
- mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
|
||||
- mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
|
||||
+ mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
|
||||
+ mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
|
||||
+ mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
|
||||
+ mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1111,7 +1164,7 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
|
||||
if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
|
||||
!netdev_xmit_more())
|
||||
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
|
||||
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
|
||||
} else {
|
||||
int next_idx;
|
||||
|
||||
@@ -1425,6 +1478,7 @@ rx_done:
|
||||
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
|
||||
unsigned int *done, unsigned int *bytes)
|
||||
{
|
||||
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
struct mtk_tx_ring *ring = ð->tx_ring;
|
||||
struct mtk_tx_dma *desc;
|
||||
struct sk_buff *skb;
|
||||
@@ -1432,7 +1486,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
|
||||
u32 cpu, dma;
|
||||
|
||||
cpu = ring->last_free_ptr;
|
||||
- dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
|
||||
+ dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
|
||||
|
||||
desc = mtk_qdma_phys_to_virt(ring, cpu);
|
||||
|
||||
@@ -1467,7 +1521,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
|
||||
}
|
||||
|
||||
ring->last_free_ptr = cpu;
|
||||
- mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
|
||||
+ mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
|
||||
|
||||
return budget;
|
||||
}
|
||||
@@ -1560,24 +1614,25 @@ static void mtk_handle_status_irq(struct
|
||||
static int mtk_napi_tx(struct napi_struct *napi, int budget)
|
||||
{
|
||||
struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
|
||||
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
int tx_done = 0;
|
||||
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
|
||||
mtk_handle_status_irq(eth);
|
||||
- mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
|
||||
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
|
||||
tx_done = mtk_poll_tx(eth, budget);
|
||||
|
||||
if (unlikely(netif_msg_intr(eth))) {
|
||||
dev_info(eth->dev,
|
||||
"done tx %d, intr 0x%08x/0x%x\n", tx_done,
|
||||
- mtk_r32(eth, eth->tx_int_status_reg),
|
||||
- mtk_r32(eth, eth->tx_int_mask_reg));
|
||||
+ mtk_r32(eth, reg_map->tx_irq_status),
|
||||
+ mtk_r32(eth, reg_map->tx_irq_mask));
|
||||
}
|
||||
|
||||
if (tx_done == budget)
|
||||
return budget;
|
||||
|
||||
- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
|
||||
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
|
||||
return budget;
|
||||
|
||||
if (napi_complete_done(napi, tx_done))
|
||||
@@ -1589,6 +1644,7 @@ static int mtk_napi_tx(struct napi_struc
|
||||
static int mtk_napi_rx(struct napi_struct *napi, int budget)
|
||||
{
|
||||
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
|
||||
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
int rx_done_total = 0;
|
||||
|
||||
mtk_handle_status_irq(eth);
|
||||
@@ -1596,21 +1652,21 @@ static int mtk_napi_rx(struct napi_struc
|
||||
do {
|
||||
int rx_done;
|
||||
|
||||
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
|
||||
+ mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status);
|
||||
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
|
||||
rx_done_total += rx_done;
|
||||
|
||||
if (unlikely(netif_msg_intr(eth))) {
|
||||
dev_info(eth->dev,
|
||||
"done rx %d, intr 0x%08x/0x%x\n", rx_done,
|
||||
- mtk_r32(eth, MTK_PDMA_INT_STATUS),
|
||||
- mtk_r32(eth, MTK_PDMA_INT_MASK));
|
||||
+ mtk_r32(eth, reg_map->pdma.irq_status),
|
||||
+ mtk_r32(eth, reg_map->pdma.irq_mask));
|
||||
}
|
||||
|
||||
if (rx_done_total == budget)
|
||||
return budget;
|
||||
|
||||
- } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
|
||||
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT);
|
||||
|
||||
if (napi_complete_done(napi, rx_done_total))
|
||||
mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
|
||||
@@ -1673,20 +1729,20 @@ static int mtk_tx_alloc(struct mtk_eth *
|
||||
*/
|
||||
wmb();
|
||||
|
||||
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
|
||||
- mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
|
||||
- mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
|
||||
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
|
||||
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
|
||||
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
|
||||
mtk_w32(eth,
|
||||
ring->phys + ((MTK_DMA_SIZE - 1) * sz),
|
||||
- MTK_QTX_CRX_PTR);
|
||||
- mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
|
||||
+ soc->reg_map->qdma.crx_ptr);
|
||||
+ mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
|
||||
mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
|
||||
- MTK_QTX_CFG(0));
|
||||
+ soc->reg_map->qdma.qtx_cfg);
|
||||
} else {
|
||||
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
|
||||
mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
|
||||
mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
|
||||
- mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
|
||||
+ mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -1725,6 +1781,7 @@ static void mtk_tx_clean(struct mtk_eth
|
||||
|
||||
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
|
||||
{
|
||||
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
struct mtk_rx_ring *ring;
|
||||
int rx_data_len, rx_dma_size;
|
||||
int i;
|
||||
@@ -1790,16 +1847,18 @@ static int mtk_rx_alloc(struct mtk_eth *
|
||||
ring->dma_size = rx_dma_size;
|
||||
ring->calc_idx_update = false;
|
||||
ring->calc_idx = rx_dma_size - 1;
|
||||
- ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
|
||||
+ ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * MTK_QRX_OFFSET;
|
||||
/* make sure that all changes to the dma ring are flushed before we
|
||||
* continue
|
||||
*/
|
||||
wmb();
|
||||
|
||||
- mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
|
||||
- mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
|
||||
+ mtk_w32(eth, ring->phys,
|
||||
+ reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset);
|
||||
+ mtk_w32(eth, rx_dma_size,
|
||||
+ reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset);
|
||||
mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
|
||||
- mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
|
||||
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -2105,9 +2164,9 @@ static int mtk_dma_busy_wait(struct mtk_
|
||||
u32 val;
|
||||
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
|
||||
- reg = MTK_QDMA_GLO_CFG;
|
||||
+ reg = eth->soc->reg_map->qdma.glo_cfg;
|
||||
else
|
||||
- reg = MTK_PDMA_GLO_CFG;
|
||||
+ reg = eth->soc->reg_map->pdma.glo_cfg;
|
||||
|
||||
ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
|
||||
!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
|
||||
@@ -2165,8 +2224,8 @@ static int mtk_dma_init(struct mtk_eth *
|
||||
* automatically
|
||||
*/
|
||||
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
|
||||
- FC_THRES_MIN, MTK_QDMA_FC_THRES);
|
||||
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
|
||||
+ FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
|
||||
+ mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -2240,13 +2299,14 @@ static irqreturn_t mtk_handle_irq_tx(int
|
||||
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
|
||||
{
|
||||
struct mtk_eth *eth = _eth;
|
||||
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
|
||||
- if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
|
||||
- if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
|
||||
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) {
|
||||
+ if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT)
|
||||
mtk_handle_irq_rx(irq, _eth);
|
||||
}
|
||||
- if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
|
||||
- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
|
||||
+ if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
|
||||
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
|
||||
mtk_handle_irq_tx(irq, _eth);
|
||||
}
|
||||
|
||||
@@ -2270,6 +2330,7 @@ static void mtk_poll_controller(struct n
|
||||
static int mtk_start_dma(struct mtk_eth *eth)
|
||||
{
|
||||
u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
|
||||
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
int err;
|
||||
|
||||
err = mtk_dma_init(eth);
|
||||
@@ -2284,16 +2345,15 @@ static int mtk_start_dma(struct mtk_eth
|
||||
MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
|
||||
MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
|
||||
MTK_RX_BT_32DWORDS,
|
||||
- MTK_QDMA_GLO_CFG);
|
||||
-
|
||||
+ reg_map->qdma.glo_cfg);
|
||||
mtk_w32(eth,
|
||||
MTK_RX_DMA_EN | rx_2b_offset |
|
||||
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
|
||||
- MTK_PDMA_GLO_CFG);
|
||||
+ reg_map->pdma.glo_cfg);
|
||||
} else {
|
||||
mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
|
||||
MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
|
||||
- MTK_PDMA_GLO_CFG);
|
||||
+ reg_map->pdma.glo_cfg);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -2417,8 +2477,8 @@ static int mtk_stop(struct net_device *d
|
||||
cancel_work_sync(ð->tx_dim.work);
|
||||
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
|
||||
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
|
||||
- mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
|
||||
+ mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
|
||||
+ mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
|
||||
|
||||
mtk_dma_free(eth);
|
||||
|
||||
@@ -2472,6 +2532,7 @@ static void mtk_dim_rx(struct work_struc
|
||||
{
|
||||
struct dim *dim = container_of(work, struct dim, work);
|
||||
struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
|
||||
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
struct dim_cq_moder cur_profile;
|
||||
u32 val, cur;
|
||||
|
||||
@@ -2479,7 +2540,7 @@ static void mtk_dim_rx(struct work_struc
|
||||
dim->profile_ix);
|
||||
spin_lock_bh(ð->dim_lock);
|
||||
|
||||
- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
|
||||
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
|
||||
val &= MTK_PDMA_DELAY_TX_MASK;
|
||||
val |= MTK_PDMA_DELAY_RX_EN;
|
||||
|
||||
@@ -2489,9 +2550,9 @@ static void mtk_dim_rx(struct work_struc
|
||||
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
|
||||
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
|
||||
|
||||
- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
|
||||
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
|
||||
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
|
||||
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
|
||||
|
||||
spin_unlock_bh(ð->dim_lock);
|
||||
|
||||
@@ -2502,6 +2563,7 @@ static void mtk_dim_tx(struct work_struc
|
||||
{
|
||||
struct dim *dim = container_of(work, struct dim, work);
|
||||
struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
|
||||
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
struct dim_cq_moder cur_profile;
|
||||
u32 val, cur;
|
||||
|
||||
@@ -2509,7 +2571,7 @@ static void mtk_dim_tx(struct work_struc
|
||||
dim->profile_ix);
|
||||
spin_lock_bh(ð->dim_lock);
|
||||
|
||||
- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
|
||||
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
|
||||
val &= MTK_PDMA_DELAY_RX_MASK;
|
||||
val |= MTK_PDMA_DELAY_TX_EN;
|
||||
|
||||
@@ -2519,9 +2581,9 @@ static void mtk_dim_tx(struct work_struc
|
||||
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
|
||||
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
|
||||
|
||||
- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
|
||||
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
|
||||
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
|
||||
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
|
||||
|
||||
spin_unlock_bh(ð->dim_lock);
|
||||
|
||||
@@ -2532,6 +2594,7 @@ static int mtk_hw_init(struct mtk_eth *e
|
||||
{
|
||||
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
|
||||
ETHSYS_DMA_AG_MAP_PPE;
|
||||
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
int i, val, ret;
|
||||
|
||||
if (test_and_set_bit(MTK_HW_INIT, ð->state))
|
||||
@@ -2606,10 +2669,10 @@ static int mtk_hw_init(struct mtk_eth *e
|
||||
mtk_rx_irq_disable(eth, ~0);
|
||||
|
||||
/* FE int grouping */
|
||||
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
|
||||
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
|
||||
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
|
||||
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
|
||||
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
|
||||
+ mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4);
|
||||
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
|
||||
+ mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4);
|
||||
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
|
||||
|
||||
return 0;
|
||||
@@ -3148,14 +3211,6 @@ static int mtk_probe(struct platform_dev
|
||||
if (IS_ERR(eth->base))
|
||||
return PTR_ERR(eth->base);
|
||||
|
||||
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
|
||||
- eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
|
||||
- eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
|
||||
- } else {
|
||||
- eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
|
||||
- eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
|
||||
- }
|
||||
-
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
|
||||
eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
|
||||
eth->ip_align = NET_IP_ALIGN;
|
||||
@@ -3389,6 +3444,7 @@ static int mtk_remove(struct platform_de
|
||||
}
|
||||
|
||||
static const struct mtk_soc_data mt2701_data = {
|
||||
+ .reg_map = &mtk_reg_map,
|
||||
.caps = MT7623_CAPS | MTK_HWLRO,
|
||||
.hw_features = MTK_HW_FEATURES,
|
||||
.required_clks = MT7623_CLKS_BITMAP,
|
||||
@@ -3400,6 +3456,7 @@ static const struct mtk_soc_data mt2701_
|
||||
};
|
||||
|
||||
static const struct mtk_soc_data mt7621_data = {
|
||||
+ .reg_map = &mtk_reg_map,
|
||||
.caps = MT7621_CAPS,
|
||||
.hw_features = MTK_HW_FEATURES,
|
||||
.required_clks = MT7621_CLKS_BITMAP,
|
||||
@@ -3412,6 +3469,7 @@ static const struct mtk_soc_data mt7621_
|
||||
};
|
||||
|
||||
static const struct mtk_soc_data mt7622_data = {
|
||||
+ .reg_map = &mtk_reg_map,
|
||||
.ana_rgc3 = 0x2028,
|
||||
.caps = MT7622_CAPS | MTK_HWLRO,
|
||||
.hw_features = MTK_HW_FEATURES,
|
||||
@@ -3425,6 +3483,7 @@ static const struct mtk_soc_data mt7622_
|
||||
};
|
||||
|
||||
static const struct mtk_soc_data mt7623_data = {
|
||||
+ .reg_map = &mtk_reg_map,
|
||||
.caps = MT7623_CAPS | MTK_HWLRO,
|
||||
.hw_features = MTK_HW_FEATURES,
|
||||
.required_clks = MT7623_CLKS_BITMAP,
|
||||
@@ -3437,6 +3496,7 @@ static const struct mtk_soc_data mt7623_
|
||||
};
|
||||
|
||||
static const struct mtk_soc_data mt7629_data = {
|
||||
+ .reg_map = &mtk_reg_map,
|
||||
.ana_rgc3 = 0x128,
|
||||
.caps = MT7629_CAPS | MTK_HWLRO,
|
||||
.hw_features = MTK_HW_FEATURES,
|
||||
@@ -3449,6 +3509,7 @@ static const struct mtk_soc_data mt7629_
|
||||
};
|
||||
|
||||
static const struct mtk_soc_data rt5350_data = {
|
||||
+ .reg_map = &mt7628_reg_map,
|
||||
.caps = MT7628_CAPS,
|
||||
.hw_features = MTK_HW_FEATURES_MT7628,
|
||||
.required_clks = MT7628_CLKS_BITMAP,
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
@@ -48,6 +48,8 @@
|
||||
#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
|
||||
#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
|
||||
|
||||
+#define MTK_QRX_OFFSET 0x10
|
||||
+
|
||||
#define MTK_MAX_RX_RING_NUM 4
|
||||
#define MTK_HW_LRO_DMA_SIZE 8
|
||||
|
||||
@@ -100,18 +102,6 @@
|
||||
/* Unicast Filter MAC Address Register - High */
|
||||
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
|
||||
|
||||
-/* PDMA RX Base Pointer Register */
|
||||
-#define MTK_PRX_BASE_PTR0 0x900
|
||||
-#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
|
||||
-
|
||||
-/* PDMA RX Maximum Count Register */
|
||||
-#define MTK_PRX_MAX_CNT0 0x904
|
||||
-#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
|
||||
-
|
||||
-/* PDMA RX CPU Pointer Register */
|
||||
-#define MTK_PRX_CRX_IDX0 0x908
|
||||
-#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
|
||||
-
|
||||
/* PDMA HW LRO Control Registers */
|
||||
#define MTK_PDMA_LRO_CTRL_DW0 0x980
|
||||
#define MTK_LRO_EN BIT(0)
|
||||
@@ -126,18 +116,19 @@
|
||||
#define MTK_ADMA_MODE BIT(15)
|
||||
#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
|
||||
|
||||
-/* PDMA Global Configuration Register */
|
||||
-#define MTK_PDMA_GLO_CFG 0xa04
|
||||
+#define MTK_RX_DMA_LRO_EN BIT(8)
|
||||
#define MTK_MULTI_EN BIT(10)
|
||||
#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
|
||||
|
||||
+/* PDMA Global Configuration Register */
|
||||
+#define MTK_PDMA_LRO_SDL 0x3000
|
||||
+#define MTK_RX_CFG_SDL_OFFSET 16
|
||||
+
|
||||
/* PDMA Reset Index Register */
|
||||
-#define MTK_PDMA_RST_IDX 0xa08
|
||||
#define MTK_PST_DRX_IDX0 BIT(16)
|
||||
#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
|
||||
|
||||
/* PDMA Delay Interrupt Register */
|
||||
-#define MTK_PDMA_DELAY_INT 0xa0c
|
||||
#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
|
||||
#define MTK_PDMA_DELAY_RX_EN BIT(15)
|
||||
#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
|
||||
@@ -151,19 +142,9 @@
|
||||
#define MTK_PDMA_DELAY_PINT_MASK 0x7f
|
||||
#define MTK_PDMA_DELAY_PTIME_MASK 0xff
|
||||
|
||||
-/* PDMA Interrupt Status Register */
|
||||
-#define MTK_PDMA_INT_STATUS 0xa20
|
||||
-
|
||||
-/* PDMA Interrupt Mask Register */
|
||||
-#define MTK_PDMA_INT_MASK 0xa28
|
||||
-
|
||||
/* PDMA HW LRO Alter Flow Delta Register */
|
||||
#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
|
||||
|
||||
-/* PDMA Interrupt grouping registers */
|
||||
-#define MTK_PDMA_INT_GRP1 0xa50
|
||||
-#define MTK_PDMA_INT_GRP2 0xa54
|
||||
-
|
||||
/* PDMA HW LRO IP Setting Registers */
|
||||
#define MTK_LRO_RX_RING0_DIP_DW0 0xb04
|
||||
#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
|
||||
@@ -185,26 +166,9 @@
|
||||
#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
|
||||
|
||||
/* QDMA TX Queue Configuration Registers */
|
||||
-#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
|
||||
#define QDMA_RES_THRES 4
|
||||
|
||||
-/* QDMA TX Queue Scheduler Registers */
|
||||
-#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
|
||||
-
|
||||
-/* QDMA RX Base Pointer Register */
|
||||
-#define MTK_QRX_BASE_PTR0 0x1900
|
||||
-
|
||||
-/* QDMA RX Maximum Count Register */
|
||||
-#define MTK_QRX_MAX_CNT0 0x1904
|
||||
-
|
||||
-/* QDMA RX CPU Pointer Register */
|
||||
-#define MTK_QRX_CRX_IDX0 0x1908
|
||||
-
|
||||
-/* QDMA RX DMA Pointer Register */
|
||||
-#define MTK_QRX_DRX_IDX0 0x190C
|
||||
-
|
||||
/* QDMA Global Configuration Register */
|
||||
-#define MTK_QDMA_GLO_CFG 0x1A04
|
||||
#define MTK_RX_2B_OFFSET BIT(31)
|
||||
#define MTK_RX_BT_32DWORDS (3 << 11)
|
||||
#define MTK_NDP_CO_PRO BIT(10)
|
||||
@@ -216,20 +180,12 @@
|
||||
#define MTK_TX_DMA_EN BIT(0)
|
||||
#define MTK_DMA_BUSY_TIMEOUT_US 1000000
|
||||
|
||||
-/* QDMA Reset Index Register */
|
||||
-#define MTK_QDMA_RST_IDX 0x1A08
|
||||
-
|
||||
-/* QDMA Delay Interrupt Register */
|
||||
-#define MTK_QDMA_DELAY_INT 0x1A0C
|
||||
-
|
||||
/* QDMA Flow Control Register */
|
||||
-#define MTK_QDMA_FC_THRES 0x1A10
|
||||
#define FC_THRES_DROP_MODE BIT(20)
|
||||
#define FC_THRES_DROP_EN (7 << 16)
|
||||
#define FC_THRES_MIN 0x4444
|
||||
|
||||
/* QDMA Interrupt Status Register */
|
||||
-#define MTK_QDMA_INT_STATUS 0x1A18
|
||||
#define MTK_RX_DONE_DLY BIT(30)
|
||||
#define MTK_TX_DONE_DLY BIT(28)
|
||||
#define MTK_RX_DONE_INT3 BIT(19)
|
||||
@@ -244,55 +200,8 @@
|
||||
#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
|
||||
|
||||
/* QDMA Interrupt grouping registers */
|
||||
-#define MTK_QDMA_INT_GRP1 0x1a20
|
||||
-#define MTK_QDMA_INT_GRP2 0x1a24
|
||||
#define MTK_RLS_DONE_INT BIT(0)
|
||||
|
||||
-/* QDMA Interrupt Status Register */
|
||||
-#define MTK_QDMA_INT_MASK 0x1A1C
|
||||
-
|
||||
-/* QDMA Interrupt Mask Register */
|
||||
-#define MTK_QDMA_HRED2 0x1A44
|
||||
-
|
||||
-/* QDMA TX Forward CPU Pointer Register */
|
||||
-#define MTK_QTX_CTX_PTR 0x1B00
|
||||
-
|
||||
-/* QDMA TX Forward DMA Pointer Register */
|
||||
-#define MTK_QTX_DTX_PTR 0x1B04
|
||||
-
|
||||
-/* QDMA TX Release CPU Pointer Register */
|
||||
-#define MTK_QTX_CRX_PTR 0x1B10
|
||||
-
|
||||
-/* QDMA TX Release DMA Pointer Register */
|
||||
-#define MTK_QTX_DRX_PTR 0x1B14
|
||||
-
|
||||
-/* QDMA FQ Head Pointer Register */
|
||||
-#define MTK_QDMA_FQ_HEAD 0x1B20
|
||||
-
|
||||
-/* QDMA FQ Head Pointer Register */
|
||||
-#define MTK_QDMA_FQ_TAIL 0x1B24
|
||||
-
|
||||
-/* QDMA FQ Free Page Counter Register */
|
||||
-#define MTK_QDMA_FQ_CNT 0x1B28
|
||||
-
|
||||
-/* QDMA FQ Free Page Buffer Length Register */
|
||||
-#define MTK_QDMA_FQ_BLEN 0x1B2C
|
||||
-
|
||||
-/* GMA1 counter / statics register */
|
||||
-#define MTK_GDM1_RX_GBCNT_L 0x2400
|
||||
-#define MTK_GDM1_RX_GBCNT_H 0x2404
|
||||
-#define MTK_GDM1_RX_GPCNT 0x2408
|
||||
-#define MTK_GDM1_RX_OERCNT 0x2410
|
||||
-#define MTK_GDM1_RX_FERCNT 0x2414
|
||||
-#define MTK_GDM1_RX_SERCNT 0x2418
|
||||
-#define MTK_GDM1_RX_LENCNT 0x241c
|
||||
-#define MTK_GDM1_RX_CERCNT 0x2420
|
||||
-#define MTK_GDM1_RX_FCCNT 0x2424
|
||||
-#define MTK_GDM1_TX_SKIPCNT 0x2428
|
||||
-#define MTK_GDM1_TX_COLCNT 0x242c
|
||||
-#define MTK_GDM1_TX_GBCNT_L 0x2430
|
||||
-#define MTK_GDM1_TX_GBCNT_H 0x2434
|
||||
-#define MTK_GDM1_TX_GPCNT 0x2438
|
||||
#define MTK_STAT_OFFSET 0x40
|
||||
|
||||
#define MTK_WDMA0_BASE 0x2800
|
||||
@@ -853,8 +762,46 @@ struct mtk_tx_dma_desc_info {
|
||||
u8 last:1;
|
||||
};
|
||||
|
||||
+struct mtk_reg_map {
|
||||
+ u32 tx_irq_mask;
|
||||
+ u32 tx_irq_status;
|
||||
+ struct {
|
||||
+ u32 rx_ptr; /* rx base pointer */
|
||||
+ u32 rx_cnt_cfg; /* rx max count configuration */
|
||||
+ u32 pcrx_ptr; /* rx cpu pointer */
|
||||
+ u32 glo_cfg; /* global configuration */
|
||||
+ u32 rst_idx; /* reset index */
|
||||
+ u32 delay_irq; /* delay interrupt */
|
||||
+ u32 irq_status; /* interrupt status */
|
||||
+ u32 irq_mask; /* interrupt mask */
|
||||
+ u32 int_grp;
|
||||
+ } pdma;
|
||||
+ struct {
|
||||
+ u32 qtx_cfg; /* tx queue configuration */
|
||||
+ u32 rx_ptr; /* rx base pointer */
|
||||
+ u32 rx_cnt_cfg; /* rx max count configuration */
|
||||
+ u32 qcrx_ptr; /* rx cpu pointer */
|
||||
+ u32 glo_cfg; /* global configuration */
|
||||
+ u32 rst_idx; /* reset index */
|
||||
+ u32 delay_irq; /* delay interrupt */
|
||||
+ u32 fc_th; /* flow control */
|
||||
+ u32 int_grp;
|
||||
+ u32 hred; /* interrupt mask */
|
||||
+ u32 ctx_ptr; /* tx acquire cpu pointer */
|
||||
+ u32 dtx_ptr; /* tx acquire dma pointer */
|
||||
+ u32 crx_ptr; /* tx release cpu pointer */
|
||||
+ u32 drx_ptr; /* tx release dma pointer */
|
||||
+ u32 fq_head; /* fq head pointer */
|
||||
+ u32 fq_tail; /* fq tail pointer */
|
||||
+ u32 fq_count; /* fq free page count */
|
||||
+ u32 fq_blen; /* fq free page buffer length */
|
||||
+ } qdma;
|
||||
+ u32 gdm1_cnt;
|
||||
+};
|
||||
+
|
||||
/* struct mtk_eth_data - This is the structure holding all differences
|
||||
* among various plaforms
|
||||
+ * @reg_map Soc register map.
|
||||
* @ana_rgc3: The offset for register ANA_RGC3 related to
|
||||
* sgmiisys syscon
|
||||
* @caps Flags shown the extra capability for the SoC
|
||||
@@ -867,6 +814,7 @@ struct mtk_tx_dma_desc_info {
|
||||
* @rxd_size Rx DMA descriptor size.
|
||||
*/
|
||||
struct mtk_soc_data {
|
||||
+ const struct mtk_reg_map *reg_map;
|
||||
u32 ana_rgc3;
|
||||
u32 caps;
|
||||
u32 required_clks;
|
||||
@@ -994,8 +942,6 @@ struct mtk_eth {
|
||||
u32 tx_bytes;
|
||||
struct dim tx_dim;
|
||||
|
||||
- u32 tx_int_mask_reg;
|
||||
- u32 tx_int_status_reg;
|
||||
u32 rx_dma_l4_valid;
|
||||
int ip_align;
|
||||
|
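The hunks above replace the fixed MTK_PDMA_*/MTK_QDMA_* offsets with a per-SoC struct mtk_reg_map reached through eth->soc->reg_map. Below is a minimal, self-contained sketch of that indirection, not the driver itself: the struct is trimmed to a handful of fields, the soc_data/eth_dev scaffolding and main() are invented for illustration, and only the numeric offsets are copied from the defines removed above.

/* Model of the register-map indirection: generic code reads an offset from
 * the map attached to the probed SoC instead of a hard-coded constant.
 */
#include <stdint.h>
#include <stdio.h>

struct reg_map {
	uint32_t tx_irq_mask;
	uint32_t tx_irq_status;
	struct {
		uint32_t glo_cfg;
		uint32_t rst_idx;
		uint32_t irq_status;
		uint32_t irq_mask;
	} pdma;
};

/* One instance per SoC generation; values below are the v1 offsets that the
 * patch moves out of mtk_eth_soc.h (0xa04, 0xa08, 0xa20, 0xa28, 0x1a1c, 0x1a18).
 */
static const struct reg_map v1_map = {
	.tx_irq_mask = 0x1a1c,
	.tx_irq_status = 0x1a18,
	.pdma = { .glo_cfg = 0xa04, .rst_idx = 0xa08,
		  .irq_status = 0xa20, .irq_mask = 0xa28 },
};

struct soc_data { const struct reg_map *reg_map; };
struct eth_dev  { const struct soc_data *soc; };

/* Shared code no longer mentions MTK_PDMA_GLO_CFG directly. */
static uint32_t pdma_glo_cfg(const struct eth_dev *eth)
{
	return eth->soc->reg_map->pdma.glo_cfg;
}

int main(void)
{
	static const struct soc_data v1_soc = { .reg_map = &v1_map };
	struct eth_dev eth = { .soc = &v1_soc };

	printf("PDMA_GLO_CFG offset: 0x%x\n", (unsigned)pdma_glo_cfg(&eth));
	return 0;
}

The payoff comes later in the series: mt7986 reuses all of the generic DMA code simply by pointing reg_map at a table with its own offsets.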
@ -0,0 +1,917 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:36 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce MTK_NETSYS_V2 support
|
||||
|
||||
Introduce MTK_NETSYS_V2 support. MTK_NETSYS_V2 defines 32B TX/RX DMA
|
||||
descriptors.
|
||||
This is a preliminary patch to add mt7986 ethernet support.
|
||||
|
||||
Tested-by: Sam Shih <sam.shih@mediatek.com>
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -862,8 +862,8 @@ static inline int mtk_max_buf_size(int f
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
-static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
|
||||
- struct mtk_rx_dma *dma_rxd)
|
||||
+static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
|
||||
+ struct mtk_rx_dma_v2 *dma_rxd)
|
||||
{
|
||||
rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
|
||||
if (!(rxd->rxd2 & RX_DMA_DONE))
|
||||
@@ -872,6 +872,10 @@ static inline bool mtk_rx_get_desc(struc
|
||||
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
|
||||
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
|
||||
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
|
||||
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
|
||||
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
|
||||
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
|
||||
+ }
|
||||
|
||||
return true;
|
||||
}
|
||||
@@ -905,7 +909,7 @@ static int mtk_init_fq_dma(struct mtk_et
|
||||
phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
- struct mtk_tx_dma *txd;
|
||||
+ struct mtk_tx_dma_v2 *txd;
|
||||
|
||||
txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size;
|
||||
txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
|
||||
@@ -915,6 +919,12 @@ static int mtk_init_fq_dma(struct mtk_et
|
||||
|
||||
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
|
||||
txd->txd4 = 0;
|
||||
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
|
||||
+ txd->txd5 = 0;
|
||||
+ txd->txd6 = 0;
|
||||
+ txd->txd7 = 0;
|
||||
+ txd->txd8 = 0;
|
||||
+ }
|
||||
}
|
||||
|
||||
mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
|
||||
@@ -1018,10 +1028,12 @@ static void setup_tx_buf(struct mtk_eth
|
||||
}
|
||||
}
|
||||
|
||||
-static void mtk_tx_set_dma_desc(struct net_device *dev, struct mtk_tx_dma *desc,
|
||||
- struct mtk_tx_dma_desc_info *info)
|
||||
+static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
|
||||
+ struct mtk_tx_dma_desc_info *info)
|
||||
{
|
||||
struct mtk_mac *mac = netdev_priv(dev);
|
||||
+ struct mtk_eth *eth = mac->hw;
|
||||
+ struct mtk_tx_dma *desc = txd;
|
||||
u32 data;
|
||||
|
||||
WRITE_ONCE(desc->txd1, info->addr);
|
||||
@@ -1045,6 +1057,59 @@ static void mtk_tx_set_dma_desc(struct n
|
||||
WRITE_ONCE(desc->txd4, data);
|
||||
}
|
||||
|
||||
+static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
|
||||
+ struct mtk_tx_dma_desc_info *info)
|
||||
+{
|
||||
+ struct mtk_mac *mac = netdev_priv(dev);
|
||||
+ struct mtk_tx_dma_v2 *desc = txd;
|
||||
+ struct mtk_eth *eth = mac->hw;
|
||||
+ u32 data;
|
||||
+
|
||||
+ WRITE_ONCE(desc->txd1, info->addr);
|
||||
+
|
||||
+ data = TX_DMA_PLEN0(info->size);
|
||||
+ if (info->last)
|
||||
+ data |= TX_DMA_LS0;
|
||||
+ WRITE_ONCE(desc->txd3, data);
|
||||
+
|
||||
+ if (!info->qid && mac->id)
|
||||
+ info->qid = MTK_QDMA_GMAC2_QID;
|
||||
+
|
||||
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
|
||||
+ data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
|
||||
+ WRITE_ONCE(desc->txd4, data);
|
||||
+
|
||||
+ data = 0;
|
||||
+ if (info->first) {
|
||||
+ if (info->gso)
|
||||
+ data |= TX_DMA_TSO_V2;
|
||||
+ /* tx checksum offload */
|
||||
+ if (info->csum)
|
||||
+ data |= TX_DMA_CHKSUM_V2;
|
||||
+ }
|
||||
+ WRITE_ONCE(desc->txd5, data);
|
||||
+
|
||||
+ data = 0;
|
||||
+ if (info->first && info->vlan)
|
||||
+ data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
|
||||
+ WRITE_ONCE(desc->txd6, data);
|
||||
+
|
||||
+ WRITE_ONCE(desc->txd7, 0);
|
||||
+ WRITE_ONCE(desc->txd8, 0);
|
||||
+}
|
||||
+
|
||||
+static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
|
||||
+ struct mtk_tx_dma_desc_info *info)
|
||||
+{
|
||||
+ struct mtk_mac *mac = netdev_priv(dev);
|
||||
+ struct mtk_eth *eth = mac->hw;
|
||||
+
|
||||
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
|
||||
+ mtk_tx_set_dma_desc_v2(dev, txd, info);
|
||||
+ else
|
||||
+ mtk_tx_set_dma_desc_v1(dev, txd, info);
|
||||
+}
|
||||
+
|
||||
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
|
||||
int tx_num, struct mtk_tx_ring *ring, bool gso)
|
||||
{
|
||||
@@ -1053,6 +1118,7 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
.gso = gso,
|
||||
.csum = skb->ip_summed == CHECKSUM_PARTIAL,
|
||||
.vlan = skb_vlan_tag_present(skb),
|
||||
+ .qid = skb->mark & MTK_QDMA_TX_MASK,
|
||||
.vlan_tci = skb_vlan_tag_get(skb),
|
||||
.first = true,
|
||||
.last = !skb_is_nonlinear(skb),
|
||||
@@ -1112,7 +1178,9 @@ static int mtk_tx_map(struct sk_buff *sk
|
||||
}
|
||||
|
||||
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
|
||||
- txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
|
||||
+ txd_info.size = min_t(unsigned int, frag_size,
|
||||
+ soc->txrx.dma_max_len);
|
||||
+ txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
|
||||
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
|
||||
!(frag_size - txd_info.size);
|
||||
txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
|
||||
@@ -1193,17 +1261,16 @@ err_dma:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
|
||||
+static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
|
||||
{
|
||||
- int i, nfrags;
|
||||
+ int i, nfrags = 1;
|
||||
skb_frag_t *frag;
|
||||
|
||||
- nfrags = 1;
|
||||
if (skb_is_gso(skb)) {
|
||||
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||
frag = &skb_shinfo(skb)->frags[i];
|
||||
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
|
||||
- MTK_TX_DMA_BUF_LEN);
|
||||
+ eth->soc->txrx.dma_max_len);
|
||||
}
|
||||
} else {
|
||||
nfrags += skb_shinfo(skb)->nr_frags;
|
||||
@@ -1255,7 +1322,7 @@ static netdev_tx_t mtk_start_xmit(struct
|
||||
if (unlikely(test_bit(MTK_RESETTING, ð->state)))
|
||||
goto drop;
|
||||
|
||||
- tx_num = mtk_cal_txd_req(skb);
|
||||
+ tx_num = mtk_cal_txd_req(eth, skb);
|
||||
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
|
||||
netif_stop_queue(dev);
|
||||
netif_err(eth, tx_queued, dev,
|
||||
@@ -1347,7 +1414,7 @@ static int mtk_poll_rx(struct napi_struc
|
||||
int idx;
|
||||
struct sk_buff *skb;
|
||||
u8 *data, *new_data;
|
||||
- struct mtk_rx_dma *rxd, trxd;
|
||||
+ struct mtk_rx_dma_v2 *rxd, trxd;
|
||||
int done = 0, bytes = 0;
|
||||
|
||||
while (done < budget) {
|
||||
@@ -1355,7 +1422,7 @@ static int mtk_poll_rx(struct napi_struc
|
||||
unsigned int pktlen;
|
||||
dma_addr_t dma_addr;
|
||||
u32 hash, reason;
|
||||
- int mac;
|
||||
+ int mac = 0;
|
||||
|
||||
ring = mtk_get_rx_ring(eth);
|
||||
if (unlikely(!ring))
|
||||
@@ -1365,16 +1432,15 @@ static int mtk_poll_rx(struct napi_struc
|
||||
rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size;
|
||||
data = ring->data[idx];
|
||||
|
||||
- if (!mtk_rx_get_desc(&trxd, rxd))
|
||||
+ if (!mtk_rx_get_desc(eth, &trxd, rxd))
|
||||
break;
|
||||
|
||||
/* find out which mac the packet come from. values start at 1 */
|
||||
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
|
||||
- (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
|
||||
- mac = 0;
|
||||
- else
|
||||
- mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
|
||||
- RX_DMA_FPORT_MASK) - 1;
|
||||
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
|
||||
+ mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
|
||||
+ else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
|
||||
+ !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
|
||||
+ mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
|
||||
|
||||
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
|
||||
!eth->netdev[mac]))
|
||||
@@ -1417,7 +1483,7 @@ static int mtk_poll_rx(struct napi_struc
|
||||
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
|
||||
skb->dev = netdev;
|
||||
skb_put(skb, pktlen);
|
||||
- if (trxd.rxd4 & eth->rx_dma_l4_valid)
|
||||
+ if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid)
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
else
|
||||
skb_checksum_none_assert(skb);
|
||||
@@ -1435,10 +1501,25 @@ static int mtk_poll_rx(struct napi_struc
|
||||
mtk_ppe_check_skb(eth->ppe, skb,
|
||||
trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
|
||||
|
||||
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
|
||||
- (trxd.rxd2 & RX_DMA_VTAG))
|
||||
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
|
||||
- RX_DMA_VID(trxd.rxd3));
|
||||
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
|
||||
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
|
||||
+ if (trxd.rxd3 & RX_DMA_VTAG_V2)
|
||||
+ __vlan_hwaccel_put_tag(skb,
|
||||
+ htons(RX_DMA_VPID(trxd.rxd4)),
|
||||
+ RX_DMA_VID(trxd.rxd4));
|
||||
+ } else if (trxd.rxd2 & RX_DMA_VTAG) {
|
||||
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
|
||||
+ RX_DMA_VID(trxd.rxd3));
|
||||
+ }
|
||||
+
|
||||
+ /* If the device is attached to a dsa switch, the special
|
||||
+ * tag inserted in VLAN field by hw switch can be offloaded
|
||||
+ * by RX HW VLAN offload. Clear vlan info.
|
||||
+ */
|
||||
+ if (netdev_uses_dsa(netdev))
|
||||
+ __vlan_hwaccel_clear_tag(skb);
|
||||
+ }
|
||||
+
|
||||
skb_record_rx_queue(skb, 0);
|
||||
napi_gro_receive(napi, skb);
|
||||
|
||||
@@ -1450,7 +1531,7 @@ release_desc:
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
|
||||
rxd->rxd2 = RX_DMA_LSO;
|
||||
else
|
||||
- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
|
||||
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
|
||||
|
||||
ring->calc_idx = idx;
|
||||
|
||||
@@ -1652,7 +1733,8 @@ static int mtk_napi_rx(struct napi_struc
|
||||
do {
|
||||
int rx_done;
|
||||
|
||||
- mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status);
|
||||
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
|
||||
+ reg_map->pdma.irq_status);
|
||||
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
|
||||
rx_done_total += rx_done;
|
||||
|
||||
@@ -1666,10 +1748,11 @@ static int mtk_napi_rx(struct napi_struc
|
||||
if (rx_done_total == budget)
|
||||
return budget;
|
||||
|
||||
- } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT);
|
||||
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) &
|
||||
+ eth->soc->txrx.rx_irq_done_mask);
|
||||
|
||||
if (napi_complete_done(napi, rx_done_total))
|
||||
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
|
||||
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
|
||||
|
||||
return rx_done_total;
|
||||
}
|
||||
@@ -1679,7 +1762,7 @@ static int mtk_tx_alloc(struct mtk_eth *
|
||||
const struct mtk_soc_data *soc = eth->soc;
|
||||
struct mtk_tx_ring *ring = ð->tx_ring;
|
||||
int i, sz = soc->txrx.txd_size;
|
||||
- struct mtk_tx_dma *txd;
|
||||
+ struct mtk_tx_dma_v2 *txd;
|
||||
|
||||
ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
|
||||
GFP_KERNEL);
|
||||
@@ -1699,13 +1782,19 @@ static int mtk_tx_alloc(struct mtk_eth *
|
||||
txd->txd2 = next_ptr;
|
||||
txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
|
||||
txd->txd4 = 0;
|
||||
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
|
||||
+ txd->txd5 = 0;
|
||||
+ txd->txd6 = 0;
|
||||
+ txd->txd7 = 0;
|
||||
+ txd->txd8 = 0;
|
||||
+ }
|
||||
}
|
||||
|
||||
/* On MT7688 (PDMA only) this driver uses the ring->dma structs
|
||||
* only as the framework. The real HW descriptors are the PDMA
|
||||
* descriptors in ring->dma_pdma.
|
||||
*/
|
||||
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
|
||||
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
|
||||
ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
|
||||
&ring->phys_pdma, GFP_KERNEL);
|
||||
if (!ring->dma_pdma)
|
||||
@@ -1785,13 +1874,11 @@ static int mtk_rx_alloc(struct mtk_eth *
|
||||
struct mtk_rx_ring *ring;
|
||||
int rx_data_len, rx_dma_size;
|
||||
int i;
|
||||
- u32 offset = 0;
|
||||
|
||||
if (rx_flag == MTK_RX_FLAGS_QDMA) {
|
||||
if (ring_no)
|
||||
return -EINVAL;
|
||||
ring = ð->rx_ring_qdma;
|
||||
- offset = 0x1000;
|
||||
} else {
|
||||
ring = ð->rx_ring[ring_no];
|
||||
}
|
||||
@@ -1824,7 +1911,7 @@ static int mtk_rx_alloc(struct mtk_eth *
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < rx_dma_size; i++) {
|
||||
- struct mtk_rx_dma *rxd;
|
||||
+ struct mtk_rx_dma_v2 *rxd;
|
||||
|
||||
dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
|
||||
ring->data[i] + NET_SKB_PAD + eth->ip_align,
|
||||
@@ -1839,26 +1926,47 @@ static int mtk_rx_alloc(struct mtk_eth *
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
|
||||
rxd->rxd2 = RX_DMA_LSO;
|
||||
else
|
||||
- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
|
||||
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
|
||||
|
||||
rxd->rxd3 = 0;
|
||||
rxd->rxd4 = 0;
|
||||
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
|
||||
+ rxd->rxd5 = 0;
|
||||
+ rxd->rxd6 = 0;
|
||||
+ rxd->rxd7 = 0;
|
||||
+ rxd->rxd8 = 0;
|
||||
+ }
|
||||
}
|
||||
ring->dma_size = rx_dma_size;
|
||||
ring->calc_idx_update = false;
|
||||
ring->calc_idx = rx_dma_size - 1;
|
||||
- ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * MTK_QRX_OFFSET;
|
||||
+ if (rx_flag == MTK_RX_FLAGS_QDMA)
|
||||
+ ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
|
||||
+ ring_no * MTK_QRX_OFFSET;
|
||||
+ else
|
||||
+ ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
|
||||
+ ring_no * MTK_QRX_OFFSET;
|
||||
/* make sure that all changes to the dma ring are flushed before we
|
||||
* continue
|
||||
*/
|
||||
wmb();
|
||||
|
||||
- mtk_w32(eth, ring->phys,
|
||||
- reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset);
|
||||
- mtk_w32(eth, rx_dma_size,
|
||||
- reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset);
|
||||
- mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
|
||||
- mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset);
|
||||
+ if (rx_flag == MTK_RX_FLAGS_QDMA) {
|
||||
+ mtk_w32(eth, ring->phys,
|
||||
+ reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
|
||||
+ mtk_w32(eth, rx_dma_size,
|
||||
+ reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
|
||||
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
|
||||
+ reg_map->qdma.rst_idx);
|
||||
+ } else {
|
||||
+ mtk_w32(eth, ring->phys,
|
||||
+ reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
|
||||
+ mtk_w32(eth, rx_dma_size,
|
||||
+ reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
|
||||
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
|
||||
+ reg_map->pdma.rst_idx);
|
||||
+ }
|
||||
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -2277,7 +2385,7 @@ static irqreturn_t mtk_handle_irq_rx(int
|
||||
eth->rx_events++;
|
||||
if (likely(napi_schedule_prep(ð->rx_napi))) {
|
||||
__napi_schedule(ð->rx_napi);
|
||||
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
|
||||
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@@ -2301,8 +2409,10 @@ static irqreturn_t mtk_handle_irq(int ir
|
||||
struct mtk_eth *eth = _eth;
|
||||
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
|
||||
- if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) {
|
||||
- if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT)
|
||||
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) &
|
||||
+ eth->soc->txrx.rx_irq_done_mask) {
|
||||
+ if (mtk_r32(eth, reg_map->pdma.irq_status) &
|
||||
+ eth->soc->txrx.rx_irq_done_mask)
|
||||
mtk_handle_irq_rx(irq, _eth);
|
||||
}
|
||||
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
|
||||
@@ -2320,16 +2430,16 @@ static void mtk_poll_controller(struct n
|
||||
struct mtk_eth *eth = mac->hw;
|
||||
|
||||
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
|
||||
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
|
||||
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
|
||||
mtk_handle_irq_rx(eth->irq[2], dev);
|
||||
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
|
||||
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
|
||||
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int mtk_start_dma(struct mtk_eth *eth)
|
||||
{
|
||||
- u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
|
||||
+ u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
|
||||
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
|
||||
int err;
|
||||
|
||||
@@ -2340,12 +2450,19 @@ static int mtk_start_dma(struct mtk_eth
|
||||
}
|
||||
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
|
||||
- mtk_w32(eth,
|
||||
- MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
|
||||
- MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
|
||||
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
|
||||
- MTK_RX_BT_32DWORDS,
|
||||
- reg_map->qdma.glo_cfg);
|
||||
+ val = mtk_r32(eth, reg_map->qdma.glo_cfg);
|
||||
+ val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
|
||||
+ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
|
||||
+ MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
|
||||
+
|
||||
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
|
||||
+ val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
|
||||
+ MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
|
||||
+ MTK_CHK_DDONE_EN;
|
||||
+ else
|
||||
+ val |= MTK_RX_BT_32DWORDS;
|
||||
+ mtk_w32(eth, val, reg_map->qdma.glo_cfg);
|
||||
+
|
||||
mtk_w32(eth,
|
||||
MTK_RX_DMA_EN | rx_2b_offset |
|
||||
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
|
||||
@@ -2417,7 +2534,7 @@ static int mtk_open(struct net_device *d
|
||||
napi_enable(ð->tx_napi);
|
||||
napi_enable(ð->rx_napi);
|
||||
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
|
||||
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
|
||||
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
|
||||
refcount_set(ð->dma_refcnt, 1);
|
||||
}
|
||||
else
|
||||
@@ -2469,7 +2586,7 @@ static int mtk_stop(struct net_device *d
|
||||
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
|
||||
|
||||
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
|
||||
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
|
||||
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
|
||||
napi_disable(ð->tx_napi);
|
||||
napi_disable(ð->rx_napi);
|
||||
|
||||
@@ -2629,9 +2746,25 @@ static int mtk_hw_init(struct mtk_eth *e
|
||||
return 0;
|
||||
}
|
||||
|
||||
- /* Non-MT7628 handling... */
|
||||
- ethsys_reset(eth, RSTCTRL_FE);
|
||||
- ethsys_reset(eth, RSTCTRL_PPE);
|
||||
+ val = RSTCTRL_FE | RSTCTRL_PPE;
|
||||
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
|
||||
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
|
||||
+
|
||||
+ val |= RSTCTRL_ETH;
|
||||
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
|
||||
+ val |= RSTCTRL_PPE1;
|
||||
+ }
|
||||
+
|
||||
+ ethsys_reset(eth, val);
|
||||
+
|
||||
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
|
||||
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
|
||||
+ 0x3ffffff);
|
||||
+
|
||||
+ /* Set FE to PDMAv2 if necessary */
|
||||
+ val = mtk_r32(eth, MTK_FE_GLO_MISC);
|
||||
+ mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
|
||||
+ }
|
||||
|
||||
if (eth->pctl) {
|
||||
/* Set GE2 driving and slew rate */
|
||||
@@ -2670,11 +2803,47 @@ static int mtk_hw_init(struct mtk_eth *e
|
||||
|
||||
/* FE int grouping */
|
||||
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
|
||||
- mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4);
|
||||
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
|
||||
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
|
||||
- mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4);
|
||||
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
|
||||
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
|
||||
|
||||
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
|
||||
+ /* PSE should not drop port8 and port9 packets */
|
||||
+ mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
|
||||
+
|
||||
+ /* PSE Free Queue Flow Control */
|
||||
+ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
|
||||
+
|
||||
+ /* PSE config input queue threshold */
|
||||
+ mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
|
||||
+ mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
|
||||
+ mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
|
||||
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
|
||||
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
|
||||
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
|
||||
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
|
||||
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
|
||||
+
|
||||
+ /* PSE config output queue threshold */
|
||||
+ mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
|
||||
+ mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
|
||||
+ mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
|
||||
+ mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
|
||||
+ mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
|
||||
+ mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
|
||||
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
|
||||
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
|
||||
+
|
||||
+ /* GDM and CDM Threshold */
|
||||
+ mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
|
||||
+ mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
|
||||
+ mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
|
||||
+ mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
|
||||
+ mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
|
||||
+ mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
|
||||
+ }
|
||||
+
|
||||
return 0;
|
||||
|
||||
err_disable_pm:
|
||||
@@ -3211,12 +3380,8 @@ static int mtk_probe(struct platform_dev
|
||||
if (IS_ERR(eth->base))
|
||||
return PTR_ERR(eth->base);
|
||||
|
||||
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
|
||||
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
|
||||
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
|
||||
eth->ip_align = NET_IP_ALIGN;
|
||||
- } else {
|
||||
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
|
||||
- }
|
||||
|
||||
spin_lock_init(ð->page_lock);
|
||||
spin_lock_init(ð->tx_irq_lock);
|
||||
@@ -3452,6 +3617,10 @@ static const struct mtk_soc_data mt2701_
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
.rxd_size = sizeof(struct mtk_rx_dma),
|
||||
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
|
||||
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
|
||||
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||
+ .dma_len_offset = 16,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -3465,6 +3634,10 @@ static const struct mtk_soc_data mt7621_
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
.rxd_size = sizeof(struct mtk_rx_dma),
|
||||
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
|
||||
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
|
||||
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||
+ .dma_len_offset = 16,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -3479,6 +3652,10 @@ static const struct mtk_soc_data mt7622_
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
.rxd_size = sizeof(struct mtk_rx_dma),
|
||||
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
|
||||
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
|
||||
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||
+ .dma_len_offset = 16,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -3492,6 +3669,10 @@ static const struct mtk_soc_data mt7623_
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
.rxd_size = sizeof(struct mtk_rx_dma),
|
||||
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
|
||||
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
|
||||
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||
+ .dma_len_offset = 16,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -3505,6 +3686,10 @@ static const struct mtk_soc_data mt7629_
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
.rxd_size = sizeof(struct mtk_rx_dma),
|
||||
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
|
||||
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
|
||||
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||
+ .dma_len_offset = 16,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -3517,6 +3702,10 @@ static const struct mtk_soc_data rt5350_
|
||||
.txrx = {
|
||||
.txd_size = sizeof(struct mtk_tx_dma),
|
||||
.rxd_size = sizeof(struct mtk_rx_dma),
|
||||
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
|
||||
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
|
||||
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
|
||||
+ .dma_len_offset = 16,
|
||||
},
|
||||
};
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
@@ -23,6 +23,7 @@
|
||||
#define MTK_MAX_RX_LENGTH 1536
|
||||
#define MTK_MAX_RX_LENGTH_2K 2048
|
||||
#define MTK_TX_DMA_BUF_LEN 0x3fff
|
||||
+#define MTK_TX_DMA_BUF_LEN_V2 0xffff
|
||||
#define MTK_DMA_SIZE 512
|
||||
#define MTK_NAPI_WEIGHT 64
|
||||
#define MTK_MAC_COUNT 2
|
||||
@@ -83,6 +84,10 @@
|
||||
#define MTK_CDMQ_IG_CTRL 0x1400
|
||||
#define MTK_CDMQ_STAG_EN BIT(0)
|
||||
|
||||
+/* CDMP Ingress Control Register */
|
||||
+#define MTK_CDMP_IG_CTRL 0x400
|
||||
+#define MTK_CDMP_STAG_EN BIT(0)
|
||||
+
|
||||
/* CDMP Exgress Control Register */
|
||||
#define MTK_CDMP_EG_CTRL 0x404
|
||||
|
||||
@@ -102,13 +107,38 @@
|
||||
/* Unicast Filter MAC Address Register - High */
|
||||
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
|
||||
|
||||
+/* FE global misc reg*/
|
||||
+#define MTK_FE_GLO_MISC 0x124
|
||||
+
|
||||
+/* PSE Free Queue Flow Control */
|
||||
+#define PSE_FQFC_CFG1 0x100
|
||||
+#define PSE_FQFC_CFG2 0x104
|
||||
+#define PSE_DROP_CFG 0x108
|
||||
+
|
||||
+/* PSE Input Queue Reservation Register*/
|
||||
+#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
|
||||
+
|
||||
+/* PSE Output Queue Threshold Register*/
|
||||
+#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2))
|
||||
+
|
||||
+/* GDM and CDM Threshold */
|
||||
+#define MTK_GDM2_THRES 0x1530
|
||||
+#define MTK_CDMW0_THRES 0x164c
|
||||
+#define MTK_CDMW1_THRES 0x1650
|
||||
+#define MTK_CDME0_THRES 0x1654
|
||||
+#define MTK_CDME1_THRES 0x1658
|
||||
+#define MTK_CDMM_THRES 0x165c
|
||||
+
|
||||
/* PDMA HW LRO Control Registers */
|
||||
#define MTK_PDMA_LRO_CTRL_DW0 0x980
|
||||
#define MTK_LRO_EN BIT(0)
|
||||
#define MTK_L3_CKS_UPD_EN BIT(7)
|
||||
+#define MTK_L3_CKS_UPD_EN_V2 BIT(19)
|
||||
#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
|
||||
#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
|
||||
+#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24)
|
||||
#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
|
||||
+#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28)
|
||||
|
||||
#define MTK_PDMA_LRO_CTRL_DW1 0x984
|
||||
#define MTK_PDMA_LRO_CTRL_DW2 0x988
|
||||
@@ -180,6 +210,13 @@
|
||||
#define MTK_TX_DMA_EN BIT(0)
|
||||
#define MTK_DMA_BUSY_TIMEOUT_US 1000000
|
||||
|
||||
+/* QDMA V2 Global Configuration Register */
|
||||
+#define MTK_CHK_DDONE_EN BIT(28)
|
||||
+#define MTK_DMAD_WR_WDONE BIT(26)
|
||||
+#define MTK_WCOMP_EN BIT(24)
|
||||
+#define MTK_RESV_BUF (0x40 << 16)
|
||||
+#define MTK_MUTLI_CNT (0x4 << 12)
|
||||
+
|
||||
/* QDMA Flow Control Register */
|
||||
#define FC_THRES_DROP_MODE BIT(20)
|
||||
#define FC_THRES_DROP_EN (7 << 16)
|
||||
@@ -199,11 +236,32 @@
|
||||
#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
|
||||
#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
|
||||
|
||||
+#define MTK_RX_DONE_INT_V2 BIT(14)
|
||||
+
|
||||
/* QDMA Interrupt grouping registers */
|
||||
#define MTK_RLS_DONE_INT BIT(0)
|
||||
|
||||
#define MTK_STAT_OFFSET 0x40
|
||||
|
||||
+/* QDMA TX NUM */
|
||||
+#define MTK_QDMA_TX_NUM 16
|
||||
+#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1)
|
||||
+#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
|
||||
+#define MTK_QDMA_GMAC2_QID 8
|
||||
+
|
||||
+#define MTK_TX_DMA_BUF_SHIFT 8
|
||||
+
|
||||
+/* QDMA V2 descriptor txd6 */
|
||||
+#define TX_DMA_INS_VLAN_V2 BIT(16)
|
||||
+/* QDMA V2 descriptor txd5 */
|
||||
+#define TX_DMA_CHKSUM_V2 (0x7 << 28)
|
||||
+#define TX_DMA_TSO_V2 BIT(31)
|
||||
+
|
||||
+/* QDMA V2 descriptor txd4 */
|
||||
+#define TX_DMA_FPORT_SHIFT_V2 8
|
||||
+#define TX_DMA_FPORT_MASK_V2 0xf
|
||||
+#define TX_DMA_SWC_V2 BIT(30)
|
||||
+
|
||||
#define MTK_WDMA0_BASE 0x2800
|
||||
#define MTK_WDMA1_BASE 0x2c00
|
||||
|
||||
@@ -217,10 +275,9 @@
|
||||
/* QDMA descriptor txd3 */
|
||||
#define TX_DMA_OWNER_CPU BIT(31)
|
||||
#define TX_DMA_LS0 BIT(30)
|
||||
-#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
|
||||
-#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
|
||||
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
|
||||
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
|
||||
#define TX_DMA_SWC BIT(14)
|
||||
-#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
|
||||
|
||||
/* PDMA on MT7628 */
|
||||
#define TX_DMA_DONE BIT(31)
|
||||
@@ -230,12 +287,14 @@
|
||||
/* QDMA descriptor rxd2 */
|
||||
#define RX_DMA_DONE BIT(31)
|
||||
#define RX_DMA_LSO BIT(30)
|
||||
-#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
|
||||
-#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
|
||||
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
|
||||
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
|
||||
#define RX_DMA_VTAG BIT(15)
|
||||
|
||||
/* QDMA descriptor rxd3 */
|
||||
-#define RX_DMA_VID(_x) ((_x) & 0xfff)
|
||||
+#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
|
||||
+#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
|
||||
+#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff)
|
||||
|
||||
/* QDMA descriptor rxd4 */
|
||||
#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
|
||||
@@ -246,10 +305,15 @@
|
||||
/* QDMA descriptor rxd4 */
|
||||
#define RX_DMA_L4_VALID BIT(24)
|
||||
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
|
||||
-#define RX_DMA_FPORT_SHIFT 19
|
||||
-#define RX_DMA_FPORT_MASK 0x7
|
||||
#define RX_DMA_SPECIAL_TAG BIT(22)
|
||||
|
||||
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
|
||||
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
|
||||
+
|
||||
+/* PDMA V2 descriptor rxd3 */
|
||||
+#define RX_DMA_VTAG_V2 BIT(0)
|
||||
+#define RX_DMA_L4_VALID_V2 BIT(2)
|
||||
+
|
||||
/* PHY Indirect Access Control registers */
|
||||
#define MTK_PHY_IAC 0x10004
|
||||
#define PHY_IAC_ACCESS BIT(31)
|
||||
@@ -370,6 +434,16 @@
|
||||
#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
|
||||
|
||||
/* ethernet reset control register */
|
||||
+#define ETHSYS_RSTCTRL 0x34
|
||||
+#define RSTCTRL_FE BIT(6)
|
||||
+#define RSTCTRL_PPE BIT(31)
|
||||
+#define RSTCTRL_PPE1 BIT(30)
|
||||
+#define RSTCTRL_ETH BIT(23)
|
||||
+
|
||||
+/* ethernet reset check idle register */
|
||||
+#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
|
||||
+
|
||||
+/* ethernet reset control register */
|
||||
#define ETHSYS_RSTCTRL 0x34
|
||||
#define RSTCTRL_FE BIT(6)
|
||||
#define RSTCTRL_PPE BIT(31)
|
||||
@@ -453,6 +527,17 @@ struct mtk_rx_dma {
|
||||
unsigned int rxd4;
|
||||
} __packed __aligned(4);
|
||||
|
||||
+struct mtk_rx_dma_v2 {
|
||||
+ unsigned int rxd1;
|
||||
+ unsigned int rxd2;
|
||||
+ unsigned int rxd3;
|
||||
+ unsigned int rxd4;
|
||||
+ unsigned int rxd5;
|
||||
+ unsigned int rxd6;
|
||||
+ unsigned int rxd7;
|
||||
+ unsigned int rxd8;
|
||||
+} __packed __aligned(4);
|
||||
+
|
||||
struct mtk_tx_dma {
|
||||
unsigned int txd1;
|
||||
unsigned int txd2;
|
||||
@@ -460,6 +545,17 @@ struct mtk_tx_dma {
|
||||
unsigned int txd4;
|
||||
} __packed __aligned(4);
|
||||
|
||||
+struct mtk_tx_dma_v2 {
|
||||
+ unsigned int txd1;
|
||||
+ unsigned int txd2;
|
||||
+ unsigned int txd3;
|
||||
+ unsigned int txd4;
|
||||
+ unsigned int txd5;
|
||||
+ unsigned int txd6;
|
||||
+ unsigned int txd7;
|
||||
+ unsigned int txd8;
|
||||
+} __packed __aligned(4);
|
||||
+
|
||||
struct mtk_eth;
|
||||
struct mtk_mac;
|
||||
|
||||
@@ -646,7 +742,9 @@ enum mkt_eth_capabilities {
|
||||
MTK_SHARED_INT_BIT,
|
||||
MTK_TRGMII_MT7621_CLK_BIT,
|
||||
MTK_QDMA_BIT,
|
||||
+ MTK_NETSYS_V2_BIT,
|
||||
MTK_SOC_MT7628_BIT,
|
||||
+ MTK_RSTCTRL_PPE1_BIT,
|
||||
|
||||
/* MUX BITS*/
|
||||
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
|
||||
@@ -678,7 +776,9 @@ enum mkt_eth_capabilities {
|
||||
#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
|
||||
#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
|
||||
#define MTK_QDMA BIT(MTK_QDMA_BIT)
|
||||
+#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
|
||||
#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
|
||||
+#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
|
||||
|
||||
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
|
||||
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
|
||||
@@ -755,6 +855,7 @@ struct mtk_tx_dma_desc_info {
|
||||
dma_addr_t addr;
|
||||
u32 size;
|
||||
u16 vlan_tci;
|
||||
+ u16 qid;
|
||||
u8 gso:1;
|
||||
u8 csum:1;
|
||||
u8 vlan:1;
|
||||
@@ -812,6 +913,10 @@ struct mtk_reg_map {
|
||||
* the extra setup for those pins used by GMAC.
|
||||
* @txd_size Tx DMA descriptor size.
|
||||
* @rxd_size Rx DMA descriptor size.
|
||||
+ * @rx_irq_done_mask Rx irq done register mask.
|
||||
+ * @rx_dma_l4_valid Rx DMA valid register mask.
|
||||
+ * @dma_max_len Max DMA tx/rx buffer length.
|
||||
+ * @dma_len_offset Tx/Rx DMA length field offset.
|
||||
*/
|
||||
struct mtk_soc_data {
|
||||
const struct mtk_reg_map *reg_map;
|
||||
@@ -824,6 +929,10 @@ struct mtk_soc_data {
|
||||
struct {
|
||||
u32 txd_size;
|
||||
u32 rxd_size;
|
||||
+ u32 rx_irq_done_mask;
|
||||
+ u32 rx_dma_l4_valid;
|
||||
+ u32 dma_max_len;
|
||||
+ u32 dma_len_offset;
|
||||
} txrx;
|
||||
};
|
||||
|
||||
@@ -942,7 +1051,6 @@ struct mtk_eth {
|
||||
u32 tx_bytes;
|
||||
struct dim tx_dim;
|
||||
|
||||
- u32 rx_dma_l4_valid;
|
||||
int ip_align;
|
||||
|
||||
struct mtk_ppe *ppe;
|
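The NETSYS_V2 patch above stops hard-coding the descriptor length field and instead derives it from soc->txrx.dma_max_len and dma_len_offset (0x3fff with a shift of 16 on v1, 0xffff with a shift of 8 on NETSYS_V2). The stand-alone sketch below shows the encode/decode that TX_DMA_PLEN0()/RX_DMA_PREP_PLEN0()/RX_DMA_GET_PLEN0() now perform; the helper and struct names are invented for the example.

#include <stdint.h>
#include <stdio.h>

struct txrx_params {
	uint32_t dma_max_len;    /* mask for the buffer-length field */
	uint32_t dma_len_offset; /* bit position of length 0 in txd3/rxd2 */
};

/* Values taken from the constants added by the patch. */
static const struct txrx_params v1 = { .dma_max_len = 0x3fff, .dma_len_offset = 16 };
static const struct txrx_params v2 = { .dma_max_len = 0xffff, .dma_len_offset = 8 };

/* Encode a buffer length into a descriptor word (TX_DMA_PLEN0 equivalent). */
static uint32_t prep_plen0(const struct txrx_params *p, uint32_t len)
{
	return (len & p->dma_max_len) << p->dma_len_offset;
}

/* Decode it again (RX_DMA_GET_PLEN0 equivalent). */
static uint32_t get_plen0(const struct txrx_params *p, uint32_t desc_word)
{
	return (desc_word >> p->dma_len_offset) & p->dma_max_len;
}

int main(void)
{
	uint32_t w1 = prep_plen0(&v1, 1514);
	uint32_t w2 = prep_plen0(&v2, 1514);

	printf("v1 word 0x%08x -> len %u\n", (unsigned)w1, (unsigned)get_plen0(&v1, w1));
	printf("v2 word 0x%08x -> len %u\n", (unsigned)w2, (unsigned)get_plen0(&v2, w2));
	return 0;
}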
@ -0,0 +1,135 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:37 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: convert ring dma pointer to void
|
||||
|
||||
Simplify the code converting {tx,rx} ring dma pointer to void
|
||||
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -935,18 +935,15 @@ static int mtk_init_fq_dma(struct mtk_et
|
||||
return 0;
|
||||
}
|
||||
|
||||
-static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
|
||||
+static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
|
||||
{
|
||||
- void *ret = ring->dma;
|
||||
-
|
||||
- return ret + (desc - ring->phys);
|
||||
+ return ring->dma + (desc - ring->phys);
|
||||
}
|
||||
|
||||
static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
|
||||
- struct mtk_tx_dma *txd,
|
||||
- u32 txd_size)
|
||||
+ void *txd, u32 txd_size)
|
||||
{
|
||||
- int idx = ((void *)txd - (void *)ring->dma) / txd_size;
|
||||
+ int idx = (txd - ring->dma) / txd_size;
|
||||
|
||||
return &ring->buf[idx];
|
||||
}
|
||||
@@ -954,13 +951,12 @@ static struct mtk_tx_buf *mtk_desc_to_tx
|
||||
static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
|
||||
struct mtk_tx_dma *dma)
|
||||
{
|
||||
- return ring->dma_pdma - ring->dma + dma;
|
||||
+ return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
|
||||
}
|
||||
|
||||
-static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma,
|
||||
- u32 txd_size)
|
||||
+static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
|
||||
{
|
||||
- return ((void *)dma - (void *)ring->dma) / txd_size;
|
||||
+ return (dma - ring->dma) / txd_size;
|
||||
}
|
||||
|
||||
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
|
||||
@@ -1377,7 +1373,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
|
||||
|
||||
ring = ð->rx_ring[i];
|
||||
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
|
||||
- rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size;
|
||||
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
|
||||
if (rxd->rxd2 & RX_DMA_DONE) {
|
||||
ring->calc_idx_update = true;
|
||||
return ring;
|
||||
@@ -1429,7 +1425,7 @@ static int mtk_poll_rx(struct napi_struc
|
||||
goto rx_done;
|
||||
|
||||
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
|
||||
- rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size;
|
||||
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
|
||||
data = ring->data[idx];
|
||||
|
||||
if (!mtk_rx_get_desc(eth, &trxd, rxd))
|
||||
@@ -1633,7 +1629,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
|
||||
|
||||
mtk_tx_unmap(eth, tx_buf, true);
|
||||
|
||||
- desc = (void *)ring->dma + cpu * eth->soc->txrx.txd_size;
|
||||
+ desc = ring->dma + cpu * eth->soc->txrx.txd_size;
|
||||
ring->last_free = desc;
|
||||
atomic_inc(&ring->free_count);
|
||||
|
||||
@@ -1778,7 +1774,7 @@ static int mtk_tx_alloc(struct mtk_eth *
|
||||
int next = (i + 1) % MTK_DMA_SIZE;
|
||||
u32 next_ptr = ring->phys + next * sz;
|
||||
|
||||
- txd = (void *)ring->dma + i * sz;
|
||||
+ txd = ring->dma + i * sz;
|
||||
txd->txd2 = next_ptr;
|
||||
txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
|
||||
txd->txd4 = 0;
|
||||
@@ -1808,7 +1804,7 @@ static int mtk_tx_alloc(struct mtk_eth *
|
||||
|
||||
ring->dma_size = MTK_DMA_SIZE;
|
||||
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
|
||||
- ring->next_free = &ring->dma[0];
|
||||
+ ring->next_free = ring->dma;
|
||||
ring->last_free = (void *)txd;
|
||||
ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
|
||||
ring->thresh = MAX_SKB_FRAGS;
|
||||
@@ -1920,7 +1916,7 @@ static int mtk_rx_alloc(struct mtk_eth *
|
||||
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
|
||||
return -ENOMEM;
|
||||
|
||||
- rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size;
|
||||
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
|
||||
rxd->rxd1 = (unsigned int)dma_addr;
|
||||
|
||||
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
|
||||
@@ -1982,7 +1978,7 @@ static void mtk_rx_clean(struct mtk_eth
|
||||
if (!ring->data[i])
|
||||
continue;
|
||||
|
||||
- rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size;
|
||||
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
|
||||
if (!rxd->rxd1)
|
||||
continue;
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
@@ -688,7 +688,7 @@ struct mtk_tx_buf {
|
||||
* are present
|
||||
*/
|
||||
struct mtk_tx_ring {
|
||||
- struct mtk_tx_dma *dma;
|
||||
+ void *dma;
|
||||
struct mtk_tx_buf *buf;
|
||||
dma_addr_t phys;
|
||||
struct mtk_tx_dma *next_free;
|
||||
@@ -718,7 +718,7 @@ enum mtk_rx_flags {
|
||||
* @calc_idx: The current head of ring
|
||||
*/
|
||||
struct mtk_rx_ring {
|
||||
- struct mtk_rx_dma *dma;
|
||||
+ void *dma;
|
||||
u8 **data;
|
||||
dma_addr_t phys;
|
||||
u16 frag_size;
|
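With ring->dma converted to void * above, the descriptor stride comes from soc->txrx.txd_size rather than from the pointer's type, so one code path can walk both 16-byte v1 and 32-byte v2 descriptors without casts. A compilable sketch of the idea follows (void-pointer arithmetic is a GNU C extension, as used throughout the kernel); the struct and helper names here are illustrative only, not the driver's.

#include <stdio.h>

struct tx_dma_v1 { unsigned int txd1, txd2, txd3, txd4; };
struct tx_dma_v2 { unsigned int txd1, txd2, txd3, txd4, txd5, txd6, txd7, txd8; };

struct tx_ring { void *dma; };

/* Stride is a runtime parameter, mirroring ring->dma + i * soc->txrx.txd_size. */
static void *desc_at(struct tx_ring *ring, int idx, unsigned int txd_size)
{
	return ring->dma + idx * txd_size; /* GNU extension: void * arithmetic */
}

int main(void)
{
	struct tx_dma_v2 descs[4] = { { 0 } };
	struct tx_ring ring = { .dma = descs };

	printf("v1 stride %zu bytes, v2 stride %zu bytes\n",
	       sizeof(struct tx_dma_v1), sizeof(struct tx_dma_v2));

	/* Same helper works for either generation; only the stride differs. */
	struct tx_dma_v2 *d = desc_at(&ring, 2, sizeof(struct tx_dma_v2));
	d->txd5 = 0x5;
	printf("desc 2 txd5 = 0x%x\n", descs[2].txd5);
	return 0;
}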
@ -0,0 +1,33 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Fri, 20 May 2022 20:11:38 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: convert scratch_ring pointer to
|
||||
void
|
||||
|
||||
Simplify the code converting scratch_ring pointer to void
|
||||
|
||||
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Signed-off-by: David S. Miller <davem@davemloft.net>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
||||
@@ -911,7 +911,7 @@ static int mtk_init_fq_dma(struct mtk_et
|
||||
for (i = 0; i < cnt; i++) {
|
||||
struct mtk_tx_dma_v2 *txd;
|
||||
|
||||
- txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size;
|
||||
+ txd = eth->scratch_ring + i * soc->txrx.txd_size;
|
||||
txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
|
||||
if (i < cnt - 1)
|
||||
txd->txd2 = eth->phy_scratch_ring +
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
||||
@@ -1028,7 +1028,7 @@ struct mtk_eth {
|
||||
struct mtk_rx_ring rx_ring_qdma;
|
||||
struct napi_struct tx_napi;
|
||||
struct napi_struct rx_napi;
|
||||
- struct mtk_tx_dma *scratch_ring;
|
||||
+ void *scratch_ring;
|
||||
dma_addr_t phy_scratch_ring;
|
||||
void *scratch_head;
|
||||
struct clk *clks[MTK_CLK_MAX];
|
@ -0,0 +1,138 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 20 May 2022 20:11:39 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce support for mt7986
chipset

Add support for mt7986-eth driver available on mt7986 soc.

Tested-by: Sam Shih <sam.shih@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -87,6 +87,43 @@ static const struct mtk_reg_map mt7628_r
},
};

+static const struct mtk_reg_map mt7986_reg_map = {
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+ .rx_ptr = 0x6100,
+ .rx_cnt_cfg = 0x6104,
+ .pcrx_ptr = 0x6108,
+ .glo_cfg = 0x6204,
+ .rst_idx = 0x6208,
+ .delay_irq = 0x620c,
+ .irq_status = 0x6220,
+ .irq_mask = 0x6228,
+ .int_grp = 0x6250,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+ .rx_ptr = 0x4500,
+ .rx_cnt_cfg = 0x4504,
+ .qcrx_ptr = 0x4508,
+ .glo_cfg = 0x4604,
+ .rst_idx = 0x4608,
+ .delay_irq = 0x460c,
+ .fc_th = 0x4610,
+ .int_grp = 0x4620,
+ .hred = 0x4644,
+ .ctx_ptr = 0x4700,
+ .dtx_ptr = 0x4704,
+ .crx_ptr = 0x4710,
+ .drx_ptr = 0x4714,
+ .fq_head = 0x4720,
+ .fq_tail = 0x4724,
+ .fq_count = 0x4728,
+ .fq_blen = 0x472c,
+ },
+ .gdm1_cnt = 0x1c00,
+};
+
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
char str[ETH_GSTRING_LEN];
@@ -110,7 +147,7 @@ static const char * const mtk_clks_sourc
"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
- "sgmii_ck", "eth2pll",
+ "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -3689,6 +3726,21 @@ static const struct mtk_soc_data mt7629_
},
};

+static const struct mtk_soc_data mt7986_data = {
+ .reg_map = &mt7986_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7986_CAPS,
+ .required_clks = MT7986_CLKS_BITMAP,
+ .required_pctl = false,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+};
+
static const struct mtk_soc_data rt5350_data = {
.reg_map = &mt7628_reg_map,
.caps = MT7628_CAPS,
@@ -3711,6 +3763,7 @@ const struct of_device_id of_mtk_match[]
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+ { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
{},
};
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -623,6 +623,10 @@ enum mtk_clks_map {
MTK_CLK_SGMII2_CDR_FB,
MTK_CLK_SGMII_CK,
MTK_CLK_ETH2PLL,
+ MTK_CLK_WOCPU0,
+ MTK_CLK_WOCPU1,
+ MTK_CLK_NETSYS0,
+ MTK_CLK_NETSYS1,
MTK_CLK_MAX
};

@@ -653,6 +657,16 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII2_CDR_FB) | \
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
+#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB))

enum mtk_dev_state {
MTK_HW_INIT,
@@ -851,6 +865,10 @@ enum mkt_eth_capabilities {
MTK_MUX_U3_GMAC2_TO_QPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)

+#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
u32 size;
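Worth noting about the structure of the patch above: everything SoC-specific (register offsets, capability bits, clock bitmap, descriptor sizes) is grouped in one const mtk_soc_data object, and the new of_device_id entry simply carries a pointer to it, so the probe path never has to branch on the chip model. A toy userspace C sketch of that match-table pattern follows; the struct layout, names and values are illustrative stand-ins, not the driver's definitions.

#include <stdio.h>
#include <string.h>

/* Per-SoC constants, in the spirit of struct mtk_soc_data (values illustrative). */
struct soc_data {
	unsigned int tx_irq_mask;
	unsigned int txd_size;
};

static const struct soc_data mt7623_data = { .tx_irq_mask = 0x1a1c, .txd_size = 16 };
static const struct soc_data mt7986_data = { .tx_irq_mask = 0x461c, .txd_size = 32 };

/* Match table: compatible string -> per-SoC data, like of_device_id.data. */
static const struct match_entry {
	const char *compatible;
	const struct soc_data *data;
} of_match[] = {
	{ "mediatek,mt7623-eth", &mt7623_data },
	{ "mediatek,mt7986-eth", &mt7986_data },
};

static const struct soc_data *match_soc(const char *compatible)
{
	for (size_t i = 0; i < sizeof(of_match) / sizeof(of_match[0]); i++)
		if (strcmp(of_match[i].compatible, compatible) == 0)
			return of_match[i].data;
	return NULL;	/* unknown board: refuse to probe */
}

int main(void)
{
	const struct soc_data *soc = match_soc("mediatek,mt7986-eth");

	if (soc)
		printf("tx_irq_mask=0x%x txd_size=%u\n", soc->tx_irq_mask, soc->txd_size);
	return 0;
}

In the driver itself the equivalent lookup happens through the OF match table when the platform device is probed.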
@ -0,0 +1,25 @@
From: Dan Carpenter <dan.carpenter@oracle.com>
Date: Thu, 19 May 2022 17:08:00 +0300
Subject: [PATCH] net: ethernet: mtk_eth_soc: fix error code in
mtk_flow_offload_replace()

Preserve the error code from mtk_foe_entry_commit(). Do not return
success.

Fixes: c4f033d9e03e ("net: ethernet: mtk_eth_soc: rework hardware flow table management")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -434,7 +434,8 @@ mtk_flow_offload_replace(struct mtk_eth
memcpy(&entry->data, &foe, sizeof(entry->data));
entry->wed_index = wed_index;

- if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
+ err = mtk_foe_entry_commit(eth->ppe, entry);
+ if (err < 0)
goto free;

err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
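The bug fixed above is a common one: calling a function only inside the if () condition throws its error code away, so a later return can report success for a failed operation. A minimal userspace C sketch of the corrected shape, with hypothetical names rather than the driver's:

#include <errno.h>
#include <stdio.h>

/* Stand-in for mtk_foe_entry_commit(): fails with a specific error code. */
static int commit_entry(int id)
{
	return (id < 0) ? -EINVAL : 0;
}

static int replace_flow(int id)
{
	int err;

	/* Buggy shape: "if (commit_entry(id) < 0) goto free;" would discard
	 * the code, and the error path could end up reporting success.
	 * Storing the value first preserves the real reason for the failure. */
	err = commit_entry(id);
	if (err < 0)
		goto free;

	return 0;

free:
	return err;
}

int main(void)
{
	printf("replace_flow(-1) = %d\n", replace_flow(-1));	/* prints -22 (-EINVAL) */
	return 0;
}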
@ -0,0 +1,25 @@
From: Dan Carpenter <dan.carpenter@oracle.com>
Date: Thu, 26 May 2022 11:02:42 +0300
Subject: [PATCH] net: ethernet: mtk_eth_soc: out of bounds read in
mtk_hwlro_get_fdir_entry()

The "fsp->location" variable comes from user via ethtool_get_rxnfc().
Check that it is valid to prevent an out of bounds read.

Fixes: 7aab747e5563 ("net: ethernet: mediatek: add ethtool functions to configure RX flows of HW LRO")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2230,6 +2230,9 @@ static int mtk_hwlro_get_fdir_entry(stru
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;

+ if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
+ return -EINVAL;
+
/* only tcp dst ipv4 is meaningful, others are meaningless */
fsp->flow_type = TCP_V4_FLOW;
fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
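The added check follows the usual rule for ethtool-style handlers: an index supplied by userspace must be validated against the size of the backing array before it is used. A standalone C sketch of that rule, with an illustrative array standing in for mac->hwlro_ip:

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static unsigned int hwlro_ip[10];	/* illustrative stand-in for mac->hwlro_ip */

static int get_fdir_entry(unsigned int location)
{
	/* Reject out-of-range input before it is used as an array index. */
	if (location >= ARRAY_SIZE(hwlro_ip))
		return -EINVAL;

	printf("entry %u -> 0x%x\n", location, hwlro_ip[location]);
	return 0;
}

int main(void)
{
	printf("%d\n", get_fdir_entry(3));	/* 0: within bounds */
	printf("%d\n", get_fdir_entry(99));	/* -22: rejected */
	return 0;
}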
@ -0,0 +1,47 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Mon, 6 Jun 2022 21:49:00 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: enable rx cksum offload for
MTK_NETSYS_V2

Enable rx checksum offload for mt7986 chipset.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/c8699805c18f7fd38315fcb8da2787676d83a32c.1654544585.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1451,8 +1451,8 @@ static int mtk_poll_rx(struct napi_struc
int done = 0, bytes = 0;

while (done < budget) {
+ unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
- unsigned int pktlen;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@@ -1516,7 +1516,13 @@ static int mtk_poll_rx(struct napi_struc
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
skb_put(skb, pktlen);
- if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid)
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ rxdcsum = &trxd.rxd3;
+ else
+ rxdcsum = &trxd.rxd4;
+
+ if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -3739,6 +3745,7 @@ static const struct mtk_soc_data mt7986_
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
@ -0,0 +1,61 @@
From: Chen Lin <chen45464546@163.com>
Date: Wed, 8 Jun 2022 20:46:53 +0800
Subject: [PATCH] net: ethernet: mtk_eth_soc: fix misuse of mem alloc interface
netdev[napi]_alloc_frag

When rx_flag == MTK_RX_FLAGS_HWLRO,
rx_data_len = MTK_MAX_LRO_RX_LENGTH(4096 * 3) > PAGE_SIZE.
netdev_alloc_frag is for allocation of page fragments only.
Refer to other drivers and Documentation/vm/page_frags.rst.

Branch to use __get_free_pages when ring->frag_size > PAGE_SIZE.

Signed-off-by: Chen Lin <chen45464546@163.com>
Link: https://lore.kernel.org/r/1654692413-2598-1-git-send-email-chen45464546@163.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -917,6 +917,17 @@ static bool mtk_rx_get_desc(struct mtk_e
return true;
}

+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
+{
+ unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
+ unsigned long data;
+
+ data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
+ get_order(size));
+
+ return (void *)data;
+}
+
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
@@ -1485,7 +1496,10 @@ static int mtk_poll_rx(struct napi_struc
goto release_desc;

/* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
if (unlikely(!new_data)) {
netdev->stats.rx_dropped++;
goto release_desc;
@@ -1938,7 +1952,10 @@ static int mtk_rx_alloc(struct mtk_eth *
return -ENOMEM;

for (i = 0; i < rx_dma_size; i++) {
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ if (ring->frag_size <= PAGE_SIZE)
+ ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ else
+ ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
if (!ring->data[i])
return -ENOMEM;
}
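The reasoning above comes down to one decision: a page-fragment allocator can only hand out pieces of a single page, so a request larger than PAGE_SIZE (the HW LRO case, roughly 12 KiB here) has to fall back to whole pages. A rough userspace analogy of that branch, using malloc() and aligned_alloc() as stand-ins for napi_alloc_frag()/__get_free_pages():

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Pick an allocator based on the buffer size, mirroring the
 * frag_size <= PAGE_SIZE test added by the patch (analogy only). */
static void *rx_buf_alloc(size_t frag_size)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);

	if (frag_size <= page_size)
		return malloc(frag_size);	/* small buffer: fragment-style allocation */

	/* large buffer (e.g. HW LRO): round up to whole, page-aligned pages */
	size_t pages = (frag_size + page_size - 1) / page_size;
	return aligned_alloc(page_size, pages * page_size);
}

int main(void)
{
	void *normal = rx_buf_alloc(2048);	/* fits in one page */
	void *lro = rx_buf_alloc(3 * 4096);	/* about MTK_MAX_LRO_RX_LENGTH */

	printf("normal=%p lro=%p\n", normal, lro);
	free(normal);
	free(lro);
	return 0;
}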
@ -10,17 +10,17 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2186,8 +2186,8 @@ static irqreturn_t mtk_handle_irq_rx(int
@@ -2443,8 +2443,8 @@ static irqreturn_t mtk_handle_irq_rx(int

eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
- __napi_schedule(&eth->rx_napi);
mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ __napi_schedule(&eth->rx_napi);
}

return IRQ_HANDLED;
@@ -2199,8 +2199,8 @@ static irqreturn_t mtk_handle_irq_tx(int
@@ -2456,8 +2456,8 @@ static irqreturn_t mtk_handle_irq_tx(int

eth->tx_events++;
if (likely(napi_schedule_prep(&eth->tx_napi))) {
@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}

return IRQ_HANDLED;
@@ -3313,6 +3313,8 @@ static int mtk_probe(struct platform_dev
@@ -3623,6 +3623,8 @@ static int mtk_probe(struct platform_dev
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
@ -14,11 +14,11 @@ Signed-off-by: Frank Wunderlich <frank-w@public-files.de>

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3364,6 +3364,7 @@ static const struct mtk_soc_data mt2701_
@@ -3675,6 +3675,7 @@ static const struct mtk_soc_data mt2701_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .offload_version = 2,
};

static const struct mtk_soc_data mt7621_data = {
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@ -20,7 +20,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -106,13 +106,35 @@ static int _mtk_mdio_write(struct mtk_et
@@ -196,13 +196,35 @@ static int _mtk_mdio_write(struct mtk_et
if (ret < 0)
return ret;

@ -63,7 +63,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>

ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
@@ -129,12 +151,33 @@ static int _mtk_mdio_read(struct mtk_eth
@@ -219,12 +241,33 @@ static int _mtk_mdio_read(struct mtk_eth
if (ret < 0)
return ret;

@ -103,7 +103,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>

ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
@@ -593,6 +636,7 @@ static int mtk_mdio_init(struct mtk_eth
@@ -683,6 +726,7 @@ static int mtk_mdio_init(struct mtk_eth
eth->mii_bus->name = "mdio";
eth->mii_bus->read = mtk_mdio_read;
eth->mii_bus->write = mtk_mdio_write;
@ -113,7 +113,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -349,9 +349,12 @@
@@ -322,9 +322,12 @@
#define PHY_IAC_ADDR_MASK GENMASK(24, 20)
#define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
#define PHY_IAC_CMD_MASK GENMASK(19, 18)
@ -1,6 +1,6 @@
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -577,6 +577,7 @@ static void mtk_validate(struct phylink_
@@ -667,6 +667,7 @@ static void mtk_validate(struct phylink_
if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);