From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 5 Nov 2022 23:36:19 +0100
Subject: [PATCH] net: ethernet: mtk_wed: introduce wed wo support

Introduce WO chip support to the mtk wed driver. MTK WED WO is used to
implement RX Wireless Ethernet Dispatch and to offload traffic received
by the wlan nic to the wired interface.

Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
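A usage illustration (not part of the applied diff): after this change, a
command skb handed to mtk_wed_mcu_send_msg() is queued to the WO firmware
through mtk_wed_wo_queue_tx_skb() instead of being freed unsent. A minimal
caller-side sketch, assuming a hypothetical command id WO_CMD_WED_CFG and
payload layout:

	struct {
		__le32 arg0;	/* hypothetical payload */
		__le32 arg1;
	} req = {
		.arg0 = cpu_to_le32(1),
	};
	int err;

	/* post to the WO module; the last argument waits for the reply event */
	err = mtk_wed_mcu_send_msg(hw->wed_wo, MTK_WED_MODULE_ID_WO,
				   WO_CMD_WED_CFG, &req, sizeof(req), true);
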
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c

--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,7 +5,7 @@
 
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
-mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
 ifdef CONFIG_DEBUG_FS
 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
 endif
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -16,6 +16,7 @@
 #include "mtk_wed_regs.h"
 #include "mtk_wed.h"
 #include "mtk_ppe.h"
+#include "mtk_wed_wo.h"
 
 #define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)
 
@@ -355,6 +356,8 @@ mtk_wed_detach(struct mtk_wed_device *de
 
 	mtk_wed_free_buffer(dev);
 	mtk_wed_free_tx_rings(dev);
+	if (hw->version != 1)
+		mtk_wed_wo_deinit(hw);
 
 	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
 		struct device_node *wlan_node;
@@ -885,9 +888,11 @@ mtk_wed_attach(struct mtk_wed_device *de
 	}
 
 	mtk_wed_hw_init_early(dev);
-	if (hw->hifsys)
+	if (hw->version == 1)
 		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
 				   BIT(hw->index), 0);
+	else
+		ret = mtk_wed_wo_init(hw);
 
 out:
 	mutex_unlock(&hw_lock);
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -10,6 +10,7 @@
 #include <linux/netdevice.h>
 
 struct mtk_eth;
+struct mtk_wed_wo;
 
 struct mtk_wed_hw {
 	struct device_node *node;
@@ -22,6 +23,7 @@ struct mtk_wed_hw {
 	struct regmap *mirror;
 	struct dentry *debugfs_dir;
 	struct mtk_wed_device *wed_dev;
+	struct mtk_wed_wo *wed_wo;
 	u32 debugfs_reg;
 	u32 num_flows;
 	u8 version;
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -122,8 +122,7 @@ mtk_wed_mcu_skb_send_msg(struct mtk_wed_
 	if (id == MTK_WED_MODULE_ID_WO)
 		hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
 
-	dev_kfree_skb(skb);
-	return 0;
+	return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
 }
 
 static int
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ *	   Sujuan Chen <sujuan.chen@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/bitfield.h>
+
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
+
+static u32
+mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
+{
+	u32 val;
+
+	if (regmap_read(wo->mmio.regs, reg, &val))
+		val = ~0;
+
+	return val;
+}
+
+static void
+mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+{
+	regmap_write(wo->mmio.regs, reg, val);
+}
+
+static u32
+mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
+{
+	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
+
+	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
+}
+
+static void
+mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
+}
+
+static void
+mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
+}
+
+static void
+mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wo->mmio.lock, flags);
+	wo->mmio.irq_mask &= ~mask;
+	wo->mmio.irq_mask |= val;
+	if (set)
+		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+	spin_unlock_irqrestore(&wo->mmio.lock, flags);
+}
+
+static void
+mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
+	tasklet_schedule(&wo->mmio.irq_tasklet);
+}
+
+static void
+mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
+}
+
+static void
+mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
+}
+
+static void
+mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+		      u32 val)
+{
+	wmb();
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
+}
+
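+/* Pop one completed buffer off the ring: advance the tail index and
+ * unmap the buffer for the caller. With @flush set the DMA_DONE check
+ * is bypassed so pending entries can be reclaimed on queue teardown.
+ */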
+static void *
+mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
+		   bool flush)
+{
+	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+	int index = (q->tail + 1) % q->n_desc;
+	struct mtk_wed_wo_queue_entry *entry;
+	struct mtk_wed_wo_queue_desc *desc;
+	void *buf;
+
+	if (!q->queued)
+		return NULL;
+
+	if (flush)
+		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
+	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
+		return NULL;
+
+	q->tail = index;
+	q->queued--;
+
+	desc = &q->desc[index];
+	entry = &q->entry[index];
+	buf = entry->buf;
+	if (len)
+		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
+				 le32_to_cpu(READ_ONCE(desc->ctrl)));
+	if (buf)
+		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
+				 DMA_FROM_DEVICE);
+	entry->buf = NULL;
+
+	return buf;
+}
+
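+/* Fill empty ring slots with page_frag buffers mapped for DMA. For the
+ * rx ring the descriptor is also primed so the WO firmware can DMA into
+ * the buffer; tx slots are only staged for the memcpy done later in
+ * mtk_wed_wo_queue_tx_skb().
+ */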
+static int
+mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+			gfp_t gfp, bool rx)
+{
+	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	int n_buf = 0;
+
+	spin_lock_bh(&q->lock);
+	while (q->queued < q->n_desc) {
+		void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp);
+		struct mtk_wed_wo_queue_entry *entry;
+		dma_addr_t addr;
+
+		if (!buf)
+			break;
+
+		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
+		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
+			skb_free_frag(buf);
+			break;
+		}
+
+		q->head = (q->head + 1) % q->n_desc;
+		entry = &q->entry[q->head];
+		entry->addr = addr;
+		entry->len = q->buf_size;
+		q->entry[q->head].buf = buf;
+
+		if (rx) {
+			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
+			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
+				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
+					      entry->len);
+
+			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
+			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+		}
+		q->queued++;
+		n_buf++;
+	}
+	spin_unlock_bh(&q->lock);
+
+	return n_buf;
+}
+
+static void
+mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
+{
+	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
+	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
+}
+
+static void
+mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	for (;;) {
+		struct mtk_wed_mcu_hdr *hdr;
+		struct sk_buff *skb;
+		void *data;
+		u32 len;
+
+		data = mtk_wed_wo_dequeue(wo, q, &len, false);
+		if (!data)
+			break;
+
+		skb = build_skb(data, q->buf_size);
+		if (!skb) {
+			skb_free_frag(data);
+			continue;
+		}
+
+		__skb_put(skb, len);
+		if (mtk_wed_mcu_check_msg(wo, skb)) {
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
+			mtk_wed_mcu_rx_event(wo, skb);
+		else
+			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
+	}
+
+	if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) {
+		u32 index = (q->head - 1) % q->n_desc;
+
+		mtk_wed_wo_queue_kick(wo, q, index);
+	}
+}
+
+static irqreturn_t
+mtk_wed_wo_irq_handler(int irq, void *data)
+{
+	struct mtk_wed_wo *wo = data;
+
+	mtk_wed_wo_set_isr(wo, 0);
+	tasklet_schedule(&wo->mmio.irq_tasklet);
+
+	return IRQ_HANDLED;
+}
+
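+/* Bottom half: read and mask the pending WO CCIF interrupts, drain the
+ * rx ring and ack/re-enable the rx interrupt when done.
+ */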
+static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
+{
+	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
+	u32 intr, mask;
+
+	/* disable interrupts */
+	mtk_wed_wo_set_isr(wo, 0);
+
+	intr = mtk_wed_wo_get_isr(wo);
+	intr &= wo->mmio.irq_mask;
+	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
+	mtk_wed_wo_irq_disable(wo, mask);
+
+	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
+		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
+		mtk_wed_wo_rx_complete(wo);
+	}
+}
+
+/* mtk wed wo hw queues */
+
+static int
+mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+		       int n_desc, int buf_size, int index,
+		       struct mtk_wed_wo_queue_regs *regs)
+{
+	spin_lock_init(&q->lock);
+	q->regs = *regs;
+	q->n_desc = n_desc;
+	q->buf_size = buf_size;
+
+	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
+				      &q->desc_dma, GFP_KERNEL);
+	if (!q->desc)
+		return -ENOMEM;
+
+	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
+				GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void
+mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
+			  q->desc_dma);
+}
+
+static void
+mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	struct page *page;
+	int i;
+
+	spin_lock_bh(&q->lock);
+	for (i = 0; i < q->n_desc; i++) {
+		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
+
+		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
+				 DMA_TO_DEVICE);
+		skb_free_frag(entry->buf);
+		entry->buf = NULL;
+	}
+	spin_unlock_bh(&q->lock);
+
+	if (!q->cache.va)
+		return;
+
+	page = virt_to_page(q->cache.va);
+	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
+	memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	struct page *page;
+
+	spin_lock_bh(&q->lock);
+	for (;;) {
+		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
+
+		if (!buf)
+			break;
+
+		skb_free_frag(buf);
+	}
+	spin_unlock_bh(&q->lock);
+
+	if (!q->cache.va)
+		return;
+
+	page = virt_to_page(q->cache.va);
+	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
+	memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
+	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
+}
+
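+/* Copy the mcu command skb into a pre-mapped tx buffer and kick both the
+ * tx ring and the WO mcu. The skb is consumed in all cases, matching the
+ * dev_kfree_skb() this path replaces in mtk_wed_mcu_skb_send_msg().
+ */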
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+			    struct sk_buff *skb)
+{
+	struct mtk_wed_wo_queue_entry *entry;
+	struct mtk_wed_wo_queue_desc *desc;
+	int ret = 0, index;
+	u32 ctrl;
+
+	spin_lock_bh(&q->lock);
+
+	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
+	index = (q->head + 1) % q->n_desc;
+	if (q->tail == index) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	entry = &q->entry[index];
+	if (skb->len > entry->len) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	desc = &q->desc[index];
+	q->head = index;
+
+	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
+				DMA_TO_DEVICE);
+	memcpy(entry->buf, skb->data, skb->len);
+	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
+				   DMA_TO_DEVICE);
+
+	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
+	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
+	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
+	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+	mtk_wed_wo_queue_kick(wo, q, q->head);
+	mtk_wed_wo_kickout(wo);
+out:
+	spin_unlock_bh(&q->lock);
+
+	dev_kfree_skb(skb);
+
+	return ret;
+}
+
+static int
+mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
+{
+	return 0;
+}
+
+static int
+mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
+{
+	struct mtk_wed_wo_queue_regs regs;
+	struct device_node *np;
+	int ret;
+
+	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
+	if (!np)
+		return -ENODEV;
+
+	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
+	if (IS_ERR_OR_NULL(wo->mmio.regs))
+		return PTR_ERR(wo->mmio.regs);
+
+	wo->mmio.irq = irq_of_parse_and_map(np, 0);
+	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
+	spin_lock_init(&wo->mmio.lock);
+	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
+
+	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
+			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
+			       KBUILD_MODNAME, wo);
+	if (ret)
+		goto error;
+
+	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
+	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
+	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
+	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
+
+	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
+				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
+				     &regs);
+	if (ret)
+		goto error;
+
+	mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false);
+	mtk_wed_wo_queue_reset(wo, &wo->q_tx);
+
+	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
+	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
+	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
+	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
+
+	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
+				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
+				     &regs);
+	if (ret)
+		goto error;
+
+	mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true);
+	mtk_wed_wo_queue_reset(wo, &wo->q_rx);
+
+	/* rx queue irqmask */
+	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+
+	return 0;
+
+error:
+	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+	return ret;
+}
+
+static void
+mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
+{
+	/* disable interrupts */
+	mtk_wed_wo_set_isr(wo, 0);
+
+	tasklet_disable(&wo->mmio.irq_tasklet);
+
+	disable_irq(wo->mmio.irq);
+	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
+	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
+	mtk_wed_wo_queue_free(wo, &wo->q_tx);
+	mtk_wed_wo_queue_free(wo, &wo->q_rx);
+}
+
+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
+{
+	struct mtk_wed_wo *wo;
+	int ret;
+
+	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
+	if (!wo)
+		return -ENOMEM;
+
+	hw->wed_wo = wo;
+	wo->hw = hw;
+
+	ret = mtk_wed_wo_hardware_init(wo);
+	if (ret)
+		return ret;
+
+	ret = mtk_wed_mcu_init(wo);
+	if (ret)
+		return ret;
+
+	return mtk_wed_wo_exception_init(wo);
+}
+
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
+{
+	struct mtk_wed_wo *wo = hw->wed_wo;
+
+	mtk_wed_wo_hw_deinit(wo);
+}
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -80,6 +80,54 @@ enum mtk_wed_dummy_cr_idx {
 #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK	BIT(5)
 #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK	BIT(0)
 
+#define MTK_WED_WO_RING_SIZE	256
+#define MTK_WED_WO_CMD_LEN	1504
+
+#define MTK_WED_WO_TXCH_NUM		0
+#define MTK_WED_WO_RXCH_NUM		1
+#define MTK_WED_WO_RXCH_WO_EXCEPTION	7
+
+#define MTK_WED_WO_TXCH_INT_MASK	BIT(0)
+#define MTK_WED_WO_RXCH_INT_MASK	BIT(1)
+#define MTK_WED_WO_EXCEPTION_INT_MASK	BIT(7)
+#define MTK_WED_WO_ALL_INT_MASK		(MTK_WED_WO_RXCH_INT_MASK |	\
+					 MTK_WED_WO_EXCEPTION_INT_MASK)
+
+#define MTK_WED_WO_CCIF_BUSY		0x004
+#define MTK_WED_WO_CCIF_START		0x008
+#define MTK_WED_WO_CCIF_TCHNUM		0x00c
+#define MTK_WED_WO_CCIF_RCHNUM		0x010
+#define MTK_WED_WO_CCIF_RCHNUM_MASK	GENMASK(7, 0)
+
+#define MTK_WED_WO_CCIF_ACK		0x014
+#define MTK_WED_WO_CCIF_IRQ0_MASK	0x018
+#define MTK_WED_WO_CCIF_IRQ1_MASK	0x01c
+#define MTK_WED_WO_CCIF_DUMMY1		0x020
+#define MTK_WED_WO_CCIF_DUMMY2		0x024
+#define MTK_WED_WO_CCIF_DUMMY3		0x028
+#define MTK_WED_WO_CCIF_DUMMY4		0x02c
+#define MTK_WED_WO_CCIF_SHADOW1		0x030
+#define MTK_WED_WO_CCIF_SHADOW2		0x034
+#define MTK_WED_WO_CCIF_SHADOW3		0x038
+#define MTK_WED_WO_CCIF_SHADOW4		0x03c
+#define MTK_WED_WO_CCIF_DUMMY5		0x050
+#define MTK_WED_WO_CCIF_DUMMY6		0x054
+#define MTK_WED_WO_CCIF_DUMMY7		0x058
+#define MTK_WED_WO_CCIF_DUMMY8		0x05c
+#define MTK_WED_WO_CCIF_SHADOW5		0x060
+#define MTK_WED_WO_CCIF_SHADOW6		0x064
+#define MTK_WED_WO_CCIF_SHADOW7		0x068
+#define MTK_WED_WO_CCIF_SHADOW8		0x06c
+
+#define MTK_WED_WO_CTL_SD_LEN1		GENMASK(13, 0)
+#define MTK_WED_WO_CTL_LAST_SEC1	BIT(14)
+#define MTK_WED_WO_CTL_BURST		BIT(15)
+#define MTK_WED_WO_CTL_SD_LEN0_SHIFT	16
+#define MTK_WED_WO_CTL_SD_LEN0		GENMASK(29, 16)
+#define MTK_WED_WO_CTL_LAST_SEC0	BIT(30)
+#define MTK_WED_WO_CTL_DMA_DONE		BIT(31)
+#define MTK_WED_WO_INFO_WINFO		GENMASK(15, 0)
+
 struct mtk_wed_wo_memory_region {
 	const char *name;
 	void __iomem *addr;
@@ -112,10 +160,53 @@ struct mtk_wed_fw_trailer {
 	u32 crc;
 };
 
+struct mtk_wed_wo_queue_regs {
+	u32 desc_base;
+	u32 ring_size;
+	u32 cpu_idx;
+	u32 dma_idx;
+};
+
+struct mtk_wed_wo_queue_desc {
+	__le32 buf0;
+	__le32 ctrl;
+	__le32 buf1;
+	__le32 info;
+	__le32 reserved[4];
+} __packed __aligned(32);
+
+struct mtk_wed_wo_queue_entry {
+	dma_addr_t addr;
+	void *buf;
+	u32 len;
+};
+
+struct mtk_wed_wo_queue {
+	struct mtk_wed_wo_queue_regs regs;
+
+	struct page_frag_cache cache;
+	spinlock_t lock;
+
+	struct mtk_wed_wo_queue_desc *desc;
+	dma_addr_t desc_dma;
+
+	struct mtk_wed_wo_queue_entry *entry;
+
+	u16 head;
+	u16 tail;
+	int n_desc;
+	int queued;
+	int buf_size;
+
+};
+
 struct mtk_wed_wo {
 	struct mtk_wed_hw *hw;
 	struct mtk_wed_wo_memory_region boot;
 
+	struct mtk_wed_wo_queue q_tx;
+	struct mtk_wed_wo_queue q_rx;
+
 	struct {
 		struct mutex mutex;
 		int timeout;
@@ -124,6 +215,15 @@ struct mtk_wed_wo {
 		struct sk_buff_head res_q;
 		wait_queue_head_t wait;
 	} mcu;
+
+	struct {
+		struct regmap *regs;
+
+		spinlock_t lock;
+		struct tasklet_struct irq_tasklet;
+		int irq;
+		u32 irq_mask;
+	} mmio;
 };
 
 static inline int
@@ -146,5 +246,9 @@ void mtk_wed_mcu_rx_unsolicited_event(st
 int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
 			 const void *data, int len, bool wait_resp);
 int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
+			    struct sk_buff *skb);
 
 #endif /* __MTK_WED_WO_H */