mirror of
https://github.com/openwrt/openwrt.git
synced 2024-12-22 06:57:57 +00:00
7efec0acca
Manually rebased:
bcm4908/patches-5.15/071-v6.1-0001-net-broadcom-bcm4908_enet-handle-EPROBE_DEFER-when-g.patch
bcm53xx/patches-5.15/180-usb-xhci-add-support-for-performing-fake-doorbell.patch
ipq40xx/patches-5.15/902-dts-ipq4019-ap-dk04.1.patch[*]
bcm27xx/patches-5.15/950-0600-xhci-quirks-add-link-TRB-quirk-for-VL805.patch
bcm27xx/patches-5.15/950-0606-usb-xhci-add-VLI_TRB_CACHE_BUG-quirk.patch
bcm27xx/patches-5.15/950-0717-usb-xhci-add-a-quirk-for-Superspeed-bulk-OUT-transfe.patch
Removed upstreamed:
backport-5.15/735-v6.5-net-bgmac-postpone-turning-IRQs-off-to-avoid-SoC-han.patch[1]
backport-5.15/817-v6.5-01-leds-trigger-netdev-Recheck-NETDEV_LED_MODE_LINKUP-o.patch[2]
pending-5.15/143-jffs2-reduce-stack-usage-in-jffs2_build_xattr_subsys.patch[3]
pending-5.15/160-workqueue-fix-enum-type-for-gcc-13.patch[4]
bcm53xx/patches-5.15/036-v6.5-0003-ARM-dts-BCM5301X-Drop-clock-names-from-the-SPI-node.patch[5]
bcm53xx/patches-5.15/036-v6.5-0015-ARM-dts-BCM5301X-fix-duplex-full-full-duplex.patch[6]
ipq807x/patches-5.15/0048-v6.1-clk-qcom-reset-Allow-specifying-custom-reset-delay.patch[7]
ipq807x/patches-5.15/0049-v6.2-clk-qcom-reset-support-resetting-multiple-bits.patch[8]
All other patches automatically rebased.
1. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v5.15.123&id=02474292a44205c1eb5a03634ead155a3c9134f4
2. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v5.15.123&id=86b93cbfe104e99fd3d25a49748b99fb88101573
3. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v5.15.123&id=79b9ab357b6f5675007f4c02ff8765cbd8dc06a2
4. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v5.15.123&id=d528faa9e828b9fc46dfb684a2a9fd8c2e860ed8
5. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v5.15.123&id=5899bc4058e89d5110a23797ff94439c53b77c25
6. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v5.15.123&id=95afd2c7c7d26087730dc938709e025a303e5499
7. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v5.15.123&id=40844343a8853a08b049d50c967e2a1e28f0ece6
8. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v5.15.123&id=6ad5ded420f5d96f7c65b68135f5787a1c7e58d7
Build system: x86/64
Build-tested: ramips/tplink_archer-a6-v3
Run-tested: ramips/tplink_archer-a6-v3
Co-authored-by: Hauke Mehrtens <hauke@hauke-m.de>
Signed-off-by: John Audia <therealgraysky@proton.me>
[rebased ipq40xx/patches-5.15/902-dts-ipq4019-ap-dk04.1.patch]
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
(cherry picked from commit 8590531048)
[Refreshed on top of openwrt-23.05]
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
1162 lines
38 KiB
Diff
1162 lines
38 KiB
Diff
From ba39b344e9240a4a5fd4ab8178200b85cd1809da Mon Sep 17 00:00:00 2001
|
|
From: Christian Marangi <ansuelsmth@gmail.com>
|
|
Date: Sat, 23 Jul 2022 16:29:32 +0200
|
|
Subject: [PATCH 4/5] net: ethernet: stmicro: stmmac: generate stmmac dma conf
|
|
before open
|
|
|
|
Rework the driver to generate the stmmac dma_conf before stmmac_open.
|
|
This permits a function to first check if it's possible to allocate a
|
|
new dma_config and then pass it directly to __stmmac_open and "open" the
|
|
interface with the new configuration.
|
|
|
|
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
|
|
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
|
|
---
|
|
.../net/ethernet/stmicro/stmmac/stmmac_main.c | 462 +++++++++++-------
|
|
1 file changed, 289 insertions(+), 173 deletions(-)
|
|
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
@@ -1300,7 +1300,8 @@ static int stmmac_phy_setup(struct stmma
|
|
return 0;
|
|
}
|
|
|
|
-static void stmmac_display_rx_rings(struct stmmac_priv *priv)
|
|
+static void stmmac_display_rx_rings(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
u32 rx_cnt = priv->plat->rx_queues_to_use;
|
|
unsigned int desc_size;
|
|
@@ -1309,7 +1310,7 @@ static void stmmac_display_rx_rings(stru
|
|
|
|
/* Display RX rings */
|
|
for (queue = 0; queue < rx_cnt; queue++) {
|
|
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
|
|
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
|
|
|
|
pr_info("\tRX Queue %u rings\n", queue);
|
|
|
|
@@ -1322,12 +1323,13 @@ static void stmmac_display_rx_rings(stru
|
|
}
|
|
|
|
/* Display RX ring */
|
|
- stmmac_display_ring(priv, head_rx, priv->dma_conf.dma_rx_size, true,
|
|
+ stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
|
|
rx_q->dma_rx_phy, desc_size);
|
|
}
|
|
}
|
|
|
|
-static void stmmac_display_tx_rings(struct stmmac_priv *priv)
|
|
+static void stmmac_display_tx_rings(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
u32 tx_cnt = priv->plat->tx_queues_to_use;
|
|
unsigned int desc_size;
|
|
@@ -1336,7 +1338,7 @@ static void stmmac_display_tx_rings(stru
|
|
|
|
/* Display TX rings */
|
|
for (queue = 0; queue < tx_cnt; queue++) {
|
|
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
|
|
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
|
|
|
|
pr_info("\tTX Queue %d rings\n", queue);
|
|
|
|
@@ -1351,18 +1353,19 @@ static void stmmac_display_tx_rings(stru
|
|
desc_size = sizeof(struct dma_desc);
|
|
}
|
|
|
|
- stmmac_display_ring(priv, head_tx, priv->dma_conf.dma_tx_size, false,
|
|
+ stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
|
|
tx_q->dma_tx_phy, desc_size);
|
|
}
|
|
}
|
|
|
|
-static void stmmac_display_rings(struct stmmac_priv *priv)
|
|
+static void stmmac_display_rings(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
/* Display RX ring */
|
|
- stmmac_display_rx_rings(priv);
|
|
+ stmmac_display_rx_rings(priv, dma_conf);
|
|
|
|
/* Display TX ring */
|
|
- stmmac_display_tx_rings(priv);
|
|
+ stmmac_display_tx_rings(priv, dma_conf);
|
|
}
|
|
|
|
static int stmmac_set_bfsize(int mtu, int bufsize)
|
|
@@ -1386,44 +1389,50 @@ static int stmmac_set_bfsize(int mtu, in
|
|
/**
|
|
* stmmac_clear_rx_descriptors - clear RX descriptors
|
|
* @priv: driver private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @queue: RX queue index
|
|
* Description: this function is called to clear the RX descriptors
|
|
* in case of both basic and extended descriptors are used.
|
|
*/
|
|
-static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
|
|
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue)
|
|
{
|
|
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
|
|
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
|
|
int i;
|
|
|
|
/* Clear the RX descriptors */
|
|
- for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
|
|
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
|
|
if (priv->extend_desc)
|
|
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
|
|
priv->use_riwt, priv->mode,
|
|
- (i == priv->dma_conf.dma_rx_size - 1),
|
|
- priv->dma_conf.dma_buf_sz);
|
|
+ (i == dma_conf->dma_rx_size - 1),
|
|
+ dma_conf->dma_buf_sz);
|
|
else
|
|
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
|
|
priv->use_riwt, priv->mode,
|
|
- (i == priv->dma_conf.dma_rx_size - 1),
|
|
- priv->dma_conf.dma_buf_sz);
|
|
+ (i == dma_conf->dma_rx_size - 1),
|
|
+ dma_conf->dma_buf_sz);
|
|
}
|
|
|
|
/**
|
|
* stmmac_clear_tx_descriptors - clear tx descriptors
|
|
* @priv: driver private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @queue: TX queue index.
|
|
* Description: this function is called to clear the TX descriptors
|
|
* in case of both basic and extended descriptors are used.
|
|
*/
|
|
-static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
|
|
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue)
|
|
{
|
|
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
|
|
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
|
|
int i;
|
|
|
|
/* Clear the TX descriptors */
|
|
- for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
|
|
- int last = (i == (priv->dma_conf.dma_tx_size - 1));
|
|
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
|
|
+ int last = (i == (dma_conf->dma_tx_size - 1));
|
|
struct dma_desc *p;
|
|
|
|
if (priv->extend_desc)
|
|
@@ -1440,10 +1449,12 @@ static void stmmac_clear_tx_descriptors(
|
|
/**
|
|
* stmmac_clear_descriptors - clear descriptors
|
|
* @priv: driver private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* Description: this function is called to clear the TX and RX descriptors
|
|
* in case of both basic and extended descriptors are used.
|
|
*/
|
|
-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
|
|
+static void stmmac_clear_descriptors(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
|
|
u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
|
|
@@ -1451,16 +1462,17 @@ static void stmmac_clear_descriptors(str
|
|
|
|
/* Clear the RX descriptors */
|
|
for (queue = 0; queue < rx_queue_cnt; queue++)
|
|
- stmmac_clear_rx_descriptors(priv, queue);
|
|
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
|
|
|
|
/* Clear the TX descriptors */
|
|
for (queue = 0; queue < tx_queue_cnt; queue++)
|
|
- stmmac_clear_tx_descriptors(priv, queue);
|
|
+ stmmac_clear_tx_descriptors(priv, dma_conf, queue);
|
|
}
|
|
|
|
/**
|
|
* stmmac_init_rx_buffers - init the RX descriptor buffer.
|
|
* @priv: driver private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @p: descriptor pointer
|
|
* @i: descriptor index
|
|
* @flags: gfp flag
|
|
@@ -1468,10 +1480,12 @@ static void stmmac_clear_descriptors(str
|
|
* Description: this function is called to allocate a receive buffer, perform
|
|
* the DMA mapping and init the descriptor.
|
|
*/
|
|
-static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
|
|
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ struct dma_desc *p,
|
|
int i, gfp_t flags, u32 queue)
|
|
{
|
|
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
|
|
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
|
|
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
|
|
|
|
if (!buf->page) {
|
|
@@ -1496,7 +1510,7 @@ static int stmmac_init_rx_buffers(struct
|
|
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
|
|
|
|
stmmac_set_desc_addr(priv, p, buf->addr);
|
|
- if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
|
|
+ if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
|
|
stmmac_init_desc3(priv, p);
|
|
|
|
return 0;
|
|
@@ -1505,12 +1519,13 @@ static int stmmac_init_rx_buffers(struct
|
|
/**
|
|
* stmmac_free_rx_buffer - free RX dma buffers
|
|
* @priv: private structure
|
|
- * @queue: RX queue index
|
|
+ * @rx_q: RX queue
|
|
* @i: buffer index.
|
|
*/
|
|
-static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
|
|
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
|
|
+ struct stmmac_rx_queue *rx_q,
|
|
+ int i)
|
|
{
|
|
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
|
|
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
|
|
|
|
if (buf->page)
|
|
@@ -1525,12 +1540,15 @@ static void stmmac_free_rx_buffer(struct
|
|
/**
|
|
* stmmac_free_tx_buffer - free RX dma buffers
|
|
* @priv: private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @queue: RX queue index
|
|
* @i: buffer index.
|
|
*/
|
|
-static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
|
|
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue, int i)
|
|
{
|
|
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
|
|
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
|
|
|
|
if (tx_q->tx_skbuff_dma[i].buf &&
|
|
tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
|
|
@@ -1569,23 +1587,28 @@ static void stmmac_free_tx_buffer(struct
|
|
/**
|
|
* dma_free_rx_skbufs - free RX dma buffers
|
|
* @priv: private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @queue: RX queue index
|
|
*/
|
|
-static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
|
|
+static void dma_free_rx_skbufs(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue)
|
|
{
|
|
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
|
|
int i;
|
|
|
|
- for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
|
|
- stmmac_free_rx_buffer(priv, queue, i);
|
|
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
|
|
+ stmmac_free_rx_buffer(priv, rx_q, i);
|
|
}
|
|
|
|
-static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
|
|
- gfp_t flags)
|
|
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue, gfp_t flags)
|
|
{
|
|
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
|
|
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
|
|
int i;
|
|
|
|
- for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
|
|
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
|
|
struct dma_desc *p;
|
|
int ret;
|
|
|
|
@@ -1594,7 +1617,7 @@ static int stmmac_alloc_rx_buffers(struc
|
|
else
|
|
p = rx_q->dma_rx + i;
|
|
|
|
- ret = stmmac_init_rx_buffers(priv, p, i, flags,
|
|
+ ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
|
|
queue);
|
|
if (ret)
|
|
return ret;
|
|
@@ -1608,14 +1631,17 @@ static int stmmac_alloc_rx_buffers(struc
|
|
/**
|
|
* dma_free_rx_xskbufs - free RX dma buffers from XSK pool
|
|
* @priv: private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @queue: RX queue index
|
|
*/
|
|
-static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
|
|
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue)
|
|
{
|
|
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
|
|
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
|
|
int i;
|
|
|
|
- for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
|
|
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
|
|
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
|
|
|
|
if (!buf->xdp)
|
|
@@ -1626,12 +1652,14 @@ static void dma_free_rx_xskbufs(struct s
|
|
}
|
|
}
|
|
|
|
-static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
|
|
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue)
|
|
{
|
|
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
|
|
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
|
|
int i;
|
|
|
|
- for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
|
|
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
|
|
struct stmmac_rx_buffer *buf;
|
|
dma_addr_t dma_addr;
|
|
struct dma_desc *p;
|
|
@@ -1666,22 +1694,25 @@ static struct xsk_buff_pool *stmmac_get_
|
|
/**
|
|
* __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
|
|
* @priv: driver private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @queue: RX queue index
|
|
* @flags: gfp flag.
|
|
* Description: this function initializes the DMA RX descriptors
|
|
* and allocates the socket buffers. It supports the chained and ring
|
|
* modes.
|
|
*/
|
|
-static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
|
|
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue, gfp_t flags)
|
|
{
|
|
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
|
|
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
|
|
int ret;
|
|
|
|
netif_dbg(priv, probe, priv->dev,
|
|
"(%s) dma_rx_phy=0x%08x\n", __func__,
|
|
(u32)rx_q->dma_rx_phy);
|
|
|
|
- stmmac_clear_rx_descriptors(priv, queue);
|
|
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
|
|
|
|
xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
|
|
|
|
@@ -1708,9 +1739,9 @@ static int __init_dma_rx_desc_rings(stru
|
|
/* RX XDP ZC buffer pool may not be populated, e.g.
|
|
* xdpsock TX-only.
|
|
*/
|
|
- stmmac_alloc_rx_buffers_zc(priv, queue);
|
|
+ stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
|
|
} else {
|
|
- ret = stmmac_alloc_rx_buffers(priv, queue, flags);
|
|
+ ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
|
|
if (ret < 0)
|
|
return -ENOMEM;
|
|
}
|
|
@@ -1720,17 +1751,19 @@ static int __init_dma_rx_desc_rings(stru
|
|
if (priv->extend_desc)
|
|
stmmac_mode_init(priv, rx_q->dma_erx,
|
|
rx_q->dma_rx_phy,
|
|
- priv->dma_conf.dma_rx_size, 1);
|
|
+ dma_conf->dma_rx_size, 1);
|
|
else
|
|
stmmac_mode_init(priv, rx_q->dma_rx,
|
|
rx_q->dma_rx_phy,
|
|
- priv->dma_conf.dma_rx_size, 0);
|
|
+ dma_conf->dma_rx_size, 0);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
|
|
+static int init_dma_rx_desc_rings(struct net_device *dev,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ gfp_t flags)
|
|
{
|
|
struct stmmac_priv *priv = netdev_priv(dev);
|
|
u32 rx_count = priv->plat->rx_queues_to_use;
|
|
@@ -1742,7 +1775,7 @@ static int init_dma_rx_desc_rings(struct
|
|
"SKB addresses:\nskb\t\tskb data\tdma data\n");
|
|
|
|
for (queue = 0; queue < rx_count; queue++) {
|
|
- ret = __init_dma_rx_desc_rings(priv, queue, flags);
|
|
+ ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
|
|
if (ret)
|
|
goto err_init_rx_buffers;
|
|
}
|
|
@@ -1751,12 +1784,12 @@ static int init_dma_rx_desc_rings(struct
|
|
|
|
err_init_rx_buffers:
|
|
while (queue >= 0) {
|
|
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
|
|
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
|
|
|
|
if (rx_q->xsk_pool)
|
|
- dma_free_rx_xskbufs(priv, queue);
|
|
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
|
|
else
|
|
- dma_free_rx_skbufs(priv, queue);
|
|
+ dma_free_rx_skbufs(priv, dma_conf, queue);
|
|
|
|
rx_q->buf_alloc_num = 0;
|
|
rx_q->xsk_pool = NULL;
|
|
@@ -1773,14 +1806,17 @@ err_init_rx_buffers:
|
|
/**
|
|
* __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
|
|
* @priv: driver private structure
|
|
- * @queue : TX queue index
|
|
+ * @dma_conf: structure to take the dma data
|
|
+ * @queue: TX queue index
|
|
* Description: this function initializes the DMA TX descriptors
|
|
* and allocates the socket buffers. It supports the chained and ring
|
|
* modes.
|
|
*/
|
|
-static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
|
|
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue)
|
|
{
|
|
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
|
|
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
|
|
int i;
|
|
|
|
netif_dbg(priv, probe, priv->dev,
|
|
@@ -1792,16 +1828,16 @@ static int __init_dma_tx_desc_rings(stru
|
|
if (priv->extend_desc)
|
|
stmmac_mode_init(priv, tx_q->dma_etx,
|
|
tx_q->dma_tx_phy,
|
|
- priv->dma_conf.dma_tx_size, 1);
|
|
+ dma_conf->dma_tx_size, 1);
|
|
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
|
|
stmmac_mode_init(priv, tx_q->dma_tx,
|
|
tx_q->dma_tx_phy,
|
|
- priv->dma_conf.dma_tx_size, 0);
|
|
+ dma_conf->dma_tx_size, 0);
|
|
}
|
|
|
|
tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
|
|
|
|
- for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
|
|
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
|
|
struct dma_desc *p;
|
|
|
|
if (priv->extend_desc)
|
|
@@ -1823,7 +1859,8 @@ static int __init_dma_tx_desc_rings(stru
|
|
return 0;
|
|
}
|
|
|
|
-static int init_dma_tx_desc_rings(struct net_device *dev)
|
|
+static int init_dma_tx_desc_rings(struct net_device *dev,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
struct stmmac_priv *priv = netdev_priv(dev);
|
|
u32 tx_queue_cnt;
|
|
@@ -1832,7 +1869,7 @@ static int init_dma_tx_desc_rings(struct
|
|
tx_queue_cnt = priv->plat->tx_queues_to_use;
|
|
|
|
for (queue = 0; queue < tx_queue_cnt; queue++)
|
|
- __init_dma_tx_desc_rings(priv, queue);
|
|
+ __init_dma_tx_desc_rings(priv, dma_conf, queue);
|
|
|
|
return 0;
|
|
}
|
|
@@ -1840,26 +1877,29 @@ static int init_dma_tx_desc_rings(struct
|
|
/**
|
|
* init_dma_desc_rings - init the RX/TX descriptor rings
|
|
* @dev: net device structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @flags: gfp flag.
|
|
* Description: this function initializes the DMA RX/TX descriptors
|
|
* and allocates the socket buffers. It supports the chained and ring
|
|
* modes.
|
|
*/
|
|
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
|
|
+static int init_dma_desc_rings(struct net_device *dev,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ gfp_t flags)
|
|
{
|
|
struct stmmac_priv *priv = netdev_priv(dev);
|
|
int ret;
|
|
|
|
- ret = init_dma_rx_desc_rings(dev, flags);
|
|
+ ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
|
|
if (ret)
|
|
return ret;
|
|
|
|
- ret = init_dma_tx_desc_rings(dev);
|
|
+ ret = init_dma_tx_desc_rings(dev, dma_conf);
|
|
|
|
- stmmac_clear_descriptors(priv);
|
|
+ stmmac_clear_descriptors(priv, dma_conf);
|
|
|
|
if (netif_msg_hw(priv))
|
|
- stmmac_display_rings(priv);
|
|
+ stmmac_display_rings(priv, dma_conf);
|
|
|
|
return ret;
|
|
}
|
|
@@ -1867,17 +1907,20 @@ static int init_dma_desc_rings(struct ne
|
|
/**
|
|
* dma_free_tx_skbufs - free TX dma buffers
|
|
* @priv: private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @queue: TX queue index
|
|
*/
|
|
-static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
|
|
+static void dma_free_tx_skbufs(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue)
|
|
{
|
|
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
|
|
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
|
|
int i;
|
|
|
|
tx_q->xsk_frames_done = 0;
|
|
|
|
- for (i = 0; i < priv->dma_conf.dma_tx_size; i++)
|
|
- stmmac_free_tx_buffer(priv, queue, i);
|
|
+ for (i = 0; i < dma_conf->dma_tx_size; i++)
|
|
+ stmmac_free_tx_buffer(priv, dma_conf, queue, i);
|
|
|
|
if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
|
|
xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
|
|
@@ -1896,34 +1939,37 @@ static void stmmac_free_tx_skbufs(struct
|
|
u32 queue;
|
|
|
|
for (queue = 0; queue < tx_queue_cnt; queue++)
|
|
- dma_free_tx_skbufs(priv, queue);
|
|
+ dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
|
|
}
|
|
|
|
/**
|
|
* __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
|
|
* @priv: private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @queue: RX queue index
|
|
*/
|
|
-static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
|
|
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue)
|
|
{
|
|
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
|
|
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
|
|
|
|
/* Release the DMA RX socket buffers */
|
|
if (rx_q->xsk_pool)
|
|
- dma_free_rx_xskbufs(priv, queue);
|
|
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
|
|
else
|
|
- dma_free_rx_skbufs(priv, queue);
|
|
+ dma_free_rx_skbufs(priv, dma_conf, queue);
|
|
|
|
rx_q->buf_alloc_num = 0;
|
|
rx_q->xsk_pool = NULL;
|
|
|
|
/* Free DMA regions of consistent memory previously allocated */
|
|
if (!priv->extend_desc)
|
|
- dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
|
|
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
|
|
sizeof(struct dma_desc),
|
|
rx_q->dma_rx, rx_q->dma_rx_phy);
|
|
else
|
|
- dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
|
|
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
|
|
sizeof(struct dma_extended_desc),
|
|
rx_q->dma_erx, rx_q->dma_rx_phy);
|
|
|
|
@@ -1935,29 +1981,33 @@ static void __free_dma_rx_desc_resources
|
|
page_pool_destroy(rx_q->page_pool);
|
|
}
|
|
|
|
-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
|
|
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
u32 rx_count = priv->plat->rx_queues_to_use;
|
|
u32 queue;
|
|
|
|
/* Free RX queue resources */
|
|
for (queue = 0; queue < rx_count; queue++)
|
|
- __free_dma_rx_desc_resources(priv, queue);
|
|
+ __free_dma_rx_desc_resources(priv, dma_conf, queue);
|
|
}
|
|
|
|
/**
|
|
* __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
|
|
* @priv: private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @queue: TX queue index
|
|
*/
|
|
-static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
|
|
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue)
|
|
{
|
|
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
|
|
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
|
|
size_t size;
|
|
void *addr;
|
|
|
|
/* Release the DMA TX socket buffers */
|
|
- dma_free_tx_skbufs(priv, queue);
|
|
+ dma_free_tx_skbufs(priv, dma_conf, queue);
|
|
|
|
if (priv->extend_desc) {
|
|
size = sizeof(struct dma_extended_desc);
|
|
@@ -1970,7 +2020,7 @@ static void __free_dma_tx_desc_resources
|
|
addr = tx_q->dma_tx;
|
|
}
|
|
|
|
- size *= priv->dma_conf.dma_tx_size;
|
|
+ size *= dma_conf->dma_tx_size;
|
|
|
|
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
|
|
|
|
@@ -1978,28 +2028,32 @@ static void __free_dma_tx_desc_resources
|
|
kfree(tx_q->tx_skbuff);
|
|
}
|
|
|
|
-static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
|
|
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
u32 tx_count = priv->plat->tx_queues_to_use;
|
|
u32 queue;
|
|
|
|
/* Free TX queue resources */
|
|
for (queue = 0; queue < tx_count; queue++)
|
|
- __free_dma_tx_desc_resources(priv, queue);
|
|
+ __free_dma_tx_desc_resources(priv, dma_conf, queue);
|
|
}
|
|
|
|
/**
|
|
* __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
|
|
* @priv: private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @queue: RX queue index
|
|
* Description: according to which descriptor can be used (extend or basic)
|
|
* this function allocates the resources for TX and RX paths. In case of
|
|
* reception, for example, it pre-allocated the RX socket buffer in order to
|
|
* allow zero-copy mechanism.
|
|
*/
|
|
-static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
|
|
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue)
|
|
{
|
|
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
|
|
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
|
|
struct stmmac_channel *ch = &priv->channel[queue];
|
|
bool xdp_prog = stmmac_xdp_is_enabled(priv);
|
|
struct page_pool_params pp_params = { 0 };
|
|
@@ -2011,8 +2065,8 @@ static int __alloc_dma_rx_desc_resources
|
|
rx_q->priv_data = priv;
|
|
|
|
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
|
|
- pp_params.pool_size = priv->dma_conf.dma_rx_size;
|
|
- num_pages = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE);
|
|
+ pp_params.pool_size = dma_conf->dma_rx_size;
|
|
+ num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
|
|
pp_params.order = ilog2(num_pages);
|
|
pp_params.nid = dev_to_node(priv->device);
|
|
pp_params.dev = priv->device;
|
|
@@ -2027,7 +2081,7 @@ static int __alloc_dma_rx_desc_resources
|
|
return ret;
|
|
}
|
|
|
|
- rx_q->buf_pool = kcalloc(priv->dma_conf.dma_rx_size,
|
|
+ rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
|
|
sizeof(*rx_q->buf_pool),
|
|
GFP_KERNEL);
|
|
if (!rx_q->buf_pool)
|
|
@@ -2035,7 +2089,7 @@ static int __alloc_dma_rx_desc_resources
|
|
|
|
if (priv->extend_desc) {
|
|
rx_q->dma_erx = dma_alloc_coherent(priv->device,
|
|
- priv->dma_conf.dma_rx_size *
|
|
+ dma_conf->dma_rx_size *
|
|
sizeof(struct dma_extended_desc),
|
|
&rx_q->dma_rx_phy,
|
|
GFP_KERNEL);
|
|
@@ -2044,7 +2098,7 @@ static int __alloc_dma_rx_desc_resources
|
|
|
|
} else {
|
|
rx_q->dma_rx = dma_alloc_coherent(priv->device,
|
|
- priv->dma_conf.dma_rx_size *
|
|
+ dma_conf->dma_rx_size *
|
|
sizeof(struct dma_desc),
|
|
&rx_q->dma_rx_phy,
|
|
GFP_KERNEL);
|
|
@@ -2069,7 +2123,8 @@ static int __alloc_dma_rx_desc_resources
|
|
return 0;
|
|
}
|
|
|
|
-static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
|
|
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
u32 rx_count = priv->plat->rx_queues_to_use;
|
|
u32 queue;
|
|
@@ -2077,7 +2132,7 @@ static int alloc_dma_rx_desc_resources(s
|
|
|
|
/* RX queues buffers and DMA */
|
|
for (queue = 0; queue < rx_count; queue++) {
|
|
- ret = __alloc_dma_rx_desc_resources(priv, queue);
|
|
+ ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
|
|
if (ret)
|
|
goto err_dma;
|
|
}
|
|
@@ -2085,7 +2140,7 @@ static int alloc_dma_rx_desc_resources(s
|
|
return 0;
|
|
|
|
err_dma:
|
|
- free_dma_rx_desc_resources(priv);
|
|
+ free_dma_rx_desc_resources(priv, dma_conf);
|
|
|
|
return ret;
|
|
}
|
|
@@ -2093,28 +2148,31 @@ err_dma:
|
|
/**
|
|
* __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
|
|
* @priv: private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* @queue: TX queue index
|
|
* Description: according to which descriptor can be used (extend or basic)
|
|
* this function allocates the resources for TX and RX paths. In case of
|
|
* reception, for example, it pre-allocated the RX socket buffer in order to
|
|
* allow zero-copy mechanism.
|
|
*/
|
|
-static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
|
|
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf,
|
|
+ u32 queue)
|
|
{
|
|
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
|
|
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
|
|
size_t size;
|
|
void *addr;
|
|
|
|
tx_q->queue_index = queue;
|
|
tx_q->priv_data = priv;
|
|
|
|
- tx_q->tx_skbuff_dma = kcalloc(priv->dma_conf.dma_tx_size,
|
|
+ tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
|
|
sizeof(*tx_q->tx_skbuff_dma),
|
|
GFP_KERNEL);
|
|
if (!tx_q->tx_skbuff_dma)
|
|
return -ENOMEM;
|
|
|
|
- tx_q->tx_skbuff = kcalloc(priv->dma_conf.dma_tx_size,
|
|
+ tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
|
|
sizeof(struct sk_buff *),
|
|
GFP_KERNEL);
|
|
if (!tx_q->tx_skbuff)
|
|
@@ -2127,7 +2185,7 @@ static int __alloc_dma_tx_desc_resources
|
|
else
|
|
size = sizeof(struct dma_desc);
|
|
|
|
- size *= priv->dma_conf.dma_tx_size;
|
|
+ size *= dma_conf->dma_tx_size;
|
|
|
|
addr = dma_alloc_coherent(priv->device, size,
|
|
&tx_q->dma_tx_phy, GFP_KERNEL);
|
|
@@ -2144,7 +2202,8 @@ static int __alloc_dma_tx_desc_resources
|
|
return 0;
|
|
}
|
|
|
|
-static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
|
|
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
u32 tx_count = priv->plat->tx_queues_to_use;
|
|
u32 queue;
|
|
@@ -2152,7 +2211,7 @@ static int alloc_dma_tx_desc_resources(s
|
|
|
|
/* TX queues buffers and DMA */
|
|
for (queue = 0; queue < tx_count; queue++) {
|
|
- ret = __alloc_dma_tx_desc_resources(priv, queue);
|
|
+ ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
|
|
if (ret)
|
|
goto err_dma;
|
|
}
|
|
@@ -2160,27 +2219,29 @@ static int alloc_dma_tx_desc_resources(s
|
|
return 0;
|
|
|
|
err_dma:
|
|
- free_dma_tx_desc_resources(priv);
|
|
+ free_dma_tx_desc_resources(priv, dma_conf);
|
|
return ret;
|
|
}
|
|
|
|
/**
|
|
* alloc_dma_desc_resources - alloc TX/RX resources.
|
|
* @priv: private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
* Description: according to which descriptor can be used (extend or basic)
|
|
* this function allocates the resources for TX and RX paths. In case of
|
|
* reception, for example, it pre-allocated the RX socket buffer in order to
|
|
* allow zero-copy mechanism.
|
|
*/
|
|
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
|
|
+static int alloc_dma_desc_resources(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
/* RX Allocation */
|
|
- int ret = alloc_dma_rx_desc_resources(priv);
|
|
+ int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
|
|
|
|
if (ret)
|
|
return ret;
|
|
|
|
- ret = alloc_dma_tx_desc_resources(priv);
|
|
+ ret = alloc_dma_tx_desc_resources(priv, dma_conf);
|
|
|
|
return ret;
|
|
}
|
|
@@ -2188,16 +2249,18 @@ static int alloc_dma_desc_resources(stru
|
|
/**
|
|
* free_dma_desc_resources - free dma desc resources
|
|
* @priv: private structure
|
|
+ * @dma_conf: structure to take the dma data
|
|
*/
|
|
-static void free_dma_desc_resources(struct stmmac_priv *priv)
|
|
+static void free_dma_desc_resources(struct stmmac_priv *priv,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
/* Release the DMA TX socket buffers */
|
|
- free_dma_tx_desc_resources(priv);
|
|
+ free_dma_tx_desc_resources(priv, dma_conf);
|
|
|
|
/* Release the DMA RX socket buffers later
|
|
* to ensure all pending XDP_TX buffers are returned.
|
|
*/
|
|
- free_dma_rx_desc_resources(priv);
|
|
+ free_dma_rx_desc_resources(priv, dma_conf);
|
|
}
|
|
|
|
/**
|
|
@@ -2686,8 +2749,8 @@ static void stmmac_tx_err(struct stmmac_
|
|
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
|
|
|
|
stmmac_stop_tx_dma(priv, chan);
|
|
- dma_free_tx_skbufs(priv, chan);
|
|
- stmmac_clear_tx_descriptors(priv, chan);
|
|
+ dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
|
|
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
|
|
stmmac_reset_tx_queue(priv, chan);
|
|
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
|
|
tx_q->dma_tx_phy, chan);
|
|
@@ -3684,19 +3747,93 @@ static int stmmac_request_irq(struct net
|
|
}
|
|
|
|
/**
|
|
- * stmmac_open - open entry point of the driver
|
|
+ * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
|
|
+ * @priv: driver private structure
|
|
+ * @mtu: MTU to setup the dma queue and buf with
|
|
+ * Description: Allocate and generate a dma_conf based on the provided MTU.
|
|
+ * Allocate the Tx/Rx DMA queue and init them.
|
|
+ * Return value:
|
|
+ * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
|
|
+ */
|
|
+static struct stmmac_dma_conf *
|
|
+stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
|
|
+{
|
|
+ struct stmmac_dma_conf *dma_conf;
|
|
+ int chan, bfsize, ret;
|
|
+
|
|
+ dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
|
|
+ if (!dma_conf) {
|
|
+ netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
|
|
+ __func__);
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+ }
|
|
+
|
|
+ bfsize = stmmac_set_16kib_bfsize(priv, mtu);
|
|
+ if (bfsize < 0)
|
|
+ bfsize = 0;
|
|
+
|
|
+ if (bfsize < BUF_SIZE_16KiB)
|
|
+ bfsize = stmmac_set_bfsize(mtu, 0);
|
|
+
|
|
+ dma_conf->dma_buf_sz = bfsize;
|
|
+ /* Chose the tx/rx size from the already defined one in the
|
|
+ * priv struct. (if defined)
|
|
+ */
|
|
+ dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
|
|
+ dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
|
|
+
|
|
+ if (!dma_conf->dma_tx_size)
|
|
+ dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
|
|
+ if (!dma_conf->dma_rx_size)
|
|
+ dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
|
|
+
|
|
+ /* Earlier check for TBS */
|
|
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
|
|
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
|
|
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
|
|
+
|
|
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
|
|
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
|
|
+ }
|
|
+
|
|
+ ret = alloc_dma_desc_resources(priv, dma_conf);
|
|
+ if (ret < 0) {
|
|
+ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
|
|
+ __func__);
|
|
+ goto alloc_error;
|
|
+ }
|
|
+
|
|
+ ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
|
|
+ if (ret < 0) {
|
|
+ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
|
|
+ __func__);
|
|
+ goto init_error;
|
|
+ }
|
|
+
|
|
+ return dma_conf;
|
|
+
|
|
+init_error:
|
|
+ free_dma_desc_resources(priv, dma_conf);
|
|
+alloc_error:
|
|
+ kfree(dma_conf);
|
|
+ return ERR_PTR(ret);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * __stmmac_open - open entry point of the driver
|
|
* @dev : pointer to the device structure.
|
|
+ * @dma_conf : structure to take the dma data
|
|
* Description:
|
|
* This function is the open entry point of the driver.
|
|
* Return value:
|
|
* 0 on success and an appropriate (-)ve integer as defined in errno.h
|
|
* file on failure.
|
|
*/
|
|
-static int stmmac_open(struct net_device *dev)
|
|
+static int __stmmac_open(struct net_device *dev,
|
|
+ struct stmmac_dma_conf *dma_conf)
|
|
{
|
|
struct stmmac_priv *priv = netdev_priv(dev);
|
|
int mode = priv->plat->phy_interface;
|
|
- int bfsize = 0;
|
|
u32 chan;
|
|
int ret;
|
|
|
|
@@ -3723,45 +3860,10 @@ static int stmmac_open(struct net_device
|
|
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
|
|
priv->xstats.threshold = tc;
|
|
|
|
- bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
|
|
- if (bfsize < 0)
|
|
- bfsize = 0;
|
|
-
|
|
- if (bfsize < BUF_SIZE_16KiB)
|
|
- bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_conf.dma_buf_sz);
|
|
-
|
|
- priv->dma_conf.dma_buf_sz = bfsize;
|
|
- buf_sz = bfsize;
|
|
-
|
|
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
|
|
|
|
- if (!priv->dma_conf.dma_tx_size)
|
|
- priv->dma_conf.dma_tx_size = DMA_DEFAULT_TX_SIZE;
|
|
- if (!priv->dma_conf.dma_rx_size)
|
|
- priv->dma_conf.dma_rx_size = DMA_DEFAULT_RX_SIZE;
|
|
-
|
|
- /* Earlier check for TBS */
|
|
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
|
|
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
|
|
- int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
|
|
-
|
|
- /* Setup per-TXQ tbs flag before TX descriptor alloc */
|
|
- tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
|
|
- }
|
|
-
|
|
- ret = alloc_dma_desc_resources(priv);
|
|
- if (ret < 0) {
|
|
- netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
|
|
- __func__);
|
|
- goto dma_desc_error;
|
|
- }
|
|
-
|
|
- ret = init_dma_desc_rings(dev, GFP_KERNEL);
|
|
- if (ret < 0) {
|
|
- netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
|
|
- __func__);
|
|
- goto init_error;
|
|
- }
|
|
+ buf_sz = dma_conf->dma_buf_sz;
|
|
+ memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
|
|
|
|
if (priv->plat->serdes_powerup) {
|
|
ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
|
|
@@ -3804,14 +3906,28 @@ irq_error:
|
|
|
|
stmmac_hw_teardown(dev);
|
|
init_error:
|
|
- free_dma_desc_resources(priv);
|
|
-dma_desc_error:
|
|
+ free_dma_desc_resources(priv, &priv->dma_conf);
|
|
phylink_disconnect_phy(priv->phylink);
|
|
init_phy_error:
|
|
pm_runtime_put(priv->device);
|
|
return ret;
|
|
}
|
|
|
|
+static int stmmac_open(struct net_device *dev)
|
|
+{
|
|
+ struct stmmac_priv *priv = netdev_priv(dev);
|
|
+ struct stmmac_dma_conf *dma_conf;
|
|
+ int ret;
|
|
+
|
|
+ dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
|
|
+ if (IS_ERR(dma_conf))
|
|
+ return PTR_ERR(dma_conf);
|
|
+
|
|
+ ret = __stmmac_open(dev, dma_conf);
|
|
+ kfree(dma_conf);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
|
|
{
|
|
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
|
|
@@ -3858,7 +3974,7 @@ static int stmmac_release(struct net_dev
|
|
stmmac_stop_all_dma(priv);
|
|
|
|
/* Release and free the Rx/Tx resources */
|
|
- free_dma_desc_resources(priv);
|
|
+ free_dma_desc_resources(priv, &priv->dma_conf);
|
|
|
|
/* Disable the MAC Rx/Tx */
|
|
stmmac_mac_set(priv, priv->ioaddr, false);
|
|
@@ -6403,7 +6519,7 @@ void stmmac_disable_rx_queue(struct stmm
|
|
spin_unlock_irqrestore(&ch->lock, flags);
|
|
|
|
stmmac_stop_rx_dma(priv, queue);
|
|
- __free_dma_rx_desc_resources(priv, queue);
|
|
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
|
|
}
|
|
|
|
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
|
|
@@ -6414,21 +6530,21 @@ void stmmac_enable_rx_queue(struct stmma
|
|
u32 buf_size;
|
|
int ret;
|
|
|
|
- ret = __alloc_dma_rx_desc_resources(priv, queue);
|
|
+ ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
|
|
if (ret) {
|
|
netdev_err(priv->dev, "Failed to alloc RX desc.\n");
|
|
return;
|
|
}
|
|
|
|
- ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
|
|
+ ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
|
|
if (ret) {
|
|
- __free_dma_rx_desc_resources(priv, queue);
|
|
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
|
|
netdev_err(priv->dev, "Failed to init RX desc.\n");
|
|
return;
|
|
}
|
|
|
|
stmmac_reset_rx_queue(priv, queue);
|
|
- stmmac_clear_rx_descriptors(priv, queue);
|
|
+ stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
|
|
|
|
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
|
|
rx_q->dma_rx_phy, rx_q->queue_index);
|
|
@@ -6466,7 +6582,7 @@ void stmmac_disable_tx_queue(struct stmm
|
|
spin_unlock_irqrestore(&ch->lock, flags);
|
|
|
|
stmmac_stop_tx_dma(priv, queue);
|
|
- __free_dma_tx_desc_resources(priv, queue);
|
|
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
|
|
}
|
|
|
|
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
|
|
@@ -6476,21 +6592,21 @@ void stmmac_enable_tx_queue(struct stmma
|
|
unsigned long flags;
|
|
int ret;
|
|
|
|
- ret = __alloc_dma_tx_desc_resources(priv, queue);
|
|
+ ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
|
|
if (ret) {
|
|
netdev_err(priv->dev, "Failed to alloc TX desc.\n");
|
|
return;
|
|
}
|
|
|
|
- ret = __init_dma_tx_desc_rings(priv, queue);
|
|
+ ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
|
|
if (ret) {
|
|
- __free_dma_tx_desc_resources(priv, queue);
|
|
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
|
|
netdev_err(priv->dev, "Failed to init TX desc.\n");
|
|
return;
|
|
}
|
|
|
|
stmmac_reset_tx_queue(priv, queue);
|
|
- stmmac_clear_tx_descriptors(priv, queue);
|
|
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
|
|
|
|
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
|
|
tx_q->dma_tx_phy, tx_q->queue_index);
|
|
@@ -6530,7 +6646,7 @@ void stmmac_xdp_release(struct net_devic
|
|
stmmac_stop_all_dma(priv);
|
|
|
|
/* Release and free the Rx/Tx resources */
|
|
- free_dma_desc_resources(priv);
|
|
+ free_dma_desc_resources(priv, &priv->dma_conf);
|
|
|
|
/* Disable the MAC Rx/Tx */
|
|
stmmac_mac_set(priv, priv->ioaddr, false);
|
|
@@ -6555,14 +6671,14 @@ int stmmac_xdp_open(struct net_device *d
|
|
u32 chan;
|
|
int ret;
|
|
|
|
- ret = alloc_dma_desc_resources(priv);
|
|
+ ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
|
|
if (ret < 0) {
|
|
netdev_err(dev, "%s: DMA descriptors allocation failed\n",
|
|
__func__);
|
|
goto dma_desc_error;
|
|
}
|
|
|
|
- ret = init_dma_desc_rings(dev, GFP_KERNEL);
|
|
+ ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
|
|
if (ret < 0) {
|
|
netdev_err(dev, "%s: DMA descriptors initialization failed\n",
|
|
__func__);
|
|
@@ -6644,7 +6760,7 @@ irq_error:
|
|
|
|
stmmac_hw_teardown(dev);
|
|
init_error:
|
|
- free_dma_desc_resources(priv);
|
|
+ free_dma_desc_resources(priv, &priv->dma_conf);
|
|
dma_desc_error:
|
|
return ret;
|
|
}
|
|
@@ -7503,7 +7619,7 @@ int stmmac_resume(struct device *dev)
|
|
stmmac_reset_queues_param(priv);
|
|
|
|
stmmac_free_tx_skbufs(priv);
|
|
- stmmac_clear_descriptors(priv);
|
|
+ stmmac_clear_descriptors(priv, &priv->dma_conf);
|
|
|
|
stmmac_hw_setup(ndev, false);
|
|
stmmac_init_coalesce(priv);
|