mirror of https://github.com/openwrt/openwrt.git
synced 2024-12-20 14:13:16 +00:00
127 lines, 4.2 KiB (Diff)
From 73cc32aace5fe123182337c3abd769a1d6edd9fe Mon Sep 17 00:00:00 2001
From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Date: Wed, 4 Apr 2018 15:12:28 +0300
Subject: [PATCH] dpaa_eth: fix iova handling for sg frames

The driver relies on the no longer valid assumption that dma addresses
(iovas) are identical to physical addresses and uses phys_to_virt() to
make iova -> vaddr conversions. Fix this also for scatter-gather frames
using the iova -> phys conversion function added in the previous patch.
While at it, clean-up a redundant dpaa_bpid2pool() and pass the bp
as parameter.

Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Acked-by: Madalin Bucur <madalin.bucur@nxp.com>
[rebase]
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 41 +++++++++++++++-----------
 1 file changed, 24 insertions(+), 17 deletions(-)

--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1656,14 +1656,17 @@ static struct sk_buff *dpaa_cleanup_tx_f
 
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
-		dma_unmap_single(dev, addr,
-				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
-				 dma_dir);
 
 		/* The sgt buffer has been allocated with netdev_alloc_frag(),
 		 * it's from lowmem.
 		 */
-		sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+		sgt = phys_to_virt(dpaa_iova_to_phys(dev,
+						     addr +
+						     qm_fd_get_offset(fd)));
+
+		dma_unmap_single(dev, addr,
+				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+				 dma_dir);
 
 		/* sgt[0] is from lowmem, was dma_map_single()-ed */
 		dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
@@ -1702,7 +1705,7 @@ static struct sk_buff *dpaa_cleanup_tx_f
 	else
 #endif
 		/* Free the page frag that we allocated on Tx */
-		skb_free_frag(phys_to_virt(addr));
+		skb_free_frag(phys_to_virt(skbh));
 	}
 
 	return skb;
@@ -1760,14 +1763,14 @@ static struct sk_buff *contig_fd_to_skb(
  * The page fragment holding the S/G Table is recycled here.
  */
 static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
-				    const struct qm_fd *fd)
+				    const struct qm_fd *fd,
+				    struct dpaa_bp *dpaa_bp,
+				    void *vaddr)
 {
 	ssize_t fd_off = qm_fd_get_offset(fd);
-	dma_addr_t addr = qm_fd_addr(fd);
 	const struct qm_sg_entry *sgt;
 	struct page *page, *head_page;
-	struct dpaa_bp *dpaa_bp;
-	void *vaddr, *sg_vaddr;
+	void *sg_vaddr;
 	int frag_off, frag_len;
 	struct sk_buff *skb;
 	dma_addr_t sg_addr;
@@ -1776,7 +1779,6 @@ static struct sk_buff *sg_fd_to_skb(cons
 	int *count_ptr;
 	int i;
 
-	vaddr = phys_to_virt(addr);
 	WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
 
 	/* Iterate through the SGT entries and add data buffers to the skb */
@@ -1787,14 +1789,18 @@ static struct sk_buff *sg_fd_to_skb(cons
 		WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
 
 		sg_addr = qm_sg_addr(&sgt[i]);
-		sg_vaddr = phys_to_virt(sg_addr);
-		WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
-				    SMP_CACHE_BYTES));
 
 		/* We may use multiple Rx pools */
 		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
-		if (!dpaa_bp)
+		if (!dpaa_bp) {
+			pr_info("%s: fail to get dpaa_bp for sg bpid %d\n",
+				__func__, sgt[i].bpid);
 			goto free_buffers;
+		}
+		sg_vaddr = phys_to_virt(dpaa_iova_to_phys(dpaa_bp->dev,
+							  sg_addr));
+		WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
+				    SMP_CACHE_BYTES));
 
 		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
 		dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
@@ -1866,10 +1872,11 @@ free_buffers:
 	/* free all the SG entries */
 	for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
 		sg_addr = qm_sg_addr(&sgt[i]);
-		sg_vaddr = phys_to_virt(sg_addr);
-		skb_free_frag(sg_vaddr);
 		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
 		if (dpaa_bp) {
+			sg_addr = dpaa_iova_to_phys(dpaa_bp->dev, sg_addr);
+			sg_vaddr = phys_to_virt(sg_addr);
+			skb_free_frag(sg_vaddr);
 			count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
 			(*count_ptr)--;
 		}
@@ -2498,7 +2505,7 @@ static enum qman_cb_dqrr_result rx_defau
 	if (likely(fd_format == qm_fd_contig))
 		skb = contig_fd_to_skb(priv, fd, dpaa_bp, vaddr);
 	else
-		skb = sg_fd_to_skb(priv, fd);
+		skb = sg_fd_to_skb(priv, fd, dpaa_bp, vaddr);
 	if (!skb)
 		return qman_cb_dqrr_consume;
 