openwrt/target/linux/bcm27xx/patches-5.4/950-0439-dma-direct-turn-ARCH_ZONE_DMA_BITS-into-a-variable.patch
Álvaro Fernández Rojas 62b7f5931c bcm27xx: import latest patches from the RPi foundation

bcm2708: boot tested on RPi B+ v1.2
bcm2709: boot tested on RPi 3B v1.2 and RPi 4B v1.1 4G
bcm2710: boot tested on RPi 3B v1.2
bcm2711: boot tested on RPi 4B v1.1 4G

Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com>
(cherry-picked from commit f07e572f64)
2021-02-19 07:17:21 +01:00

From 78b03f0aef9f67c4db700ba5dc56e2c8f562d181 Mon Sep 17 00:00:00 2001
From: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Date: Mon, 14 Oct 2019 20:31:03 +0200
Subject: [PATCH] dma/direct: turn ARCH_ZONE_DMA_BITS into a variable

commit 8b5369ea580964dbc982781bfb9fb93459fc5e8d upstream.

Some architectures, notably ARM, are interested in tweaking this
depending on their runtime DMA addressing limitations.

Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/include/asm/page.h   |  2 --
 arch/arm64/mm/init.c            |  9 +++++++--
 arch/powerpc/include/asm/page.h |  9 ---------
 arch/powerpc/mm/mem.c           | 20 +++++++++++++++-----
 arch/s390/include/asm/page.h    |  2 --
 arch/s390/mm/init.c             |  1 +
 include/linux/dma-direct.h      |  2 ++
 kernel/dma/direct.c             | 13 ++++++-------
 8 files changed, 31 insertions(+), 27 deletions(-)

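Not part of the patch itself: a minimal standalone sketch of the mechanism the diff below implements, written as a userspace mock with hypothetical helpers (arm64_memblock_init_mock(), optimal_zone()). The generic default stays at 24 bits (the traditional 16 MiB ZONE_DMA), arch boot code overrides zone_dma_bits at runtime, and dma-direct derives its zone choice from the variable instead of the old compile-time ARCH_ZONE_DMA_BITS. DMA_BIT_MASK mirrors the kernel macro; everything else is illustrative scaffolding.

/*
 * Illustrative userspace mock, not kernel code: shows the runtime override
 * that replaces the old compile-time ARCH_ZONE_DMA_BITS constant.
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Generic default, as in kernel/dma/direct.c: 16 MiB ZONE_DMA (24 bits). */
static unsigned int zone_dma_bits = 24;

/* Arch boot code tweaks the limit, e.g. arm64 caps ZONE_DMA at 1 GiB. */
static void arm64_memblock_init_mock(void)
{
	zone_dma_bits = 30;	/* ARM64_ZONE_DMA_BITS in the diff below */
}

/* dma-direct style zone selection, driven by the runtime variable. */
static const char *optimal_zone(uint64_t phys_mask)
{
	if (phys_mask <= DMA_BIT_MASK(zone_dma_bits))
		return "GFP_DMA";
	if (phys_mask <= DMA_BIT_MASK(32))
		return "GFP_DMA32";
	return "GFP_KERNEL";
}

int main(void)
{
	uint64_t mask = DMA_BIT_MASK(30);	/* a device limited to 30-bit DMA */

	printf("default (24 bits): %s\n", optimal_zone(mask));	/* GFP_DMA32 */
	arm64_memblock_init_mock();
	printf("after override:    %s\n", optimal_zone(mask));	/* GFP_DMA */
	return 0;
}

Running the mock prints GFP_DMA32 for a 30-bit device before the override and GFP_DMA afterwards, which matches the bcm2711 (Raspberry Pi 4) situation this series targets: its ZONE_DMA covers only the first 1 GiB (30 bits).
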
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -38,6 +38,4 @@ extern int pfn_valid(unsigned long);

 #include <asm-generic/getorder.h>

-#define ARCH_ZONE_DMA_BITS 30
-
 #endif
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -20,6 +20,7 @@
 #include <linux/sort.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/efi.h>
@@ -41,6 +42,8 @@
 #include <asm/tlb.h>
 #include <asm/alternative.h>

+#define ARM64_ZONE_DMA_BITS 30
+
 /*
  * We need to be able to catch inadvertent references to memstart_addr
  * that occur (potentially in generic code) before arm64_memblock_init()
@@ -430,8 +433,10 @@ void __init arm64_memblock_init(void)

 	early_init_fdt_scan_reserved_mem();

-	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		arm64_dma_phys_limit = max_zone_phys(ARCH_ZONE_DMA_BITS);
+	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
+		zone_dma_bits = ARM64_ZONE_DMA_BITS;
+		arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
+	}

 	if (IS_ENABLED(CONFIG_ZONE_DMA32))
 		arm64_dma32_phys_limit = max_zone_phys(32);
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -334,13 +334,4 @@ struct vm_area_struct;
 #endif /* __ASSEMBLY__ */
 #include <asm/slice.h>

-/*
- * Allow 30-bit DMA for very limited Broadcom wifi chips on many powerbooks.
- */
-#ifdef CONFIG_PPC32
-#define ARCH_ZONE_DMA_BITS 30
-#else
-#define ARCH_ZONE_DMA_BITS 31
-#endif
-
 #endif /* _ASM_POWERPC_PAGE_H */
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -31,6 +31,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/memremap.h>
+#include <linux/dma-direct.h>

 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -223,10 +224,10 @@ static int __init mark_nonram_nosave(voi
  * everything else. GFP_DMA32 page allocations automatically fall back to
  * ZONE_DMA.
  *
- * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
- * inform the generic DMA mapping code. 32-bit only devices (if not handled
- * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
- * otherwise served by ZONE_DMA.
+ * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
+ * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU
+ * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
+ * ZONE_DMA.
  */
 static unsigned long max_zone_pfns[MAX_NR_ZONES];

@@ -259,9 +260,18 @@ void __init paging_init(void)
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (long int)((top_of_ram - total_ram) >> 20));

+	/*
+	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
+	 * powerbooks.
+	 */
+	if (IS_ENABLED(CONFIG_PPC32))
+		zone_dma_bits = 30;
+	else
+		zone_dma_bits = 31;
+
 #ifdef CONFIG_ZONE_DMA
 	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
-			1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
+			1UL << (zone_dma_bits - PAGE_SHIFT));
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -179,8 +179,6 @@ static inline int devmem_is_allowed(unsi
 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

-#define ARCH_ZONE_DMA_BITS 31
-
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>

--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -118,6 +118,7 @@ void __init paging_init(void)

 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
+	zone_dma_bits = 31;
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -8,6 +8,8 @@

 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);

+extern unsigned int zone_dma_bits;
+
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
 #else
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -16,12 +16,11 @@
 #include <linux/swiotlb.h>

 /*
- * Most architectures use ZONE_DMA for the first 16 Megabytes, but
- * some use it for entirely different regions:
+ * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use it
+ * it for entirely different regions. In that case the arch code needs to
+ * override the variable below for dma-direct to work properly.
  */
-#ifndef ARCH_ZONE_DMA_BITS
-#define ARCH_ZONE_DMA_BITS 24
-#endif
+unsigned int zone_dma_bits __ro_after_init = 24;

 static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
@@ -70,7 +69,7 @@ static gfp_t __dma_direct_optimal_gfp_ma
 	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
 	 * zones.
 	 */
-	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+	if (*phys_mask <= DMA_BIT_MASK(zone_dma_bits))
 		return GFP_DMA;
 	if (*phys_mask <= DMA_BIT_MASK(32))
 		return GFP_DMA32;
@@ -396,7 +395,7 @@ int dma_direct_supported(struct device *
 	u64 min_mask;

 	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
+		min_mask = DMA_BIT_MASK(zone_dma_bits);
 	else
 		min_mask = DMA_BIT_MASK(30);
