openwrt/target/linux/layerscape/patches-4.14/301-arch-support-layerscape.patch

From 6eeff55fd4756f271ad09a914078c9aa45f8359d Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
Date: Fri, 16 Nov 2018 14:23:40 +0800
Subject: [PATCH 04/39] arch: support layerscape

This is an integrated patch of the arch support for Layerscape platforms.
Signed-off-by: Abhimanyu Saini <abhimanyu.saini@nxp.com>
Signed-off-by: Alison Wang <alison.wang@freescale.com>
Signed-off-by: Amrita Kumari <amrita.kumari@nxp.com>
Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Dave Liu <daveliu@freescale.com>
Signed-off-by: Guanhua <guanhua.gao@nxp.com>
Signed-off-by: Haiying Wang <Haiying.wang@freescale.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Jerry Huang <Chang-Ming.Huang@freescale.com>
Signed-off-by: Jianhua Xie <jianhua.xie@nxp.com>
Signed-off-by: Jin Qing <b24347@freescale.com>
Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Li Yang <leoli@freescale.com>
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
Signed-off-by: Poonam Aggrwal <poonam.aggrwal@nxp.com>
Signed-off-by: Rajesh Bhagat <rajesh.bhagat@nxp.com>
Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Shengzhou Liu <Shengzhou.Liu@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
Signed-off-by: Wang Dongsheng <dongsheng.wang@freescale.com>
Signed-off-by: Xie Xiaobo <X.Xie@freescale.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
arch/arm/include/asm/delay.h | 16 ++++++++++++++
arch/arm/include/asm/io.h | 31 +++++++++++++++++++++++++++
arch/arm/include/asm/mach/map.h | 4 ++--
arch/arm/include/asm/pgtable.h | 7 ++++++
arch/arm/kernel/time.c | 3 +++
arch/arm/mm/dma-mapping.c | 1 +
arch/arm/mm/ioremap.c | 7 ++++++
arch/arm/mm/mmu.c | 9 ++++++++
arch/arm64/include/asm/cache.h | 2 +-
arch/arm64/include/asm/io.h | 1 +
arch/arm64/include/asm/pgtable-prot.h | 3 +++
arch/arm64/include/asm/pgtable.h | 5 +++++
arch/arm64/mm/dma-mapping.c | 1 +
arch/arm64/mm/init.c | 12 +++++++----
14 files changed, 95 insertions(+), 7 deletions(-)
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -85,6 +85,22 @@ extern void __bad_udelay(void);
__const_udelay((n) * UDELAY_MULT)) : \
__udelay(n))
+#define spin_event_timeout(condition, timeout, delay) \
+({ \
+	typeof(condition) __ret; \
+	int i = 0; \
+	while (!(__ret = (condition)) && (i++ < timeout)) { \
+		if (delay) \
+			udelay(delay); \
+		else \
+			cpu_relax(); \
+		udelay(1); \
+	} \
+	if (!__ret) \
+		__ret = (condition); \
+	__ret; \
+})
+
/* Loop-based definitions for assembly code. */
extern void __loop_delay(unsigned long loops);
extern void __loop_udelay(unsigned long usecs);
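
The spin_event_timeout() macro added above mirrors the PowerPC helper of the same name: it re-reads the condition up to 'timeout' times, waiting 'delay' microseconds between polls (just cpu_relax() when delay is 0, plus an extra microsecond either way), and evaluates to the final value of the condition, so a zero result means the wait expired. A minimal usage sketch; FOO_STATUS, FOO_DONE and the bounds are hypothetical:

	#define FOO_STATUS	0x04		/* hypothetical status register offset */
	#define FOO_DONE	BIT(0)		/* hypothetical "operation done" flag */

	static int foo_wait_ready(void __iomem *regs)
	{
		u32 done;

		/* Poll up to 1000 times, roughly 10us apart, for the DONE bit. */
		done = spin_event_timeout(ioread32be(regs + FOO_STATUS) & FOO_DONE,
					  1000, 10);

		return done ? 0 : -ETIMEDOUT;	/* zero condition means we timed out */
	}
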
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -128,6 +128,7 @@ static inline u32 __raw_readl(const vola
#define MT_DEVICE_NONSHARED 1
#define MT_DEVICE_CACHED 2
#define MT_DEVICE_WC 3
+#define MT_MEMORY_RW_NS 4
/*
* types 4 onwards can be found in asm/mach/map.h and are undefined
* for ioremap
@@ -229,6 +230,34 @@ void __iomem *pci_remap_cfgspace(resourc
#endif
#endif
+/* access ports */
+#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
+#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
+
+#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
+#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
+
+#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
+#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
+
+/* Clear and set bits in one shot. These macros can be used to clear and
+ * set multiple bits in a register using a single read-modify-write. These
+ * macros can also be used to set a multiple-bit bit pattern using a mask,
+ * by specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define clrsetbits_be32(addr, clear, set) \
+ iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le32(addr, clear, set) \
+ iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_be16(addr, clear, set) \
+ iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le16(addr, clear, set) \
+ iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_8(addr, clear, set) \
+ iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
+
/*
* IO port access primitives
* -------------------------
@@ -417,6 +446,8 @@ void __iomem *ioremap_wc(resource_size_t
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size);
+
void iounmap(volatile void __iomem *iomem_cookie);
#define iounmap iounmap
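
The setbits32()/clrbits32() family above performs a single big-endian read-modify-write, and the clrsetbits_* variants clear a mask and install a new bit pattern in the same access, as the in-line comment explains. A short sketch of both; FOO_CTRL and its CLKDIV field are invented for illustration:

	#define FOO_CTRL	0x10		/* hypothetical big-endian control register */
	#define FOO_ENABLE	BIT(0)
	#define FOO_CLKDIV_MASK	0x30		/* hypothetical two-bit divider field at bits [5:4] */
	#define FOO_CLKDIV_4	0x20

	static void foo_hw_init(void __iomem *regs)
	{
		/* Set a single bit: read, OR in the bit, write back. */
		setbits32(regs + FOO_CTRL, FOO_ENABLE);

		/* Replace the whole CLKDIV field in one read-modify-write:
		 * the mask goes in 'clear', the new pattern in 'set'.
		 */
		clrsetbits_be32(regs + FOO_CTRL, FOO_CLKDIV_MASK, FOO_CLKDIV_4);
	}
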
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -21,9 +21,9 @@ struct map_desc {
unsigned int type;
};
-/* types 0-3 are defined in asm/io.h */
+/* types 0-4 are defined in asm/io.h */
enum {
- MT_UNCACHED = 4,
+ MT_UNCACHED = 5,
MT_CACHECLEAN,
MT_MINICLEAN,
MT_LOW_VECTORS,
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -119,6 +119,13 @@ extern pgprot_t pgprot_s2_device;
#define pgprot_noncached(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+#define pgprot_cached(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED)
+
+#define pgprot_cached_ns(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED | \
+ L_PTE_MT_DEV_NONSHARED)
+
#define pgprot_writecombine(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -12,6 +12,7 @@
* reading the RTC at bootup, etc...
*/
#include <linux/clk-provider.h>
+#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/export.h>
@@ -121,5 +122,7 @@ void __init time_init(void)
of_clk_init(NULL);
#endif
timer_probe();
+
+ tick_setup_hrtimer_broadcast();
}
}
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2416,6 +2416,7 @@ void arch_setup_dma_ops(struct device *d
#endif
dev->archdata.dma_ops_setup = true;
}
+EXPORT_SYMBOL(arch_setup_dma_ops);
void arch_teardown_dma_ops(struct device *dev)
{
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -398,6 +398,13 @@ void __iomem *ioremap_wc(resource_size_t
}
EXPORT_SYMBOL(ioremap_wc);
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size)
+{
+	return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS,
+				   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache_ns);
+
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space as memory. Needed when the kernel wants to execute
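
ioremap_cache_ns() is the kernel-side entry point for the MT_MEMORY_RW_NS mapping type wired up in mmu.c below: it gives a driver a cacheable but non-shareable view of a physical region, intended for users like the QBMan/DPAA portal code on Layerscape. A sketch of a caller; foo_map_portal and its parameters are hypothetical:

	static void __iomem *foo_map_portal(phys_addr_t addr, size_t size)
	{
		/* Cacheable, non-shareable mapping backed by MT_MEMORY_RW_NS. */
		void __iomem *p = ioremap_cache_ns(addr, size);

		if (!p)
			pr_err("foo: portal mapping failed\n");
		return p;	/* released later with iounmap() */
	}
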
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -315,6 +315,13 @@ static struct mem_type mem_types[] __ro_
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_RW_NS] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_XN,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
[MT_ROM] = {
.prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
@@ -651,6 +658,7 @@ static void __init build_mem_type_table(
}
kern_pgprot |= PTE_EXT_AF;
vecs_pgprot |= PTE_EXT_AF;
+ mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte;
/*
* Set PXN for user mappings
@@ -679,6 +687,7 @@ static void __init build_mem_type_table(
mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -34,7 +34,7 @@
#define ICACHE_POLICY_VIPT 2
#define ICACHE_POLICY_PIPT 3
-#define L1_CACHE_SHIFT 7
+#define L1_CACHE_SHIFT 6
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/*
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -186,6 +186,7 @@ extern void __iomem *ioremap_cache(phys_
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NS))
#define iounmap __iounmap
/*
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -48,6 +48,8 @@
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -68,6 +70,7 @@
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2_NS __pgprot(PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDWR | PTE_TYPE_PAGE | PTE_AF)
#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -377,6 +377,11 @@ static inline int pmd_protnone(pmd_t pmd
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+#define pgprot_cached(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \
+ PTE_PXN | PTE_UXN)
+#define pgprot_cached_ns(prot) \
+ __pgprot(pgprot_val(pgprot_cached(prot)) ^ PTE_SHARED)
#define pgprot_device(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
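
pgprot_cached_ns() builds on pgprot_cached() and XORs PTE_SHARED back out, yielding a cacheable, non-shareable protection for user mappings, the user-space counterpart of ioremap_cache_ns(). One way a driver's mmap handler might apply it (a sketch; foo_portal_pfn is hypothetical):

	/* foo_portal_pfn: hypothetical page frame number of the portal page. */
	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		/* Cacheable, non-shareable user mapping of the portal page. */
		vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot);

		return remap_pfn_range(vma, vma->vm_start, foo_portal_pfn,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}
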
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -937,3 +937,4 @@ void arch_setup_dma_ops(struct device *d
}
#endif
}
+EXPORT_SYMBOL(arch_setup_dma_ops);
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -457,6 +457,14 @@ void __init arm64_memblock_init(void)
* Register the kernel text, kernel data, initrd, and initial
* pagetables with memblock.
*/
+
+ /* make this the first reservation so that there are no chances of
+ * overlap
+ */
+ reserve_elfcorehdr();
+
+ reserve_crashkernel();
+
memblock_reserve(__pa_symbol(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
@@ -476,10 +484,6 @@ void __init arm64_memblock_init(void)
else
arm64_dma_phys_limit = PHYS_MASK + 1;
- reserve_crashkernel();
-
- reserve_elfcorehdr();
-
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
dma_contiguous_reserve(arm64_dma_phys_limit);