Mirror of https://github.com/openwrt/openwrt.git, synced 2024-12-25 08:21:14 +00:00
6bf179b270
Switch to the mainline Lantiq PCIe PHY driver and update the vr9.dtsi
accordingly. The Lantiq IRQ SMP support added upstream required changes
to the SoC dtsi as well.

The following changes are made to the Lantiq kernel patches:

0005-lantiq_etop-pass-struct-device-to-DMA-API-functions.patch
0006-MIPS-lantiq-pass-struct-device-to-DMA-API-functions.patch
  applied upstream

0008-MIPS-lantiq-backport-old-timer-code.patch
  access_ok API update, because it lost its type (which was the first)
  parameter in upstream commit 96d4f267e40f95 ("Remove 'type' argument
  from access_ok() function")

0024-MIPS-lantiq-autoselect-soc-rev-matching-fw.patch
  merged into 0026-MIPS-lantiq-Add-GPHY-Firmware-loader.patch

0024-MIPS-lantiq-revert-DSA-switch-driver-PMU-clock-chang.patch
  reverts upstream changes that are required for the upstream xrx200
  ethernet and xrx200 (DSA) switch drivers but break our driver

0026-MIPS-lantiq-Add-GPHY-Firmware-loader.patch
  required for our driver but dropped upstream; re-add the former
  upstream version

0028-NET-lantiq-various-etop-fixes.patch
  now has to use the phy_set_max_speed API instead of modifying
  phydev->supported. Also call ltq_dma_enable_irq() in ltq_etop_open(),
  based on upstream commit cc973aecf0b054 ("MIPS: lantiq: Do not enable
  IRQs in dma open")

Signed-off-by: Mathias Kresin <dev@kresin.me>
Signed-off-by: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
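For reference, the two API migrations mentioned above follow the usual
patterns for this kernel generation. The sketch below is illustrative
only and is not taken from the patches themselves (the function names
example_check_user_buf() and example_limit_phy(), and the SPEED_100
value, are placeholders): it shows access_ok() without its former
'type' argument and phy_set_max_speed() replacing direct edits of
phydev->supported.

#include <linux/uaccess.h>	/* access_ok() */
#include <linux/phy.h>		/* phy_set_max_speed(), SPEED_100 */

/* Old form: if (!access_ok(VERIFY_WRITE, buf, len)) ...
 * New form after upstream commit 96d4f267e40f95: no type argument. */
static int example_check_user_buf(void __user *buf, size_t len)
{
	if (!access_ok(buf, len))
		return -EFAULT;
	return 0;
}

/* Instead of clearing speed/duplex bits in phydev->supported by hand,
 * a driver that caps a PHY at 100 Mbit/s now uses the helper. */
static void example_limit_phy(struct phy_device *phydev)
{
	phy_set_max_speed(phydev, SPEED_100);
}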
From 997a8965db8417266bea3fbdcfa3e5655a1b52fa Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 9 Sep 2014 23:12:15 +0200
Subject: [PATCH 18/36] MTD: nand: lots of xrx200 fixes

Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/mtd/nand/raw/xway_nand.c | 63 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -61,6 +61,24 @@
 #define NAND_CON_CSMUX		(1 << 1)
 #define NAND_CON_NANDM		1
 
+#define DANUBE_PCI_REG32( addr )	(*(volatile u32 *)(addr))
+#define PCI_CR_PR_OFFSET	(KSEG1+0x1E105400)
+#define PCI_CR_PC_ARB		(PCI_CR_PR_OFFSET + 0x0080)
+
+/*
+ * req_mask provides a mechanism to prevent interference between
+ * nand and pci (probably only relevant for the BT Home Hub 2B).
+ * Setting it causes the corresponding pci req pins to be masked
+ * during nand access, and also moves ebu locking from the read/write
+ * functions to the chip select function to ensure that the whole
+ * operation runs with interrupts disabled.
+ * In addition it switches on some extra waiting in xway_cmd_ctrl().
+ * This seems to be necessary if the ebu_cs1 pin has open-drain disabled,
+ * which in turn seems to be necessary for the nor chip to be recognised
+ * reliably, on a board (Home Hub 2B again) which has both nor and nand.
+ */
+static __be32 req_mask = 0;
+
 struct xway_nand_data {
 	struct nand_chip	chip;
 	unsigned long		csflags;
@@ -91,10 +109,22 @@ static void xway_select_chip(struct nand
 	case -1:
 		ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
 		ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+
+		if (req_mask) {
+			/* Unmask all external PCI request */
+			DANUBE_PCI_REG32(PCI_CR_PC_ARB) &= ~(req_mask << 16);
+		}
+
 		spin_unlock_irqrestore(&ebu_lock, data->csflags);
 		break;
 	case 0:
 		spin_lock_irqsave(&ebu_lock, data->csflags);
+
+		if (req_mask) {
+			/* Mask all external PCI request */
+			DANUBE_PCI_REG32(PCI_CR_PC_ARB) |= (req_mask << 16);
+		}
+
 		ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
 		ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
 		break;
@@ -107,6 +137,11 @@ static void xway_cmd_ctrl(struct nand_ch
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
 
+	if (req_mask) {
+		if (cmd != NAND_CMD_STATUS)
+			ltq_ebu_w32(0, EBU_NAND_WAIT);	/* Clear nand ready */
+	}
+
 	if (cmd == NAND_CMD_NONE)
 		return;
 
@@ -117,6 +152,24 @@ static void xway_cmd_ctrl(struct nand_ch
 
 	while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
 		;
+
+	if (req_mask) {
+		/*
+		 * program and erase have their own busy handlers
+		 * status and sequential in needs no delay
+		 */
+		switch (cmd) {
+		case NAND_CMD_ERASE1:
+		case NAND_CMD_SEQIN:
+		case NAND_CMD_STATUS:
+		case NAND_CMD_READID:
+			return;
+		}
+
+		/* wait until command is processed */
+		while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD) == 0)
+			;
+	}
 }
 
 static int xway_dev_ready(struct nand_chip *chip)
@@ -156,6 +209,7 @@ static int xway_nand_probe(struct platfo
 	int err;
 	u32 cs;
 	u32 cs_flag = 0;
+	const __be32 *req_mask_ptr;
 
 	/* Allocate memory for the device structure (and zero it) */
 	data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
@@ -191,6 +245,15 @@ static int xway_nand_probe(struct platfo
 	if (!err && cs == 1)
 		cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
 
+	req_mask_ptr = of_get_property(pdev->dev.of_node,
+					"req-mask", NULL);
+
+	/*
+	 * Load the PCI req lines to mask from the device tree. If the
+	 * property is not present, setting req_mask to 0 disables masking.
+	 */
+	req_mask = (req_mask_ptr ? *req_mask_ptr : 0);
+
 	/* setup the EBU to run in NAND mode on our base addr */
 	ltq_ebu_w32(CPHYSADDR(data->nandaddr)
 		| ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
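The req_mask mechanism described in the comment added by this patch is
driven entirely by an optional, board-specific "req-mask" device-tree
property that xway_nand_probe() reads via of_get_property(). As a
rough, hypothetical sketch (not part of the patch, and the helper name
xway_read_req_mask() is invented for illustration), the same value
could also be fetched with the typed OF accessor, which performs the
big-endian-to-CPU conversion internally:

#include <linux/of.h>

/* Sketch only: read an optional "req-mask" property from the NAND
 * controller node; mask stays 0 (masking disabled) when absent. */
static u32 xway_read_req_mask(const struct device_node *np)
{
	u32 mask = 0;

	of_property_read_u32(np, "req-mask", &mask);
	return mask;
}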