Mirror of https://github.com/openwrt/openwrt.git (synced 2024-12-25 08:21:14 +00:00)
ef944dcb85
Also refresh 3.10 patches.

Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
SVN-Revision: 37502
2573 lines
74 KiB
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1294,7 +1294,7 @@ static void mmc_blk_rw_rq_prep(struct mm
brq->data.blocks = 1;
}
- if (brq->data.blocks > 1 || do_rel_wr) {
+ if (brq->data.blocks > 1 || do_rel_wr || card->host->caps2 & MMC_CAP2_FORCE_MULTIBLOCK) {
/* SPI multiblock writes terminate using a special
* token, not a STOP_TRANSMISSION request.
*/
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -13,6 +13,8 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/jiffies.h>
+#include <linux/nmi.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -58,6 +60,15 @@ static const unsigned int tacc_mant[] =
__res & __mask; \
})
+// timeout for tries
+static const unsigned long retry_timeout_ms= 10*1000;
+
+// try at least 10 times, even if timeout is reached
+static const int retry_min_tries= 10;
+
+// delay between tries
+static const unsigned long retry_delay_ms= 10;
+
/*
* Given the decoded CSD structure, decode the raw CID to our CID structure.
*/
@@ -210,12 +221,62 @@ static int mmc_decode_scr(struct mmc_car
}
/*
- * Fetch and process SD Status register.
+ * Fetch and process SD Configuration Register.
+ */
+static int mmc_read_scr(struct mmc_card *card)
+{
+ unsigned long timeout_at;
+ int err, tries;
+
+ timeout_at= jiffies + msecs_to_jiffies( retry_timeout_ms );
+ tries= 0;
+
+ while( tries < retry_min_tries || time_before( jiffies, timeout_at ) )
+ {
+ unsigned long delay_at;
+ tries++;
+
+ err = mmc_app_send_scr(card, card->raw_scr);
+ if( !err )
+ break; // success!!!
+
+ touch_nmi_watchdog(); // we are still alive!
+
+ // delay
+ delay_at= jiffies + msecs_to_jiffies( retry_delay_ms );
+ while( time_before( jiffies, delay_at ) )
+ {
+ mdelay( 1 );
+ touch_nmi_watchdog(); // we are still alive!
+ }
+ }
+
+ if( err)
+ {
+		pr_err("%s: failed to read SD Configuration register (SCR) after %d tries within %lu ms, error %d\n", mmc_hostname(card->host), tries, retry_timeout_ms, err );
+ return err;
+ }
+
+ if( tries > 1 )
+ {
+		pr_info("%s: read SD Configuration register (SCR) after %d attempts\n", mmc_hostname(card->host), tries );
+ }
+
+ err = mmc_decode_scr(card);
+ if (err)
+ return err;
+
+ return err;
+}
+
+/*
+ * Fetch and process SD Status Register.
*/
static int mmc_read_ssr(struct mmc_card *card)
{
+ unsigned long timeout_at;
unsigned int au, es, et, eo;
- int err, i;
+ int err, i, tries;
u32 *ssr;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -228,14 +289,40 @@ static int mmc_read_ssr(struct mmc_card
if (!ssr)
return -ENOMEM;
+ timeout_at= jiffies + msecs_to_jiffies( retry_timeout_ms );
+ tries= 0;
+
+ while( tries < retry_min_tries || time_before( jiffies, timeout_at ) )
+ {
+ unsigned long delay_at;
+ tries++;
+
err = mmc_app_sd_status(card, ssr);
- if (err) {
- pr_warning("%s: problem reading SD Status "
- "register.\n", mmc_hostname(card->host));
- err = 0;
+ if( !err )
+		break; // success!!!
+
+ touch_nmi_watchdog(); // we are still alive!
+
+ // delay
+ delay_at= jiffies + msecs_to_jiffies( retry_delay_ms );
+ while( time_before( jiffies, delay_at ) )
+ {
+ mdelay( 1 );
+ touch_nmi_watchdog(); // we are still alive!
+ }
+ }
+
+ if( err)
+ {
+		pr_err("%s: failed to read SD Status register (SSR) after %d tries within %lu ms, error %d\n", mmc_hostname(card->host), tries, retry_timeout_ms, err );
goto out;
}
+ if( tries > 1 )
+ {
+ pr_info("%s: read SD Status register (SSR) after %d attempts\n", mmc_hostname(card->host), tries );
+ }
+
for (i = 0; i < 16; i++)
ssr[i] = be32_to_cpu(ssr[i]);
@@ -808,13 +895,9 @@ int mmc_sd_setup_card(struct mmc_host *h
if (!reinit) {
/*
- * Fetch SCR from card.
+ * Fetch and decode SD Configuration register.
*/
- err = mmc_app_send_scr(card, card->raw_scr);
- if (err)
- return err;
-
- err = mmc_decode_scr(card);
+ err = mmc_read_scr(card);
if (err)
return err;
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -249,6 +249,27 @@ config MMC_SDHCI_S3C_DMA
YMMV.
+config MMC_SDHCI_BCM2708
+ tristate "SDHCI support on BCM2708"
+ depends on MMC_SDHCI && MACH_BCM2708
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+	  often referred to as the eMMC block.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_BCM2708_DMA
+ bool "DMA support on BCM2708 Arasan controller"
+ depends on MMC_SDHCI_BCM2708
+ help
+ Enable DMA support on the Arasan SDHCI controller in Broadcom 2708
+ based chips.
+
+ If unsure, say N.
+
config MMC_SDHCI_BCM2835
tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
depends on ARCH_BCM2835
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-p
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
+obj-$(CONFIG_MMC_SDHCI_BCM2708) += sdhci-bcm2708.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
obj-$(CONFIG_MMC_OMAP) += omap.o
--- /dev/null
|
|
+++ b/drivers/mmc/host/sdhci-bcm2708.c
|
|
@@ -0,0 +1,1420 @@
|
|
+/*
|
|
+ * sdhci-bcm2708.c Support for SDHCI device on BCM2708
|
|
+ * Copyright (c) 2010 Broadcom
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License version 2 as
|
|
+ * published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
|
+ */
|
|
+
|
|
+/* Supports:
|
|
+ * SDHCI platform device - Arasan SD controller in BCM2708
|
|
+ *
|
|
+ * Inspired by sdhci-pci.c, by Pierre Ossman
|
|
+ */
|
|
+
|
|
+#include <linux/delay.h>
|
|
+#include <linux/highmem.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mmc/mmc.h>
|
|
+#include <linux/mmc/host.h>
|
|
+#include <linux/mmc/sd.h>
|
|
+
|
|
+#include <linux/io.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <mach/dma.h>
|
|
+
|
|
+#include "sdhci.h"
|
|
+
|
|
+/*****************************************************************************\
|
|
+ * *
|
|
+ * Configuration *
|
|
+ * *
|
|
+\*****************************************************************************/
|
|
+
|
|
+#define DRIVER_NAME "bcm2708_sdhci"
|
|
+
|
|
+/* for the time being insist on DMA mode - PIO seems not to work */
|
|
+#ifndef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+#warning Non-DMA (PIO) version of this driver currently unavailable
|
|
+#endif
|
|
+#undef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+#define CONFIG_MMC_SDHCI_BCM2708_DMA y
|
|
+
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+/* #define CHECK_DMA_USE */
|
|
+#endif
|
|
+//#define LOG_REGISTERS
|
|
+
|
|
+#define USE_SCHED_TIME
|
|
+#define USE_SPACED_WRITES_2CLK 1 /* space consecutive register writes */
|
|
+#define USE_SOFTWARE_TIMEOUTS 1 /* not hardware timeouts */
|
|
+#define SOFTWARE_ERASE_TIMEOUT_SEC 30
|
|
+
|
|
+#define SDHCI_BCM_DMA_CHAN	4		/* this default is normally overridden */
|
|
+#define SDHCI_BCM_DMA_WAITS 0 /* delays slowing DMA transfers: 0-31 */
|
|
+/* We are worried that SD card DMA use may be blocking the AXI bus for others */
|
|
+
|
|
+/*! TODO: obtain these from the physical address */
|
|
+#define DMA_SDHCI_BASE 0x7e300000 /* EMMC register block on Videocore */
|
|
+#define DMA_SDHCI_BUFFER (DMA_SDHCI_BASE + SDHCI_BUFFER)
|
|
+
|
|
+#define BCM2708_SDHCI_SLEEP_TIMEOUT 1000 /* msecs */
|
|
+
|
|
+/* Clock rate (Hz) that the EMMC core is running at. Should match the platform clockman settings */
|
|
+#define BCM2708_EMMC_CLOCK_FREQ 50000000
|
|
+
|
|
+#define REG_EXRDFIFO_EN 0x80
|
|
+#define REG_EXRDFIFO_CFG 0x84
|
|
+
|
|
+int cycle_delay=2;
|
|
+
|
|
+/*****************************************************************************\
|
|
+ * *
|
|
+ * Debug *
|
|
+ * *
|
|
+\*****************************************************************************/
|
|
+
|
|
+
|
|
+
|
|
+#define DBG(f, x...) \
|
|
+ pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
|
|
+// printk(KERN_INFO DRIVER_NAME " [%s()]: " f, __func__,## x)//GRAYG
|
|
+
|
|
+
|
|
+/*****************************************************************************\
|
|
+ * *
|
|
+ * High Precision Time *
|
|
+ * *
|
|
+\*****************************************************************************/
|
|
+
|
|
+#ifdef USE_SCHED_TIME
|
|
+
|
|
+#include <mach/frc.h>
|
|
+
|
|
+typedef unsigned long hptime_t;
|
|
+
|
|
+#define FMT_HPT "lu"
|
|
+
|
|
+static inline hptime_t hptime(void)
|
|
+{
|
|
+ return frc_clock_ticks32();
|
|
+}
|
|
+
|
|
+#define HPTIME_CLK_NS 1000ul
|
|
+
|
|
+#else
|
|
+
|
|
+typedef unsigned long hptime_t;
|
|
+
|
|
+#define FMT_HPT "lu"
|
|
+
|
|
+static inline hptime_t hptime(void)
|
|
+{
|
|
+ return jiffies;
|
|
+}
|
|
+
|
|
+#define HPTIME_CLK_NS (1000000000ul/HZ)
|
|
+
|
|
+#endif
|
|
+
|
|
+static inline unsigned long int since_ns(hptime_t t)
|
|
+{
|
|
+ return (unsigned long)((hptime() - t) * HPTIME_CLK_NS);
|
|
+}
|
|
+
|
|
+static bool allow_highspeed = 1;
|
|
+static int emmc_clock_freq = BCM2708_EMMC_CLOCK_FREQ;
|
|
+static bool sync_after_dma = 1;
|
|
+static bool missing_status = 1;
|
|
+static bool spurious_crc_acmd51 = 0;
|
|
+bool enable_llm = 1;
|
|
+bool extra_messages = 0;
|
|
+
|
|
+#if 0
|
|
+static void hptime_test(void)
|
|
+{
|
|
+ hptime_t now;
|
|
+ hptime_t later;
|
|
+
|
|
+ now = hptime();
|
|
+ msleep(10);
|
|
+ later = hptime();
|
|
+
|
|
+ printk(KERN_INFO DRIVER_NAME": 10ms = %"FMT_HPT" clks "
|
|
+ "(from %"FMT_HPT" to %"FMT_HPT") = %luns\n",
|
|
+ later-now, now, later,
|
|
+ (unsigned long)(HPTIME_CLK_NS * (later - now)));
|
|
+
|
|
+ now = hptime();
|
|
+ msleep(1000);
|
|
+ later = hptime();
|
|
+
|
|
+ printk(KERN_INFO DRIVER_NAME": 1s = %"FMT_HPT" clks "
|
|
+ "(from %"FMT_HPT" to %"FMT_HPT") = %luns\n",
|
|
+ later-now, now, later,
|
|
+ (unsigned long)(HPTIME_CLK_NS * (later - now)));
|
|
+}
|
|
+#endif
|
|
+
|
|
+/*****************************************************************************\
|
|
+ * *
|
|
+ * SDHCI core callbacks *
|
|
+ * *
|
|
+\*****************************************************************************/
|
|
+
|
|
+
|
|
+#ifdef CHECK_DMA_USE
|
|
+/*#define CHECK_DMA_REG_USE*/
|
|
+#endif
|
|
+
|
|
+#ifdef CHECK_DMA_REG_USE
|
|
+/* we don't expect anything to be using these registers during a
|
|
+ DMA (except the IRQ status) - so check */
|
|
+static void check_dma_reg_use(struct sdhci_host *host, int reg);
|
|
+#else
|
|
+#define check_dma_reg_use(host, reg)
|
|
+#endif
|
|
+
|
|
+
|
|
+static inline u32 sdhci_bcm2708_raw_readl(struct sdhci_host *host, int reg)
|
|
+{
|
|
+ return readl(host->ioaddr + reg);
|
|
+}
|
|
+
|
|
+u32 sdhci_bcm2708_readl(struct sdhci_host *host, int reg)
|
|
+{
|
|
+ u32 l = sdhci_bcm2708_raw_readl(host, reg);
|
|
+
|
|
+#ifdef LOG_REGISTERS
|
|
+ printk(KERN_ERR "%s: readl from 0x%02x, value 0x%08x\n",
|
|
+ mmc_hostname(host->mmc), reg, l);
|
|
+#endif
|
|
+ check_dma_reg_use(host, reg);
|
|
+
|
|
+ return l;
|
|
+}
|
|
+
|
|
+u16 sdhci_bcm2708_readw(struct sdhci_host *host, int reg)
|
|
+{
|
|
+ u32 l = sdhci_bcm2708_raw_readl(host, reg & ~3);
|
|
+ u32 w = l >> (reg << 3 & 0x18) & 0xffff;
|
|
+
|
|
+#ifdef LOG_REGISTERS
|
|
+ printk(KERN_ERR "%s: readw from 0x%02x, value 0x%04x\n",
|
|
+ mmc_hostname(host->mmc), reg, w);
|
|
+#endif
|
|
+ check_dma_reg_use(host, reg);
|
|
+
|
|
+ return (u16)w;
|
|
+}
|
|
+
|
|
+u8 sdhci_bcm2708_readb(struct sdhci_host *host, int reg)
|
|
+{
|
|
+ u32 l = sdhci_bcm2708_raw_readl(host, reg & ~3);
|
|
+ u32 b = l >> (reg << 3 & 0x18) & 0xff;
|
|
+
|
|
+#ifdef LOG_REGISTERS
|
|
+ printk(KERN_ERR "%s: readb from 0x%02x, value 0x%02x\n",
|
|
+ mmc_hostname(host->mmc), reg, b);
|
|
+#endif
|
|
+ check_dma_reg_use(host, reg);
|
|
+
|
|
+ return (u8)b;
|
|
+}
|
|
+
|
|
+
|
|
+static void sdhci_bcm2708_raw_writel(struct sdhci_host *host, u32 val, int reg)
|
|
+{
|
|
+ u32 ier;
|
|
+
|
|
+#if USE_SPACED_WRITES_2CLK
|
|
+ static bool timeout_disabled = false;
|
|
+ unsigned int ns_2clk = 0;
|
|
+
|
|
+ /* The Arasan has a bugette whereby it may lose the content of
|
|
+ * successive writes to registers that are within two SD-card clock
|
|
+ * cycles of each other (a clock domain crossing problem).
|
|
+ * It seems, however, that the data register does not have this problem.
|
|
+ * (Which is just as well - otherwise we'd have to nobble the DMA engine
|
|
+ * too)
|
|
+ */
|
|
+ if (reg != SDHCI_BUFFER && host->clock != 0) {
|
|
+ /* host->clock is the clock freq in Hz */
|
|
+ static hptime_t last_write_hpt;
|
|
+ hptime_t now = hptime();
|
|
+ ns_2clk = cycle_delay*1000000/(host->clock/1000);
|
|
+
|
|
+ if (now == last_write_hpt || now == last_write_hpt+1) {
|
|
+ /* we can't guarantee any significant time has
|
|
+ * passed - we'll have to wait anyway ! */
|
|
+ ndelay(ns_2clk);
|
|
+ } else
|
|
+ {
|
|
+ /* we must have waited at least this many ns: */
|
|
+ unsigned int ns_wait = HPTIME_CLK_NS *
|
|
+ (last_write_hpt - now - 1);
|
|
+ if (ns_wait < ns_2clk)
|
|
+ ndelay(ns_2clk - ns_wait);
|
|
+ }
|
|
+ last_write_hpt = now;
|
|
+ }
|
|
+#if USE_SOFTWARE_TIMEOUTS
|
|
+ /* The Arasan is clocked for timeouts using the SD clock which is too
|
|
+ * fast for ERASE commands and causes issues. So we disable timeouts
|
|
+ * for ERASE */
|
|
+ if (host->cmd != NULL && host->cmd->opcode == MMC_ERASE &&
|
|
+ reg == (SDHCI_COMMAND & ~3)) {
|
|
+ mod_timer(&host->timer,
|
|
+ jiffies + SOFTWARE_ERASE_TIMEOUT_SEC * HZ);
|
|
+ ier = readl(host->ioaddr + SDHCI_SIGNAL_ENABLE);
|
|
+ ier &= ~SDHCI_INT_DATA_TIMEOUT;
|
|
+ writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
|
|
+ timeout_disabled = true;
|
|
+ ndelay(ns_2clk);
|
|
+ } else if (timeout_disabled) {
|
|
+ ier = readl(host->ioaddr + SDHCI_SIGNAL_ENABLE);
|
|
+ ier |= SDHCI_INT_DATA_TIMEOUT;
|
|
+ writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
|
|
+ timeout_disabled = false;
|
|
+ ndelay(ns_2clk);
|
|
+ }
|
|
+#endif
|
|
+ writel(val, host->ioaddr + reg);
|
|
+#else
|
|
+ void __iomem * regaddr = host->ioaddr + reg;
|
|
+
|
|
+ writel(val, regaddr);
|
|
+
|
|
+ if (reg != SDHCI_BUFFER && reg != SDHCI_INT_STATUS && host->clock != 0)
|
|
+ {
|
|
+ int timeout = 100000;
|
|
+ while (val != readl(regaddr) && --timeout > 0)
|
|
+ continue;
|
|
+
|
|
+ if (timeout <= 0)
|
|
+ printk(KERN_ERR "%s: writing 0x%X to reg 0x%X "
|
|
+ "always gives 0x%X\n",
|
|
+ mmc_hostname(host->mmc),
|
|
+ val, reg, readl(regaddr));
|
|
+ BUG_ON(timeout <= 0);
|
|
+ }
|
|
+#endif
|
|
+}
|
|
+
|
|
+
|
|
+void sdhci_bcm2708_writel(struct sdhci_host *host, u32 val, int reg)
|
|
+{
|
|
+#ifdef LOG_REGISTERS
|
|
+ printk(KERN_ERR "%s: writel to 0x%02x, value 0x%08x\n",
|
|
+ mmc_hostname(host->mmc), reg, val);
|
|
+#endif
|
|
+ check_dma_reg_use(host, reg);
|
|
+
|
|
+ sdhci_bcm2708_raw_writel(host, val, reg);
|
|
+}
|
|
+
|
|
+void sdhci_bcm2708_writew(struct sdhci_host *host, u16 val, int reg)
|
|
+{
|
|
+ static u32 shadow = 0;
|
|
+
|
|
+ u32 p = reg == SDHCI_COMMAND ? shadow :
|
|
+ sdhci_bcm2708_raw_readl(host, reg & ~3);
|
|
+ u32 s = reg << 3 & 0x18;
|
|
+ u32 l = val << s;
|
|
+ u32 m = 0xffff << s;
|
|
+
|
|
+#ifdef LOG_REGISTERS
|
|
+ printk(KERN_ERR "%s: writew to 0x%02x, value 0x%04x\n",
|
|
+ mmc_hostname(host->mmc), reg, val);
|
|
+#endif
|
|
+
|
|
+ if (reg == SDHCI_TRANSFER_MODE)
|
|
+ shadow = (p & ~m) | l;
|
|
+ else {
|
|
+ check_dma_reg_use(host, reg);
|
|
+ sdhci_bcm2708_raw_writel(host, (p & ~m) | l, reg & ~3);
|
|
+ }
|
|
+}
|
|
+
|
|
+void sdhci_bcm2708_writeb(struct sdhci_host *host, u8 val, int reg)
|
|
+{
|
|
+ u32 p = sdhci_bcm2708_raw_readl(host, reg & ~3);
|
|
+ u32 s = reg << 3 & 0x18;
|
|
+ u32 l = val << s;
|
|
+ u32 m = 0xff << s;
|
|
+
|
|
+#ifdef LOG_REGISTERS
|
|
+ printk(KERN_ERR "%s: writeb to 0x%02x, value 0x%02x\n",
|
|
+ mmc_hostname(host->mmc), reg, val);
|
|
+#endif
|
|
+
|
|
+ check_dma_reg_use(host, reg);
|
|
+ sdhci_bcm2708_raw_writel(host, (p & ~m) | l, reg & ~3);
|
|
+}
|
|
+
|
|
+static unsigned int sdhci_bcm2708_get_max_clock(struct sdhci_host *host)
|
|
+{
|
|
+ return emmc_clock_freq;
|
|
+}
|
|
+
|
|
+/*****************************************************************************\
|
|
+ * *
|
|
+ * DMA Operation *
|
|
+ * *
|
|
+\*****************************************************************************/
|
|
+
|
|
+struct sdhci_bcm2708_priv {
|
|
+ int dma_chan;
|
|
+ int dma_irq;
|
|
+ void __iomem *dma_chan_base;
|
|
+ struct bcm2708_dma_cb *cb_base; /* DMA control blocks */
|
|
+ dma_addr_t cb_handle;
|
|
+ /* tracking scatter gather progress */
|
|
+ unsigned sg_ix; /* scatter gather list index */
|
|
+ unsigned sg_done; /* bytes in current sg_ix done */
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+ unsigned char dma_wanted; /* DMA transfer requested */
|
|
+ unsigned char dma_waits; /* wait states in DMAs */
|
|
+#ifdef CHECK_DMA_USE
|
|
+ unsigned char dmas_pending; /* no of unfinished DMAs */
|
|
+ hptime_t when_started;
|
|
+ hptime_t when_reset;
|
|
+ hptime_t when_stopped;
|
|
+#endif
|
|
+#endif
|
|
+ /* signalling the end of a transfer */
|
|
+ void (*complete)(struct sdhci_host *);
|
|
+};
|
|
+
|
|
+#define SDHCI_HOST_PRIV(host) \
|
|
+ (struct sdhci_bcm2708_priv *)((struct sdhci_host *)(host)+1)
|
|
+
|
|
+
|
|
+
|
|
+#ifdef CHECK_DMA_REG_USE
|
|
+static void check_dma_reg_use(struct sdhci_host *host, int reg)
|
|
+{
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+ if (host_priv->dma_wanted && reg != SDHCI_INT_STATUS) {
|
|
+ printk(KERN_INFO"%s: accessing register 0x%x during DMA\n",
|
|
+ mmc_hostname(host->mmc), reg);
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+
|
|
+static void sdhci_clear_set_irqgen(struct sdhci_host *host, u32 clear, u32 set)
|
|
+{
|
|
+ u32 ier;
|
|
+
|
|
+ ier = sdhci_bcm2708_raw_readl(host, SDHCI_SIGNAL_ENABLE);
|
|
+ ier &= ~clear;
|
|
+ ier |= set;
|
|
+ /* change which requests generate IRQs - makes no difference to
|
|
+ the content of SDHCI_INT_STATUS, or the need to acknowledge IRQs */
|
|
+ sdhci_bcm2708_raw_writel(host, ier, SDHCI_SIGNAL_ENABLE);
|
|
+}
|
|
+
|
|
+static void sdhci_signal_irqs(struct sdhci_host *host, u32 irqs)
|
|
+{
|
|
+ sdhci_clear_set_irqgen(host, 0, irqs);
|
|
+}
|
|
+
|
|
+static void sdhci_unsignal_irqs(struct sdhci_host *host, u32 irqs)
|
|
+{
|
|
+ sdhci_clear_set_irqgen(host, irqs, 0);
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+static void schci_bcm2708_cb_read(struct sdhci_bcm2708_priv *host,
|
|
+ int ix,
|
|
+ dma_addr_t dma_addr, unsigned len,
|
|
+ int /*bool*/ is_last)
|
|
+{
|
|
+ struct bcm2708_dma_cb *cb = &host->cb_base[ix];
|
|
+ unsigned char dmawaits = host->dma_waits;
|
|
+
|
|
+ cb->info = BCM2708_DMA_PER_MAP(BCM2708_DMA_DREQ_EMMC) |
|
|
+ BCM2708_DMA_WAITS(dmawaits) |
|
|
+ BCM2708_DMA_S_DREQ |
|
|
+ BCM2708_DMA_D_WIDTH |
|
|
+ BCM2708_DMA_D_INC;
|
|
+ cb->src = DMA_SDHCI_BUFFER; /* DATA register DMA address */
|
|
+ cb->dst = dma_addr;
|
|
+ cb->length = len;
|
|
+ cb->stride = 0;
|
|
+
|
|
+ if (is_last) {
|
|
+ cb->info |= BCM2708_DMA_INT_EN |
|
|
+ BCM2708_DMA_WAIT_RESP;
|
|
+ cb->next = 0;
|
|
+ } else
|
|
+ cb->next = host->cb_handle +
|
|
+ (ix+1)*sizeof(struct bcm2708_dma_cb);
|
|
+
|
|
+ cb->pad[0] = 0;
|
|
+ cb->pad[1] = 0;
|
|
+}
|
|
+
|
|
+static void schci_bcm2708_cb_write(struct sdhci_bcm2708_priv *host,
|
|
+ int ix,
|
|
+ dma_addr_t dma_addr, unsigned len,
|
|
+ int /*bool*/ is_last)
|
|
+{
|
|
+ struct bcm2708_dma_cb *cb = &host->cb_base[ix];
|
|
+ unsigned char dmawaits = host->dma_waits;
|
|
+
|
|
+ /* We can make arbitrarily large writes as long as we specify DREQ to
|
|
+ pace the delivery of bytes to the Arasan hardware */
|
|
+ cb->info = BCM2708_DMA_PER_MAP(BCM2708_DMA_DREQ_EMMC) |
|
|
+ BCM2708_DMA_WAITS(dmawaits) |
|
|
+ BCM2708_DMA_D_DREQ |
|
|
+ BCM2708_DMA_S_WIDTH |
|
|
+ BCM2708_DMA_S_INC;
|
|
+ cb->src = dma_addr;
|
|
+ cb->dst = DMA_SDHCI_BUFFER; /* DATA register DMA address */
|
|
+ cb->length = len;
|
|
+ cb->stride = 0;
|
|
+
|
|
+ if (is_last) {
|
|
+ cb->info |= BCM2708_DMA_INT_EN |
|
|
+ BCM2708_DMA_WAIT_RESP;
|
|
+ cb->next = 0;
|
|
+ } else
|
|
+ cb->next = host->cb_handle +
|
|
+ (ix+1)*sizeof(struct bcm2708_dma_cb);
|
|
+
|
|
+ cb->pad[0] = 0;
|
|
+ cb->pad[1] = 0;
|
|
+}
|
|
+
|
|
+
|
|
+static void schci_bcm2708_dma_go(struct sdhci_host *host)
|
|
+{
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+ void __iomem *dma_chan_base = host_priv->dma_chan_base;
|
|
+
|
|
+ BUG_ON(host_priv->dma_wanted);
|
|
+#ifdef CHECK_DMA_USE
|
|
+ if (host_priv->dma_wanted)
|
|
+ printk(KERN_ERR "%s: DMA already in progress - "
|
|
+ "now %"FMT_HPT", last started %lu "
|
|
+ "reset %lu stopped %lu\n",
|
|
+ mmc_hostname(host->mmc),
|
|
+ hptime(), since_ns(host_priv->when_started),
|
|
+ since_ns(host_priv->when_reset),
|
|
+ since_ns(host_priv->when_stopped));
|
|
+ else if (host_priv->dmas_pending > 0)
|
|
+ printk(KERN_INFO "%s: note - new DMA when %d reset DMAs "
|
|
+ "already in progress - "
|
|
+ "now %"FMT_HPT", started %lu reset %lu stopped %lu\n",
|
|
+ mmc_hostname(host->mmc),
|
|
+ host_priv->dmas_pending,
|
|
+ hptime(), since_ns(host_priv->when_started),
|
|
+ since_ns(host_priv->when_reset),
|
|
+ since_ns(host_priv->when_stopped));
|
|
+ host_priv->dmas_pending += 1;
|
|
+ host_priv->when_started = hptime();
|
|
+#endif
|
|
+ host_priv->dma_wanted = 1;
|
|
+ DBG("PDMA go - base %p handle %08X\n", dma_chan_base,
|
|
+ host_priv->cb_handle);
|
|
+ bcm_dma_start(dma_chan_base, host_priv->cb_handle);
|
|
+}
|
|
+
|
|
+
|
|
+static void
|
|
+sdhci_platdma_read(struct sdhci_host *host, dma_addr_t dma_addr, size_t len)
|
|
+{
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+
|
|
+ DBG("PDMA to read %d bytes\n", len);
|
|
+ host_priv->sg_done += len;
|
|
+ schci_bcm2708_cb_read(host_priv, 0, dma_addr, len, 1/*TRUE*/);
|
|
+ schci_bcm2708_dma_go(host);
|
|
+}
|
|
+
|
|
+
|
|
+static void
|
|
+sdhci_platdma_write(struct sdhci_host *host, dma_addr_t dma_addr, size_t len)
|
|
+{
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+
|
|
+ DBG("PDMA to write %d bytes\n", len);
|
|
+ //BUG_ON(0 != (len & 0x1ff));
|
|
+
|
|
+ host_priv->sg_done += len;
|
|
+ schci_bcm2708_cb_write(host_priv, 0, dma_addr, len, 1/*TRUE*/);
|
|
+ schci_bcm2708_dma_go(host);
|
|
+}
|
|
+
|
|
+/*! space is available to receive into or data is available to write
|
|
+ Platform DMA exported function
|
|
+*/
|
|
+void
|
|
+sdhci_bcm2708_platdma_avail(struct sdhci_host *host, unsigned int *ref_intmask,
|
|
+ void(*completion_callback)(struct sdhci_host *host))
|
|
+{
|
|
+ struct mmc_data *data = host->data;
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+ int sg_ix;
|
|
+ size_t bytes;
|
|
+ dma_addr_t addr;
|
|
+
|
|
+ BUG_ON(NULL == data);
|
|
+ BUG_ON(0 == data->blksz);
|
|
+
|
|
+ host_priv->complete = completion_callback;
|
|
+
|
|
+ sg_ix = host_priv->sg_ix;
|
|
+ BUG_ON(sg_ix >= data->sg_len);
|
|
+
|
|
+ /* we can DMA blocks larger than blksz - it may hang the DMA
|
|
+ channel but we are its only user */
|
|
+ bytes = sg_dma_len(&data->sg[sg_ix]) - host_priv->sg_done;
|
|
+ addr = sg_dma_address(&data->sg[sg_ix]) + host_priv->sg_done;
|
|
+
|
|
+ if (bytes > 0) {
|
|
+ /* We're going to poll for read/write available state until
|
|
+ we finish this DMA
|
|
+ */
|
|
+
|
|
+ if (data->flags & MMC_DATA_READ) {
|
|
+ if (*ref_intmask & SDHCI_INT_DATA_AVAIL) {
|
|
+ sdhci_unsignal_irqs(host, SDHCI_INT_DATA_AVAIL |
|
|
+ SDHCI_INT_SPACE_AVAIL);
|
|
+ sdhci_platdma_read(host, addr, bytes);
|
|
+ }
|
|
+ } else {
|
|
+ if (*ref_intmask & SDHCI_INT_SPACE_AVAIL) {
|
|
+ sdhci_unsignal_irqs(host, SDHCI_INT_DATA_AVAIL |
|
|
+ SDHCI_INT_SPACE_AVAIL);
|
|
+ sdhci_platdma_write(host, addr, bytes);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ /* else:
|
|
+ we have run out of bytes that need transferring (e.g. we may be in
|
|
+ the middle of the last DMA transfer), or
|
|
+ it is also possible that we've been called when another IRQ is
|
|
+ signalled, even though we've turned off signalling of our own IRQ */
|
|
+
|
|
+ *ref_intmask &= ~SDHCI_INT_DATA_END;
|
|
+ /* don't let the main sdhci driver act on this .. we'll deal with it
|
|
+ when we respond to the DMA - if one is currently in progress */
|
|
+}
|
|
+
|
|
+/* is it possible to DMA the given mmc_data structure?
|
|
+ Platform DMA exported function
|
|
+*/
|
|
+int /*bool*/
|
|
+sdhci_bcm2708_platdma_dmaable(struct sdhci_host *host, struct mmc_data *data)
|
|
+{
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+ int ok = bcm_sg_suitable_for_dma(data->sg, data->sg_len);
|
|
+
|
|
+ if (!ok)
|
|
+ DBG("Reverting to PIO - bad cache alignment\n");
|
|
+
|
|
+ else {
|
|
+ host_priv->sg_ix = 0; /* first SG index */
|
|
+ host_priv->sg_done = 0; /* no bytes done */
|
|
+ }
|
|
+
|
|
+ return ok;
|
|
+}
|
|
+
|
|
+#include <mach/arm_control.h> //GRAYG
|
|
+/*! the current SD transaction has been abandoned
|
|
+ We need to tidy up if we were in the middle of a DMA
|
|
+ Platform DMA exported function
|
|
+*/
|
|
+void
|
|
+sdhci_bcm2708_platdma_reset(struct sdhci_host *host, struct mmc_data *data)
|
|
+{
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+// unsigned long flags;
|
|
+
|
|
+ BUG_ON(NULL == host);
|
|
+
|
|
+// spin_lock_irqsave(&host->lock, flags);
|
|
+
|
|
+ if (host_priv->dma_wanted) {
|
|
+ if (NULL == data) {
|
|
+ printk(KERN_ERR "%s: ongoing DMA reset - no data!\n",
|
|
+ mmc_hostname(host->mmc));
|
|
+ BUG_ON(NULL == data);
|
|
+ } else {
|
|
+ struct scatterlist *sg;
|
|
+ int sg_len;
|
|
+ int sg_todo;
|
|
+ int rc;
|
|
+ unsigned long cs;
|
|
+
|
|
+ sg = data->sg;
|
|
+ sg_len = data->sg_len;
|
|
+ sg_todo = sg_dma_len(&sg[host_priv->sg_ix]);
|
|
+
|
|
+ cs = readl(host_priv->dma_chan_base + BCM2708_DMA_CS);
|
|
+
|
|
+ if (!(BCM2708_DMA_ACTIVE & cs))
|
|
+ {
|
|
+ if (extra_messages)
|
|
+ printk(KERN_INFO "%s: missed completion of "
|
|
+ "cmd %d DMA (%d/%d [%d]/[%d]) - "
|
|
+ "ignoring it\n",
|
|
+ mmc_hostname(host->mmc),
|
|
+ host->last_cmdop,
|
|
+ host_priv->sg_done, sg_todo,
|
|
+ host_priv->sg_ix+1, sg_len);
|
|
+ }
|
|
+ else
|
|
+				printk(KERN_INFO "%s: resetting ongoing cmd %d "
|
|
+ "DMA before %d/%d [%d]/[%d] complete\n",
|
|
+ mmc_hostname(host->mmc),
|
|
+ host->last_cmdop,
|
|
+ host_priv->sg_done, sg_todo,
|
|
+ host_priv->sg_ix+1, sg_len);
|
|
+#ifdef CHECK_DMA_USE
|
|
+ printk(KERN_INFO "%s: now %"FMT_HPT" started %lu "
|
|
+ "last reset %lu last stopped %lu\n",
|
|
+ mmc_hostname(host->mmc),
|
|
+ hptime(), since_ns(host_priv->when_started),
|
|
+ since_ns(host_priv->when_reset),
|
|
+ since_ns(host_priv->when_stopped));
|
|
+ { unsigned long info, debug;
|
|
+ void __iomem *base;
|
|
+ unsigned long pend0, pend1, pend2;
|
|
+
|
|
+ base = host_priv->dma_chan_base;
|
|
+ cs = readl(base + BCM2708_DMA_CS);
|
|
+ info = readl(base + BCM2708_DMA_INFO);
|
|
+ debug = readl(base + BCM2708_DMA_DEBUG);
|
|
+ printk(KERN_INFO "%s: DMA%d CS=%08lX TI=%08lX "
|
|
+ "DEBUG=%08lX\n",
|
|
+ mmc_hostname(host->mmc),
|
|
+ host_priv->dma_chan,
|
|
+ cs, info, debug);
|
|
+ pend0 = readl(__io_address(ARM_IRQ_PEND0));
|
|
+ pend1 = readl(__io_address(ARM_IRQ_PEND1));
|
|
+ pend2 = readl(__io_address(ARM_IRQ_PEND2));
|
|
+
|
|
+ printk(KERN_INFO "%s: PEND0=%08lX "
|
|
+ "PEND1=%08lX PEND2=%08lX\n",
|
|
+ mmc_hostname(host->mmc),
|
|
+ pend0, pend1, pend2);
|
|
+
|
|
+ //gintsts = readl(__io_address(GINTSTS));
|
|
+ //gintmsk = readl(__io_address(GINTMSK));
|
|
+ //printk(KERN_INFO "%s: USB GINTSTS=%08lX"
|
|
+ // "GINTMSK=%08lX\n",
|
|
+ // mmc_hostname(host->mmc), gintsts, gintmsk);
|
|
+ }
|
|
+#endif
|
|
+ rc = bcm_dma_abort(host_priv->dma_chan_base);
|
|
+ BUG_ON(rc != 0);
|
|
+ }
|
|
+ host_priv->dma_wanted = 0;
|
|
+#ifdef CHECK_DMA_USE
|
|
+ host_priv->when_reset = hptime();
|
|
+#endif
|
|
+ }
|
|
+
|
|
+// spin_unlock_irqrestore(&host->lock, flags);
|
|
+}
|
|
+
|
|
+
|
|
+static void sdhci_bcm2708_dma_complete_irq(struct sdhci_host *host,
|
|
+ u32 dma_cs)
|
|
+{
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+ struct mmc_data *data;
|
|
+ struct scatterlist *sg;
|
|
+ int sg_len;
|
|
+ int sg_ix;
|
|
+ int sg_todo;
|
|
+// unsigned long flags;
|
|
+
|
|
+ BUG_ON(NULL == host);
|
|
+
|
|
+// spin_lock_irqsave(&host->lock, flags);
|
|
+ data = host->data;
|
|
+
|
|
+#ifdef CHECK_DMA_USE
|
|
+ if (host_priv->dmas_pending <= 0)
|
|
+ DBG("on completion no DMA in progress - "
|
|
+ "now %"FMT_HPT" started %lu reset %lu stopped %lu\n",
|
|
+ hptime(), since_ns(host_priv->when_started),
|
|
+ since_ns(host_priv->when_reset),
|
|
+ since_ns(host_priv->when_stopped));
|
|
+ else if (host_priv->dmas_pending > 1)
|
|
+ DBG("still %d DMA in progress after completion - "
|
|
+ "now %"FMT_HPT" started %lu reset %lu stopped %lu\n",
|
|
+ host_priv->dmas_pending - 1,
|
|
+ hptime(), since_ns(host_priv->when_started),
|
|
+ since_ns(host_priv->when_reset),
|
|
+ since_ns(host_priv->when_stopped));
|
|
+ BUG_ON(host_priv->dmas_pending <= 0);
|
|
+ host_priv->dmas_pending -= 1;
|
|
+ host_priv->when_stopped = hptime();
|
|
+#endif
|
|
+ host_priv->dma_wanted = 0;
|
|
+
|
|
+ if (NULL == data) {
|
|
+ DBG("PDMA unused completion - status 0x%X\n", dma_cs);
|
|
+// spin_unlock_irqrestore(&host->lock, flags);
|
|
+ return;
|
|
+ }
|
|
+ sg = data->sg;
|
|
+ sg_len = data->sg_len;
|
|
+ sg_todo = sg_dma_len(&sg[host_priv->sg_ix]);
|
|
+
|
|
+ DBG("PDMA complete %d/%d [%d]/[%d]..\n",
|
|
+ host_priv->sg_done, sg_todo,
|
|
+ host_priv->sg_ix+1, sg_len);
|
|
+
|
|
+ BUG_ON(host_priv->sg_done > sg_todo);
|
|
+
|
|
+ if (host_priv->sg_done >= sg_todo) {
|
|
+ host_priv->sg_ix++;
|
|
+ host_priv->sg_done = 0;
|
|
+ }
|
|
+
|
|
+ sg_ix = host_priv->sg_ix;
|
|
+ if (sg_ix < sg_len) {
|
|
+ u32 irq_mask;
|
|
+ /* Set off next DMA if we've got the capacity */
|
|
+
|
|
+ if (data->flags & MMC_DATA_READ)
|
|
+ irq_mask = SDHCI_INT_DATA_AVAIL;
|
|
+ else
|
|
+ irq_mask = SDHCI_INT_SPACE_AVAIL;
|
|
+
|
|
+ /* We have to use the interrupt status register on the BCM2708
|
|
+ rather than the SDHCI_PRESENT_STATE register because latency
|
|
+ in the glue logic means that the information retrieved from
|
|
+ the latter is not always up-to-date w.r.t the DMA engine -
|
|
+ it may not indicate that a read or a write is ready yet */
|
|
+ if (sdhci_bcm2708_raw_readl(host, SDHCI_INT_STATUS) &
|
|
+ irq_mask) {
|
|
+ size_t bytes = sg_dma_len(&sg[sg_ix]) -
|
|
+ host_priv->sg_done;
|
|
+ dma_addr_t addr = sg_dma_address(&data->sg[sg_ix]) +
|
|
+ host_priv->sg_done;
|
|
+
|
|
+ /* acknowledge interrupt */
|
|
+ sdhci_bcm2708_raw_writel(host, irq_mask,
|
|
+ SDHCI_INT_STATUS);
|
|
+
|
|
+ BUG_ON(0 == bytes);
|
|
+
|
|
+ if (data->flags & MMC_DATA_READ)
|
|
+ sdhci_platdma_read(host, addr, bytes);
|
|
+ else
|
|
+ sdhci_platdma_write(host, addr, bytes);
|
|
+ } else {
|
|
+ DBG("PDMA - wait avail\n");
|
|
+ /* may generate an IRQ if already present */
|
|
+ sdhci_signal_irqs(host, SDHCI_INT_DATA_AVAIL |
|
|
+ SDHCI_INT_SPACE_AVAIL);
|
|
+ }
|
|
+ } else {
|
|
+ if (sync_after_dma) {
|
|
+ /* On the Arasan controller the stop command (which will be
|
|
+ scheduled after this completes) does not seem to work
|
|
+ properly if we allow it to be issued when we are
|
|
+ transferring data to/from the SD card.
|
|
+ We get CRC and DEND errors unless we wait for
|
|
+ the SD controller to finish reading/writing to the card. */
|
|
+ u32 state_mask;
|
|
+ int timeout=30*5000;
|
|
+
|
|
+ DBG("PDMA over - sync card\n");
|
|
+ if (data->flags & MMC_DATA_READ)
|
|
+ state_mask = SDHCI_DOING_READ;
|
|
+ else
|
|
+ state_mask = SDHCI_DOING_WRITE;
|
|
+
|
|
+ while (0 != (sdhci_bcm2708_raw_readl(host, SDHCI_PRESENT_STATE)
|
|
+ & state_mask) && --timeout > 0)
|
|
+ {
|
|
+ udelay(1);
|
|
+ continue;
|
|
+ }
|
|
+ if (timeout <= 0)
|
|
+ printk(KERN_ERR"%s: final %s to SD card still "
|
|
+ "running\n",
|
|
+ mmc_hostname(host->mmc),
|
|
+ data->flags & MMC_DATA_READ? "read": "write");
|
|
+ }
|
|
+ if (host_priv->complete) {
|
|
+ (*host_priv->complete)(host);
|
|
+ DBG("PDMA %s complete\n",
|
|
+ data->flags & MMC_DATA_READ?"read":"write");
|
|
+ sdhci_signal_irqs(host, SDHCI_INT_DATA_AVAIL |
|
|
+ SDHCI_INT_SPACE_AVAIL);
|
|
+ }
|
|
+ }
|
|
+// spin_unlock_irqrestore(&host->lock, flags);
|
|
+}
|
|
+
|
|
+static irqreturn_t sdhci_bcm2708_dma_irq(int irq, void *dev_id)
|
|
+{
|
|
+ irqreturn_t result = IRQ_NONE;
|
|
+ struct sdhci_host *host = dev_id;
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+ u32 dma_cs; /* control and status register */
|
|
+
|
|
+ BUG_ON(NULL == dev_id);
|
|
+ BUG_ON(NULL == host_priv->dma_chan_base);
|
|
+
|
|
+ sdhci_spin_lock(host);
|
|
+
|
|
+ dma_cs = readl(host_priv->dma_chan_base + BCM2708_DMA_CS);
|
|
+
|
|
+ if (dma_cs & BCM2708_DMA_ERR) {
|
|
+ unsigned long debug;
|
|
+ debug = readl(host_priv->dma_chan_base +
|
|
+ BCM2708_DMA_DEBUG);
|
|
+ printk(KERN_ERR "%s: DMA error - CS %lX DEBUG %lX\n",
|
|
+ mmc_hostname(host->mmc), (unsigned long)dma_cs,
|
|
+ (unsigned long)debug);
|
|
+ /* reset error */
|
|
+ writel(debug, host_priv->dma_chan_base +
|
|
+ BCM2708_DMA_DEBUG);
|
|
+ }
|
|
+ if (dma_cs & BCM2708_DMA_INT) {
|
|
+ /* acknowledge interrupt */
|
|
+ writel(BCM2708_DMA_INT,
|
|
+ host_priv->dma_chan_base + BCM2708_DMA_CS);
|
|
+
|
|
+ dsb(); /* ARM data synchronization (push) operation */
|
|
+
|
|
+ if (!host_priv->dma_wanted) {
|
|
+ /* ignore this interrupt - it was reset */
|
|
+ if (extra_messages)
|
|
+ printk(KERN_INFO "%s: DMA IRQ %X ignored - "
|
|
+ "results were reset\n",
|
|
+ mmc_hostname(host->mmc), dma_cs);
|
|
+#ifdef CHECK_DMA_USE
|
|
+ printk(KERN_INFO "%s: now %"FMT_HPT
|
|
+ " started %lu reset %lu stopped %lu\n",
|
|
+ mmc_hostname(host->mmc), hptime(),
|
|
+ since_ns(host_priv->when_started),
|
|
+ since_ns(host_priv->when_reset),
|
|
+ since_ns(host_priv->when_stopped));
|
|
+ host_priv->dmas_pending--;
|
|
+#endif
|
|
+ } else
|
|
+ sdhci_bcm2708_dma_complete_irq(host, dma_cs);
|
|
+
|
|
+ result = IRQ_HANDLED;
|
|
+ }
|
|
+ sdhci_spin_unlock(host);
|
|
+
|
|
+ return result;
|
|
+}
|
|
+#endif /* CONFIG_MMC_SDHCI_BCM2708_DMA */
|
|
+
|
|
+
|
|
+/***************************************************************************** \
|
|
+ * *
|
|
+ * Device Attributes *
|
|
+ * *
|
|
+\*****************************************************************************/
|
|
+
|
|
+
|
|
+/**
|
|
+ * Show the DMA-using status
|
|
+ */
|
|
+static ssize_t attr_dma_show(struct device *_dev,
|
|
+ struct device_attribute *attr, char *buf)
|
|
+{
|
|
+ struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
|
|
+
|
|
+ if (host) {
|
|
+ int use_dma = (host->flags & SDHCI_USE_PLATDMA? 1:0);
|
|
+ return sprintf(buf, "%d\n", use_dma);
|
|
+ } else
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Set the DMA-using status
|
|
+ */
|
|
+static ssize_t attr_dma_store(struct device *_dev,
|
|
+ struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
|
|
+
|
|
+ if (host) {
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+ int on = simple_strtol(buf, NULL, 0);
|
|
+ if (on) {
|
|
+ host->flags |= SDHCI_USE_PLATDMA;
|
|
+ sdhci_bcm2708_writel(host, 1, REG_EXRDFIFO_EN);
|
|
+ printk(KERN_INFO "%s: DMA enabled\n",
|
|
+ mmc_hostname(host->mmc));
|
|
+ } else {
|
|
+ host->flags &= ~(SDHCI_USE_PLATDMA | SDHCI_REQ_USE_DMA);
|
|
+ sdhci_bcm2708_writel(host, 0, REG_EXRDFIFO_EN);
|
|
+ printk(KERN_INFO "%s: DMA disabled\n",
|
|
+ mmc_hostname(host->mmc));
|
|
+ }
|
|
+#endif
|
|
+ return count;
|
|
+ } else
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+static DEVICE_ATTR(use_dma, S_IRUGO | S_IWUGO, attr_dma_show, attr_dma_store);
|
|
+
|
|
+
|
|
+/**
|
|
+ * Show the DMA wait states used
|
|
+ */
|
|
+static ssize_t attr_dmawait_show(struct device *_dev,
|
|
+ struct device_attribute *attr, char *buf)
|
|
+{
|
|
+ struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
|
|
+
|
|
+ if (host) {
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+ int dmawait = host_priv->dma_waits;
|
|
+ return sprintf(buf, "%d\n", dmawait);
|
|
+ } else
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Set the DMA wait state used
|
|
+ */
|
|
+static ssize_t attr_dmawait_store(struct device *_dev,
|
|
+ struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
|
|
+
|
|
+ if (host) {
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+ int dma_waits = simple_strtol(buf, NULL, 0);
|
|
+ if (dma_waits >= 0 && dma_waits < 32)
|
|
+ host_priv->dma_waits = dma_waits;
|
|
+ else
|
|
+ printk(KERN_ERR "%s: illegal dma_waits value - %d",
|
|
+ mmc_hostname(host->mmc), dma_waits);
|
|
+#endif
|
|
+ return count;
|
|
+ } else
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+static DEVICE_ATTR(dma_wait, S_IRUGO | S_IWUGO,
|
|
+ attr_dmawait_show, attr_dmawait_store);
|
|
+
|
|
+
|
|
+/**
|
|
+ * Show the DMA-using status
|
|
+ */
|
|
+static ssize_t attr_status_show(struct device *_dev,
|
|
+ struct device_attribute *attr, char *buf)
|
|
+{
|
|
+ struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
|
|
+
|
|
+ if (host) {
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+ return sprintf(buf,
|
|
+ "present: yes\n"
|
|
+ "power: %s\n"
|
|
+ "clock: %u Hz\n"
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+ "dma: %s (%d waits)\n",
|
|
+#else
|
|
+ "dma: unconfigured\n",
|
|
+#endif
|
|
+ "always on",
|
|
+ host->clock
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+ , (host->flags & SDHCI_USE_PLATDMA)? "on": "off"
|
|
+ , host_priv->dma_waits
|
|
+#endif
|
|
+ );
|
|
+ } else
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+static DEVICE_ATTR(status, S_IRUGO, attr_status_show, NULL);
|
|
+
|
|
+/***************************************************************************** \
|
|
+ * *
|
|
+ * Power Management *
|
|
+ * *
|
|
+\*****************************************************************************/
|
|
+
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+static int sdhci_bcm2708_suspend(struct platform_device *dev, pm_message_t state)
|
|
+{
|
|
+ struct sdhci_host *host = (struct sdhci_host *)
|
|
+ platform_get_drvdata(dev);
|
|
+ int ret = 0;
|
|
+
|
|
+ if (host->mmc) {
|
|
+ ret = mmc_suspend_host(host->mmc);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int sdhci_bcm2708_resume(struct platform_device *dev)
|
|
+{
|
|
+ struct sdhci_host *host = (struct sdhci_host *)
|
|
+ platform_get_drvdata(dev);
|
|
+ int ret = 0;
|
|
+
|
|
+ if (host->mmc) {
|
|
+ ret = mmc_resume_host(host->mmc);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+#endif
|
|
+
|
|
+
|
|
+/*****************************************************************************\
|
|
+ * *
|
|
+ * Device quirk functions. Implemented as local ops because the flags *
|
|
+ * field is out of space with newer kernels. This implementation can be *
|
|
+ * back ported to older kernels as well. *
|
|
+\****************************************************************************/
|
|
+static unsigned int sdhci_bcm2708_quirk_extra_ints(struct sdhci_host *host)
|
|
+{
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static unsigned int sdhci_bcm2708_quirk_spurious_crc_acmd51(struct sdhci_host *host)
|
|
+{
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static unsigned int sdhci_bcm2708_quirk_voltage_broken(struct sdhci_host *host)
|
|
+{
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static unsigned int sdhci_bcm2708_uhs_broken(struct sdhci_host *host)
|
|
+{
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static unsigned int sdhci_bcm2708_missing_status(struct sdhci_host *host)
|
|
+{
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/***************************************************************************** \
|
|
+ * *
|
|
+ * Device ops *
|
|
+ * *
|
|
+\*****************************************************************************/
|
|
+
|
|
+static struct sdhci_ops sdhci_bcm2708_ops = {
|
|
+#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
|
|
+ .read_l = sdhci_bcm2708_readl,
|
|
+ .read_w = sdhci_bcm2708_readw,
|
|
+ .read_b = sdhci_bcm2708_readb,
|
|
+ .write_l = sdhci_bcm2708_writel,
|
|
+ .write_w = sdhci_bcm2708_writew,
|
|
+ .write_b = sdhci_bcm2708_writeb,
|
|
+#else
|
|
+#error The BCM2708 SDHCI driver needs CONFIG_MMC_SDHCI_IO_ACCESSORS to be set
|
|
+#endif
|
|
+ .get_max_clock = sdhci_bcm2708_get_max_clock,
|
|
+
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+ // Platform DMA operations
|
|
+ .pdma_able = sdhci_bcm2708_platdma_dmaable,
|
|
+ .pdma_avail = sdhci_bcm2708_platdma_avail,
|
|
+ .pdma_reset = sdhci_bcm2708_platdma_reset,
|
|
+#endif
|
|
+ .extra_ints = sdhci_bcm2708_quirk_extra_ints,
|
|
+ .voltage_broken = sdhci_bcm2708_quirk_voltage_broken,
|
|
+ .uhs_broken = sdhci_bcm2708_uhs_broken,
|
|
+};
|
|
+
|
|
+/*****************************************************************************\
|
|
+ * *
|
|
+ * Device probing/removal *
|
|
+ * *
|
|
+\*****************************************************************************/
|
|
+
|
|
+static int sdhci_bcm2708_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct sdhci_host *host;
|
|
+ struct resource *iomem;
|
|
+ struct sdhci_bcm2708_priv *host_priv;
|
|
+ int ret;
|
|
+
|
|
+ BUG_ON(pdev == NULL);
|
|
+
|
|
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
+ if (!iomem) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ if (resource_size(iomem) != 0x100)
|
|
+ dev_err(&pdev->dev, "Invalid iomem size. You may "
|
|
+ "experience problems.\n");
|
|
+
|
|
+ if (pdev->dev.parent)
|
|
+ host = sdhci_alloc_host(pdev->dev.parent,
|
|
+ sizeof(struct sdhci_bcm2708_priv));
|
|
+ else
|
|
+ host = sdhci_alloc_host(&pdev->dev,
|
|
+ sizeof(struct sdhci_bcm2708_priv));
|
|
+
|
|
+ if (IS_ERR(host)) {
|
|
+ ret = PTR_ERR(host);
|
|
+ goto err;
|
|
+ }
|
|
+ if (missing_status) {
|
|
+ sdhci_bcm2708_ops.missing_status = sdhci_bcm2708_missing_status;
|
|
+ }
|
|
+
|
|
+ if( spurious_crc_acmd51 ) {
|
|
+ sdhci_bcm2708_ops.spurious_crc_acmd51 = sdhci_bcm2708_quirk_spurious_crc_acmd51;
|
|
+ }
|
|
+
|
|
+
|
|
+ printk("sdhci: %s low-latency mode\n",enable_llm?"Enable":"Disable");
|
|
+
|
|
+ host->hw_name = "BCM2708_Arasan";
|
|
+ host->ops = &sdhci_bcm2708_ops;
|
|
+ host->irq = platform_get_irq(pdev, 0);
|
|
+ host->second_irq = 0;
|
|
+
|
|
+ host->quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
|
|
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
|
|
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
|
|
+ SDHCI_QUIRK_MISSING_CAPS |
|
|
+ SDHCI_QUIRK_NO_HISPD_BIT |
|
|
+ (sync_after_dma ? 0:SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12);
|
|
+
|
|
+
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+ host->flags = SDHCI_USE_PLATDMA;
|
|
+#endif
|
|
+
|
|
+ if (!request_mem_region(iomem->start, resource_size(iomem),
|
|
+ mmc_hostname(host->mmc))) {
|
|
+ dev_err(&pdev->dev, "cannot request region\n");
|
|
+ ret = -EBUSY;
|
|
+ goto err_request;
|
|
+ }
|
|
+
|
|
+ host->ioaddr = ioremap(iomem->start, resource_size(iomem));
|
|
+ if (!host->ioaddr) {
|
|
+ dev_err(&pdev->dev, "failed to remap registers\n");
|
|
+ ret = -ENOMEM;
|
|
+ goto err_remap;
|
|
+ }
|
|
+
|
|
+ host_priv = SDHCI_HOST_PRIV(host);
|
|
+
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+ host_priv->dma_wanted = 0;
|
|
+#ifdef CHECK_DMA_USE
|
|
+ host_priv->dmas_pending = 0;
|
|
+ host_priv->when_started = 0;
|
|
+ host_priv->when_reset = 0;
|
|
+ host_priv->when_stopped = 0;
|
|
+#endif
|
|
+ host_priv->sg_ix = 0;
|
|
+ host_priv->sg_done = 0;
|
|
+ host_priv->complete = NULL;
|
|
+ host_priv->dma_waits = SDHCI_BCM_DMA_WAITS;
|
|
+
|
|
+ host_priv->cb_base = dma_alloc_writecombine(&pdev->dev, SZ_4K,
|
|
+ &host_priv->cb_handle,
|
|
+ GFP_KERNEL);
|
|
+ if (!host_priv->cb_base) {
|
|
+ dev_err(&pdev->dev, "cannot allocate DMA CBs\n");
|
|
+ ret = -ENOMEM;
|
|
+ goto err_alloc_cb;
|
|
+ }
|
|
+
|
|
+ ret = bcm_dma_chan_alloc(BCM_DMA_FEATURE_FAST,
|
|
+ &host_priv->dma_chan_base,
|
|
+ &host_priv->dma_irq);
|
|
+ if (ret < 0) {
|
|
+ dev_err(&pdev->dev, "couldn't allocate a DMA channel\n");
|
|
+ goto err_add_dma;
|
|
+ }
|
|
+ host_priv->dma_chan = ret;
|
|
+
|
|
+ ret = request_irq(host_priv->dma_irq, sdhci_bcm2708_dma_irq,0,//IRQF_SHARED,
|
|
+ DRIVER_NAME " (dma)", host);
|
|
+ if (ret) {
|
|
+ dev_err(&pdev->dev, "cannot set DMA IRQ\n");
|
|
+ goto err_add_dma_irq;
|
|
+ }
|
|
+ host->second_irq = host_priv->dma_irq;
|
|
+ DBG("DMA CBs %p handle %08X DMA%d %p DMA IRQ %d\n",
|
|
+ host_priv->cb_base, (unsigned)host_priv->cb_handle,
|
|
+ host_priv->dma_chan, host_priv->dma_chan_base,
|
|
+ host_priv->dma_irq);
|
|
+
|
|
+ if (allow_highspeed)
|
|
+ host->mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
|
|
+
|
|
+ /* single block writes cause data loss with some SD cards! */
|
|
+ host->mmc->caps2 |= MMC_CAP2_FORCE_MULTIBLOCK;
|
|
+#endif
|
|
+
|
|
+ ret = sdhci_add_host(host);
|
|
+ if (ret)
|
|
+ goto err_add_host;
|
|
+
|
|
+ platform_set_drvdata(pdev, host);
|
|
+ ret = device_create_file(&pdev->dev, &dev_attr_use_dma);
|
|
+ ret = device_create_file(&pdev->dev, &dev_attr_dma_wait);
|
|
+ ret = device_create_file(&pdev->dev, &dev_attr_status);
|
|
+
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+ /* enable extension fifo for paced DMA transfers */
|
|
+ sdhci_bcm2708_writel(host, 1, REG_EXRDFIFO_EN);
|
|
+ sdhci_bcm2708_writel(host, 4, REG_EXRDFIFO_CFG);
|
|
+#endif
|
|
+
|
|
+ printk(KERN_INFO "%s: BCM2708 SDHC host at 0x%08llx DMA %d IRQ %d\n",
|
|
+ mmc_hostname(host->mmc), (unsigned long long)iomem->start,
|
|
+ host_priv->dma_chan, host_priv->dma_irq);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_add_host:
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+ free_irq(host_priv->dma_irq, host);
|
|
+err_add_dma_irq:
|
|
+ bcm_dma_chan_free(host_priv->dma_chan);
|
|
+err_add_dma:
|
|
+ dma_free_writecombine(&pdev->dev, SZ_4K, host_priv->cb_base,
|
|
+ host_priv->cb_handle);
|
|
+err_alloc_cb:
|
|
+#endif
|
|
+ iounmap(host->ioaddr);
|
|
+err_remap:
|
|
+ release_mem_region(iomem->start, resource_size(iomem));
|
|
+err_request:
|
|
+ sdhci_free_host(host);
|
|
+err:
|
|
+ dev_err(&pdev->dev, "probe failed, err %d\n", ret);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int sdhci_bcm2708_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct sdhci_host *host = platform_get_drvdata(pdev);
|
|
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
+ struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
|
|
+ int dead;
|
|
+ u32 scratch;
|
|
+
|
|
+ dead = 0;
|
|
+ scratch = sdhci_bcm2708_readl(host, SDHCI_INT_STATUS);
|
|
+ if (scratch == (u32)-1)
|
|
+ dead = 1;
|
|
+
|
|
+ device_remove_file(&pdev->dev, &dev_attr_status);
|
|
+ device_remove_file(&pdev->dev, &dev_attr_dma_wait);
|
|
+ device_remove_file(&pdev->dev, &dev_attr_use_dma);
|
|
+
|
|
+#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
|
|
+ free_irq(host_priv->dma_irq, host);
|
|
+ dma_free_writecombine(&pdev->dev, SZ_4K, host_priv->cb_base,
|
|
+ host_priv->cb_handle);
|
|
+#endif
|
|
+ sdhci_remove_host(host, dead);
|
|
+ iounmap(host->ioaddr);
|
|
+ release_mem_region(iomem->start, resource_size(iomem));
|
|
+ sdhci_free_host(host);
|
|
+ platform_set_drvdata(pdev, NULL);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct platform_driver sdhci_bcm2708_driver = {
|
|
+ .driver = {
|
|
+ .name = DRIVER_NAME,
|
|
+ .owner = THIS_MODULE,
|
|
+ },
|
|
+ .probe = sdhci_bcm2708_probe,
|
|
+ .remove = sdhci_bcm2708_remove,
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+ .suspend = sdhci_bcm2708_suspend,
|
|
+ .resume = sdhci_bcm2708_resume,
|
|
+#endif
|
|
+
|
|
+};
|
|
+
|
|
+/*****************************************************************************\
|
|
+ * *
|
|
+ * Driver init/exit *
|
|
+ * *
|
|
+\*****************************************************************************/
|
|
+
|
|
+static int __init sdhci_drv_init(void)
|
|
+{
|
|
+ return platform_driver_register(&sdhci_bcm2708_driver);
|
|
+}
|
|
+
|
|
+static void __exit sdhci_drv_exit(void)
|
|
+{
|
|
+ platform_driver_unregister(&sdhci_bcm2708_driver);
|
|
+}
|
|
+
|
|
+module_init(sdhci_drv_init);
|
|
+module_exit(sdhci_drv_exit);
|
|
+
|
|
+module_param(allow_highspeed, bool, 0444);
|
|
+module_param(emmc_clock_freq, int, 0444);
|
|
+module_param(sync_after_dma, bool, 0444);
|
|
+module_param(missing_status, bool, 0444);
|
|
+module_param(spurious_crc_acmd51, bool, 0444);
|
|
+module_param(enable_llm, bool, 0444);
|
|
+module_param(cycle_delay, int, 0444);
|
|
+module_param(extra_messages, bool, 0444);
|
|
+
|
|
+MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
|
|
+MODULE_AUTHOR("Broadcom <info@broadcom.com>");
|
|
+MODULE_LICENSE("GPL v2");
|
|
+MODULE_ALIAS("platform:"DRIVER_NAME);
|
|
+
|
|
+MODULE_PARM_DESC(allow_highspeed, "Allow high speed transfer modes");
|
|
+MODULE_PARM_DESC(emmc_clock_freq, "Specify the speed of the emmc clock");
|
|
+MODULE_PARM_DESC(sync_after_dma, "Block in driver until DMA completes");
|
|
+MODULE_PARM_DESC(missing_status, "Use the missing status quirk");
|
|
+MODULE_PARM_DESC(spurious_crc_acmd51, "Use the spurious crc quirk for reading SCR (ACMD51)");
|
|
+MODULE_PARM_DESC(enable_llm, "Enable low-latency mode");
|
|
+MODULE_PARM_DESC(extra_messages, "Enable more sdcard warning messages");
|
|
+
|
|
+
|
|
--- a/drivers/mmc/host/sdhci.c
|
|
+++ b/drivers/mmc/host/sdhci.c
|
|
@@ -28,6 +28,7 @@
|
|
#include <linux/mmc/mmc.h>
|
|
#include <linux/mmc/host.h>
|
|
#include <linux/mmc/card.h>
|
|
+#include <linux/mmc/sd.h>
|
|
#include <linux/mmc/slot-gpio.h>
|
|
|
|
#include "sdhci.h"
|
|
@@ -123,6 +124,91 @@ static void sdhci_dumpregs(struct sdhci_
|
|
* Low level functions *
|
|
* *
|
|
\*****************************************************************************/
|
|
+extern bool enable_llm;
|
|
+static int sdhci_locked=0;
|
|
+void sdhci_spin_lock(struct sdhci_host *host)
|
|
+{
|
|
+ spin_lock(&host->lock);
|
|
+#ifdef CONFIG_PREEMPT
|
|
+ if(enable_llm)
|
|
+ {
|
|
+ disable_irq_nosync(host->irq);
|
|
+ if(host->second_irq)
|
|
+ disable_irq_nosync(host->second_irq);
|
|
+ local_irq_enable();
|
|
+ }
|
|
+#endif
|
|
+}
|
|
+
|
|
+void sdhci_spin_unlock(struct sdhci_host *host)
|
|
+{
|
|
+#ifdef CONFIG_PREEMPT
|
|
+ if(enable_llm)
|
|
+ {
|
|
+ local_irq_disable();
|
|
+ if(host->second_irq)
|
|
+ enable_irq(host->second_irq);
|
|
+ enable_irq(host->irq);
|
|
+ }
|
|
+#endif
|
|
+ spin_unlock(&host->lock);
|
|
+}
|
|
+
|
|
+void sdhci_spin_lock_irqsave(struct sdhci_host *host,unsigned long *flags)
|
|
+{
|
|
+#ifdef CONFIG_PREEMPT
|
|
+ if(enable_llm)
|
|
+ {
|
|
+ while(sdhci_locked)
|
|
+ {
|
|
+ preempt_schedule();
|
|
+ }
|
|
+ spin_lock_irqsave(&host->lock,*flags);
|
|
+ disable_irq(host->irq);
|
|
+ if(host->second_irq)
|
|
+ disable_irq(host->second_irq);
|
|
+ local_irq_enable();
|
|
+ }
|
|
+ else
|
|
+#endif
|
|
+ spin_lock_irqsave(&host->lock,*flags);
|
|
+}
|
|
+
|
|
+void sdhci_spin_unlock_irqrestore(struct sdhci_host *host,unsigned long flags)
|
|
+{
|
|
+#ifdef CONFIG_PREEMPT
|
|
+ if(enable_llm)
|
|
+ {
|
|
+ local_irq_disable();
|
|
+ if(host->second_irq)
|
|
+ enable_irq(host->second_irq);
|
|
+ enable_irq(host->irq);
|
|
+ }
|
|
+#endif
|
|
+ spin_unlock_irqrestore(&host->lock,flags);
|
|
+}
|
|
+
|
|
+static void sdhci_spin_enable_schedule(struct sdhci_host *host)
|
|
+{
|
|
+#ifdef CONFIG_PREEMPT
|
|
+ if(enable_llm)
|
|
+ {
|
|
+ sdhci_locked = 1;
|
|
+ preempt_enable();
|
|
+ }
|
|
+#endif
|
|
+}
|
|
+
|
|
+static void sdhci_spin_disable_schedule(struct sdhci_host *host)
|
|
+{
|
|
+#ifdef CONFIG_PREEMPT
|
|
+ if(enable_llm)
|
|
+ {
|
|
+ preempt_disable();
|
|
+ sdhci_locked = 0;
|
|
+ }
|
|
+#endif
|
|
+}
|
|
|
|
static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
|
|
{
|
|
@@ -288,7 +374,7 @@ static void sdhci_led_control(struct led
|
|
struct sdhci_host *host = container_of(led, struct sdhci_host, led);
|
|
unsigned long flags;
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
|
|
if (host->runtime_suspended)
|
|
goto out;
|
|
@@ -298,7 +384,7 @@ static void sdhci_led_control(struct led
|
|
else
|
|
sdhci_activate_led(host);
|
|
out:
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
}
|
|
#endif
|
|
|
|
@@ -315,7 +401,7 @@ static void sdhci_read_block_pio(struct
|
|
u32 uninitialized_var(scratch);
|
|
u8 *buf;
|
|
|
|
- DBG("PIO reading\n");
|
|
+ DBG("PIO reading %db\n", host->data->blksz);
|
|
|
|
blksize = host->data->blksz;
|
|
chunk = 0;
|
|
@@ -360,7 +446,7 @@ static void sdhci_write_block_pio(struct
|
|
u32 scratch;
|
|
u8 *buf;
|
|
|
|
- DBG("PIO writing\n");
|
|
+ DBG("PIO writing %db\n", host->data->blksz);
|
|
|
|
blksize = host->data->blksz;
|
|
chunk = 0;
|
|
@@ -399,19 +485,28 @@ static void sdhci_write_block_pio(struct
|
|
local_irq_restore(flags);
|
|
}
|
|
|
|
-static void sdhci_transfer_pio(struct sdhci_host *host)
|
|
+static void sdhci_transfer_pio(struct sdhci_host *host, u32 intstate)
|
|
{
|
|
u32 mask;
|
|
+ u32 state = 0;
|
|
+ u32 intmask;
|
|
+ int available;
|
|
|
|
BUG_ON(!host->data);
|
|
|
|
if (host->blocks == 0)
|
|
return;
|
|
|
|
- if (host->data->flags & MMC_DATA_READ)
|
|
+ if (host->data->flags & MMC_DATA_READ) {
|
|
mask = SDHCI_DATA_AVAILABLE;
|
|
- else
|
|
+ intmask = SDHCI_INT_DATA_AVAIL;
|
|
+ } else {
|
|
mask = SDHCI_SPACE_AVAILABLE;
|
|
+ intmask = SDHCI_INT_SPACE_AVAIL;
|
|
+ }
|
|
+
|
|
+	/* initially we can see whether we can proceed using intstate */
|
|
+ available = (intstate & intmask);
|
|
|
|
/*
|
|
* Some controllers (JMicron JMB38x) mess up the buffer bits
|
|
@@ -422,7 +517,7 @@ static void sdhci_transfer_pio(struct sd
|
|
(host->data->blocks == 1))
|
|
mask = ~0;
|
|
|
|
- while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
|
|
+ while (available) {
|
|
if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
|
|
udelay(100);
|
|
|
|
@@ -434,9 +529,11 @@ static void sdhci_transfer_pio(struct sd
|
|
host->blocks--;
|
|
if (host->blocks == 0)
|
|
break;
|
|
+ state = sdhci_readl(host, SDHCI_PRESENT_STATE);
|
|
+ available = state & mask;
|
|
}
|
|
|
|
- DBG("PIO transfer complete.\n");
|
|
+ DBG("PIO transfer complete - %d blocks left.\n", host->blocks);
|
|
}
|
|
|
|
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
|
|
@@ -709,7 +806,9 @@ static void sdhci_set_transfer_irqs(stru
|
|
u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
|
|
u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
|
|
|
|
- if (host->flags & SDHCI_REQ_USE_DMA)
|
|
+ /* platform DMA will begin on receipt of PIO irqs */
|
|
+ if ((host->flags & SDHCI_REQ_USE_DMA) &&
|
|
+ !(host->flags & SDHCI_USE_PLATDMA))
|
|
sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
|
|
else
|
|
sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
|
|
@@ -741,44 +840,25 @@ static void sdhci_prepare_data(struct sd
|
|
host->data_early = 0;
|
|
host->data->bytes_xfered = 0;
|
|
|
|
- if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
|
|
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA | SDHCI_USE_PLATDMA))
|
|
host->flags |= SDHCI_REQ_USE_DMA;
|
|
|
|
/*
|
|
* FIXME: This doesn't account for merging when mapping the
|
|
* scatterlist.
|
|
*/
|
|
- if (host->flags & SDHCI_REQ_USE_DMA) {
|
|
- int broken, i;
|
|
- struct scatterlist *sg;
|
|
-
|
|
- broken = 0;
|
|
- if (host->flags & SDHCI_USE_ADMA) {
|
|
- if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
|
|
- broken = 1;
|
|
- } else {
|
|
- if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
|
|
- broken = 1;
|
|
- }
|
|
-
|
|
- if (unlikely(broken)) {
|
|
- for_each_sg(data->sg, sg, data->sg_len, i) {
|
|
- if (sg->length & 0x3) {
|
|
- DBG("Reverting to PIO because of "
|
|
- "transfer size (%d)\n",
|
|
- sg->length);
|
|
- host->flags &= ~SDHCI_REQ_USE_DMA;
|
|
- break;
|
|
- }
|
|
- }
|
|
- }
|
|
- }
|
|
|
|
/*
|
|
* The assumption here being that alignment is the same after
|
|
* translation to device address space.
|
|
*/
|
|
- if (host->flags & SDHCI_REQ_USE_DMA) {
|
|
+ if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_PLATDMA)) ==
|
|
+ (SDHCI_REQ_USE_DMA | SDHCI_USE_PLATDMA)) {
|
|
+
|
|
+ if (! sdhci_platdma_dmaable(host, data))
|
|
+ host->flags &= ~SDHCI_REQ_USE_DMA;
|
|
+
|
|
+ } else if (host->flags & SDHCI_REQ_USE_DMA) {
|
|
int broken, i;
|
|
struct scatterlist *sg;
|
|
|
|
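In the platform-DMA case sdhci_prepare_data() now asks the pdma_able hook (via sdhci_platdma_dmaable(), defined later in sdhci.h) whether this particular scatterlist suits the external DMA; if not, SDHCI_REQ_USE_DMA is cleared and the request quietly falls back to PIO. A hedged example of such a hook, with an invented policy and driver name:

    /* Hypothetical pdma_able callback for an imaginary "foo" platform.
     * The policy (reject unaligned or very small segments) is an
     * assumption for illustration, not any real driver's behaviour. */
    static int foo_sdhci_pdma_able(struct sdhci_host *host, struct mmc_data *data)
    {
            struct scatterlist *sg;
            int i;

            for_each_sg(data->sg, sg, data->sg_len, i) {
                    if ((sg->offset & 3) || (sg->length & 3))
                            return 0;       /* unaligned: let PIO handle it */
                    if (sg->length < 64)
                            return 0;       /* too small to be worth DMA setup */
            }
            return 1;
    }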
@@ -837,7 +917,8 @@ static void sdhci_prepare_data(struct sd
|
|
*/
|
|
WARN_ON(1);
|
|
host->flags &= ~SDHCI_REQ_USE_DMA;
|
|
- } else {
|
|
+ } else
|
|
+ if (!(host->flags & SDHCI_USE_PLATDMA)) {
|
|
WARN_ON(sg_cnt != 1);
|
|
sdhci_writel(host, sg_dma_address(data->sg),
|
|
SDHCI_DMA_ADDRESS);
|
|
@@ -853,11 +934,13 @@ static void sdhci_prepare_data(struct sd
|
|
if (host->version >= SDHCI_SPEC_200) {
|
|
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
|
|
ctrl &= ~SDHCI_CTRL_DMA_MASK;
|
|
+ if (! (host->flags & SDHCI_USE_PLATDMA)) {
|
|
if ((host->flags & SDHCI_REQ_USE_DMA) &&
|
|
(host->flags & SDHCI_USE_ADMA))
|
|
ctrl |= SDHCI_CTRL_ADMA32;
|
|
else
|
|
ctrl |= SDHCI_CTRL_SDMA;
|
|
+ }
|
|
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
|
|
}
|
|
|
|
@@ -909,7 +992,8 @@ static void sdhci_set_transfer_mode(stru
|
|
|
|
if (data->flags & MMC_DATA_READ)
|
|
mode |= SDHCI_TRNS_READ;
|
|
- if (host->flags & SDHCI_REQ_USE_DMA)
|
|
+ if ((host->flags & SDHCI_REQ_USE_DMA) &&
|
|
+ !(host->flags & SDHCI_USE_PLATDMA))
|
|
mode |= SDHCI_TRNS_DMA;
|
|
|
|
sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
|
|
@@ -925,13 +1009,16 @@ static void sdhci_finish_data(struct sdh
|
|
host->data = NULL;
|
|
|
|
if (host->flags & SDHCI_REQ_USE_DMA) {
|
|
- if (host->flags & SDHCI_USE_ADMA)
|
|
- sdhci_adma_table_post(host, data);
|
|
- else {
|
|
+ /* we may have to abandon an ongoing platform DMA */
|
|
+ if (host->flags & SDHCI_USE_PLATDMA)
|
|
+ sdhci_platdma_reset(host, data);
|
|
+
|
|
+ if (host->flags & (SDHCI_USE_PLATDMA | SDHCI_USE_SDMA)) {
|
|
dma_unmap_sg(mmc_dev(host->mmc), data->sg,
|
|
data->sg_len, (data->flags & MMC_DATA_READ) ?
|
|
DMA_FROM_DEVICE : DMA_TO_DEVICE);
|
|
- }
|
|
+ } else if (host->flags & SDHCI_USE_ADMA)
|
|
+ sdhci_adma_table_post(host, data);
|
|
}
|
|
|
|
/*
|
|
@@ -984,6 +1071,12 @@ static void sdhci_send_command(struct sd
|
|
if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
|
|
mask |= SDHCI_DATA_INHIBIT;
|
|
|
|
+ if(host->ops->missing_status && (cmd->opcode == MMC_SEND_STATUS)) {
|
|
+ timeout = 5000; // Really obscenely large delay to send the status, due to a bug in the controller
|
|
+ // which might cause the STATUS command to get stuck while a data operation is in progress
|
|
+ mask |= SDHCI_DATA_INHIBIT;
|
|
+ }
|
|
+
|
|
/* We shouldn't wait for data inihibit for stop commands, even
|
|
though they might use busy signaling */
|
|
if (host->mrq->data && (cmd == host->mrq->data->stop))
|
|
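The missing_status hook marks controllers on which SEND_STATUS (CMD13) can wedge while a data transfer is active: when the hook is present (only the pointer is tested here, it is never called in this file), the command path also waits on DATA_INHIBIT and the busy-wait is stretched to roughly five seconds. A host with the bug would opt in with something as small as (driver name hypothetical):

    static unsigned int foo_sdhci_missing_status(struct sdhci_host *host)
    {
            return 1;       /* CMD13 can get stuck while data is moving */
    }
    /* wired up in the driver's sdhci_ops as  .missing_status = foo_sdhci_missing_status, */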
@@ -999,12 +1092,20 @@ static void sdhci_send_command(struct sd
|
|
return;
|
|
}
|
|
timeout--;
|
|
+ sdhci_spin_enable_schedule(host);
|
|
mdelay(1);
|
|
+ sdhci_spin_disable_schedule(host);
|
|
}
|
|
+ DBG("send cmd %d - wait 0x%X irq 0x%x\n", cmd->opcode, mask,
|
|
+ sdhci_readl(host, SDHCI_INT_STATUS));
|
|
|
|
mod_timer(&host->timer, jiffies + 10 * HZ);
|
|
|
|
host->cmd = cmd;
|
|
+ if (host->last_cmdop == MMC_APP_CMD)
|
|
+ host->last_cmdop = -cmd->opcode;
|
|
+ else
|
|
+ host->last_cmdop = cmd->opcode;
|
|
|
|
sdhci_prepare_data(host, cmd);
|
|
|
|
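host->last_cmdop records the opcode of the command just issued, with one twist: if the previous command was MMC_APP_CMD (CMD55) the opcode is stored negated, so an application command ACMDn can be told apart from the ordinary CMDn that shares its number. The data-error handling further down relies on this to recognise ACMD51 (SD_APP_SEND_SCR). In other words:

    /* Sketch of the encoding used by host->last_cmdop:
     *   CMD51  issued after anything else -> last_cmdop ==  51
     *   ACMD51 issued right after CMD55   -> last_cmdop == -51
     * so interrupt handlers can test  last_cmdop == -SD_APP_SEND_SCR. */
    static inline bool last_was_acmd(struct sdhci_host *host, u32 opcode)
    {
            return host->last_cmdop == -(int)opcode;
    }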
@@ -1220,7 +1321,9 @@ clock_set:
|
|
return;
|
|
}
|
|
timeout--;
|
|
+ sdhci_spin_enable_schedule(host);
|
|
mdelay(1);
|
|
+ sdhci_spin_disable_schedule(host);
|
|
}
|
|
|
|
clk |= SDHCI_CLOCK_CARD_EN;
|
|
@@ -1316,7 +1419,7 @@ static void sdhci_request(struct mmc_hos
|
|
|
|
sdhci_runtime_pm_get(host);
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
|
|
WARN_ON(host->mrq != NULL);
|
|
|
|
@@ -1374,9 +1477,9 @@ static void sdhci_request(struct mmc_hos
|
|
mmc->card->type == MMC_TYPE_MMC ?
|
|
MMC_SEND_TUNING_BLOCK_HS200 :
|
|
MMC_SEND_TUNING_BLOCK;
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
sdhci_execute_tuning(mmc, tuning_opcode);
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
|
|
/* Restore original mmc_request structure */
|
|
host->mrq = mrq;
|
|
@@ -1390,7 +1493,7 @@ static void sdhci_request(struct mmc_hos
|
|
}
|
|
|
|
mmiowb();
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
}
|
|
|
|
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
|
|
@@ -1399,10 +1502,10 @@ static void sdhci_do_set_ios(struct sdhc
|
|
int vdd_bit = -1;
|
|
u8 ctrl;
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
|
|
if (host->flags & SDHCI_DEVICE_DEAD) {
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
|
|
mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
|
|
return;
|
|
@@ -1429,9 +1532,9 @@ static void sdhci_do_set_ios(struct sdhc
|
|
vdd_bit = sdhci_set_power(host, ios->vdd);
|
|
|
|
if (host->vmmc && vdd_bit != -1) {
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
}
|
|
|
|
if (host->ops->platform_send_init_74_clocks)
|
|
@@ -1470,7 +1573,7 @@ static void sdhci_do_set_ios(struct sdhc
|
|
else
|
|
ctrl &= ~SDHCI_CTRL_HISPD;
|
|
|
|
- if (host->version >= SDHCI_SPEC_300) {
|
|
+ if (host->version >= SDHCI_SPEC_300 && !(host->ops->uhs_broken)) {
|
|
u16 clk, ctrl_2;
|
|
|
|
/* In case of UHS-I modes, set High Speed Enable */
|
|
@@ -1569,7 +1672,7 @@ static void sdhci_do_set_ios(struct sdhc
|
|
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
|
|
|
|
mmiowb();
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
}
|
|
|
|
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|
@@ -1617,7 +1720,7 @@ static int sdhci_check_ro(struct sdhci_h
|
|
unsigned long flags;
|
|
int is_readonly;
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
|
|
if (host->flags & SDHCI_DEVICE_DEAD)
|
|
is_readonly = 0;
|
|
@@ -1627,7 +1730,7 @@ static int sdhci_check_ro(struct sdhci_h
|
|
is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
|
|
& SDHCI_WRITE_PROTECT);
|
|
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
|
|
/* This quirk needs to be replaced by a callback-function later */
|
|
return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
|
|
@@ -1700,9 +1803,9 @@ static void sdhci_enable_sdio_irq(struct
|
|
struct sdhci_host *host = mmc_priv(mmc);
|
|
unsigned long flags;
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
sdhci_enable_sdio_irq_nolock(host, enable);
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
}
|
|
|
|
static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
|
|
@@ -2046,7 +2149,7 @@ static void sdhci_card_event(struct mmc_
|
|
struct sdhci_host *host = mmc_priv(mmc);
|
|
unsigned long flags;
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
|
|
/* Check host->mrq first in case we are runtime suspended */
|
|
if (host->mrq &&
|
|
@@ -2063,7 +2166,7 @@ static void sdhci_card_event(struct mmc_
|
|
tasklet_schedule(&host->finish_tasklet);
|
|
}
|
|
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
}
|
|
|
|
static const struct mmc_host_ops sdhci_ops = {
|
|
@@ -2102,14 +2205,14 @@ static void sdhci_tasklet_finish(unsigne
|
|
|
|
host = (struct sdhci_host*)param;
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
|
|
/*
|
|
* If this tasklet gets rescheduled while running, it will
|
|
* be run again afterwards but without any active request.
|
|
*/
|
|
if (!host->mrq) {
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
return;
|
|
}
|
|
|
|
@@ -2147,7 +2250,7 @@ static void sdhci_tasklet_finish(unsigne
|
|
#endif
|
|
|
|
mmiowb();
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
|
|
mmc_request_done(host->mmc, mrq);
|
|
sdhci_runtime_pm_put(host);
|
|
@@ -2160,11 +2263,11 @@ static void sdhci_timeout_timer(unsigned
|
|
|
|
host = (struct sdhci_host*)data;
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
|
|
if (host->mrq) {
|
|
pr_err("%s: Timeout waiting for hardware "
|
|
- "interrupt.\n", mmc_hostname(host->mmc));
|
|
+ "interrupt - cmd%d.\n", mmc_hostname(host->mmc), host->last_cmdop);
|
|
sdhci_dumpregs(host);
|
|
|
|
if (host->data) {
|
|
@@ -2181,7 +2284,7 @@ static void sdhci_timeout_timer(unsigned
|
|
}
|
|
|
|
mmiowb();
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
}
|
|
|
|
static void sdhci_tuning_timer(unsigned long data)
|
|
@@ -2191,11 +2294,11 @@ static void sdhci_tuning_timer(unsigned
|
|
|
|
host = (struct sdhci_host *)data;
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
|
|
host->flags |= SDHCI_NEEDS_RETUNING;
|
|
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
}
|
|
|
|
/*****************************************************************************\
|
|
@@ -2209,10 +2312,13 @@ static void sdhci_cmd_irq(struct sdhci_h
|
|
BUG_ON(intmask == 0);
|
|
|
|
if (!host->cmd) {
|
|
+ if (!(host->ops->extra_ints)) {
|
|
pr_err("%s: Got command interrupt 0x%08x even "
|
|
"though no command operation was in progress.\n",
|
|
mmc_hostname(host->mmc), (unsigned)intmask);
|
|
sdhci_dumpregs(host);
|
|
+ } else
|
|
+ DBG("cmd irq 0x%08x cmd complete\n", (unsigned)intmask);
|
|
return;
|
|
}
|
|
|
|
@@ -2282,6 +2388,19 @@ static void sdhci_show_adma_error(struct
|
|
static void sdhci_show_adma_error(struct sdhci_host *host) { }
|
|
#endif
|
|
|
|
+static void sdhci_data_end(struct sdhci_host *host)
|
|
+{
|
|
+ if (host->cmd) {
|
|
+ /*
|
|
+ * Data managed to finish before the
|
|
+ * command completed. Make sure we do
|
|
+ * things in the proper order.
|
|
+ */
|
|
+ host->data_early = 1;
|
|
+ } else
|
|
+ sdhci_finish_data(host);
|
|
+}
|
|
+
|
|
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
|
|
{
|
|
u32 command;
|
|
@@ -2311,23 +2430,39 @@ static void sdhci_data_irq(struct sdhci_
|
|
}
|
|
}
|
|
|
|
+ if (!(host->ops->extra_ints)) {
|
|
pr_err("%s: Got data interrupt 0x%08x even "
|
|
"though no data operation was in progress.\n",
|
|
mmc_hostname(host->mmc), (unsigned)intmask);
|
|
sdhci_dumpregs(host);
|
|
+ } else
|
|
+ DBG("data irq 0x%08x but no data\n", (unsigned)intmask);
|
|
|
|
return;
|
|
}
|
|
|
|
if (intmask & SDHCI_INT_DATA_TIMEOUT)
|
|
host->data->error = -ETIMEDOUT;
|
|
- else if (intmask & SDHCI_INT_DATA_END_BIT)
|
|
+ else if (intmask & SDHCI_INT_DATA_END_BIT) {
|
|
+ DBG("end error in cmd %d\n", host->last_cmdop);
|
|
+ if (host->ops->spurious_crc_acmd51 &&
|
|
+ host->last_cmdop == -SD_APP_SEND_SCR) {
|
|
+ DBG("ignoring spurious data_end_bit error\n");
|
|
+ intmask = SDHCI_INT_DATA_AVAIL|SDHCI_INT_DATA_END;
|
|
+ } else
|
|
host->data->error = -EILSEQ;
|
|
- else if ((intmask & SDHCI_INT_DATA_CRC) &&
|
|
+ } else if ((intmask & SDHCI_INT_DATA_CRC) &&
|
|
SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
|
|
- != MMC_BUS_TEST_R)
|
|
+ != MMC_BUS_TEST_R) {
|
|
+ DBG("crc error in cmd %d\n", host->last_cmdop);
|
|
+ if (host->ops->spurious_crc_acmd51 &&
|
|
+ host->last_cmdop == -SD_APP_SEND_SCR) {
|
|
+ DBG("ignoring spurious data_crc_bit error\n");
|
|
+ intmask = SDHCI_INT_DATA_AVAIL|SDHCI_INT_DATA_END;
|
|
+ } else {
|
|
host->data->error = -EILSEQ;
|
|
- else if (intmask & SDHCI_INT_ADMA_ERROR) {
|
|
+ }
|
|
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
|
|
pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
|
|
sdhci_show_adma_error(host);
|
|
host->data->error = -EIO;
|
|
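A data CRC or end-bit error is swallowed when the host declares the spurious-ACMD51 quirk (again only the presence of the spurious_crc_acmd51 pointer is checked) and the last recorded command was ACMD51; intmask is then rewritten to SDHCI_INT_DATA_AVAIL|SDHCI_INT_DATA_END so the SCR read completes through the normal PIO/END path instead of being failed. The combined condition, as a sketch with a hypothetical helper name:

    /* Sketch of the test used in the two error branches above. */
    static bool ignore_acmd51_error(struct sdhci_host *host)
    {
            return host->ops->spurious_crc_acmd51 &&
                   host->last_cmdop == -SD_APP_SEND_SCR;
    }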
@@ -2335,11 +2470,18 @@ static void sdhci_data_irq(struct sdhci_
|
|
host->ops->adma_workaround(host, intmask);
|
|
}
|
|
|
|
- if (host->data->error)
|
|
+ if (host->data->error) {
|
|
+ DBG("finish request early on error %d\n", host->data->error);
|
|
sdhci_finish_data(host);
|
|
- else {
|
|
- if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
|
|
- sdhci_transfer_pio(host);
|
|
+ } else {
|
|
+ if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) {
|
|
+ if (host->flags & SDHCI_REQ_USE_DMA) {
|
|
+ /* possible only in PLATDMA mode */
|
|
+ sdhci_platdma_avail(host, &intmask,
|
|
+ &sdhci_data_end);
|
|
+ } else
|
|
+ sdhci_transfer_pio(host, intmask);
|
|
+ }
|
|
|
|
/*
|
|
* We currently don't do anything fancy with DMA
|
|
@@ -2368,18 +2510,8 @@ static void sdhci_data_irq(struct sdhci_
|
|
sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
|
|
}
|
|
|
|
- if (intmask & SDHCI_INT_DATA_END) {
|
|
- if (host->cmd) {
|
|
- /*
|
|
- * Data managed to finish before the
|
|
- * command completed. Make sure we do
|
|
- * things in the proper order.
|
|
- */
|
|
- host->data_early = 1;
|
|
- } else {
|
|
- sdhci_finish_data(host);
|
|
- }
|
|
- }
|
|
+ if (intmask & SDHCI_INT_DATA_END)
|
|
+ sdhci_data_end(host);
|
|
}
|
|
}
|
|
|
|
@@ -2390,10 +2522,10 @@ static irqreturn_t sdhci_irq(int irq, vo
|
|
u32 intmask, unexpected = 0;
|
|
int cardint = 0, max_loops = 16;
|
|
|
|
- spin_lock(&host->lock);
|
|
+ sdhci_spin_lock(host);
|
|
|
|
if (host->runtime_suspended) {
|
|
- spin_unlock(&host->lock);
|
|
+ sdhci_spin_unlock(host);
|
|
pr_warning("%s: got irq while runtime suspended\n",
|
|
mmc_hostname(host->mmc));
|
|
return IRQ_HANDLED;
|
|
@@ -2435,6 +2567,22 @@ again:
|
|
tasklet_schedule(&host->card_tasklet);
|
|
}
|
|
|
|
+ if (intmask & SDHCI_INT_ERROR_MASK & ~SDHCI_INT_ERROR)
|
|
+ DBG("controller reports error 0x%x -"
|
|
+ "%s%s%s%s%s%s%s%s%s%s",
|
|
+ intmask,
|
|
+ intmask & SDHCI_INT_TIMEOUT? " timeout": "",
|
|
+ intmask & SDHCI_INT_CRC ? " crc": "",
|
|
+ intmask & SDHCI_INT_END_BIT? " endbit": "",
|
|
+ intmask & SDHCI_INT_INDEX? " index": "",
|
|
+ intmask & SDHCI_INT_DATA_TIMEOUT? " data_timeout": "",
|
|
+ intmask & SDHCI_INT_DATA_CRC? " data_crc": "",
|
|
+ intmask & SDHCI_INT_DATA_END_BIT? " data_endbit": "",
|
|
+ intmask & SDHCI_INT_BUS_POWER? " buspower": "",
|
|
+ intmask & SDHCI_INT_ACMD12ERR? " acmd12": "",
|
|
+ intmask & SDHCI_INT_ADMA_ERROR? " adma": ""
|
|
+ );
|
|
+
|
|
if (intmask & SDHCI_INT_CMD_MASK) {
|
|
sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
|
|
SDHCI_INT_STATUS);
|
|
@@ -2449,7 +2597,13 @@ again:
|
|
|
|
intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
|
|
|
|
- intmask &= ~SDHCI_INT_ERROR;
|
|
+ if (intmask & SDHCI_INT_ERROR_MASK) {
|
|
+ /* collect any uncovered errors */
|
|
+ sdhci_writel(host, intmask & SDHCI_INT_ERROR_MASK,
|
|
+ SDHCI_INT_STATUS);
|
|
+ }
|
|
+
|
|
+ intmask &= ~SDHCI_INT_ERROR_MASK;
|
|
|
|
if (intmask & SDHCI_INT_BUS_POWER) {
|
|
pr_err("%s: Card is consuming too much power!\n",
|
|
@@ -2475,7 +2629,7 @@ again:
|
|
if (intmask && --max_loops)
|
|
goto again;
|
|
out:
|
|
- spin_unlock(&host->lock);
|
|
+ sdhci_spin_unlock(host);
|
|
|
|
if (unexpected) {
|
|
pr_err("%s: Unexpected interrupt 0x%08x.\n",
|
|
@@ -2569,7 +2723,8 @@ int sdhci_resume_host(struct sdhci_host
|
|
{
|
|
int ret;
|
|
|
|
- if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
|
|
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA |
|
|
+ SDHCI_USE_PLATDMA)) {
|
|
if (host->ops->enable_dma)
|
|
host->ops->enable_dma(host);
|
|
}
|
|
@@ -2636,15 +2791,15 @@ int sdhci_runtime_suspend_host(struct sd
|
|
host->flags &= ~SDHCI_NEEDS_RETUNING;
|
|
}
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
|
|
synchronize_irq(host->irq);
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
host->runtime_suspended = true;
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
|
|
return ret;
|
|
}
|
|
@@ -2670,16 +2825,16 @@ int sdhci_runtime_resume_host(struct sdh
|
|
sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
|
|
if ((host_flags & SDHCI_PV_ENABLED) &&
|
|
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
sdhci_enable_preset_value(host, true);
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
}
|
|
|
|
/* Set the re-tuning expiration flag */
|
|
if (host->flags & SDHCI_USING_RETUNING_TIMER)
|
|
host->flags |= SDHCI_NEEDS_RETUNING;
|
|
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
|
|
host->runtime_suspended = false;
|
|
|
|
@@ -2690,7 +2845,7 @@ int sdhci_runtime_resume_host(struct sdh
|
|
/* Enable Card Detection */
|
|
sdhci_enable_card_detection(host);
|
|
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
|
|
return ret;
|
|
}
|
|
@@ -2785,14 +2940,16 @@ int sdhci_add_host(struct sdhci_host *ho
|
|
host->flags &= ~SDHCI_USE_ADMA;
|
|
}
|
|
|
|
- if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
|
|
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA |
|
|
+ SDHCI_USE_PLATDMA)) {
|
|
if (host->ops->enable_dma) {
|
|
if (host->ops->enable_dma(host)) {
|
|
pr_warning("%s: No suitable DMA "
|
|
"available. Falling back to PIO.\n",
|
|
mmc_hostname(mmc));
|
|
host->flags &=
|
|
- ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
|
|
+ ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA |
|
|
+ SDHCI_USE_PLATDMA);
|
|
}
|
|
}
|
|
}
|
|
@@ -3080,6 +3237,12 @@ int sdhci_add_host(struct sdhci_host *ho
|
|
SDHCI_MAX_CURRENT_MULTIPLIER;
|
|
}
|
|
|
|
+ if(host->ops->voltage_broken) {
|
|
+ ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
|
|
+ // Cannot support UHS modes if we are stuck at 3.3V
|
|
+ mmc->caps &= ~(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50);
|
|
+ }
|
|
+
|
|
mmc->ocr_avail = ocr_avail;
|
|
mmc->ocr_avail_sdio = ocr_avail;
|
|
if (host->ocr_avail_sdio)
|
|
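voltage_broken covers hosts whose supply is fixed at 3.3 V: the 3.2-3.4 V bits are forced into ocr_avail and every UHS capability is dropped, since the UHS speed modes need 1.8 V signalling (uhs_broken, tested earlier, similarly skips the SDHCI_SPEC_300 UHS path). As with the other new hooks, only the pointer's presence matters in this file, so opting in looks like (hypothetical driver):

    static unsigned int foo_sdhci_voltage_broken(struct sdhci_host *host)
    {
            return 1;       /* board is hard-wired to 3.3 V */
    }

    static unsigned int foo_sdhci_uhs_broken(struct sdhci_host *host)
    {
            return 1;       /* controller cannot do UHS timing */
    }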
@@ -3174,7 +3337,7 @@ int sdhci_add_host(struct sdhci_host *ho
|
|
host->tuning_timer.function = sdhci_tuning_timer;
|
|
}
|
|
|
|
- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
|
|
+ ret = request_irq(host->irq, sdhci_irq, 0,//IRQF_SHARED,
|
|
mmc_hostname(mmc), host);
|
|
if (ret) {
|
|
pr_err("%s: Failed to request IRQ %d: %d\n",
|
|
@@ -3210,6 +3373,7 @@ int sdhci_add_host(struct sdhci_host *ho
|
|
|
|
pr_info("%s: SDHCI controller on %s [%s] using %s\n",
|
|
mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
|
|
+ (host->flags & SDHCI_USE_PLATDMA) ? "platform's DMA" :
|
|
(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
|
|
(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
|
|
|
|
@@ -3237,7 +3401,7 @@ void sdhci_remove_host(struct sdhci_host
|
|
unsigned long flags;
|
|
|
|
if (dead) {
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
+ sdhci_spin_lock_irqsave(host, &flags);
|
|
|
|
host->flags |= SDHCI_DEVICE_DEAD;
|
|
|
|
@@ -3249,7 +3413,7 @@ void sdhci_remove_host(struct sdhci_host
|
|
tasklet_schedule(&host->finish_tasklet);
|
|
}
|
|
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
+ sdhci_spin_unlock_irqrestore(host, flags);
|
|
}
|
|
|
|
sdhci_disable_card_detection(host);
|
|
--- a/drivers/mmc/host/sdhci.h
|
|
+++ b/drivers/mmc/host/sdhci.h
|
|
@@ -289,6 +289,20 @@ struct sdhci_ops {
|
|
void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
|
|
void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
|
|
int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
|
|
+
|
|
+ int (*pdma_able)(struct sdhci_host *host,
|
|
+ struct mmc_data *data);
|
|
+ void (*pdma_avail)(struct sdhci_host *host,
|
|
+ unsigned int *ref_intmask,
|
|
+ void(*complete)(struct sdhci_host *));
|
|
+ void (*pdma_reset)(struct sdhci_host *host,
|
|
+ struct mmc_data *data);
|
|
+ unsigned int (*extra_ints)(struct sdhci_host *host);
|
|
+ unsigned int (*spurious_crc_acmd51)(struct sdhci_host *host);
|
|
+ unsigned int (*voltage_broken)(struct sdhci_host *host);
|
|
+ unsigned int (*uhs_broken)(struct sdhci_host *host);
|
|
+ unsigned int (*missing_status)(struct sdhci_host *host);
|
|
+
|
|
void (*hw_reset)(struct sdhci_host *host);
|
|
void (*platform_suspend)(struct sdhci_host *host);
|
|
void (*platform_resume)(struct sdhci_host *host);
|
|
@@ -399,9 +413,38 @@ extern int sdhci_resume_host(struct sdhc
|
|
extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
|
|
#endif
|
|
|
|
+static inline int /*bool*/
|
|
+sdhci_platdma_dmaable(struct sdhci_host *host, struct mmc_data *data)
|
|
+{
|
|
+ if (host->ops->pdma_able)
|
|
+ return host->ops->pdma_able(host, data);
|
|
+ else
|
|
+ return 1;
|
|
+}
|
|
+static inline void
|
|
+sdhci_platdma_avail(struct sdhci_host *host, unsigned int *ref_intmask,
|
|
+ void(*completion_callback)(struct sdhci_host *))
|
|
+{
|
|
+ if (host->ops->pdma_avail)
|
|
+ host->ops->pdma_avail(host, ref_intmask, completion_callback);
|
|
+}
|
|
+
|
|
+static inline void
|
|
+sdhci_platdma_reset(struct sdhci_host *host, struct mmc_data *data)
|
|
+{
|
|
+ if (host->ops->pdma_reset)
|
|
+ host->ops->pdma_reset(host, data);
|
|
+}
|
|
+
|
|
#ifdef CONFIG_PM_RUNTIME
|
|
extern int sdhci_runtime_suspend_host(struct sdhci_host *host);
|
|
extern int sdhci_runtime_resume_host(struct sdhci_host *host);
|
|
#endif
|
|
|
|
+extern void sdhci_spin_lock_irqsave(struct sdhci_host *host,unsigned long *flags);
|
|
+extern void sdhci_spin_unlock_irqrestore(struct sdhci_host *host,unsigned long flags);
|
|
+extern void sdhci_spin_lock(struct sdhci_host *host);
|
|
+extern void sdhci_spin_unlock(struct sdhci_host *host);
|
|
+
|
|
+
|
|
#endif /* __SDHCI_HW_H */
|
|
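The new sdhci_ops hooks and the SDHCI_USE_PLATDMA flag are only consumed by the core sdhci code; a platform driver supplies them. A hedged sketch of how such glue might be wired up - the driver name, IRQ number and DMA behaviour are all invented, and the real pdma_avail is expected to start the external DMA, strip the DATA_AVAIL/SPACE_AVAIL bits it consumed from *ref_intmask and invoke the completion callback once the transfer is done:

    /* Hypothetical platform glue; bodies are stubs for illustration. */
    static int foo_pdma_able(struct sdhci_host *host, struct mmc_data *data)
    {
            return 1;                       /* our DMA accepts any sg list */
    }

    static void foo_pdma_avail(struct sdhci_host *host, unsigned int *ref_intmask,
                               void (*complete)(struct sdhci_host *))
    {
            /* a real driver would program its DMA engine here and call
             * complete(host) from its own DMA-done interrupt */
            complete(host);
    }

    static void foo_pdma_reset(struct sdhci_host *host, struct mmc_data *data)
    {
            /* abort any DMA still in flight for this request */
    }

    static const struct sdhci_ops foo_sdhci_ops = {
            .pdma_able  = foo_pdma_able,
            .pdma_avail = foo_pdma_avail,
            .pdma_reset = foo_pdma_reset,
    };

    /* and in the hypothetical probe function:
     *      host->ops = &foo_sdhci_ops;
     *      host->flags |= SDHCI_USE_PLATDMA;
     *      host->second_irq = platform_dma_irq;
     */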
--- a/include/linux/mmc/host.h
|
|
+++ b/include/linux/mmc/host.h
|
|
@@ -281,6 +281,7 @@ struct mmc_host {
|
|
#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
|
|
MMC_CAP2_PACKED_WR)
|
|
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
|
|
+#define MMC_CAP2_FORCE_MULTIBLOCK (1 << 31) /* Always use multiblock transfers */
|
|
|
|
mmc_pm_flag_t pm_caps; /* supported pm features */
|
|
|
|
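MMC_CAP2_FORCE_MULTIBLOCK ("Always use multiblock transfers") takes bit 31 of caps2, well clear of the upstream MMC_CAP2_* values. A host driver that wants single-block commands avoided would simply set it at probe time, e.g. (sketch):

    /* in a host driver's probe, once struct mmc_host *mmc exists: */
    mmc->caps2 |= MMC_CAP2_FORCE_MULTIBLOCK;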
--- a/include/linux/mmc/sdhci.h
|
|
+++ b/include/linux/mmc/sdhci.h
|
|
@@ -97,6 +97,7 @@ struct sdhci_host {
|
|
#define SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1<<3)
|
|
|
|
int irq; /* Device IRQ */
|
|
+ int second_irq; /* Additional IRQ to disable/enable in low-latency mode */
|
|
void __iomem *ioaddr; /* Mapped address */
|
|
|
|
const struct sdhci_ops *ops; /* Low level hw interface */
|
|
@@ -128,6 +129,7 @@ struct sdhci_host {
|
|
#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
|
|
#define SDHCI_HS200_NEEDS_TUNING (1<<10) /* HS200 needs tuning */
|
|
#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
|
|
+#define SDHCI_USE_PLATDMA (1<<12) /* Host uses 3rd party DMA */
|
|
|
|
unsigned int version; /* SDHCI spec. version */
|
|
|
|
@@ -142,6 +144,7 @@ struct sdhci_host {
|
|
|
|
struct mmc_request *mrq; /* Current request */
|
|
struct mmc_command *cmd; /* Current command */
|
|
+ int last_cmdop; /* Opcode of last cmd sent */
|
|
struct mmc_data *data; /* Current data request */
|
|
unsigned int data_early:1; /* Data finished before cmd */
|
|
|