From a369af5149e6eb442b22ce89b564dd7a76e03638 Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 26 Apr 2016 19:05:01 +0200
Subject: [PATCH 072/102] mtd: backport v4.7-0day patches from Boris

Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/mtd/Kconfig | 4 +-
 drivers/mtd/cmdlinepart.c | 3 +-
 drivers/mtd/devices/m25p80.c | 44 +--
 drivers/mtd/maps/physmap_of.c | 6 +-
 drivers/mtd/mtdchar.c | 123 ++++++--
 drivers/mtd/mtdconcat.c | 2 +-
 drivers/mtd/mtdcore.c | 428 ++++++++++++++++++++++++--
 drivers/mtd/mtdcore.h | 7 +-
 drivers/mtd/mtdpart.c | 161 ++++++----
 drivers/mtd/mtdswap.c | 24 +-
 drivers/mtd/nand/Kconfig | 21 +-
 drivers/mtd/nand/Makefile | 2 +
 drivers/mtd/nand/nand_base.c | 571 +++++++++++++++++++----------------
 drivers/mtd/nand/nand_bbt.c | 34 +--
 drivers/mtd/nand/nand_bch.c | 52 ++--
 drivers/mtd/nand/nand_ecc.c | 6 +-
 drivers/mtd/nand/nand_ids.c | 4 +-
 drivers/mtd/nand/nandsim.c | 43 +--
 drivers/mtd/ofpart.c | 53 ++--
 drivers/mtd/spi-nor/Kconfig | 10 +-
 drivers/mtd/spi-nor/Makefile | 1 +
 drivers/mtd/spi-nor/mtk-quadspi.c | 485 +++++++++++++++++++++++++++++
 drivers/mtd/spi-nor/spi-nor.c | 321 +++++++++++++-------
 drivers/mtd/tests/mtd_nandecctest.c | 2 +-
 drivers/mtd/tests/oobtest.c | 49 ++-
 drivers/mtd/tests/pagetest.c | 3 +-
 include/linux/mtd/bbm.h | 1 -
 include/linux/mtd/fsmc.h | 18 --
 include/linux/mtd/inftl.h | 1 -
 include/linux/mtd/map.h | 9 +-
 include/linux/mtd/mtd.h | 80 ++++-
 include/linux/mtd/nand.h | 94 ++++--
 include/linux/mtd/nand_bch.h | 10 +-
 include/linux/mtd/nftl.h | 1 -
 include/linux/mtd/onenand.h | 2 -
 include/linux/mtd/partitions.h | 27 +-
 include/linux/mtd/sh_flctl.h | 4 +-
 include/linux/mtd/sharpsl.h | 2 +-
 include/linux/mtd/spi-nor.h | 23 +-
 include/uapi/mtd/mtd-abi.h | 2 +-
 45 files changed, 2077 insertions(+), 748 deletions(-)
 create mode 100644 drivers/mtd/spi-nor/mtk-quadspi.c

--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -131,7 +131,7 @@ config MTD_CMDLINE_PARTS
 
 config MTD_AFS_PARTS
 	tristate "ARM Firmware Suite partition parsing"
-	depends on ARM
+	depends on (ARM || ARM64)
 	---help---
 	  The ARM Firmware Suite allows the user to divide flash devices into
 	  multiple 'images'. Each such image has a header containing its name
@@ -161,7 +161,7 @@ config MTD_AR7_PARTS
 
 config MTD_BCM63XX_PARTS
 	tristate "BCM63XX CFE partitioning support"
-	depends on BCM63XX
+	depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
 	select CRC32
 	help
 	  This provides partions parsing for BCM63xx devices with CFE
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -304,7 +304,7 @@ static int mtdpart_setup_real(char *s)
  * the first one in the chain if a NULL mtd_id is passed in.
  */
 static int parse_cmdline_partitions(struct mtd_info *master,
-				    struct mtd_partition **pparts,
+				    const struct mtd_partition **pparts,
 				    struct mtd_part_parser_data *data)
 {
 	unsigned long long offset;
@@ -382,7 +382,6 @@ static int __init mtdpart_setup(char *s)
 __setup("mtdparts=", mtdpart_setup);
 
 static struct mtd_part_parser cmdline_parser = {
-	.owner = THIS_MODULE,
 	.parse_fn = parse_cmdline_partitions,
 	.name = "cmdlinepart",
 };
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -174,22 +174,6 @@ static int m25p80_read(struct spi_nor *n
 	return 0;
 }
 
-static int m25p80_erase(struct spi_nor *nor, loff_t offset)
-{
-	struct m25p *flash = nor->priv;
-
-	dev_dbg(nor->dev, "%dKiB at 0x%08x\n",
-		flash->spi_nor.mtd.erasesize / 1024, (u32)offset);
-
-	/* Set up command buffer. */
-	flash->command[0] = nor->erase_opcode;
-	m25p_addr2cmd(nor, offset, flash->command);
-
-	spi_write(flash->spi, flash->command, m25p_cmdsz(nor));
-
-	return 0;
-}
-
 /*
  * board specific setup should have ensured the SPI clock used here
  * matches what the READ command supports, at least until this driver
@@ -197,12 +181,11 @@ static int m25p80_erase(struct spi_nor *
  */
 static int m25p_probe(struct spi_device *spi)
 {
-	struct mtd_part_parser_data ppdata;
 	struct flash_platform_data *data;
 	struct m25p *flash;
 	struct spi_nor *nor;
 	enum read_mode mode = SPI_NOR_NORMAL;
-	char *flash_name = NULL;
+	char *flash_name;
 	int ret;
 
 	data = dev_get_platdata(&spi->dev);
@@ -216,12 +199,11 @@ static int m25p_probe(struct spi_device
 	/* install the hooks */
 	nor->read = m25p80_read;
 	nor->write = m25p80_write;
-	nor->erase = m25p80_erase;
 	nor->write_reg = m25p80_write_reg;
 	nor->read_reg = m25p80_read_reg;
 
 	nor->dev = &spi->dev;
-	nor->flash_node = spi->dev.of_node;
+	spi_nor_set_flash_node(nor, spi->dev.of_node);
 	nor->priv = flash;
 
 	spi_set_drvdata(spi, flash);
@@ -242,6 +224,8 @@ static int m25p_probe(struct spi_device
 	 */
 	if (data && data->type)
 		flash_name = data->type;
+	else if (!strcmp(spi->modalias, "spi-nor"))
+		flash_name = NULL; /* auto-detect */
 	else
 		flash_name = spi->modalias;
 
@@ -249,11 +233,8 @@ static int m25p_probe(struct spi_device
 	if (ret)
 		return ret;
 
-	ppdata.of_node = spi->dev.of_node;
-
-	return mtd_device_parse_register(&nor->mtd, NULL, &ppdata,
-			data ? data->parts : NULL,
-			data ? data->nr_parts : 0);
+	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
+				   data ? data->nr_parts : 0);
 }
 
 
@@ -279,14 +260,21 @@ static int m25p_remove(struct spi_device
  */
 static const struct spi_device_id m25p_ids[] = {
 	/*
+	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
+	 * hack around the fact that the SPI core does not provide uevent
+	 * matching for .of_match_table
+	 */
+	{"spi-nor"},
+
+	/*
 	 * Entries not used in DTs that should be safe to drop after replacing
-	 * them with "nor-jedec" in platform data.
+	 * them with "spi-nor" in platform data.
 	 */
 	{"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
 
 	/*
-	 * Entries that were used in DTs without "nor-jedec" fallback and should
-	 * be kept for backward compatibility.
+	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
+	 * should be kept for backward compatibility.
 	 */
 	{"at25df321a"}, {"at25df641"}, {"at26df081a"},
 	{"mr25h256"},
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -128,7 +128,6 @@ static int of_flash_probe(struct platfor
 	int reg_tuple_size;
 	struct mtd_info **mtd_list = NULL;
 	resource_size_t res_size;
-	struct mtd_part_parser_data ppdata;
 	bool map_indirect;
 	const char *mtd_name = NULL;
 
@@ -272,8 +271,9 @@ static int of_flash_probe(struct platfor
 	if (err)
 		goto err_out;
 
-	ppdata.of_node = dp;
-	mtd_device_parse_register(info->cmtd, part_probe_types_def, &ppdata,
+	info->cmtd->dev.parent = &dev->dev;
+	mtd_set_of_node(info->cmtd, dp);
+	mtd_device_parse_register(info->cmtd, part_probe_types_def, NULL,
 			NULL, 0);
 
 	kfree(mtd_list);
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -465,38 +465,111 @@ static int mtdchar_readoob(struct file *
 }
 
 /*
- * Copies (and truncates, if necessary) data from the larger struct,
- * nand_ecclayout, to the smaller, deprecated layout struct,
- * nand_ecclayout_user. This is necessary only to support the deprecated
- * API ioctl ECCGETLAYOUT while allowing all new functionality to use
- * nand_ecclayout flexibly (i.e. the struct may change size in new
- * releases without requiring major rewrites).
+ * Copies (and truncates, if necessary) OOB layout information to the
+ * deprecated layout struct, nand_ecclayout_user. This is necessary only to
+ * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
+ * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
+ * can describe any kind of OOB layout with almost zero overhead from a
+ * memory usage point of view).
  */
-static int shrink_ecclayout(const struct nand_ecclayout *from,
-		struct nand_ecclayout_user *to)
+static int shrink_ecclayout(struct mtd_info *mtd,
+			    struct nand_ecclayout_user *to)
 {
-	int i;
+	struct mtd_oob_region oobregion;
+	int i, section = 0, ret;
 
-	if (!from || !to)
+	if (!mtd || !to)
 		return -EINVAL;
 
 	memset(to, 0, sizeof(*to));
 
-	to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
-	for (i = 0; i < to->eccbytes; i++)
-		to->eccpos[i] = from->eccpos[i];
+	to->eccbytes = 0;
+	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
+		u32 eccpos;
+
+		ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
+			break;
+		}
+
+		eccpos = oobregion.offset;
+		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
+		       eccpos < oobregion.offset + oobregion.length; i++) {
+			to->eccpos[i] = eccpos++;
+			to->eccbytes++;
+		}
+	}
 
 	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
-		if (from->oobfree[i].length == 0 &&
-				from->oobfree[i].offset == 0)
+		ret = mtd_ooblayout_free(mtd, i, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
 			break;
-		to->oobavail += from->oobfree[i].length;
-		to->oobfree[i] = from->oobfree[i];
+		}
+
+		to->oobfree[i].offset = oobregion.offset;
+		to->oobfree[i].length = oobregion.length;
+		to->oobavail += to->oobfree[i].length;
 	}
 
 	return 0;
 }
 
+static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
+{
+	struct mtd_oob_region oobregion;
+	int i, section = 0, ret;
+
+	if (!mtd || !to)
+		return -EINVAL;
+
+	memset(to, 0, sizeof(*to));
+
+	to->eccbytes = 0;
+	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
+		u32 eccpos;
+
+		ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
+			break;
+		}
+
+		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
+			return -EINVAL;
+
+		eccpos = oobregion.offset;
+		for (; eccpos < oobregion.offset + oobregion.length; i++) {
+			to->eccpos[i] = eccpos++;
+			to->eccbytes++;
+		}
+	}
+
+	for (i = 0; i < 8; i++) {
+		ret = mtd_ooblayout_free(mtd, i, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
+			break;
+		}
+
+		to->oobfree[i][0] = oobregion.offset;
+		to->oobfree[i][1] = oobregion.length;
+	}
+
+	to->useecc = MTD_NANDECC_AUTOPLACE;
+
+	return 0;
+}
+
 static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
 			       struct blkpg_ioctl_arg *arg)
 {
@@ -815,16 +888,12 @@ static int mtdchar_ioctl(struct file *fi
 	{
 		struct nand_oobinfo oi;
 
-		if (!mtd->ecclayout)
+		if (!mtd->ooblayout)
 			return -EOPNOTSUPP;
-		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
-			return -EINVAL;
 
-		oi.useecc = MTD_NANDECC_AUTOPLACE;
-		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
-		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
-		       sizeof(oi.oobfree));
-		oi.eccbytes = mtd->ecclayout->eccbytes;
+		ret = get_oobinfo(mtd, &oi);
+		if (ret)
+			return ret;
 
 		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
 			return -EFAULT;
@@ -913,14 +982,14 @@ static int mtdchar_ioctl(struct file *fi
 	{
 		struct nand_ecclayout_user *usrlay;
 
-		if (!mtd->ecclayout)
+		if (!mtd->ooblayout)
 			return -EOPNOTSUPP;
 
 		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
 		if (!usrlay)
 			return -ENOMEM;
 
-		shrink_ecclayout(mtd->ecclayout, usrlay);
+		shrink_ecclayout(mtd, usrlay);
 
 		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
 			ret = -EFAULT;
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -777,7 +777,7 @@ struct mtd_info *mtd_concat_create(struc
 
 	}
 
-	concat->mtd.ecclayout = subdev[0]->ecclayout;
+	mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);
 
 	concat->num_subdev = num_devs;
 	concat->mtd.name = name;
--- a/drivers/mtd/mtdcore.c
|
|
+++ b/drivers/mtd/mtdcore.c
|
|
@@ -32,6 +32,7 @@
|
|
#include <linux/err.h>
|
|
#include <linux/ioctl.h>
|
|
#include <linux/init.h>
|
|
+#include <linux/of.h>
|
|
#include <linux/proc_fs.h>
|
|
#include <linux/idr.h>
|
|
#include <linux/backing-dev.h>
|
|
@@ -446,6 +447,7 @@ int add_mtd_device(struct mtd_info *mtd)
|
|
mtd->dev.devt = MTD_DEVT(i);
|
|
dev_set_name(&mtd->dev, "mtd%d", i);
|
|
dev_set_drvdata(&mtd->dev, mtd);
|
|
+ of_node_get(mtd_get_of_node(mtd));
|
|
error = device_register(&mtd->dev);
|
|
if (error)
|
|
goto fail_added;
|
|
@@ -477,6 +479,7 @@ int add_mtd_device(struct mtd_info *mtd)
|
|
return 0;
|
|
|
|
fail_added:
|
|
+ of_node_put(mtd_get_of_node(mtd));
|
|
idr_remove(&mtd_idr, i);
|
|
fail_locked:
|
|
mutex_unlock(&mtd_table_mutex);
|
|
@@ -518,6 +521,7 @@ int del_mtd_device(struct mtd_info *mtd)
|
|
device_unregister(&mtd->dev);
|
|
|
|
idr_remove(&mtd_idr, mtd->index);
|
|
+ of_node_put(mtd_get_of_node(mtd));
|
|
|
|
module_put(THIS_MODULE);
|
|
ret = 0;
|
|
@@ -529,9 +533,10 @@ out_error:
|
|
}
|
|
|
|
static int mtd_add_device_partitions(struct mtd_info *mtd,
|
|
- struct mtd_partition *real_parts,
|
|
- int nbparts)
|
|
+ struct mtd_partitions *parts)
|
|
{
|
|
+ const struct mtd_partition *real_parts = parts->parts;
|
|
+ int nbparts = parts->nr_parts;
|
|
int ret;
|
|
|
|
if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
|
|
@@ -600,29 +605,29 @@ int mtd_device_parse_register(struct mtd
|
|
const struct mtd_partition *parts,
|
|
int nr_parts)
|
|
{
|
|
+ struct mtd_partitions parsed;
|
|
int ret;
|
|
- struct mtd_partition *real_parts = NULL;
|
|
|
|
mtd_set_dev_defaults(mtd);
|
|
|
|
- ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
|
|
- if (ret <= 0 && nr_parts && parts) {
|
|
- real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
|
|
- GFP_KERNEL);
|
|
- if (!real_parts)
|
|
- ret = -ENOMEM;
|
|
- else
|
|
- ret = nr_parts;
|
|
- }
|
|
- /* Didn't come up with either parsed OR fallback partitions */
|
|
- if (ret < 0) {
|
|
- pr_info("mtd: failed to find partitions; one or more parsers reports errors (%d)\n",
|
|
+ memset(&parsed, 0, sizeof(parsed));
|
|
+
|
|
+ ret = parse_mtd_partitions(mtd, types, &parsed, parser_data);
|
|
+ if ((ret < 0 || parsed.nr_parts == 0) && parts && nr_parts) {
|
|
+ /* Fall back to driver-provided partitions */
|
|
+ parsed = (struct mtd_partitions){
|
|
+ .parts = parts,
|
|
+ .nr_parts = nr_parts,
|
|
+ };
|
|
+ } else if (ret < 0) {
|
|
+ /* Didn't come up with parsed OR fallback partitions */
|
|
+ pr_info("mtd: failed to find partitions; one or more parsers reports errors (%d)\n",
|
|
ret);
|
|
/* Don't abort on errors; we can still use unpartitioned MTD */
|
|
- ret = 0;
|
|
+ memset(&parsed, 0, sizeof(parsed));
|
|
}
|
|
|
|
- ret = mtd_add_device_partitions(mtd, real_parts, ret);
|
|
+ ret = mtd_add_device_partitions(mtd, &parsed);
|
|
if (ret)
|
|
goto out;
|
|
|
|
@@ -642,7 +647,8 @@ int mtd_device_parse_register(struct mtd
|
|
}
|
|
|
|
out:
|
|
- kfree(real_parts);
|
|
+ /* Cleanup any parsed partitions */
|
|
+ mtd_part_parser_cleanup(&parsed);
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
|
|
@@ -767,7 +773,6 @@ out:
|
|
}
|
|
EXPORT_SYMBOL_GPL(get_mtd_device);
|
|
|
|
-
|
|
int __get_mtd_device(struct mtd_info *mtd)
|
|
{
|
|
int err;
|
|
@@ -1001,6 +1006,366 @@ int mtd_read_oob(struct mtd_info *mtd, l
|
|
}
|
|
EXPORT_SYMBOL_GPL(mtd_read_oob);
|
|
|
|
+/**
|
|
+ * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
|
|
+ * @mtd: MTD device structure
|
|
+ * @section: ECC section. Depending on the layout you may have all the ECC
|
|
+ * bytes stored in a single contiguous section, or one section
|
|
+ * per ECC chunk (and sometime several sections for a single ECC
|
|
+ * ECC chunk)
|
|
+ * @oobecc: OOB region struct filled with the appropriate ECC position
|
|
+ * information
|
|
+ *
|
|
+ * This functions return ECC section information in the OOB area. I you want
|
|
+ * to get all the ECC bytes information, then you should call
|
|
+ * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobecc)
|
|
+{
|
|
+ memset(oobecc, 0, sizeof(*oobecc));
|
|
+
|
|
+ if (!mtd || section < 0)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (!mtd->ooblayout || !mtd->ooblayout->ecc)
|
|
+ return -ENOTSUPP;
|
|
+
|
|
+ return mtd->ooblayout->ecc(mtd, section, oobecc);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_free - Get the OOB region definition of a specific free
|
|
+ * section
|
|
+ * @mtd: MTD device structure
|
|
+ * @section: Free section you are interested in. Depending on the layout
|
|
+ * you may have all the free bytes stored in a single contiguous
|
|
+ * section, or one section per ECC chunk plus an extra section
|
|
+ * for the remaining bytes (or other funky layout).
|
|
+ * @oobfree: OOB region struct filled with the appropriate free position
|
|
+ * information
|
|
+ *
|
|
+ * This functions return free bytes position in the OOB area. I you want
|
|
+ * to get all the free bytes information, then you should call
|
|
+ * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobfree)
|
|
+{
|
|
+ memset(oobfree, 0, sizeof(*oobfree));
|
|
+
|
|
+ if (!mtd || section < 0)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (!mtd->ooblayout || !mtd->ooblayout->free)
|
|
+ return -ENOTSUPP;
|
|
+
|
|
+ return mtd->ooblayout->free(mtd, section, oobfree);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_find_region - Find the region attached to a specific byte
|
|
+ * @mtd: mtd info structure
|
|
+ * @byte: the byte we are searching for
|
|
+ * @sectionp: pointer where the section id will be stored
|
|
+ * @oobregion: used to retrieve the ECC position
|
|
+ * @iter: iterator function. Should be either mtd_ooblayout_free or
|
|
+ * mtd_ooblayout_ecc depending on the region type you're searching for
|
|
+ *
|
|
+ * This functions returns the section id and oobregion information of a
|
|
+ * specific byte. For example, say you want to know where the 4th ECC byte is
|
|
+ * stored, you'll use:
|
|
+ *
|
|
+ * mtd_ooblayout_find_region(mtd, 3, §ion, &oobregion, mtd_ooblayout_ecc);
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
|
|
+ int *sectionp, struct mtd_oob_region *oobregion,
|
|
+ int (*iter)(struct mtd_info *,
|
|
+ int section,
|
|
+ struct mtd_oob_region *oobregion))
|
|
+{
|
|
+ int pos = 0, ret, section = 0;
|
|
+
|
|
+ memset(oobregion, 0, sizeof(*oobregion));
|
|
+
|
|
+ while (1) {
|
|
+ ret = iter(mtd, section, oobregion);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (pos + oobregion->length > byte)
|
|
+ break;
|
|
+
|
|
+ pos += oobregion->length;
|
|
+ section++;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Adjust region info to make it start at the beginning at the
|
|
+ * 'start' ECC byte.
|
|
+ */
|
|
+ oobregion->offset += byte - pos;
|
|
+ oobregion->length -= byte - pos;
|
|
+ *sectionp = section;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
|
|
+ * ECC byte
|
|
+ * @mtd: mtd info structure
|
|
+ * @eccbyte: the byte we are searching for
|
|
+ * @sectionp: pointer where the section id will be stored
|
|
+ * @oobregion: OOB region information
|
|
+ *
|
|
+ * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
|
|
+ * byte.
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
|
|
+ int *section,
|
|
+ struct mtd_oob_region *oobregion)
|
|
+{
|
|
+ return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
|
|
+ mtd_ooblayout_ecc);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
|
|
+ * @mtd: mtd info structure
|
|
+ * @buf: destination buffer to store OOB bytes
|
|
+ * @oobbuf: OOB buffer
|
|
+ * @start: first byte to retrieve
|
|
+ * @nbytes: number of bytes to retrieve
|
|
+ * @iter: section iterator
|
|
+ *
|
|
+ * Extract bytes attached to a specific category (ECC or free)
|
|
+ * from the OOB buffer and copy them into buf.
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
|
|
+ const u8 *oobbuf, int start, int nbytes,
|
|
+ int (*iter)(struct mtd_info *,
|
|
+ int section,
|
|
+ struct mtd_oob_region *oobregion))
|
|
+{
|
|
+ struct mtd_oob_region oobregion = { };
|
|
+ int section = 0, ret;
|
|
+
|
|
+ ret = mtd_ooblayout_find_region(mtd, start, §ion,
|
|
+ &oobregion, iter);
|
|
+
|
|
+ while (!ret) {
|
|
+ int cnt;
|
|
+
|
|
+ cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
|
|
+ memcpy(buf, oobbuf + oobregion.offset, cnt);
|
|
+ buf += cnt;
|
|
+ nbytes -= cnt;
|
|
+
|
|
+ if (!nbytes)
|
|
+ break;
|
|
+
|
|
+ ret = iter(mtd, ++section, &oobregion);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
|
|
+ * @mtd: mtd info structure
|
|
+ * @buf: source buffer to get OOB bytes from
|
|
+ * @oobbuf: OOB buffer
|
|
+ * @start: first OOB byte to set
|
|
+ * @nbytes: number of OOB bytes to set
|
|
+ * @iter: section iterator
|
|
+ *
|
|
+ * Fill the OOB buffer with data provided in buf. The category (ECC or free)
|
|
+ * is selected by passing the appropriate iterator.
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
|
|
+ u8 *oobbuf, int start, int nbytes,
|
|
+ int (*iter)(struct mtd_info *,
|
|
+ int section,
|
|
+ struct mtd_oob_region *oobregion))
|
|
+{
|
|
+ struct mtd_oob_region oobregion = { };
|
|
+ int section = 0, ret;
|
|
+
|
|
+ ret = mtd_ooblayout_find_region(mtd, start, §ion,
|
|
+ &oobregion, iter);
|
|
+
|
|
+ while (!ret) {
|
|
+ int cnt;
|
|
+
|
|
+ cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
|
|
+ memcpy(oobbuf + oobregion.offset, buf, cnt);
|
|
+ buf += cnt;
|
|
+ nbytes -= cnt;
|
|
+
|
|
+ if (!nbytes)
|
|
+ break;
|
|
+
|
|
+ ret = iter(mtd, ++section, &oobregion);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
|
|
+ * @mtd: mtd info structure
|
|
+ * @iter: category iterator
|
|
+ *
|
|
+ * Count the number of bytes in a given category.
|
|
+ *
|
|
+ * Returns a positive value on success, a negative error code otherwise.
|
|
+ */
|
|
+static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
|
|
+ int (*iter)(struct mtd_info *,
|
|
+ int section,
|
|
+ struct mtd_oob_region *oobregion))
|
|
+{
|
|
+ struct mtd_oob_region oobregion = { };
|
|
+ int section = 0, ret, nbytes = 0;
|
|
+
|
|
+ while (1) {
|
|
+ ret = iter(mtd, section++, &oobregion);
|
|
+ if (ret) {
|
|
+ if (ret == -ERANGE)
|
|
+ ret = nbytes;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ nbytes += oobregion.length;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
|
|
+ * @mtd: mtd info structure
|
|
+ * @eccbuf: destination buffer to store ECC bytes
|
|
+ * @oobbuf: OOB buffer
|
|
+ * @start: first ECC byte to retrieve
|
|
+ * @nbytes: number of ECC bytes to retrieve
|
|
+ *
|
|
+ * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
|
|
+ const u8 *oobbuf, int start, int nbytes)
|
|
+{
|
|
+ return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
|
|
+ mtd_ooblayout_ecc);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
|
|
+ * @mtd: mtd info structure
|
|
+ * @eccbuf: source buffer to get ECC bytes from
|
|
+ * @oobbuf: OOB buffer
|
|
+ * @start: first ECC byte to set
|
|
+ * @nbytes: number of ECC bytes to set
|
|
+ *
|
|
+ * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
|
|
+ u8 *oobbuf, int start, int nbytes)
|
|
+{
|
|
+ return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
|
|
+ mtd_ooblayout_ecc);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
|
|
+ * @mtd: mtd info structure
|
|
+ * @databuf: destination buffer to store ECC bytes
|
|
+ * @oobbuf: OOB buffer
|
|
+ * @start: first ECC byte to retrieve
|
|
+ * @nbytes: number of ECC bytes to retrieve
|
|
+ *
|
|
+ * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
|
|
+ const u8 *oobbuf, int start, int nbytes)
|
|
+{
|
|
+ return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
|
|
+ mtd_ooblayout_free);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_get_eccbytes - set data bytes into the oob buffer
|
|
+ * @mtd: mtd info structure
|
|
+ * @eccbuf: source buffer to get data bytes from
|
|
+ * @oobbuf: OOB buffer
|
|
+ * @start: first ECC byte to set
|
|
+ * @nbytes: number of ECC bytes to set
|
|
+ *
|
|
+ * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
|
|
+ u8 *oobbuf, int start, int nbytes)
|
|
+{
|
|
+ return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
|
|
+ mtd_ooblayout_free);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
|
|
+ * @mtd: mtd info structure
|
|
+ *
|
|
+ * Works like mtd_ooblayout_count_bytes(), except it count free bytes.
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
|
|
+{
|
|
+ return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
|
|
+
|
|
+/**
|
|
+ * mtd_ooblayout_count_freebytes - count the number of ECC bytes in OOB
|
|
+ * @mtd: mtd info structure
|
|
+ *
|
|
+ * Works like mtd_ooblayout_count_bytes(), except it count ECC bytes.
|
|
+ *
|
|
+ * Returns zero on success, a negative error code otherwise.
|
|
+ */
|
|
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
|
|
+{
|
|
+ return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
|
|
+
|
|
/*
|
|
* Method to access the protection register area, present in some flash
|
|
* devices. The user data is one time programmable but the factory data is read
|
|
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -10,10 +10,15 @@ int add_mtd_device(struct mtd_info *mtd)
 int del_mtd_device(struct mtd_info *mtd);
 int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
 int del_mtd_partitions(struct mtd_info *);
+
+struct mtd_partitions;
+
 int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
-			 struct mtd_partition **pparts,
+			 struct mtd_partitions *pparts,
 			 struct mtd_part_parser_data *data);
 
+void mtd_part_parser_cleanup(struct mtd_partitions *parts);
+
 int __init init_mtdchar(void);
 void __exit cleanup_mtdchar(void);
 
--- a/drivers/mtd/mtdpart.c
|
|
+++ b/drivers/mtd/mtdpart.c
|
|
@@ -55,9 +55,12 @@ static void mtd_partition_split(struct m
|
|
|
|
/*
|
|
* Given a pointer to the MTD object in the mtd_part structure, we can retrieve
|
|
- * the pointer to that structure with this macro.
|
|
+ * the pointer to that structure.
|
|
*/
|
|
-#define PART(x) ((struct mtd_part *)(x))
|
|
+static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
|
|
+{
|
|
+ return container_of(mtd, struct mtd_part, mtd);
|
|
+}
|
|
|
|
|
|
/*
|
|
@@ -68,7 +71,7 @@ static void mtd_partition_split(struct m
|
|
static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
|
|
size_t *retlen, u_char *buf)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
struct mtd_ecc_stats stats;
|
|
int res;
|
|
|
|
@@ -87,7 +90,7 @@ static int part_read(struct mtd_info *mt
|
|
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
|
|
size_t *retlen, void **virt, resource_size_t *phys)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
|
|
return part->master->_point(part->master, from + part->offset, len,
|
|
retlen, virt, phys);
|
|
@@ -95,7 +98,7 @@ static int part_point(struct mtd_info *m
|
|
|
|
static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
|
|
return part->master->_unpoint(part->master, from + part->offset, len);
|
|
}
|
|
@@ -105,7 +108,7 @@ static unsigned long part_get_unmapped_a
|
|
unsigned long offset,
|
|
unsigned long flags)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
|
|
offset += part->offset;
|
|
return part->master->_get_unmapped_area(part->master, len, offset,
|
|
@@ -115,7 +118,7 @@ static unsigned long part_get_unmapped_a
|
|
static int part_read_oob(struct mtd_info *mtd, loff_t from,
|
|
struct mtd_oob_ops *ops)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
int res;
|
|
|
|
if (from >= mtd->size)
|
|
@@ -130,10 +133,7 @@ static int part_read_oob(struct mtd_info
|
|
if (ops->oobbuf) {
|
|
size_t len, pages;
|
|
|
|
- if (ops->mode == MTD_OPS_AUTO_OOB)
|
|
- len = mtd->oobavail;
|
|
- else
|
|
- len = mtd->oobsize;
|
|
+ len = mtd_oobavail(mtd, ops);
|
|
pages = mtd_div_by_ws(mtd->size, mtd);
|
|
pages -= mtd_div_by_ws(from, mtd);
|
|
if (ops->ooboffs + ops->ooblen > pages * len)
|
|
@@ -153,7 +153,7 @@ static int part_read_oob(struct mtd_info
|
|
static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
|
|
size_t len, size_t *retlen, u_char *buf)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_read_user_prot_reg(part->master, from, len,
|
|
retlen, buf);
|
|
}
|
|
@@ -161,7 +161,7 @@ static int part_read_user_prot_reg(struc
|
|
static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
|
|
size_t *retlen, struct otp_info *buf)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_get_user_prot_info(part->master, len, retlen,
|
|
buf);
|
|
}
|
|
@@ -169,7 +169,7 @@ static int part_get_user_prot_info(struc
|
|
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
|
|
size_t len, size_t *retlen, u_char *buf)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_read_fact_prot_reg(part->master, from, len,
|
|
retlen, buf);
|
|
}
|
|
@@ -177,7 +177,7 @@ static int part_read_fact_prot_reg(struc
|
|
static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
|
|
size_t *retlen, struct otp_info *buf)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_get_fact_prot_info(part->master, len, retlen,
|
|
buf);
|
|
}
|
|
@@ -185,7 +185,7 @@ static int part_get_fact_prot_info(struc
|
|
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
|
|
size_t *retlen, const u_char *buf)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_write(part->master, to + part->offset, len,
|
|
retlen, buf);
|
|
}
|
|
@@ -193,7 +193,7 @@ static int part_write(struct mtd_info *m
|
|
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
|
|
size_t *retlen, const u_char *buf)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_panic_write(part->master, to + part->offset, len,
|
|
retlen, buf);
|
|
}
|
|
@@ -201,7 +201,7 @@ static int part_panic_write(struct mtd_i
|
|
static int part_write_oob(struct mtd_info *mtd, loff_t to,
|
|
struct mtd_oob_ops *ops)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
|
|
if (to >= mtd->size)
|
|
return -EINVAL;
|
|
@@ -213,7 +213,7 @@ static int part_write_oob(struct mtd_inf
|
|
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
|
|
size_t len, size_t *retlen, u_char *buf)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_write_user_prot_reg(part->master, from, len,
|
|
retlen, buf);
|
|
}
|
|
@@ -221,21 +221,21 @@ static int part_write_user_prot_reg(stru
|
|
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
|
|
size_t len)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_lock_user_prot_reg(part->master, from, len);
|
|
}
|
|
|
|
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
|
|
unsigned long count, loff_t to, size_t *retlen)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_writev(part->master, vecs, count,
|
|
to + part->offset, retlen);
|
|
}
|
|
|
|
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
int ret;
|
|
|
|
|
|
@@ -299,7 +299,7 @@ static int part_erase(struct mtd_info *m
|
|
void mtd_erase_callback(struct erase_info *instr)
|
|
{
|
|
if (instr->mtd->_erase == part_erase) {
|
|
- struct mtd_part *part = PART(instr->mtd);
|
|
+ struct mtd_part *part = mtd_to_part(instr->mtd);
|
|
size_t wrlen = 0;
|
|
|
|
if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
|
|
@@ -330,13 +330,13 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
|
|
|
|
static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_lock(part->master, ofs + part->offset, len);
|
|
}
|
|
|
|
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
|
|
ofs += part->offset;
|
|
if (mtd->flags & MTD_ERASE_PARTIAL) {
|
|
@@ -349,45 +349,45 @@ static int part_unlock(struct mtd_info *
|
|
|
|
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_is_locked(part->master, ofs + part->offset, len);
|
|
}
|
|
|
|
static void part_sync(struct mtd_info *mtd)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
part->master->_sync(part->master);
|
|
}
|
|
|
|
static int part_suspend(struct mtd_info *mtd)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return part->master->_suspend(part->master);
|
|
}
|
|
|
|
static void part_resume(struct mtd_info *mtd)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
part->master->_resume(part->master);
|
|
}
|
|
|
|
static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
ofs += part->offset;
|
|
return part->master->_block_isreserved(part->master, ofs);
|
|
}
|
|
|
|
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
ofs += part->offset;
|
|
return part->master->_block_isbad(part->master, ofs);
|
|
}
|
|
|
|
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
|
|
{
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
int res;
|
|
|
|
ofs += part->offset;
|
|
@@ -397,6 +397,27 @@ static int part_block_markbad(struct mtd
|
|
return res;
|
|
}
|
|
|
|
+static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobregion)
|
|
+{
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
+
|
|
+ return mtd_ooblayout_ecc(part->master, section, oobregion);
|
|
+}
|
|
+
|
|
+static int part_ooblayout_free(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobregion)
|
|
+{
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
+
|
|
+ return mtd_ooblayout_free(part->master, section, oobregion);
|
|
+}
|
|
+
|
|
+static const struct mtd_ooblayout_ops part_ooblayout_ops = {
|
|
+ .ecc = part_ooblayout_ecc,
|
|
+ .free = part_ooblayout_free,
|
|
+};
|
|
+
|
|
static inline void free_partition(struct mtd_part *p)
|
|
{
|
|
kfree(p->mtd.name);
|
|
@@ -614,7 +635,7 @@ static struct mtd_part *allocate_partiti
|
|
slave->mtd.erasesize = slave->mtd.size;
|
|
}
|
|
|
|
- slave->mtd.ecclayout = master->ecclayout;
|
|
+ mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
|
|
slave->mtd.ecc_step_size = master->ecc_step_size;
|
|
slave->mtd.ecc_strength = master->ecc_strength;
|
|
slave->mtd.bitflip_threshold = master->bitflip_threshold;
|
|
@@ -639,7 +660,7 @@ static ssize_t mtd_partition_offset_show
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
struct mtd_info *mtd = dev_get_drvdata(dev);
|
|
- struct mtd_part *part = PART(mtd);
|
|
+ struct mtd_part *part = mtd_to_part(mtd);
|
|
return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
|
|
}
|
|
|
|
@@ -677,11 +698,10 @@ int mtd_add_partition(struct mtd_info *m
|
|
if (length <= 0)
|
|
return -EINVAL;
|
|
|
|
+ memset(&part, 0, sizeof(part));
|
|
part.name = name;
|
|
part.size = length;
|
|
part.offset = offset;
|
|
- part.mask_flags = 0;
|
|
- part.ecclayout = NULL;
|
|
|
|
new = allocate_partition(master, &part, -1, offset);
|
|
if (IS_ERR(new))
|
|
@@ -845,7 +865,7 @@ int add_mtd_partitions(struct mtd_info *
|
|
static DEFINE_SPINLOCK(part_parser_lock);
|
|
static LIST_HEAD(part_parsers);
|
|
|
|
-static struct mtd_part_parser *get_partition_parser(const char *name)
|
|
+static struct mtd_part_parser *mtd_part_parser_get(const char *name)
|
|
{
|
|
struct mtd_part_parser *p, *ret = NULL;
|
|
|
|
@@ -862,7 +882,20 @@ static struct mtd_part_parser *get_parti
|
|
return ret;
|
|
}
|
|
|
|
-#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
|
|
+static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
|
|
+{
|
|
+ module_put(p->owner);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Many partition parsers just expected the core to kfree() all their data in
|
|
+ * one chunk. Do that by default.
|
|
+ */
|
|
+static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
|
|
+ int nr_parts)
|
|
+{
|
|
+ kfree(pparts);
|
|
+}
|
|
|
|
static struct mtd_part_parser *
|
|
get_partition_parser_by_type(enum mtd_parser_type type,
|
|
@@ -874,7 +907,7 @@ get_partition_parser_by_type(enum mtd_pa
|
|
|
|
p = list_prepare_entry(start, &part_parsers, list);
|
|
if (start)
|
|
- put_partition_parser(start);
|
|
+ mtd_part_parser_put(start);
|
|
|
|
list_for_each_entry_continue(p, &part_parsers, list) {
|
|
if (p->type == type && try_module_get(p->owner)) {
|
|
@@ -888,13 +921,19 @@ get_partition_parser_by_type(enum mtd_pa
|
|
return ret;
|
|
}
|
|
|
|
-void register_mtd_parser(struct mtd_part_parser *p)
|
|
-{
|
|
+int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
|
|
+ {
|
|
+ p->owner = owner;
|
|
+
|
|
+ if (!p->cleanup)
|
|
+ p->cleanup = &mtd_part_parser_cleanup_default;
|
|
+
|
|
spin_lock(&part_parser_lock);
|
|
list_add(&p->list, &part_parsers);
|
|
spin_unlock(&part_parser_lock);
|
|
+ return 0;
|
|
}
|
|
-EXPORT_SYMBOL_GPL(register_mtd_parser);
|
|
+EXPORT_SYMBOL_GPL(__register_mtd_parser);
|
|
|
|
void deregister_mtd_parser(struct mtd_part_parser *p)
|
|
{
|
|
@@ -954,7 +993,7 @@ static const char * const default_mtd_pa
|
|
* parse_mtd_partitions - parse MTD partitions
|
|
* @master: the master partition (describes whole MTD device)
|
|
* @types: names of partition parsers to try or %NULL
|
|
- * @pparts: array of partitions found is returned here
|
|
+ * @pparts: info about partitions found is returned here
|
|
* @data: MTD partition parser-specific data
|
|
*
|
|
* This function tries to find partition on MTD device @master. It uses MTD
|
|
@@ -966,45 +1005,42 @@ static const char * const default_mtd_pa
|
|
*
|
|
* This function may return:
|
|
* o a negative error code in case of failure
|
|
- * o zero if no partitions were found
|
|
- * o a positive number of found partitions, in which case on exit @pparts will
|
|
- * point to an array containing this number of &struct mtd_info objects.
|
|
+ * o zero otherwise, and @pparts will describe the partitions, number of
|
|
+ * partitions, and the parser which parsed them. Caller must release
|
|
+ * resources with mtd_part_parser_cleanup() when finished with the returned
|
|
+ * data.
|
|
*/
|
|
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
|
|
- struct mtd_partition **pparts,
|
|
+ struct mtd_partitions *pparts,
|
|
struct mtd_part_parser_data *data)
|
|
{
|
|
struct mtd_part_parser *parser;
|
|
int ret, err = 0;
|
|
const char *const *types_of = NULL;
|
|
|
|
- if (data && data->of_node) {
|
|
- types_of = of_get_probes(data->of_node);
|
|
- if (types_of != NULL)
|
|
- types = types_of;
|
|
- }
|
|
-
|
|
if (!types)
|
|
types = default_mtd_part_types;
|
|
|
|
for ( ; *types; types++) {
|
|
pr_debug("%s: parsing partitions %s\n", master->name, *types);
|
|
- parser = get_partition_parser(*types);
|
|
+ parser = mtd_part_parser_get(*types);
|
|
if (!parser && !request_module("%s", *types))
|
|
- parser = get_partition_parser(*types);
|
|
+ parser = mtd_part_parser_get(*types);
|
|
pr_debug("%s: got parser %s\n", master->name,
|
|
parser ? parser->name : NULL);
|
|
if (!parser)
|
|
continue;
|
|
- ret = (*parser->parse_fn)(master, pparts, data);
|
|
+ ret = (*parser->parse_fn)(master, &pparts->parts, data);
|
|
pr_debug("%s: parser %s: %i\n",
|
|
master->name, parser->name, ret);
|
|
- put_partition_parser(parser);
|
|
if (ret > 0) {
|
|
printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
|
|
ret, parser->name, master->name);
|
|
- return ret;
|
|
+ pparts->nr_parts = ret;
|
|
+ pparts->parser = parser;
|
|
+ return 0;
|
|
}
|
|
+ mtd_part_parser_put(parser);
|
|
/*
|
|
* Stash the first error we see; only report it if no parser
|
|
* succeeds
|
|
@@ -1034,7 +1070,7 @@ int parse_mtd_partitions_by_type(struct
|
|
ret = (*parser->parse_fn)(master, pparts, data);
|
|
|
|
if (ret > 0) {
|
|
- put_partition_parser(parser);
|
|
+ mtd_part_parser_put(parser);
|
|
printk(KERN_NOTICE
|
|
"%d %s partitions found on MTD device %s\n",
|
|
ret, parser->name, master->name);
|
|
@@ -1048,6 +1084,22 @@ int parse_mtd_partitions_by_type(struct
|
|
}
|
|
EXPORT_SYMBOL_GPL(parse_mtd_partitions_by_type);
|
|
|
|
+void mtd_part_parser_cleanup(struct mtd_partitions *parts)
|
|
+{
|
|
+ const struct mtd_part_parser *parser;
|
|
+
|
|
+ if (!parts)
|
|
+ return;
|
|
+
|
|
+ parser = parts->parser;
|
|
+ if (parser) {
|
|
+ if (parser->cleanup)
|
|
+ parser->cleanup(parts->parts, parts->nr_parts);
|
|
+
|
|
+ mtd_part_parser_put(parser);
|
|
+ }
|
|
+}
|
|
+
|
|
int mtd_is_partition(const struct mtd_info *mtd)
|
|
{
|
|
struct mtd_part *part;
|
|
@@ -1070,7 +1122,7 @@ struct mtd_info *mtdpart_get_master(cons
|
|
if (!mtd_is_partition(mtd))
|
|
return (struct mtd_info *)mtd;
|
|
|
|
- return PART(mtd)->master;
|
|
+ return mtd_to_part(mtd)->master;
|
|
}
|
|
EXPORT_SYMBOL_GPL(mtdpart_get_master);
|
|
|
|
@@ -1079,7 +1131,7 @@ uint64_t mtdpart_get_offset(const struct
|
|
if (!mtd_is_partition(mtd))
|
|
return 0;
|
|
|
|
- return PART(mtd)->offset;
|
|
+ return mtd_to_part(mtd)->offset;
|
|
}
|
|
EXPORT_SYMBOL_GPL(mtdpart_get_offset);
|
|
|
|
@@ -1089,6 +1141,6 @@ uint64_t mtd_get_device_size(const struc
|
|
if (!mtd_is_partition(mtd))
|
|
return mtd->size;
|
|
|
|
- return PART(mtd)->master->size;
|
|
+ return mtd_to_part(mtd)->master->size;
|
|
}
|
|
EXPORT_SYMBOL_GPL(mtd_get_device_size);
|
|
--- a/drivers/mtd/mtdswap.c
|
|
+++ b/drivers/mtd/mtdswap.c
|
|
@@ -346,7 +346,7 @@ static int mtdswap_read_markers(struct m
|
|
if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
|
|
return MTDSWAP_SCANNED_BAD;
|
|
|
|
- ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
|
|
+ ops.ooblen = 2 * d->mtd->oobavail;
|
|
ops.oobbuf = d->oob_buf;
|
|
ops.ooboffs = 0;
|
|
ops.datbuf = NULL;
|
|
@@ -359,7 +359,7 @@ static int mtdswap_read_markers(struct m
|
|
|
|
data = (struct mtdswap_oobdata *)d->oob_buf;
|
|
data2 = (struct mtdswap_oobdata *)
|
|
- (d->oob_buf + d->mtd->ecclayout->oobavail);
|
|
+ (d->oob_buf + d->mtd->oobavail);
|
|
|
|
if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
|
|
eb->erase_count = le32_to_cpu(data->count);
|
|
@@ -933,7 +933,7 @@ static unsigned int mtdswap_eblk_passes(
|
|
|
|
ops.mode = MTD_OPS_AUTO_OOB;
|
|
ops.len = mtd->writesize;
|
|
- ops.ooblen = mtd->ecclayout->oobavail;
|
|
+ ops.ooblen = mtd->oobavail;
|
|
ops.ooboffs = 0;
|
|
ops.datbuf = d->page_buf;
|
|
ops.oobbuf = d->oob_buf;
|
|
@@ -945,7 +945,7 @@ static unsigned int mtdswap_eblk_passes(
|
|
for (i = 0; i < mtd_pages; i++) {
|
|
patt = mtdswap_test_patt(test + i);
|
|
memset(d->page_buf, patt, mtd->writesize);
|
|
- memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
|
|
+ memset(d->oob_buf, patt, mtd->oobavail);
|
|
ret = mtd_write_oob(mtd, pos, &ops);
|
|
if (ret)
|
|
goto error;
|
|
@@ -964,7 +964,7 @@ static unsigned int mtdswap_eblk_passes(
|
|
if (p1[j] != patt)
|
|
goto error;
|
|
|
|
- for (j = 0; j < mtd->ecclayout->oobavail; j++)
|
|
+ for (j = 0; j < mtd->oobavail; j++)
|
|
if (p2[j] != (unsigned char)patt)
|
|
goto error;
|
|
|
|
@@ -1387,7 +1387,7 @@ static int mtdswap_init(struct mtdswap_d
|
|
if (!d->page_buf)
|
|
goto page_buf_fail;
|
|
|
|
- d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
|
|
+ d->oob_buf = kmalloc(2 * mtd->oobavail, GFP_KERNEL);
|
|
if (!d->oob_buf)
|
|
goto oob_buf_fail;
|
|
|
|
@@ -1417,7 +1417,6 @@ static void mtdswap_add_mtd(struct mtd_b
|
|
unsigned long part;
|
|
unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
|
|
uint64_t swap_size, use_size, size_limit;
|
|
- struct nand_ecclayout *oinfo;
|
|
int ret;
|
|
|
|
parts = &partitions[0];
|
|
@@ -1447,17 +1446,10 @@ static void mtdswap_add_mtd(struct mtd_b
|
|
return;
|
|
}
|
|
|
|
- oinfo = mtd->ecclayout;
|
|
- if (!oinfo) {
|
|
- printk(KERN_ERR "%s: mtd%d does not have OOB\n",
|
|
- MTDSWAP_PREFIX, mtd->index);
|
|
- return;
|
|
- }
|
|
-
|
|
- if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
|
|
+ if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) {
|
|
printk(KERN_ERR "%s: Not enough free bytes in OOB, "
|
|
"%d available, %zu needed.\n",
|
|
- MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
|
|
+ MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE);
|
|
return;
|
|
}
|
|
|
|
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -55,7 +55,7 @@ config MTD_NAND_DENALI_PCI
 config MTD_NAND_DENALI_DT
 	tristate "Support Denali NAND controller as a DT device"
 	select MTD_NAND_DENALI
-	depends on HAS_DMA && HAVE_CLK
+	depends on HAS_DMA && HAVE_CLK && OF
 	help
 	  Enable the driver for NAND flash on platforms using a Denali NAND
 	  controller as a DT device.
@@ -74,6 +74,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
 config MTD_NAND_GPIO
 	tristate "GPIO assisted NAND Flash driver"
 	depends on GPIOLIB || COMPILE_TEST
+	depends on HAS_IOMEM
 	help
 	  This enables a NAND flash driver where control signals are
 	  connected to GPIO pins, and commands and data are communicated
@@ -310,6 +311,7 @@ config MTD_NAND_CAFE
 config MTD_NAND_CS553X
 	tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
 	depends on X86_32
+	depends on !UML && HAS_IOMEM
 	help
 	  The CS553x companion chips for the AMD Geode processor
 	  include NAND flash controllers with built-in hardware ECC
@@ -463,6 +465,7 @@ config MTD_NAND_MPC5121_NFC
 config MTD_NAND_VF610_NFC
 	tristate "Support for Freescale NFC for VF610/MPC5125"
 	depends on (SOC_VF610 || COMPILE_TEST)
+	depends on HAS_IOMEM
 	help
 	  Enables support for NAND Flash Controller on some Freescale
 	  processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
@@ -480,7 +483,7 @@ config MTD_NAND_MXC
 
 config MTD_NAND_SH_FLCTL
 	tristate "Support for NAND on Renesas SuperH FLCTL"
-	depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+	depends on SUPERH || COMPILE_TEST
 	depends on HAS_IOMEM
 	depends on HAS_DMA
 	help
@@ -519,6 +522,13 @@ config MTD_NAND_JZ4740
 	help
 	  Enables support for NAND Flash on JZ4740 SoC based boards.
 
+config MTD_NAND_JZ4780
+	tristate "Support for NAND on JZ4780 SoC"
+	depends on MACH_JZ4780 && JZ4780_NEMC
+	help
+	  Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
+	  based boards, using the BCH controller for hardware error correction.
+
 config MTD_NAND_FSMC
 	tristate "Support for NAND on ST Micros FSMC"
 	depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
@@ -546,4 +556,11 @@ config MTD_NAND_HISI504
 	help
 	  Enables support for NAND controller on Hisilicon SoC Hip04.
 
+config MTD_NAND_QCOM
+	tristate "Support for NAND on QCOM SoCs"
+	depends on ARCH_QCOM
+	help
+	  Enables support for NAND flash chips on SoCs containing the EBI2 NAND
+	  controller. This controller is found on IPQ806x SoC.
+
 endif # MTD_NAND
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -49,11 +49,13 @@ obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mp
 obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
 obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
 obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
+obj-$(CONFIG_MTD_NAND_JZ4780) += jz4780_nand.o jz4780_bch.o
 obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
 obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
 obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
 obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
 obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
+obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
 
 nand-objs := nand_base.o nand_bbt.o nand_timings.o
--- a/drivers/mtd/nand/nand_base.c
|
|
+++ b/drivers/mtd/nand/nand_base.c
|
|
@@ -48,50 +48,6 @@
|
|
#include <linux/mtd/partitions.h>
|
|
#include <linux/of_mtd.h>
|
|
|
|
-/* Define default oob placement schemes for large and small page devices */
|
|
-static struct nand_ecclayout nand_oob_8 = {
|
|
- .eccbytes = 3,
|
|
- .eccpos = {0, 1, 2},
|
|
- .oobfree = {
|
|
- {.offset = 3,
|
|
- .length = 2},
|
|
- {.offset = 6,
|
|
- .length = 2} }
|
|
-};
|
|
-
|
|
-static struct nand_ecclayout nand_oob_16 = {
|
|
- .eccbytes = 6,
|
|
- .eccpos = {0, 1, 2, 3, 6, 7},
|
|
- .oobfree = {
|
|
- {.offset = 8,
|
|
- . length = 8} }
|
|
-};
|
|
-
|
|
-static struct nand_ecclayout nand_oob_64 = {
|
|
- .eccbytes = 24,
|
|
- .eccpos = {
|
|
- 40, 41, 42, 43, 44, 45, 46, 47,
|
|
- 48, 49, 50, 51, 52, 53, 54, 55,
|
|
- 56, 57, 58, 59, 60, 61, 62, 63},
|
|
- .oobfree = {
|
|
- {.offset = 2,
|
|
- .length = 38} }
|
|
-};
|
|
-
|
|
-static struct nand_ecclayout nand_oob_128 = {
|
|
- .eccbytes = 48,
|
|
- .eccpos = {
|
|
- 80, 81, 82, 83, 84, 85, 86, 87,
|
|
- 88, 89, 90, 91, 92, 93, 94, 95,
|
|
- 96, 97, 98, 99, 100, 101, 102, 103,
|
|
- 104, 105, 106, 107, 108, 109, 110, 111,
|
|
- 112, 113, 114, 115, 116, 117, 118, 119,
|
|
- 120, 121, 122, 123, 124, 125, 126, 127},
|
|
- .oobfree = {
|
|
- {.offset = 2,
|
|
- .length = 78} }
|
|
-};
|
|
-
|
|
static int nand_get_device(struct mtd_info *mtd, int new_state);
|
|
|
|
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
|
|
@@ -103,10 +59,96 @@ static int nand_do_write_oob(struct mtd_
|
|
*/
|
|
DEFINE_LED_TRIGGER(nand_led_trigger);
|
|
|
|
+/* Define default oob placement schemes for large and small page devices */
|
|
+static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobregion)
|
|
+{
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
|
|
+
|
|
+ if (section > 1)
|
|
+ return -ERANGE;
|
|
+
|
|
+ if (!section) {
|
|
+ oobregion->offset = 0;
|
|
+ oobregion->length = 4;
|
|
+ } else {
|
|
+ oobregion->offset = 6;
|
|
+ oobregion->length = ecc->total - 4;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobregion)
|
|
+{
|
|
+ if (section > 1)
|
|
+ return -ERANGE;
|
|
+
|
|
+ if (mtd->oobsize == 16) {
|
|
+ if (section)
|
|
+ return -ERANGE;
|
|
+
|
|
+ oobregion->length = 8;
|
|
+ oobregion->offset = 8;
|
|
+ } else {
|
|
+ oobregion->length = 2;
|
|
+ if (!section)
|
|
+ oobregion->offset = 3;
|
|
+ else
|
|
+ oobregion->offset = 6;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
|
|
+ .ecc = nand_ooblayout_ecc_sp,
|
|
+ .free = nand_ooblayout_free_sp,
|
|
+};
|
|
+EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
|
|
+
|
|
+static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobregion)
|
|
+{
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
|
|
+
|
|
+ if (section)
|
|
+ return -ERANGE;
|
|
+
|
|
+ oobregion->length = ecc->total;
|
|
+ oobregion->offset = mtd->oobsize - oobregion->length;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobregion)
|
|
+{
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
|
|
+
|
|
+ if (section)
|
|
+ return -ERANGE;
|
|
+
|
|
+ oobregion->length = mtd->oobsize - ecc->total - 2;
|
|
+ oobregion->offset = 2;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
|
|
+ .ecc = nand_ooblayout_ecc_lp,
|
|
+ .free = nand_ooblayout_free_lp,
|
|
+};
|
|
+EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
|
|
+
|
|
static int check_offs_len(struct mtd_info *mtd,
|
|
loff_t ofs, uint64_t len)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
int ret = 0;
|
|
|
|
/* Start address must align on block boundary */
|
|
@@ -132,7 +174,7 @@ static int check_offs_len(struct mtd_inf
|
|
*/
|
|
static void nand_release_device(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
/* Release the controller and the chip */
|
|
spin_lock(&chip->controller->lock);
|
|
@@ -150,7 +192,7 @@ static void nand_release_device(struct m
|
|
*/
|
|
static uint8_t nand_read_byte(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
return readb(chip->IO_ADDR_R);
|
|
}
|
|
|
|
@@ -163,7 +205,7 @@ static uint8_t nand_read_byte(struct mtd
|
|
*/
|
|
static uint8_t nand_read_byte16(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
|
|
}
|
|
|
|
@@ -175,7 +217,7 @@ static uint8_t nand_read_byte16(struct m
|
|
*/
|
|
static u16 nand_read_word(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
return readw(chip->IO_ADDR_R);
|
|
}
|
|
|
|
@@ -188,7 +230,7 @@ static u16 nand_read_word(struct mtd_inf
|
|
*/
|
|
static void nand_select_chip(struct mtd_info *mtd, int chipnr)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
switch (chipnr) {
|
|
case -1:
|
|
@@ -211,7 +253,7 @@ static void nand_select_chip(struct mtd_
|
|
*/
|
|
static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
chip->write_buf(mtd, &byte, 1);
|
|
}
|
|
@@ -225,7 +267,7 @@ static void nand_write_byte(struct mtd_i
|
|
*/
|
|
static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
uint16_t word = byte;
|
|
|
|
/*
|
|
@@ -257,7 +299,7 @@ static void nand_write_byte16(struct mtd
|
|
*/
|
|
static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
iowrite8_rep(chip->IO_ADDR_W, buf, len);
|
|
}
|
|
@@ -272,7 +314,7 @@ static void nand_write_buf(struct mtd_in
|
|
*/
|
|
static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
ioread8_rep(chip->IO_ADDR_R, buf, len);
|
|
}
|
|
@@ -287,7 +329,7 @@ static void nand_read_buf(struct mtd_inf
|
|
*/
|
|
static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
u16 *p = (u16 *) buf;
|
|
|
|
iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
|
|
@@ -303,7 +345,7 @@ static void nand_write_buf16(struct mtd_
|
|
*/
|
|
static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
u16 *p = (u16 *) buf;
|
|
|
|
ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
|
|
@@ -313,14 +355,13 @@ static void nand_read_buf16(struct mtd_i
|
|
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
|
|
* @mtd: MTD device structure
|
|
* @ofs: offset from device start
|
|
- * @getchip: 0, if the chip is already selected
|
|
*
|
|
* Check, if the block is bad.
|
|
*/
|
|
-static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
|
|
+static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
|
|
{
|
|
- int page, chipnr, res = 0, i = 0;
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ int page, res = 0, i = 0;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
u16 bad;
|
|
|
|
if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
|
|
@@ -328,15 +369,6 @@ static int nand_block_bad(struct mtd_inf
|
|
|
|
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
|
|
|
|
- if (getchip) {
|
|
- chipnr = (int)(ofs >> chip->chip_shift);
|
|
-
|
|
- nand_get_device(mtd, FL_READING);
|
|
-
|
|
- /* Select the NAND device */
|
|
- chip->select_chip(mtd, chipnr);
|
|
- }
|
|
-
|
|
do {
|
|
if (chip->options & NAND_BUSWIDTH_16) {
|
|
chip->cmdfunc(mtd, NAND_CMD_READOOB,
|
|
@@ -361,11 +393,6 @@ static int nand_block_bad(struct mtd_inf
|
|
i++;
|
|
} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
|
|
|
|
- if (getchip) {
|
|
- chip->select_chip(mtd, -1);
|
|
- nand_release_device(mtd);
|
|
- }
|
|
-
|
|
return res;
|
|
}
|
|
|
|
@@ -380,7 +407,7 @@ static int nand_block_bad(struct mtd_inf
|
|
*/
|
|
static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
struct mtd_oob_ops ops;
|
|
uint8_t buf[2] = { 0, 0 };
|
|
int ret = 0, res, i = 0;
|
|
@@ -430,7 +457,7 @@ static int nand_default_block_markbad(st
|
|
*/
|
|
static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
int res, ret = 0;
|
|
|
|
if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
|
|
@@ -471,7 +498,7 @@ static int nand_block_markbad_lowlevel(s
|
|
*/
|
|
static int nand_check_wp(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
/* Broken xD cards report WP despite being writable */
|
|
if (chip->options & NAND_BROKEN_XD)
|
|
@@ -491,7 +518,7 @@ static int nand_check_wp(struct mtd_info
|
|
*/
|
|
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
if (!chip->bbt)
|
|
return 0;
|
|
@@ -503,19 +530,17 @@ static int nand_block_isreserved(struct
|
|
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
|
|
* @mtd: MTD device structure
|
|
* @ofs: offset from device start
|
|
- * @getchip: 0, if the chip is already selected
|
|
* @allowbbt: 1, if its allowed to access the bbt area
|
|
*
|
|
* Check, if the block is bad. Either by reading the bad block table or
|
|
* calling of the scan function.
|
|
*/
|
|
-static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
|
|
- int allowbbt)
|
|
+static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
if (!chip->bbt)
|
|
- return chip->block_bad(mtd, ofs, getchip);
|
|
+ return chip->block_bad(mtd, ofs);
|
|
|
|
/* Return info from the table */
|
|
return nand_isbad_bbt(mtd, ofs, allowbbt);
|
|
@@ -531,7 +556,7 @@ static int nand_block_checkbad(struct mt
|
|
*/
|
|
static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
int i;
|
|
|
|
/* Wait for the device to get ready */
|
|
@@ -551,7 +576,7 @@ static void panic_nand_wait_ready(struct
|
|
*/
|
|
void nand_wait_ready(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
unsigned long timeo = 400;
|
|
|
|
if (in_interrupt() || oops_in_progress)
|
|
@@ -566,8 +591,8 @@ void nand_wait_ready(struct mtd_info *mt
|
|
cond_resched();
|
|
} while (time_before(jiffies, timeo));
|
|
|
|
- pr_warn_ratelimited(
|
|
- "timeout while waiting for chip to become ready\n");
|
|
+ if (!chip->dev_ready(mtd))
|
|
+ pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
|
|
out:
|
|
led_trigger_event(nand_led_trigger, LED_OFF);
|
|
}
|
|
@@ -582,7 +607,7 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
|
|
*/
|
|
static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
|
|
{
|
|
- register struct nand_chip *chip = mtd->priv;
|
|
+ register struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
timeo = jiffies + msecs_to_jiffies(timeo);
|
|
do {
|
|
@@ -605,7 +630,7 @@ static void nand_wait_status_ready(struc
|
|
static void nand_command(struct mtd_info *mtd, unsigned int command,
|
|
int column, int page_addr)
|
|
{
|
|
- register struct nand_chip *chip = mtd->priv;
|
|
+ register struct nand_chip *chip = mtd_to_nand(mtd);
|
|
int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
|
|
|
|
/* Write out the command to the device */
|
|
@@ -708,7 +733,7 @@ static void nand_command(struct mtd_info
|
|
static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
|
|
int column, int page_addr)
|
|
{
|
|
- register struct nand_chip *chip = mtd->priv;
|
|
+ register struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
/* Emulate NAND_CMD_READOOB */
|
|
if (command == NAND_CMD_READOOB) {
|
|
@@ -832,7 +857,7 @@ static void panic_nand_get_device(struct
|
|
static int
|
|
nand_get_device(struct mtd_info *mtd, int new_state)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
spinlock_t *lock = &chip->controller->lock;
|
|
wait_queue_head_t *wq = &chip->controller->wq;
|
|
DECLARE_WAITQUEUE(wait, current);
|
|
@@ -952,7 +977,7 @@ static int __nand_unlock(struct mtd_info
|
|
{
|
|
int ret = 0;
|
|
int status, page;
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
/* Submit address of first page to unlock */
|
|
page = ofs >> chip->page_shift;
|
|
@@ -987,7 +1012,7 @@ int nand_unlock(struct mtd_info *mtd, lo
|
|
{
|
|
int ret = 0;
|
|
int chipnr;
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
pr_debug("%s: start = 0x%012llx, len = %llu\n",
|
|
__func__, (unsigned long long)ofs, len);
|
|
@@ -1050,7 +1075,7 @@ int nand_lock(struct mtd_info *mtd, loff
|
|
{
|
|
int ret = 0;
|
|
int chipnr, status, page;
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
pr_debug("%s: start = 0x%012llx, len = %llu\n",
|
|
__func__, (unsigned long long)ofs, len);
|
|
@@ -1309,13 +1334,12 @@ static int nand_read_page_raw_syndrome(s
|
|
static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
|
|
uint8_t *buf, int oob_required, int page)
|
|
{
|
|
- int i, eccsize = chip->ecc.size;
|
|
+ int i, eccsize = chip->ecc.size, ret;
|
|
int eccbytes = chip->ecc.bytes;
|
|
int eccsteps = chip->ecc.steps;
|
|
uint8_t *p = buf;
|
|
uint8_t *ecc_calc = chip->buffers->ecccalc;
|
|
uint8_t *ecc_code = chip->buffers->ecccode;
|
|
- uint32_t *eccpos = chip->ecc.layout->eccpos;
|
|
unsigned int max_bitflips = 0;
|
|
|
|
chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
|
|
@@ -1323,8 +1347,10 @@ static int nand_read_page_swecc(struct m
|
|
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
|
|
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
|
|
|
|
- for (i = 0; i < chip->ecc.total; i++)
|
|
- ecc_code[i] = chip->oob_poi[eccpos[i]];
|
|
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
|
|
+ chip->ecc.total);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
eccsteps = chip->ecc.steps;
|
|
p = buf;
|
|
@@ -1356,14 +1382,14 @@ static int nand_read_subpage(struct mtd_
|
|
uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
|
|
int page)
|
|
{
|
|
- int start_step, end_step, num_steps;
|
|
- uint32_t *eccpos = chip->ecc.layout->eccpos;
|
|
+ int start_step, end_step, num_steps, ret;
|
|
uint8_t *p;
|
|
int data_col_addr, i, gaps = 0;
|
|
int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
|
|
int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
|
|
- int index;
|
|
+ int index, section = 0;
|
|
unsigned int max_bitflips = 0;
|
|
+ struct mtd_oob_region oobregion = { };
|
|
|
|
/* Column address within the page aligned to ECC size (256bytes) */
|
|
start_step = data_offs / chip->ecc.size;
|
|
@@ -1391,12 +1417,13 @@ static int nand_read_subpage(struct mtd_
|
|
* The performance is faster if we position offsets according to
|
|
* ecc.pos. Let's make sure that there are no gaps in ECC positions.
|
|
*/
|
|
- for (i = 0; i < eccfrag_len - 1; i++) {
|
|
- if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
|
|
- gaps = 1;
|
|
- break;
|
|
- }
|
|
- }
|
|
+	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (oobregion.length < eccfrag_len)
|
|
+ gaps = 1;
|
|
+
|
|
if (gaps) {
|
|
chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
|
|
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
|
|
@@ -1405,20 +1432,23 @@ static int nand_read_subpage(struct mtd_
|
|
* Send the command to read the particular ECC bytes take care
|
|
* about buswidth alignment in read_buf.
|
|
*/
|
|
- aligned_pos = eccpos[index] & ~(busw - 1);
|
|
+ aligned_pos = oobregion.offset & ~(busw - 1);
|
|
aligned_len = eccfrag_len;
|
|
- if (eccpos[index] & (busw - 1))
|
|
+ if (oobregion.offset & (busw - 1))
|
|
aligned_len++;
|
|
- if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
|
|
+ if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
|
|
+ (busw - 1))
|
|
aligned_len++;
|
|
|
|
chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
|
|
- mtd->writesize + aligned_pos, -1);
|
|
+ mtd->writesize + aligned_pos, -1);
|
|
chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
|
|
}
|
|
|
|
- for (i = 0; i < eccfrag_len; i++)
|
|
- chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
|
|
+ ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
|
|
+ chip->oob_poi, index, eccfrag_len);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
p = bufpoi + data_col_addr;
|
|
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
|
|
@@ -1426,6 +1456,16 @@ static int nand_read_subpage(struct mtd_
|
|
|
|
stat = chip->ecc.correct(mtd, p,
|
|
&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
|
|
+ if (stat == -EBADMSG &&
|
|
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
|
|
+ /* check for empty pages with bitflips */
|
|
+ stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
|
|
+ &chip->buffers->ecccode[i],
|
|
+ chip->ecc.bytes,
|
|
+ NULL, 0,
|
|
+ chip->ecc.strength);
|
|
+ }
|
|
+
|
|
if (stat < 0) {
|
|
mtd->ecc_stats.failed++;
|
|
} else {
|
|
@@ -1449,13 +1489,12 @@ static int nand_read_subpage(struct mtd_
|
|
static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
|
|
uint8_t *buf, int oob_required, int page)
|
|
{
|
|
- int i, eccsize = chip->ecc.size;
|
|
+ int i, eccsize = chip->ecc.size, ret;
|
|
int eccbytes = chip->ecc.bytes;
|
|
int eccsteps = chip->ecc.steps;
|
|
uint8_t *p = buf;
|
|
uint8_t *ecc_calc = chip->buffers->ecccalc;
|
|
uint8_t *ecc_code = chip->buffers->ecccode;
|
|
- uint32_t *eccpos = chip->ecc.layout->eccpos;
|
|
unsigned int max_bitflips = 0;
|
|
|
|
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
|
|
@@ -1465,8 +1504,10 @@ static int nand_read_page_hwecc(struct m
|
|
}
|
|
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
|
|
|
|
- for (i = 0; i < chip->ecc.total; i++)
|
|
- ecc_code[i] = chip->oob_poi[eccpos[i]];
|
|
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
|
|
+ chip->ecc.total);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
eccsteps = chip->ecc.steps;
|
|
p = buf;
|
|
@@ -1475,6 +1516,15 @@ static int nand_read_page_hwecc(struct m
|
|
int stat;
|
|
|
|
stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
|
|
+ if (stat == -EBADMSG &&
|
|
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
|
|
+ /* check for empty pages with bitflips */
|
|
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
|
|
+ &ecc_code[i], eccbytes,
|
|
+ NULL, 0,
|
|
+ chip->ecc.strength);
|
|
+ }
|
|
+
|
|
if (stat < 0) {
|
|
mtd->ecc_stats.failed++;
|
|
} else {
|
|
@@ -1502,12 +1552,11 @@ static int nand_read_page_hwecc(struct m
|
|
static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
|
|
struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
|
|
{
|
|
- int i, eccsize = chip->ecc.size;
|
|
+ int i, eccsize = chip->ecc.size, ret;
|
|
int eccbytes = chip->ecc.bytes;
|
|
int eccsteps = chip->ecc.steps;
|
|
uint8_t *p = buf;
|
|
uint8_t *ecc_code = chip->buffers->ecccode;
|
|
- uint32_t *eccpos = chip->ecc.layout->eccpos;
|
|
uint8_t *ecc_calc = chip->buffers->ecccalc;
|
|
unsigned int max_bitflips = 0;
|
|
|
|
@@ -1516,8 +1565,10 @@ static int nand_read_page_hwecc_oob_firs
|
|
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
|
|
chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
|
|
|
|
- for (i = 0; i < chip->ecc.total; i++)
|
|
- ecc_code[i] = chip->oob_poi[eccpos[i]];
|
|
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
|
|
+ chip->ecc.total);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
|
|
int stat;
|
|
@@ -1527,6 +1578,15 @@ static int nand_read_page_hwecc_oob_firs
|
|
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
|
|
|
|
stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
|
|
+ if (stat == -EBADMSG &&
|
|
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
|
|
+ /* check for empty pages with bitflips */
|
|
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
|
|
+ &ecc_code[i], eccbytes,
|
|
+ NULL, 0,
|
|
+ chip->ecc.strength);
|
|
+ }
|
|
+
|
|
if (stat < 0) {
|
|
mtd->ecc_stats.failed++;
|
|
} else {
|
|
@@ -1554,6 +1614,7 @@ static int nand_read_page_syndrome(struc
|
|
int i, eccsize = chip->ecc.size;
|
|
int eccbytes = chip->ecc.bytes;
|
|
int eccsteps = chip->ecc.steps;
|
|
+ int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
|
|
uint8_t *p = buf;
|
|
uint8_t *oob = chip->oob_poi;
|
|
unsigned int max_bitflips = 0;
|
|
@@ -1573,19 +1634,29 @@ static int nand_read_page_syndrome(struc
|
|
chip->read_buf(mtd, oob, eccbytes);
|
|
stat = chip->ecc.correct(mtd, p, oob, NULL);
|
|
|
|
- if (stat < 0) {
|
|
- mtd->ecc_stats.failed++;
|
|
- } else {
|
|
- mtd->ecc_stats.corrected += stat;
|
|
- max_bitflips = max_t(unsigned int, max_bitflips, stat);
|
|
- }
|
|
-
|
|
oob += eccbytes;
|
|
|
|
if (chip->ecc.postpad) {
|
|
chip->read_buf(mtd, oob, chip->ecc.postpad);
|
|
oob += chip->ecc.postpad;
|
|
}
|
|
+
|
|
+ if (stat == -EBADMSG &&
|
|
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
|
|
+ /* check for empty pages with bitflips */
|
|
+ stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
|
|
+ oob - eccpadbytes,
|
|
+ eccpadbytes,
|
|
+ NULL, 0,
|
|
+ chip->ecc.strength);
|
|
+ }
|
|
+
|
|
+ if (stat < 0) {
|
|
+ mtd->ecc_stats.failed++;
|
|
+ } else {
|
|
+ mtd->ecc_stats.corrected += stat;
|
|
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
|
|
+ }
|
|
}
|
|
|
|
/* Calculate remaining oob bytes */
|
|
@@ -1598,14 +1669,17 @@ static int nand_read_page_syndrome(struc
|
|
|
|
/**
|
|
* nand_transfer_oob - [INTERN] Transfer oob to client buffer
|
|
- * @chip: nand chip structure
|
|
+ * @mtd: mtd info structure
|
|
* @oob: oob destination address
|
|
* @ops: oob ops structure
|
|
* @len: size of oob to transfer
|
|
*/
|
|
-static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
|
|
+static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
|
|
struct mtd_oob_ops *ops, size_t len)
|
|
{
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ int ret;
|
|
+
|
|
switch (ops->mode) {
|
|
|
|
case MTD_OPS_PLACE_OOB:
|
|
@@ -1613,31 +1687,12 @@ static uint8_t *nand_transfer_oob(struct
|
|
memcpy(oob, chip->oob_poi + ops->ooboffs, len);
|
|
return oob + len;
|
|
|
|
- case MTD_OPS_AUTO_OOB: {
|
|
- struct nand_oobfree *free = chip->ecc.layout->oobfree;
|
|
- uint32_t boffs = 0, roffs = ops->ooboffs;
|
|
- size_t bytes = 0;
|
|
-
|
|
- for (; free->length && len; free++, len -= bytes) {
|
|
- /* Read request not from offset 0? */
|
|
- if (unlikely(roffs)) {
|
|
- if (roffs >= free->length) {
|
|
- roffs -= free->length;
|
|
- continue;
|
|
- }
|
|
- boffs = free->offset + roffs;
|
|
- bytes = min_t(size_t, len,
|
|
- (free->length - roffs));
|
|
- roffs = 0;
|
|
- } else {
|
|
- bytes = min_t(size_t, len, free->length);
|
|
- boffs = free->offset;
|
|
- }
|
|
- memcpy(oob, chip->oob_poi + boffs, bytes);
|
|
- oob += bytes;
|
|
- }
|
|
- return oob;
|
|
- }
|
|
+ case MTD_OPS_AUTO_OOB:
|
|
+ ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
|
|
+ ops->ooboffs, len);
|
|
+ BUG_ON(ret);
|
|
+ return oob + len;
|
|
+
|
|
default:
|
|
BUG();
|
|
}
|
|
@@ -1655,7 +1710,7 @@ static uint8_t *nand_transfer_oob(struct
|
|
*/
|
|
static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
pr_debug("setting READ RETRY mode %d\n", retry_mode);
|
|
|
|
@@ -1680,12 +1735,11 @@ static int nand_do_read_ops(struct mtd_i
|
|
struct mtd_oob_ops *ops)
|
|
{
|
|
int chipnr, page, realpage, col, bytes, aligned, oob_required;
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
int ret = 0;
|
|
uint32_t readlen = ops->len;
|
|
uint32_t oobreadlen = ops->ooblen;
|
|
- uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
|
|
- mtd->oobavail : mtd->oobsize;
|
|
+ uint32_t max_oobsize = mtd_oobavail(mtd, ops);
|
|
|
|
uint8_t *bufpoi, *oob, *buf;
|
|
int use_bufpoi;
|
|
@@ -1772,7 +1826,7 @@ read_retry:
|
|
int toread = min(oobreadlen, max_oobsize);
|
|
|
|
if (toread) {
|
|
- oob = nand_transfer_oob(chip,
|
|
+ oob = nand_transfer_oob(mtd,
|
|
oob, ops, toread);
|
|
oobreadlen -= toread;
|
|
}
|
|
@@ -2024,7 +2078,7 @@ static int nand_do_read_oob(struct mtd_i
|
|
struct mtd_oob_ops *ops)
|
|
{
|
|
int page, realpage, chipnr;
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
struct mtd_ecc_stats stats;
|
|
int readlen = ops->ooblen;
|
|
int len;
|
|
@@ -2036,10 +2090,7 @@ static int nand_do_read_oob(struct mtd_i
|
|
|
|
stats = mtd->ecc_stats;
|
|
|
|
- if (ops->mode == MTD_OPS_AUTO_OOB)
|
|
- len = chip->ecc.layout->oobavail;
|
|
- else
|
|
- len = mtd->oobsize;
|
|
+ len = mtd_oobavail(mtd, ops);
|
|
|
|
if (unlikely(ops->ooboffs >= len)) {
|
|
pr_debug("%s: attempt to start read outside oob\n",
|
|
@@ -2073,7 +2124,7 @@ static int nand_do_read_oob(struct mtd_i
|
|
break;
|
|
|
|
len = min(len, readlen);
|
|
- buf = nand_transfer_oob(chip, buf, ops, len);
|
|
+ buf = nand_transfer_oob(mtd, buf, ops, len);
|
|
|
|
if (chip->options & NAND_NEED_READRDY) {
|
|
/* Apply delay or wait for ready/busy pin */
|
|
@@ -2232,19 +2283,20 @@ static int nand_write_page_swecc(struct
|
|
const uint8_t *buf, int oob_required,
|
|
int page)
|
|
{
|
|
- int i, eccsize = chip->ecc.size;
|
|
+ int i, eccsize = chip->ecc.size, ret;
|
|
int eccbytes = chip->ecc.bytes;
|
|
int eccsteps = chip->ecc.steps;
|
|
uint8_t *ecc_calc = chip->buffers->ecccalc;
|
|
const uint8_t *p = buf;
|
|
- uint32_t *eccpos = chip->ecc.layout->eccpos;
|
|
|
|
/* Software ECC calculation */
|
|
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
|
|
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
|
|
|
|
- for (i = 0; i < chip->ecc.total; i++)
|
|
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
|
|
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
|
|
+ chip->ecc.total);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
|
|
}
|
|
@@ -2261,12 +2313,11 @@ static int nand_write_page_hwecc(struct
|
|
const uint8_t *buf, int oob_required,
|
|
int page)
|
|
{
|
|
- int i, eccsize = chip->ecc.size;
|
|
+ int i, eccsize = chip->ecc.size, ret;
|
|
int eccbytes = chip->ecc.bytes;
|
|
int eccsteps = chip->ecc.steps;
|
|
uint8_t *ecc_calc = chip->buffers->ecccalc;
|
|
const uint8_t *p = buf;
|
|
- uint32_t *eccpos = chip->ecc.layout->eccpos;
|
|
|
|
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
|
|
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
|
|
@@ -2274,8 +2325,10 @@ static int nand_write_page_hwecc(struct
|
|
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
|
|
}
|
|
|
|
- for (i = 0; i < chip->ecc.total; i++)
|
|
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
|
|
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
|
|
+ chip->ecc.total);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
|
|
|
|
@@ -2303,11 +2356,10 @@ static int nand_write_subpage_hwecc(stru
|
|
int ecc_size = chip->ecc.size;
|
|
int ecc_bytes = chip->ecc.bytes;
|
|
int ecc_steps = chip->ecc.steps;
|
|
- uint32_t *eccpos = chip->ecc.layout->eccpos;
|
|
uint32_t start_step = offset / ecc_size;
|
|
uint32_t end_step = (offset + data_len - 1) / ecc_size;
|
|
int oob_bytes = mtd->oobsize / ecc_steps;
|
|
- int step, i;
|
|
+ int step, ret;
|
|
|
|
for (step = 0; step < ecc_steps; step++) {
|
|
/* configure controller for WRITE access */
|
|
@@ -2335,8 +2387,10 @@ static int nand_write_subpage_hwecc(stru
|
|
/* copy calculated ECC for whole page to chip->buffer->oob */
|
|
/* this include masked-value(0xFF) for unwritten subpages */
|
|
ecc_calc = chip->buffers->ecccalc;
|
|
- for (i = 0; i < chip->ecc.total; i++)
|
|
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
|
|
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
|
|
+ chip->ecc.total);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
/* write OOB buffer to NAND device */
|
|
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
|
|
@@ -2472,7 +2526,8 @@ static int nand_write_page(struct mtd_in
|
|
static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
|
|
struct mtd_oob_ops *ops)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ int ret;
|
|
|
|
/*
|
|
* Initialise to all 0xFF, to avoid the possibility of left over OOB
|
|
@@ -2487,31 +2542,12 @@ static uint8_t *nand_fill_oob(struct mtd
|
|
memcpy(chip->oob_poi + ops->ooboffs, oob, len);
|
|
return oob + len;
|
|
|
|
- case MTD_OPS_AUTO_OOB: {
|
|
- struct nand_oobfree *free = chip->ecc.layout->oobfree;
|
|
- uint32_t boffs = 0, woffs = ops->ooboffs;
|
|
- size_t bytes = 0;
|
|
-
|
|
- for (; free->length && len; free++, len -= bytes) {
|
|
- /* Write request not from offset 0? */
|
|
- if (unlikely(woffs)) {
|
|
- if (woffs >= free->length) {
|
|
- woffs -= free->length;
|
|
- continue;
|
|
- }
|
|
- boffs = free->offset + woffs;
|
|
- bytes = min_t(size_t, len,
|
|
- (free->length - woffs));
|
|
- woffs = 0;
|
|
- } else {
|
|
- bytes = min_t(size_t, len, free->length);
|
|
- boffs = free->offset;
|
|
- }
|
|
- memcpy(chip->oob_poi + boffs, oob, bytes);
|
|
- oob += bytes;
|
|
- }
|
|
- return oob;
|
|
- }
|
|
+ case MTD_OPS_AUTO_OOB:
|
|
+ ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
|
|
+ ops->ooboffs, len);
|
|
+ BUG_ON(ret);
|
|
+ return oob + len;
|
|
+
|
|
default:
|
|
BUG();
|
|
}
|
|
@@ -2532,12 +2568,11 @@ static int nand_do_write_ops(struct mtd_
|
|
struct mtd_oob_ops *ops)
|
|
{
|
|
int chipnr, realpage, page, blockmask, column;
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
uint32_t writelen = ops->len;
|
|
|
|
uint32_t oobwritelen = ops->ooblen;
|
|
- uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
|
|
- mtd->oobavail : mtd->oobsize;
|
|
+ uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
|
|
|
|
uint8_t *oob = ops->oobbuf;
|
|
uint8_t *buf = ops->datbuf;
|
|
@@ -2662,7 +2697,7 @@ err_out:
|
|
static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
|
|
size_t *retlen, const uint8_t *buf)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
struct mtd_oob_ops ops;
|
|
int ret;
|
|
|
|
@@ -2722,15 +2757,12 @@ static int nand_do_write_oob(struct mtd_
|
|
struct mtd_oob_ops *ops)
|
|
{
|
|
int chipnr, page, status, len;
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
pr_debug("%s: to = 0x%08x, len = %i\n",
|
|
__func__, (unsigned int)to, (int)ops->ooblen);
|
|
|
|
- if (ops->mode == MTD_OPS_AUTO_OOB)
|
|
- len = chip->ecc.layout->oobavail;
|
|
- else
|
|
- len = mtd->oobsize;
|
|
+ len = mtd_oobavail(mtd, ops);
|
|
|
|
/* Do not allow write past end of page */
|
|
if ((ops->ooboffs + ops->ooblen) > len) {
|
|
@@ -2847,7 +2879,7 @@ out:
|
|
*/
|
|
static int single_erase(struct mtd_info *mtd, int page)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
/* Send commands to erase a block */
|
|
chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
|
|
chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
|
|
@@ -2879,7 +2911,7 @@ int nand_erase_nand(struct mtd_info *mtd
|
|
int allowbbt)
|
|
{
|
|
int page, status, pages_per_block, ret, chipnr;
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
loff_t len;
|
|
|
|
pr_debug("%s: start = 0x%012llx, len = %llu\n",
|
|
@@ -2918,7 +2950,7 @@ int nand_erase_nand(struct mtd_info *mtd
|
|
while (len) {
|
|
/* Check if we have a bad block, we do not erase bad blocks! */
|
|
if (nand_block_checkbad(mtd, ((loff_t) page) <<
|
|
- chip->page_shift, 0, allowbbt)) {
|
|
+ chip->page_shift, allowbbt)) {
|
|
pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
|
|
__func__, page);
|
|
instr->state = MTD_ERASE_FAILED;
|
|
@@ -3005,7 +3037,20 @@ static void nand_sync(struct mtd_info *m
|
|
*/
|
|
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
|
|
{
|
|
- return nand_block_checkbad(mtd, offs, 1, 0);
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ int chipnr = (int)(offs >> chip->chip_shift);
|
|
+ int ret;
|
|
+
|
|
+ /* Select the NAND device */
|
|
+ nand_get_device(mtd, FL_READING);
|
|
+ chip->select_chip(mtd, chipnr);
|
|
+
|
|
+ ret = nand_block_checkbad(mtd, offs, 0);
|
|
+
|
|
+ chip->select_chip(mtd, -1);
|
|
+ nand_release_device(mtd);
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
/**
|
|
@@ -3094,7 +3139,7 @@ static int nand_suspend(struct mtd_info
|
|
*/
|
|
static void nand_resume(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
if (chip->state == FL_PM_SUSPENDED)
|
|
nand_release_device(mtd);
|
|
@@ -3266,7 +3311,7 @@ ext_out:
|
|
|
|
static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
|
|
|
|
return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
|
|
@@ -3937,10 +3982,13 @@ ident_done:
|
|
return type;
|
|
}
|
|
|
|
-static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip,
|
|
- struct device_node *dn)
|
|
+static int nand_dt_init(struct nand_chip *chip)
|
|
{
|
|
- int ecc_mode, ecc_strength, ecc_step;
|
|
+ struct device_node *dn = nand_get_flash_node(chip);
|
|
+ int ecc_mode, ecc_algo, ecc_strength, ecc_step;
|
|
+
|
|
+ if (!dn)
|
|
+ return 0;
|
|
|
|
if (of_get_nand_bus_width(dn) == 16)
|
|
chip->options |= NAND_BUSWIDTH_16;
|
|
@@ -3949,6 +3997,7 @@ static int nand_dt_init(struct mtd_info
|
|
chip->bbt_options |= NAND_BBT_USE_FLASH;
|
|
|
|
ecc_mode = of_get_nand_ecc_mode(dn);
|
|
+ ecc_algo = of_get_nand_ecc_algo(dn);
|
|
ecc_strength = of_get_nand_ecc_strength(dn);
|
|
ecc_step = of_get_nand_ecc_step_size(dn);
|
|
|
|
@@ -3961,6 +4010,9 @@ static int nand_dt_init(struct mtd_info
|
|
if (ecc_mode >= 0)
|
|
chip->ecc.mode = ecc_mode;
|
|
|
|
+ if (ecc_algo >= 0)
|
|
+ chip->ecc.algo = ecc_algo;
|
|
+
|
|
if (ecc_strength >= 0)
|
|
chip->ecc.strength = ecc_strength;
|
|
|
|
@@ -3984,15 +4036,16 @@ int nand_scan_ident(struct mtd_info *mtd
|
|
struct nand_flash_dev *table)
|
|
{
|
|
int i, nand_maf_id, nand_dev_id;
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
struct nand_flash_dev *type;
|
|
int ret;
|
|
|
|
- if (chip->flash_node) {
|
|
- ret = nand_dt_init(mtd, chip, chip->flash_node);
|
|
- if (ret)
|
|
- return ret;
|
|
- }
|
|
+ ret = nand_dt_init(chip);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (!mtd->name && mtd->dev.parent)
|
|
+ mtd->name = dev_name(mtd->dev.parent);
|
|
|
|
if (!mtd->name && mtd->dev.parent)
|
|
mtd->name = dev_name(mtd->dev.parent);
|
|
@@ -4055,7 +4108,7 @@ EXPORT_SYMBOL(nand_scan_ident);
|
|
*/
|
|
static bool nand_ecc_strength_good(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
struct nand_ecc_ctrl *ecc = &chip->ecc;
|
|
int corr, ds_corr;
|
|
|
|
@@ -4083,10 +4136,10 @@ static bool nand_ecc_strength_good(struc
|
|
*/
|
|
int nand_scan_tail(struct mtd_info *mtd)
|
|
{
|
|
- int i;
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
struct nand_ecc_ctrl *ecc = &chip->ecc;
|
|
struct nand_buffers *nbuf;
|
|
+ int ret;
|
|
|
|
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
|
|
BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
|
|
@@ -4113,19 +4166,15 @@ int nand_scan_tail(struct mtd_info *mtd)
|
|
/*
|
|
* If no default placement scheme is given, select an appropriate one.
|
|
*/
|
|
- if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
|
|
+ if (!mtd->ooblayout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
|
|
switch (mtd->oobsize) {
|
|
case 8:
|
|
- ecc->layout = &nand_oob_8;
|
|
- break;
|
|
case 16:
|
|
- ecc->layout = &nand_oob_16;
|
|
+ mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
|
|
break;
|
|
case 64:
|
|
- ecc->layout = &nand_oob_64;
|
|
- break;
|
|
case 128:
|
|
- ecc->layout = &nand_oob_128;
|
|
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
|
|
break;
|
|
default:
|
|
pr_warn("No oob scheme defined for oobsize %d\n",
|
|
@@ -4168,7 +4217,7 @@ int nand_scan_tail(struct mtd_info *mtd)
|
|
ecc->write_oob = nand_write_oob_std;
|
|
if (!ecc->read_subpage)
|
|
ecc->read_subpage = nand_read_subpage;
|
|
- if (!ecc->write_subpage)
|
|
+ if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
|
|
ecc->write_subpage = nand_write_subpage_hwecc;
|
|
|
|
case NAND_ECC_HW_SYNDROME:
|
|
@@ -4246,10 +4295,8 @@ int nand_scan_tail(struct mtd_info *mtd)
|
|
}
|
|
|
|
/* See nand_bch_init() for details. */
|
|
- ecc->bytes = DIV_ROUND_UP(
|
|
- ecc->strength * fls(8 * ecc->size), 8);
|
|
- ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
|
|
- &ecc->layout);
|
|
+ ecc->bytes = 0;
|
|
+ ecc->priv = nand_bch_init(mtd);
|
|
if (!ecc->priv) {
|
|
pr_warn("BCH ECC initialization failed!\n");
|
|
BUG();
|
|
@@ -4280,20 +4327,9 @@ int nand_scan_tail(struct mtd_info *mtd)
|
|
if (!ecc->write_oob_raw)
|
|
ecc->write_oob_raw = ecc->write_oob;
|
|
|
|
- /*
|
|
- * The number of bytes available for a client to place data into
|
|
- * the out of band area.
|
|
- */
|
|
- ecc->layout->oobavail = 0;
|
|
- for (i = 0; ecc->layout->oobfree[i].length
|
|
- && i < ARRAY_SIZE(ecc->layout->oobfree); i++)
|
|
- ecc->layout->oobavail += ecc->layout->oobfree[i].length;
|
|
- mtd->oobavail = ecc->layout->oobavail;
|
|
-
|
|
- /* ECC sanity check: warn if it's too weak */
|
|
- if (!nand_ecc_strength_good(mtd))
|
|
- pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
|
|
- mtd->name);
|
|
+ /* propagate ecc info to mtd_info */
|
|
+ mtd->ecc_strength = ecc->strength;
|
|
+ mtd->ecc_step_size = ecc->size;
|
|
|
|
/*
|
|
* Set the number of read / write steps for one page depending on ECC
|
|
@@ -4306,6 +4342,21 @@ int nand_scan_tail(struct mtd_info *mtd)
|
|
}
|
|
ecc->total = ecc->steps * ecc->bytes;
|
|
|
|
+ /*
|
|
+ * The number of bytes available for a client to place data into
|
|
+ * the out of band area.
|
|
+ */
|
|
+ ret = mtd_ooblayout_count_freebytes(mtd);
|
|
+ if (ret < 0)
|
|
+ ret = 0;
|
|
+
|
|
+ mtd->oobavail = ret;
|
|
+
|
|
+ /* ECC sanity check: warn if it's too weak */
|
|
+ if (!nand_ecc_strength_good(mtd))
|
|
+ pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
|
|
+ mtd->name);
|
|
+
|
|
/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
|
|
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
|
|
switch (ecc->steps) {
|
|
@@ -4362,10 +4413,6 @@ int nand_scan_tail(struct mtd_info *mtd)
|
|
mtd->_block_markbad = nand_block_markbad;
|
|
mtd->writebufsize = mtd->writesize;
|
|
|
|
- /* propagate ecc info to mtd_info */
|
|
- mtd->ecclayout = ecc->layout;
|
|
- mtd->ecc_strength = ecc->strength;
|
|
- mtd->ecc_step_size = ecc->size;
|
|
/*
|
|
* Initialize bitflip_threshold to its default prior scan_bbt() call.
|
|
* scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
|
|
@@ -4421,7 +4468,7 @@ EXPORT_SYMBOL(nand_scan);
|
|
*/
|
|
void nand_release(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
|
|
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
|
|
--- a/drivers/mtd/nand/nand_bbt.c
|
|
+++ b/drivers/mtd/nand/nand_bbt.c
|
|
@@ -172,7 +172,7 @@ static int read_bbt(struct mtd_info *mtd
|
|
struct nand_bbt_descr *td, int offs)
|
|
{
|
|
int res, ret = 0, i, j, act = 0;
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
size_t retlen, len, totlen;
|
|
loff_t from;
|
|
int bits = td->options & NAND_BBT_NRBITS_MSK;
|
|
@@ -263,7 +263,7 @@ static int read_bbt(struct mtd_info *mtd
|
|
*/
|
|
static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
int res = 0, i;
|
|
|
|
if (td->options & NAND_BBT_PERCHIP) {
|
|
@@ -388,7 +388,7 @@ static u32 bbt_get_ver_offs(struct mtd_i
|
|
static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
|
|
struct nand_bbt_descr *td, struct nand_bbt_descr *md)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
|
|
/* Read the primary version, if available */
|
|
if (td->options & NAND_BBT_VERSION) {
|
|
@@ -454,7 +454,7 @@ static int scan_block_fast(struct mtd_in
|
|
static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
|
|
struct nand_bbt_descr *bd, int chip)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
int i, numblocks, numpages;
|
|
int startblock;
|
|
loff_t from;
|
|
@@ -523,7 +523,7 @@ static int create_bbt(struct mtd_info *m
|
|
*/
|
|
static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
int i, chips;
|
|
int startblock, block, dir;
|
|
int scanlen = mtd->writesize + mtd->oobsize;
|
|
@@ -618,7 +618,7 @@ static int write_bbt(struct mtd_info *mt
|
|
struct nand_bbt_descr *td, struct nand_bbt_descr *md,
|
|
int chipsel)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
struct erase_info einfo;
|
|
int i, res, chip = 0;
|
|
int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
|
|
@@ -819,7 +819,7 @@ static int write_bbt(struct mtd_info *mt
|
|
*/
|
|
static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
|
|
return create_bbt(mtd, this->buffers->databuf, bd, -1);
|
|
}
|
|
@@ -838,7 +838,7 @@ static inline int nand_memory_bbt(struct
|
|
static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
|
|
{
|
|
int i, chips, writeops, create, chipsel, res, res2;
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
struct nand_bbt_descr *td = this->bbt_td;
|
|
struct nand_bbt_descr *md = this->bbt_md;
|
|
struct nand_bbt_descr *rd, *rd2;
|
|
@@ -962,7 +962,7 @@ static int check_create(struct mtd_info
|
|
*/
|
|
static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
int i, j, chips, block, nrblocks, update;
|
|
uint8_t oldval;
|
|
|
|
@@ -1022,7 +1022,7 @@ static void mark_bbt_region(struct mtd_i
|
|
*/
|
|
static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
u32 pattern_len;
|
|
u32 bits;
|
|
u32 table_size;
|
|
@@ -1074,7 +1074,7 @@ static void verify_bbt_descr(struct mtd_
|
|
*/
|
|
static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
int len, res;
|
|
uint8_t *buf;
|
|
struct nand_bbt_descr *td = this->bbt_td;
|
|
@@ -1147,7 +1147,7 @@ err:
|
|
*/
|
|
static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
int len, res = 0;
|
|
int chip, chipsel;
|
|
uint8_t *buf;
|
|
@@ -1281,7 +1281,7 @@ static int nand_create_badblock_pattern(
|
|
*/
|
|
int nand_default_bbt(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
int ret;
|
|
|
|
/* Is a flash based bad block table requested? */
|
|
@@ -1317,7 +1317,7 @@ int nand_default_bbt(struct mtd_info *mt
|
|
*/
|
|
int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
int block;
|
|
|
|
block = (int)(offs >> this->bbt_erase_shift);
|
|
@@ -1332,7 +1332,7 @@ int nand_isreserved_bbt(struct mtd_info
|
|
*/
|
|
int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
int block, res;
|
|
|
|
block = (int)(offs >> this->bbt_erase_shift);
|
|
@@ -1359,7 +1359,7 @@ int nand_isbad_bbt(struct mtd_info *mtd,
|
|
*/
|
|
int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
|
|
{
|
|
- struct nand_chip *this = mtd->priv;
|
|
+ struct nand_chip *this = mtd_to_nand(mtd);
|
|
int block, ret = 0;
|
|
|
|
block = (int)(offs >> this->bbt_erase_shift);
|
|
@@ -1373,5 +1373,3 @@ int nand_markbad_bbt(struct mtd_info *mt
|
|
|
|
return ret;
|
|
}
|
|
-
|
|
-EXPORT_SYMBOL(nand_scan_bbt);
|
|
--- a/drivers/mtd/nand/nand_bch.c
|
|
+++ b/drivers/mtd/nand/nand_bch.c
|
|
@@ -32,13 +32,11 @@
|
|
/**
|
|
* struct nand_bch_control - private NAND BCH control structure
|
|
* @bch: BCH control structure
|
|
- * @ecclayout: private ecc layout for this BCH configuration
|
|
* @errloc: error location array
|
|
* @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
|
|
*/
|
|
struct nand_bch_control {
|
|
struct bch_control *bch;
|
|
- struct nand_ecclayout ecclayout;
|
|
unsigned int *errloc;
|
|
unsigned char *eccmask;
|
|
};
|
|
@@ -52,7 +50,7 @@ struct nand_bch_control {
|
|
int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
|
|
unsigned char *code)
|
|
{
|
|
- const struct nand_chip *chip = mtd->priv;
|
|
+ const struct nand_chip *chip = mtd_to_nand(mtd);
|
|
struct nand_bch_control *nbc = chip->ecc.priv;
|
|
unsigned int i;
|
|
|
|
@@ -79,7 +77,7 @@ EXPORT_SYMBOL(nand_bch_calculate_ecc);
|
|
int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
|
|
unsigned char *read_ecc, unsigned char *calc_ecc)
|
|
{
|
|
- const struct nand_chip *chip = mtd->priv;
|
|
+ const struct nand_chip *chip = mtd_to_nand(mtd);
|
|
struct nand_bch_control *nbc = chip->ecc.priv;
|
|
unsigned int *errloc = nbc->errloc;
|
|
int i, count;
|
|
@@ -98,7 +96,7 @@ int nand_bch_correct_data(struct mtd_inf
|
|
}
|
|
} else if (count < 0) {
|
|
printk(KERN_ERR "ecc unrecoverable error\n");
|
|
- count = -1;
|
|
+ count = -EBADMSG;
|
|
}
|
|
return count;
|
|
}
|
|
@@ -107,9 +105,6 @@ EXPORT_SYMBOL(nand_bch_correct_data);
|
|
/**
|
|
* nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
|
|
* @mtd: MTD block structure
|
|
- * @eccsize: ecc block size in bytes
|
|
- * @eccbytes: ecc length in bytes
|
|
- * @ecclayout: output default layout
|
|
*
|
|
* Returns:
|
|
* a pointer to a new NAND BCH control structure, or NULL upon failure
|
|
@@ -123,14 +118,20 @@ EXPORT_SYMBOL(nand_bch_correct_data);
|
|
* @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
|
|
* @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
|
|
*/
|
|
-struct nand_bch_control *
|
|
-nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
|
|
- struct nand_ecclayout **ecclayout)
|
|
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
|
|
{
|
|
+ struct nand_chip *nand = mtd_to_nand(mtd);
|
|
unsigned int m, t, eccsteps, i;
|
|
- struct nand_ecclayout *layout;
|
|
struct nand_bch_control *nbc = NULL;
|
|
unsigned char *erased_page;
|
|
+ unsigned int eccsize = nand->ecc.size;
|
|
+ unsigned int eccbytes = nand->ecc.bytes;
|
|
+ unsigned int eccstrength = nand->ecc.strength;
|
|
+
|
|
+ if (!eccbytes && eccstrength) {
|
|
+ eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
|
|
+ nand->ecc.bytes = eccbytes;
|
|
+ }
|
|
|
|
if (!eccsize || !eccbytes) {
|
|
printk(KERN_WARNING "ecc parameters not supplied\n");
|
|
@@ -158,7 +159,7 @@ nand_bch_init(struct mtd_info *mtd, unsi
|
|
eccsteps = mtd->writesize/eccsize;
|
|
|
|
/* if no ecc placement scheme was provided, build one */
|
|
- if (!*ecclayout) {
|
|
+ if (!mtd->ooblayout) {
|
|
|
|
/* handle large page devices only */
|
|
if (mtd->oobsize < 64) {
|
|
@@ -167,24 +168,7 @@ nand_bch_init(struct mtd_info *mtd, unsi
|
|
goto fail;
|
|
}
|
|
|
|
- layout = &nbc->ecclayout;
|
|
- layout->eccbytes = eccsteps*eccbytes;
|
|
-
|
|
- /* reserve 2 bytes for bad block marker */
|
|
- if (layout->eccbytes+2 > mtd->oobsize) {
|
|
- printk(KERN_WARNING "no suitable oob scheme available "
|
|
- "for oobsize %d eccbytes %u\n", mtd->oobsize,
|
|
- eccbytes);
|
|
- goto fail;
|
|
- }
|
|
- /* put ecc bytes at oob tail */
|
|
- for (i = 0; i < layout->eccbytes; i++)
|
|
- layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
|
|
-
|
|
- layout->oobfree[0].offset = 2;
|
|
- layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
|
|
-
|
|
- *ecclayout = layout;
|
|
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
|
|
}
|
|
|
|
/* sanity checks */
|
|
@@ -192,7 +176,8 @@ nand_bch_init(struct mtd_info *mtd, unsi
|
|
printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
|
|
goto fail;
|
|
}
|
|
- if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
|
|
+
|
|
+ if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
|
|
printk(KERN_WARNING "invalid ecc layout\n");
|
|
goto fail;
|
|
}
|
|
@@ -216,6 +201,9 @@ nand_bch_init(struct mtd_info *mtd, unsi
|
|
for (i = 0; i < eccbytes; i++)
|
|
nbc->eccmask[i] ^= 0xff;
|
|
|
|
+ if (!eccstrength)
|
|
+ nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
|
|
+
|
|
return nbc;
|
|
fail:
|
|
nand_bch_free(nbc);
|
|
--- a/drivers/mtd/nand/nand_ecc.c
|
|
+++ b/drivers/mtd/nand/nand_ecc.c
|
|
@@ -424,7 +424,7 @@ int nand_calculate_ecc(struct mtd_info *
|
|
unsigned char *code)
|
|
{
|
|
__nand_calculate_ecc(buf,
|
|
- ((struct nand_chip *)mtd->priv)->ecc.size, code);
|
|
+ mtd_to_nand(mtd)->ecc.size, code);
|
|
|
|
return 0;
|
|
}
|
|
@@ -524,7 +524,7 @@ int nand_correct_data(struct mtd_info *m
|
|
unsigned char *read_ecc, unsigned char *calc_ecc)
|
|
{
|
|
return __nand_correct_data(buf, read_ecc, calc_ecc,
|
|
- ((struct nand_chip *)mtd->priv)->ecc.size);
|
|
+ mtd_to_nand(mtd)->ecc.size);
|
|
}
|
|
EXPORT_SYMBOL(nand_correct_data);
|
|
|
|
--- a/drivers/mtd/nand/nand_ids.c
|
|
+++ b/drivers/mtd/nand/nand_ids.c
|
|
@@ -50,8 +50,8 @@ struct nand_flash_dev nand_flash_ids[] =
|
|
SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
|
|
{"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
|
|
{ .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
|
|
- SZ_8K, SZ_8K, SZ_2M, 0, 6, 640, NAND_ECC_INFO(40, SZ_1K),
|
|
- 4 },
|
|
+ SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
|
|
+ NAND_ECC_INFO(40, SZ_1K), 4 },
|
|
|
|
LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
|
|
LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
|
|
--- a/drivers/mtd/nand/nandsim.c
|
|
+++ b/drivers/mtd/nand/nandsim.c
|
|
@@ -666,8 +666,8 @@ static char *get_partition_name(int i)
|
|
*/
|
|
static int init_nandsim(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
- struct nandsim *ns = chip->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ struct nandsim *ns = nand_get_controller_data(chip);
|
|
int i, ret = 0;
|
|
uint64_t remains;
|
|
uint64_t next_offset;
|
|
@@ -1908,7 +1908,8 @@ static void switch_state(struct nandsim
|
|
|
|
static u_char ns_nand_read_byte(struct mtd_info *mtd)
|
|
{
|
|
- struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ struct nandsim *ns = nand_get_controller_data(chip);
|
|
u_char outb = 0x00;
|
|
|
|
/* Sanity and correctness checks */
|
|
@@ -1969,7 +1970,8 @@ static u_char ns_nand_read_byte(struct m
|
|
|
|
static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
|
|
{
|
|
- struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ struct nandsim *ns = nand_get_controller_data(chip);
|
|
|
|
/* Sanity and correctness checks */
|
|
if (!ns->lines.ce) {
|
|
@@ -2123,7 +2125,8 @@ static void ns_nand_write_byte(struct mt
|
|
|
|
static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
|
|
{
|
|
- struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ struct nandsim *ns = nand_get_controller_data(chip);
|
|
|
|
ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
|
|
ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
|
|
@@ -2141,7 +2144,7 @@ static int ns_device_ready(struct mtd_in
|
|
|
|
static uint16_t ns_nand_read_word(struct mtd_info *mtd)
|
|
{
|
|
- struct nand_chip *chip = (struct nand_chip *)mtd->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
|
|
NS_DBG("read_word\n");
|
|
|
|
@@ -2150,7 +2153,8 @@ static uint16_t ns_nand_read_word(struct
|
|
|
|
static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
|
|
{
|
|
- struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ struct nandsim *ns = nand_get_controller_data(chip);
|
|
|
|
/* Check that chip is expecting data input */
|
|
if (!(ns->state & STATE_DATAIN_MASK)) {
|
|
@@ -2177,7 +2181,8 @@ static void ns_nand_write_buf(struct mtd
|
|
|
|
static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
|
|
{
|
|
- struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(mtd);
|
|
+ struct nandsim *ns = nand_get_controller_data(chip);
|
|
|
|
/* Sanity and correctness checks */
|
|
if (!ns->lines.ce) {
|
|
@@ -2198,7 +2203,7 @@ static void ns_nand_read_buf(struct mtd_
|
|
int i;
|
|
|
|
for (i = 0; i < len; i++)
|
|
- buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);
|
|
+ buf[i] = mtd_to_nand(mtd)->read_byte(mtd);
|
|
|
|
return;
|
|
}
|
|
@@ -2236,16 +2241,15 @@ static int __init ns_init_module(void)
|
|
}
|
|
|
|
/* Allocate and initialize mtd_info, nand_chip and nandsim structures */
|
|
- nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
|
|
- + sizeof(struct nandsim), GFP_KERNEL);
|
|
- if (!nsmtd) {
|
|
+ chip = kzalloc(sizeof(struct nand_chip) + sizeof(struct nandsim),
|
|
+ GFP_KERNEL);
|
|
+ if (!chip) {
|
|
NS_ERR("unable to allocate core structures.\n");
|
|
return -ENOMEM;
|
|
}
|
|
- chip = (struct nand_chip *)(nsmtd + 1);
|
|
- nsmtd->priv = (void *)chip;
|
|
+ nsmtd = nand_to_mtd(chip);
|
|
nand = (struct nandsim *)(chip + 1);
|
|
- chip->priv = (void *)nand;
|
|
+ nand_set_controller_data(chip, (void *)nand);
|
|
|
|
/*
|
|
* Register simulator's callbacks.
|
|
@@ -2257,6 +2261,7 @@ static int __init ns_init_module(void)
|
|
chip->read_buf = ns_nand_read_buf;
|
|
chip->read_word = ns_nand_read_word;
|
|
chip->ecc.mode = NAND_ECC_SOFT;
|
|
+ chip->ecc.algo = NAND_ECC_HAMMING;
|
|
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
|
|
/* and 'badblocks' parameters to work */
|
|
chip->options |= NAND_SKIP_BBTSCAN;
|
|
@@ -2335,6 +2340,7 @@ static int __init ns_init_module(void)
|
|
goto error;
|
|
}
|
|
chip->ecc.mode = NAND_ECC_SOFT_BCH;
|
|
+ chip->ecc.algo = NAND_ECC_BCH;
|
|
chip->ecc.size = 512;
|
|
chip->ecc.strength = bch;
|
|
chip->ecc.bytes = eccbytes;
|
|
@@ -2392,7 +2398,7 @@ err_exit:
|
|
for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
|
|
kfree(nand->partitions[i].name);
|
|
error:
|
|
- kfree(nsmtd);
|
|
+ kfree(chip);
|
|
free_lists();
|
|
|
|
return retval;
|
|
@@ -2405,7 +2411,8 @@ module_init(ns_init_module);
|
|
*/
|
|
static void __exit ns_cleanup_module(void)
|
|
{
|
|
- struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
|
|
+ struct nand_chip *chip = mtd_to_nand(nsmtd);
|
|
+ struct nandsim *ns = nand_get_controller_data(chip);
|
|
int i;
|
|
|
|
nandsim_debugfs_remove(ns);
|
|
@@ -2413,7 +2420,7 @@ static void __exit ns_cleanup_module(voi
|
|
nand_release(nsmtd); /* Unregister driver */
|
|
for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
|
|
kfree(ns->partitions[i].name);
|
|
- kfree(nsmtd); /* Free other structures */
|
|
+ kfree(mtd_to_nand(nsmtd)); /* Free other structures */
|
|
free_lists();
|
|
}
|
|
|
|
--- a/drivers/mtd/ofpart.c
|
|
+++ b/drivers/mtd/ofpart.c
|
|
@@ -26,9 +26,10 @@ static bool node_has_compatible(struct d
|
|
}
|
|
|
|
static int parse_ofpart_partitions(struct mtd_info *master,
|
|
- struct mtd_partition **pparts,
|
|
+ const struct mtd_partition **pparts,
|
|
struct mtd_part_parser_data *data)
|
|
{
|
|
+ struct mtd_partition *parts;
|
|
struct device_node *mtd_node;
|
|
struct device_node *ofpart_node;
|
|
const char *partname;
|
|
@@ -37,10 +38,8 @@ static int parse_ofpart_partitions(struc
|
|
bool dedicated = true;
|
|
|
|
|
|
- if (!data)
|
|
- return 0;
|
|
-
|
|
- mtd_node = data->of_node;
|
|
+ /* Pull of_node from the master device node */
|
|
+ mtd_node = mtd_get_of_node(master);
|
|
if (!mtd_node)
|
|
return 0;
|
|
|
|
@@ -72,8 +71,8 @@ static int parse_ofpart_partitions(struc
|
|
if (nr_parts == 0)
|
|
return 0;
|
|
|
|
- *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
|
|
- if (!*pparts)
|
|
+ parts = kzalloc(nr_parts * sizeof(*parts), GFP_KERNEL);
|
|
+ if (!parts)
|
|
return -ENOMEM;
|
|
|
|
i = 0;
|
|
@@ -107,19 +106,19 @@ static int parse_ofpart_partitions(struc
|
|
goto ofpart_fail;
|
|
}
|
|
|
|
- (*pparts)[i].offset = of_read_number(reg, a_cells);
|
|
- (*pparts)[i].size = of_read_number(reg + a_cells, s_cells);
|
|
+ parts[i].offset = of_read_number(reg, a_cells);
|
|
+ parts[i].size = of_read_number(reg + a_cells, s_cells);
|
|
|
|
partname = of_get_property(pp, "label", &len);
|
|
if (!partname)
|
|
partname = of_get_property(pp, "name", &len);
|
|
- (*pparts)[i].name = partname;
|
|
+ parts[i].name = partname;
|
|
|
|
if (of_get_property(pp, "read-only", &len))
|
|
- (*pparts)[i].mask_flags |= MTD_WRITEABLE;
|
|
+ parts[i].mask_flags |= MTD_WRITEABLE;
|
|
|
|
if (of_get_property(pp, "lock", &len))
|
|
- (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;
|
|
+ parts[i].mask_flags |= MTD_POWERUP_LOCK;
|
|
|
|
i++;
|
|
}
|
|
@@ -127,6 +126,7 @@ static int parse_ofpart_partitions(struc
|
|
if (!nr_parts)
|
|
goto ofpart_none;
|
|
|
|
+ *pparts = parts;
|
|
return nr_parts;
|
|
|
|
ofpart_fail:
|
|
@@ -135,21 +135,20 @@ ofpart_fail:
|
|
ret = -EINVAL;
|
|
ofpart_none:
|
|
of_node_put(pp);
|
|
- kfree(*pparts);
|
|
- *pparts = NULL;
|
|
+ kfree(parts);
|
|
return ret;
|
|
}
|
|
|
|
static struct mtd_part_parser ofpart_parser = {
|
|
- .owner = THIS_MODULE,
|
|
.parse_fn = parse_ofpart_partitions,
|
|
.name = "ofpart",
|
|
};
|
|
|
|
static int parse_ofoldpart_partitions(struct mtd_info *master,
|
|
- struct mtd_partition **pparts,
|
|
+ const struct mtd_partition **pparts,
|
|
struct mtd_part_parser_data *data)
|
|
{
|
|
+ struct mtd_partition *parts;
|
|
struct device_node *dp;
|
|
int i, plen, nr_parts;
|
|
const struct {
|
|
@@ -157,10 +156,8 @@ static int parse_ofoldpart_partitions(st
|
|
} *part;
|
|
const char *names;
|
|
|
|
- if (!data)
|
|
- return 0;
|
|
-
|
|
- dp = data->of_node;
|
|
+ /* Pull of_node from the master device node */
|
|
+ dp = mtd_get_of_node(master);
|
|
if (!dp)
|
|
return 0;
|
|
|
|
@@ -173,37 +170,37 @@ static int parse_ofoldpart_partitions(st
|
|
|
|
nr_parts = plen / sizeof(part[0]);
|
|
|
|
- *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
|
|
- if (!*pparts)
|
|
+ parts = kzalloc(nr_parts * sizeof(*parts), GFP_KERNEL);
|
|
+ if (!parts)
|
|
return -ENOMEM;
|
|
|
|
names = of_get_property(dp, "partition-names", &plen);
|
|
|
|
for (i = 0; i < nr_parts; i++) {
|
|
- (*pparts)[i].offset = be32_to_cpu(part->offset);
|
|
- (*pparts)[i].size = be32_to_cpu(part->len) & ~1;
|
|
+ parts[i].offset = be32_to_cpu(part->offset);
|
|
+ parts[i].size = be32_to_cpu(part->len) & ~1;
|
|
/* bit 0 set signifies read only partition */
|
|
if (be32_to_cpu(part->len) & 1)
|
|
- (*pparts)[i].mask_flags = MTD_WRITEABLE;
|
|
+ parts[i].mask_flags = MTD_WRITEABLE;
|
|
|
|
if (names && (plen > 0)) {
|
|
int len = strlen(names) + 1;
|
|
|
|
- (*pparts)[i].name = names;
|
|
+ parts[i].name = names;
|
|
plen -= len;
|
|
names += len;
|
|
} else {
|
|
- (*pparts)[i].name = "unnamed";
|
|
+ parts[i].name = "unnamed";
|
|
}
|
|
|
|
part++;
|
|
}
|
|
|
|
+ *pparts = parts;
|
|
return nr_parts;
|
|
}
|
|
|
|
static struct mtd_part_parser ofoldpart_parser = {
|
|
- .owner = THIS_MODULE,
|
|
.parse_fn = parse_ofoldpart_partitions,
|
|
.name = "ofoldpart",
|
|
};
|
|
--- a/drivers/mtd/spi-nor/Kconfig
|
|
+++ b/drivers/mtd/spi-nor/Kconfig
|
|
@@ -7,6 +7,14 @@ menuconfig MTD_SPI_NOR
|
|
|
|
if MTD_SPI_NOR
|
|
|
|
+config MTD_MT81xx_NOR
|
|
+ tristate "Mediatek MT81xx SPI NOR flash controller"
|
|
+ depends on HAS_IOMEM
|
|
+ help
|
|
+ This enables access to SPI NOR flash, using MT81xx SPI NOR flash
|
|
+ controller. This controller does not support generic SPI BUS, it only
|
|
+ supports SPI NOR Flash.
|
|
+
|
|
config MTD_SPI_NOR_USE_4K_SECTORS
|
|
bool "Use small 4096 B erase sectors"
|
|
default y
|
|
@@ -23,7 +31,7 @@ config MTD_SPI_NOR_USE_4K_SECTORS
|
|
|
|
config SPI_FSL_QUADSPI
|
|
tristate "Freescale Quad SPI controller"
|
|
- depends on ARCH_MXC || COMPILE_TEST
|
|
+ depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
|
|
depends on HAS_IOMEM
|
|
help
|
|
This enables support for the Quad SPI controller in master mode.
|
|
--- a/drivers/mtd/spi-nor/Makefile
|
|
+++ b/drivers/mtd/spi-nor/Makefile
|
|
@@ -1,3 +1,4 @@
|
|
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
|
|
obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
|
|
+obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
|
|
obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
|
|
--- /dev/null
|
|
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
|
|
@@ -0,0 +1,485 @@
|
|
+/*
|
|
+ * Copyright (c) 2015 MediaTek Inc.
|
|
+ * Author: Bayi Cheng <bayi.cheng@mediatek.com>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License version 2 as
|
|
+ * published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ */
|
|
+
|
|
+#include <linux/clk.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/iopoll.h>
|
|
+#include <linux/ioport.h>
|
|
+#include <linux/math64.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mtd/mtd.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/of_device.h>
|
|
+#include <linux/pinctrl/consumer.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/mtd/mtd.h>
|
|
+#include <linux/mtd/partitions.h>
|
|
+#include <linux/mtd/spi-nor.h>
|
|
+
|
|
+#define MTK_NOR_CMD_REG 0x00
|
|
+#define MTK_NOR_CNT_REG 0x04
|
|
+#define MTK_NOR_RDSR_REG 0x08
|
|
+#define MTK_NOR_RDATA_REG 0x0c
|
|
+#define MTK_NOR_RADR0_REG 0x10
|
|
+#define MTK_NOR_RADR1_REG 0x14
|
|
+#define MTK_NOR_RADR2_REG 0x18
|
|
+#define MTK_NOR_WDATA_REG 0x1c
|
|
+#define MTK_NOR_PRGDATA0_REG 0x20
|
|
+#define MTK_NOR_PRGDATA1_REG 0x24
|
|
+#define MTK_NOR_PRGDATA2_REG 0x28
|
|
+#define MTK_NOR_PRGDATA3_REG 0x2c
|
|
+#define MTK_NOR_PRGDATA4_REG 0x30
|
|
+#define MTK_NOR_PRGDATA5_REG 0x34
|
|
+#define MTK_NOR_SHREG0_REG 0x38
|
|
+#define MTK_NOR_SHREG1_REG 0x3c
|
|
+#define MTK_NOR_SHREG2_REG 0x40
|
|
+#define MTK_NOR_SHREG3_REG 0x44
|
|
+#define MTK_NOR_SHREG4_REG 0x48
|
|
+#define MTK_NOR_SHREG5_REG 0x4c
|
|
+#define MTK_NOR_SHREG6_REG 0x50
|
|
+#define MTK_NOR_SHREG7_REG 0x54
|
|
+#define MTK_NOR_SHREG8_REG 0x58
|
|
+#define MTK_NOR_SHREG9_REG 0x5c
|
|
+#define MTK_NOR_CFG1_REG 0x60
|
|
+#define MTK_NOR_CFG2_REG 0x64
|
|
+#define MTK_NOR_CFG3_REG 0x68
|
|
+#define MTK_NOR_STATUS0_REG 0x70
|
|
+#define MTK_NOR_STATUS1_REG 0x74
|
|
+#define MTK_NOR_STATUS2_REG 0x78
|
|
+#define MTK_NOR_STATUS3_REG 0x7c
|
|
+#define MTK_NOR_FLHCFG_REG 0x84
|
|
+#define MTK_NOR_TIME_REG 0x94
|
|
+#define MTK_NOR_PP_DATA_REG 0x98
|
|
+#define MTK_NOR_PREBUF_STUS_REG 0x9c
|
|
+#define MTK_NOR_DELSEL0_REG 0xa0
|
|
+#define MTK_NOR_DELSEL1_REG 0xa4
|
|
+#define MTK_NOR_INTRSTUS_REG 0xa8
|
|
+#define MTK_NOR_INTREN_REG 0xac
|
|
+#define MTK_NOR_CHKSUM_CTL_REG 0xb8
|
|
+#define MTK_NOR_CHKSUM_REG 0xbc
|
|
+#define MTK_NOR_CMD2_REG 0xc0
|
|
+#define MTK_NOR_WRPROT_REG 0xc4
|
|
+#define MTK_NOR_RADR3_REG 0xc8
|
|
+#define MTK_NOR_DUAL_REG 0xcc
|
|
+#define MTK_NOR_DELSEL2_REG 0xd0
|
|
+#define MTK_NOR_DELSEL3_REG 0xd4
|
|
+#define MTK_NOR_DELSEL4_REG 0xd8
|
|
+
|
|
+/* commands for mtk nor controller */
|
|
+#define MTK_NOR_READ_CMD 0x0
|
|
+#define MTK_NOR_RDSR_CMD 0x2
|
|
+#define MTK_NOR_PRG_CMD 0x4
|
|
+#define MTK_NOR_WR_CMD 0x10
|
|
+#define MTK_NOR_PIO_WR_CMD 0x90
|
|
+#define MTK_NOR_WRSR_CMD 0x20
|
|
+#define MTK_NOR_PIO_READ_CMD 0x81
|
|
+#define MTK_NOR_WR_BUF_ENABLE 0x1
|
|
+#define MTK_NOR_WR_BUF_DISABLE 0x0
|
|
+#define MTK_NOR_ENABLE_SF_CMD 0x30
|
|
+#define MTK_NOR_DUAD_ADDR_EN 0x8
|
|
+#define MTK_NOR_QUAD_READ_EN 0x4
|
|
+#define MTK_NOR_DUAL_ADDR_EN 0x2
|
|
+#define MTK_NOR_DUAL_READ_EN 0x1
|
|
+#define MTK_NOR_DUAL_DISABLE 0x0
|
|
+#define MTK_NOR_FAST_READ 0x1
|
|
+
|
|
+#define SFLASH_WRBUF_SIZE 128
|
|
+
|
|
+/* Can shift up to 48 bits (6 bytes) of TX/RX */
|
|
+#define MTK_NOR_MAX_RX_TX_SHIFT 6
|
|
+/* can shift up to 56 bits (7 bytes) transfer by MTK_NOR_PRG_CMD */
|
|
+#define MTK_NOR_MAX_SHIFT 7
|
|
+
|
|
+/* Helpers for accessing the program data / shift data registers */
|
|
+#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
|
|
+#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n))
|
|
+
|
|
+struct mt8173_nor {
|
|
+ struct spi_nor nor;
|
|
+ struct device *dev;
|
|
+ void __iomem *base; /* nor flash base address */
|
|
+ struct clk *spi_clk;
|
|
+ struct clk *nor_clk;
|
|
+};
|
|
+
|
|
+static void mt8173_nor_set_read_mode(struct mt8173_nor *mt8173_nor)
|
|
+{
|
|
+ struct spi_nor *nor = &mt8173_nor->nor;
|
|
+
|
|
+ switch (nor->flash_read) {
|
|
+ case SPI_NOR_FAST:
|
|
+ writeb(nor->read_opcode, mt8173_nor->base +
|
|
+ MTK_NOR_PRGDATA3_REG);
|
|
+ writeb(MTK_NOR_FAST_READ, mt8173_nor->base +
|
|
+ MTK_NOR_CFG1_REG);
|
|
+ break;
|
|
+ case SPI_NOR_DUAL:
|
|
+ writeb(nor->read_opcode, mt8173_nor->base +
|
|
+ MTK_NOR_PRGDATA3_REG);
|
|
+ writeb(MTK_NOR_DUAL_READ_EN, mt8173_nor->base +
|
|
+ MTK_NOR_DUAL_REG);
|
|
+ break;
|
|
+ case SPI_NOR_QUAD:
|
|
+ writeb(nor->read_opcode, mt8173_nor->base +
|
|
+ MTK_NOR_PRGDATA4_REG);
|
|
+ writeb(MTK_NOR_QUAD_READ_EN, mt8173_nor->base +
|
|
+ MTK_NOR_DUAL_REG);
|
|
+ break;
|
|
+ default:
|
|
+ writeb(MTK_NOR_DUAL_DISABLE, mt8173_nor->base +
|
|
+ MTK_NOR_DUAL_REG);
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int mt8173_nor_execute_cmd(struct mt8173_nor *mt8173_nor, u8 cmdval)
|
|
+{
|
|
+ int reg;
|
|
+ u8 val = cmdval & 0x1f;
|
|
+
|
|
+ writeb(cmdval, mt8173_nor->base + MTK_NOR_CMD_REG);
|
|
+ return readl_poll_timeout(mt8173_nor->base + MTK_NOR_CMD_REG, reg,
|
|
+ !(reg & val), 100, 10000);
|
|
+}
|
|
+
|
|
+static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
|
|
+ u8 *tx, int txlen, u8 *rx, int rxlen)
|
|
+{
|
|
+ int len = 1 + txlen + rxlen;
|
|
+ int i, ret, idx;
|
|
+
|
|
+ if (len > MTK_NOR_MAX_SHIFT)
|
|
+ return -EINVAL;
|
|
+
|
|
+ writeb(len * 8, mt8173_nor->base + MTK_NOR_CNT_REG);
|
|
+
|
|
+ /* start at PRGDATA5, go down to PRGDATA0 */
|
|
+ idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
|
|
+
|
|
+ /* opcode */
|
|
+ writeb(op, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
|
|
+ idx--;
|
|
+
|
|
+ /* program TX data */
|
|
+ for (i = 0; i < txlen; i++, idx--)
|
|
+ writeb(tx[i], mt8173_nor->base + MTK_NOR_PRG_REG(idx));
|
|
+
|
|
+ /* clear out rest of TX registers */
|
|
+ while (idx >= 0) {
|
|
+ writeb(0, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
|
|
+ idx--;
|
|
+ }
|
|
+
|
|
+ ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PRG_CMD);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /* restart at first RX byte */
|
|
+ idx = rxlen - 1;
|
|
+
|
|
+ /* read out RX data */
|
|
+ for (i = 0; i < rxlen; i++, idx--)
|
|
+ rx[i] = readb(mt8173_nor->base + MTK_NOR_SHREG(idx));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* Do a WRSR (Write Status Register) command */
|
|
+static int mt8173_nor_wr_sr(struct mt8173_nor *mt8173_nor, u8 sr)
|
|
+{
|
|
+ writeb(sr, mt8173_nor->base + MTK_NOR_PRGDATA5_REG);
|
|
+ writeb(8, mt8173_nor->base + MTK_NOR_CNT_REG);
|
|
+ return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WRSR_CMD);
|
|
+}
|
|
+
|
|
+static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
|
|
+{
|
|
+ u8 reg;
|
|
+
|
|
+ /* the bit0 of MTK_NOR_CFG2_REG is pre-fetch buffer
|
|
+ * 0: pre-fetch buffer use for read
|
|
+ * 1: pre-fetch buffer use for page program
|
|
+ */
|
|
+ writel(MTK_NOR_WR_BUF_ENABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
|
|
+ return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
|
|
+ 0x01 == (reg & 0x01), 100, 10000);
|
|
+}
|
|
+
|
|
+static int mt8173_nor_write_buffer_disable(struct mt8173_nor *mt8173_nor)
|
|
+{
|
|
+ u8 reg;
|
|
+
|
|
+ writel(MTK_NOR_WR_BUF_DISABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
|
|
+ return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
|
|
+ MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
|
|
+ 10000);
|
|
+}
|
|
+
|
|
+static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < 3; i++) {
|
|
+ writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4);
|
|
+ addr >>= 8;
|
|
+ }
|
|
+ /* Last register is non-contiguous */
|
|
+ writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
|
|
+}
|
|
+
|
|
+static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
|
|
+ size_t *retlen, u_char *buffer)
|
|
+{
|
|
+ int i, ret;
|
|
+ int addr = (int)from;
|
|
+ u8 *buf = (u8 *)buffer;
|
|
+ struct mt8173_nor *mt8173_nor = nor->priv;
|
|
+
|
|
+ /* set mode for fast read mode ,dual mode or quad mode */
|
|
+ mt8173_nor_set_read_mode(mt8173_nor);
|
|
+ mt8173_nor_set_addr(mt8173_nor, addr);
|
|
+
|
|
+ for (i = 0; i < length; i++, (*retlen)++) {
|
|
+ ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
|
|
+ int addr, int length, u8 *data)
|
|
+{
|
|
+ int i, ret;
|
|
+
|
|
+ mt8173_nor_set_addr(mt8173_nor, addr);
|
|
+
|
|
+ for (i = 0; i < length; i++) {
|
|
+ writeb(*data++, mt8173_nor->base + MTK_NOR_WDATA_REG);
|
|
+ ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_WR_CMD);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
|
|
+ const u8 *buf)
|
|
+{
|
|
+ int i, bufidx, data;
|
|
+
|
|
+ mt8173_nor_set_addr(mt8173_nor, addr);
|
|
+
|
|
+ bufidx = 0;
|
|
+ for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
|
|
+ data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
|
|
+ buf[bufidx + 1]<<8 | buf[bufidx];
|
|
+ bufidx += 4;
|
|
+ writel(data, mt8173_nor->base + MTK_NOR_PP_DATA_REG);
|
|
+ }
|
|
+ return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
|
|
+}
|
|
+
|
|
+static void mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
|
|
+ size_t *retlen, const u_char *buf)
|
|
+{
|
|
+ int ret;
|
|
+ struct mt8173_nor *mt8173_nor = nor->priv;
|
|
+
|
|
+ ret = mt8173_nor_write_buffer_enable(mt8173_nor);
|
|
+ if (ret < 0)
|
|
+ dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
|
|
+
|
|
+ while (len >= SFLASH_WRBUF_SIZE) {
|
|
+ ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
|
|
+ if (ret < 0)
|
|
+ dev_err(mt8173_nor->dev, "write buffer failed!\n");
|
|
+ len -= SFLASH_WRBUF_SIZE;
|
|
+ to += SFLASH_WRBUF_SIZE;
|
|
+ buf += SFLASH_WRBUF_SIZE;
|
|
+ (*retlen) += SFLASH_WRBUF_SIZE;
|
|
+ }
|
|
+ ret = mt8173_nor_write_buffer_disable(mt8173_nor);
|
|
+ if (ret < 0)
|
|
+ dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
|
|
+
|
|
+ if (len) {
|
|
+ ret = mt8173_nor_write_single_byte(mt8173_nor, to, (int)len,
|
|
+ (u8 *)buf);
|
|
+ if (ret < 0)
|
|
+ dev_err(mt8173_nor->dev, "write single byte failed!\n");
|
|
+ (*retlen) += len;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
|
|
+{
|
|
+ int ret;
|
|
+ struct mt8173_nor *mt8173_nor = nor->priv;
|
|
+
|
|
+ switch (opcode) {
|
|
+ case SPINOR_OP_RDSR:
|
|
+ ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_RDSR_CMD);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ if (len == 1)
|
|
+ *buf = readb(mt8173_nor->base + MTK_NOR_RDSR_REG);
|
|
+ else
|
|
+ dev_err(mt8173_nor->dev, "len should be 1 for read status!\n");
|
|
+ break;
|
|
+ default:
|
|
+ ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, NULL, 0, buf, len);
|
|
+ break;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
|
|
+ int len)
|
|
+{
|
|
+ int ret;
|
|
+ struct mt8173_nor *mt8173_nor = nor->priv;
|
|
+
|
|
+ switch (opcode) {
|
|
+ case SPINOR_OP_WRSR:
|
|
+ /* We only handle 1 byte */
|
|
+ ret = mt8173_nor_wr_sr(mt8173_nor, *buf);
|
|
+ break;
|
|
+ default:
|
|
+ ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, buf, len, NULL, 0);
|
|
+ if (ret)
|
|
+ dev_warn(mt8173_nor->dev, "write reg failure!\n");
|
|
+ break;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
|
|
+ struct device_node *flash_node)
|
|
+{
|
|
+ int ret;
|
|
+ struct spi_nor *nor;
|
|
+
|
|
+ /* initialize controller to accept commands */
|
|
+ writel(MTK_NOR_ENABLE_SF_CMD, mt8173_nor->base + MTK_NOR_WRPROT_REG);
|
|
+
|
|
+ nor = &mt8173_nor->nor;
|
|
+ nor->dev = mt8173_nor->dev;
|
|
+ nor->priv = mt8173_nor;
|
|
+ spi_nor_set_flash_node(nor, flash_node);
|
|
+
|
|
+ /* fill the hooks to spi nor */
|
|
+ nor->read = mt8173_nor_read;
|
|
+ nor->read_reg = mt8173_nor_read_reg;
|
|
+ nor->write = mt8173_nor_write;
|
|
+ nor->write_reg = mt8173_nor_write_reg;
|
|
+ nor->mtd.name = "mtk_nor";
|
|
+ /* initialized with NULL */
|
|
+ ret = spi_nor_scan(nor, NULL, SPI_NOR_DUAL);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return mtd_device_register(&nor->mtd, NULL, 0);
|
|
+}
|
|
+
|
|
+static int mtk_nor_drv_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct device_node *flash_np;
|
|
+ struct resource *res;
|
|
+ int ret;
|
|
+ struct mt8173_nor *mt8173_nor;
|
|
+
|
|
+ if (!pdev->dev.of_node) {
|
|
+ dev_err(&pdev->dev, "No DT found\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ mt8173_nor = devm_kzalloc(&pdev->dev, sizeof(*mt8173_nor), GFP_KERNEL);
|
|
+ if (!mt8173_nor)
|
|
+ return -ENOMEM;
|
|
+ platform_set_drvdata(pdev, mt8173_nor);
|
|
+
|
|
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
+ mt8173_nor->base = devm_ioremap_resource(&pdev->dev, res);
|
|
+ if (IS_ERR(mt8173_nor->base))
|
|
+ return PTR_ERR(mt8173_nor->base);
|
|
+
|
|
+ mt8173_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
|
|
+ if (IS_ERR(mt8173_nor->spi_clk))
|
|
+ return PTR_ERR(mt8173_nor->spi_clk);
|
|
+
|
|
+ mt8173_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
|
|
+ if (IS_ERR(mt8173_nor->nor_clk))
|
|
+ return PTR_ERR(mt8173_nor->nor_clk);
|
|
+
|
|
+ mt8173_nor->dev = &pdev->dev;
|
|
+ ret = clk_prepare_enable(mt8173_nor->spi_clk);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = clk_prepare_enable(mt8173_nor->nor_clk);
|
|
+ if (ret) {
|
|
+ clk_disable_unprepare(mt8173_nor->spi_clk);
|
|
+ return ret;
|
|
+ }
|
|
+ /* only support one attached flash */
|
|
+ flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
|
|
+ if (!flash_np) {
|
|
+ dev_err(&pdev->dev, "no SPI flash device to configure\n");
|
|
+ ret = -ENODEV;
|
|
+ goto nor_free;
|
|
+ }
|
|
+ ret = mtk_nor_init(mt8173_nor, flash_np);
|
|
+
|
|
+nor_free:
|
|
+ if (ret) {
|
|
+ clk_disable_unprepare(mt8173_nor->spi_clk);
|
|
+ clk_disable_unprepare(mt8173_nor->nor_clk);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int mtk_nor_drv_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
|
|
+
|
|
+ clk_disable_unprepare(mt8173_nor->spi_clk);
|
|
+ clk_disable_unprepare(mt8173_nor->nor_clk);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct of_device_id mtk_nor_of_ids[] = {
|
|
+ { .compatible = "mediatek,mt8173-nor"},
|
|
+ { /* sentinel */ }
|
|
+};
|
|
+MODULE_DEVICE_TABLE(of, mtk_nor_of_ids);
|
|
+
|
|
+static struct platform_driver mtk_nor_driver = {
|
|
+ .probe = mtk_nor_drv_probe,
|
|
+ .remove = mtk_nor_drv_remove,
|
|
+ .driver = {
|
|
+ .name = "mtk-nor",
|
|
+ .of_match_table = mtk_nor_of_ids,
|
|
+ },
|
|
+};
|
|
+
|
|
+module_platform_driver(mtk_nor_driver);
|
|
+MODULE_LICENSE("GPL v2");
|
|
+MODULE_DESCRIPTION("MediaTek SPI NOR Flash Driver");
|
|
--- a/drivers/mtd/spi-nor/spi-nor.c
|
|
+++ b/drivers/mtd/spi-nor/spi-nor.c
|
|
@@ -38,6 +38,7 @@
|
|
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
|
|
|
|
#define SPI_NOR_MAX_ID_LEN 6
|
|
+#define SPI_NOR_MAX_ADDR_WIDTH 4
|
|
|
|
struct flash_info {
|
|
char *name;
|
|
@@ -60,15 +61,20 @@ struct flash_info {
|
|
u16 addr_width;
|
|
|
|
u16 flags;
|
|
-#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
|
|
-#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
|
|
-#define SST_WRITE 0x04 /* use SST byte programming */
|
|
-#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
|
|
-#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
|
|
-#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
|
|
-#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
|
|
-#define USE_FSR 0x80 /* use flag status register */
|
|
-#define SPI_NOR_HAS_LOCK 0x100 /* Flash supports lock/unlock via SR */
|
|
+#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
|
|
+#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
|
|
+#define SST_WRITE BIT(2) /* use SST byte programming */
|
|
+#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
|
|
+#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
|
|
+#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
|
|
+#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
|
|
+#define USE_FSR BIT(7) /* use flag status register */
|
|
+#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
|
|
+#define SPI_NOR_HAS_TB BIT(9) /*
|
|
+ * Flash SR has Top/Bottom (TB) protect
|
|
+ * bit. Must be used with
|
|
+ * SPI_NOR_HAS_LOCK.
|
|
+ */
|
|
};
|
|
|
|
#define JEDEC_MFR(info) ((info)->id[0])
|
|
@@ -314,6 +320,29 @@ static void spi_nor_unlock_and_unprep(st
|
|
}
|
|
|
|
/*
|
|
+ * Initiate the erasure of a single sector
|
|
+ */
|
|
+static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
|
|
+{
|
|
+ u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
|
|
+ int i;
|
|
+
|
|
+ if (nor->erase)
|
|
+ return nor->erase(nor, addr);
|
|
+
|
|
+ /*
|
|
+ * Default implementation, if driver doesn't have a specialized HW
|
|
+ * control
|
|
+ */
|
|
+ for (i = nor->addr_width - 1; i >= 0; i--) {
|
|
+ buf[i] = addr & 0xff;
|
|
+ addr >>= 8;
|
|
+ }
|
|
+
|
|
+ return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
|
|
+}
|
|
+
|
|
+/*
|
|
* Erase an address range on the nor chip. The address range may extend
|
|
* one or more erase sectors. Return an error is there is a problem erasing.
|
|
*/
|
|
@@ -372,10 +401,9 @@ static int spi_nor_erase(struct mtd_info
|
|
while (len) {
|
|
write_enable(nor);
|
|
|
|
- if (nor->erase(nor, addr)) {
|
|
- ret = -EIO;
|
|
+ ret = spi_nor_erase_sector(nor, addr);
|
|
+ if (ret)
|
|
goto erase_err;
|
|
- }
|
|
|
|
addr += mtd->erasesize;
|
|
len -= mtd->erasesize;
|
|
@@ -388,17 +416,13 @@ static int spi_nor_erase(struct mtd_info
|
|
|
|
write_disable(nor);
|
|
|
|
+erase_err:
|
|
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
|
|
|
|
- instr->state = MTD_ERASE_DONE;
|
|
+ instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
|
|
mtd_erase_callback(instr);
|
|
|
|
return ret;
|
|
-
|
|
-erase_err:
|
|
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
|
|
- instr->state = MTD_ERASE_FAILED;
|
|
- return ret;
|
|
}
|
|
|
|
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
|
|
@@ -416,32 +440,58 @@ static void stm_get_locked_range(struct
|
|
} else {
|
|
pow = ((sr & mask) ^ mask) >> shift;
|
|
*len = mtd->size >> pow;
|
|
- *ofs = mtd->size - *len;
|
|
+ if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
|
|
+ *ofs = 0;
|
|
+ else
|
|
+ *ofs = mtd->size - *len;
|
|
}
|
|
}
|
|
|
|
/*
|
|
- * Return 1 if the entire region is locked, 0 otherwise
|
|
+ * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
|
|
+ * @locked is false); 0 otherwise
|
|
*/
|
|
-static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
|
|
- u8 sr)
|
|
+static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
|
|
+ u8 sr, bool locked)
|
|
{
|
|
loff_t lock_offs;
|
|
uint64_t lock_len;
|
|
|
|
+ if (!len)
|
|
+ return 1;
|
|
+
|
|
stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
|
|
|
|
- return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
|
|
+ if (locked)
|
|
+ /* Requested range is a sub-range of locked range */
|
|
+ return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
|
|
+ else
|
|
+ /* Requested range does not overlap with locked range */
|
|
+ return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
|
|
+}
|
|
+
|
|
+static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
|
|
+ u8 sr)
|
|
+{
|
|
+ return stm_check_lock_status_sr(nor, ofs, len, sr, true);
|
|
+}
|
|
+
|
|
+static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
|
|
+ u8 sr)
|
|
+{
|
|
+ return stm_check_lock_status_sr(nor, ofs, len, sr, false);
|
|
}
|
|
|
|
/*
|
|
* Lock a region of the flash. Compatible with ST Micro and similar flash.
|
|
- * Supports only the block protection bits BP{0,1,2} in the status register
|
|
+ * Supports the block protection bits BP{0,1,2} in the status register
|
|
* (SR). Does not support these features found in newer SR bitfields:
|
|
- * - TB: top/bottom protect - only handle TB=0 (top protect)
|
|
* - SEC: sector/block protect - only handle SEC=0 (block protect)
|
|
* - CMP: complement protect - only support CMP=0 (range is not complemented)
|
|
*
|
|
+ * Support for the following is provided conditionally for some flash:
|
|
+ * - TB: top/bottom protect
|
|
+ *
|
|
* Sample table portion for 8MB flash (Winbond w25q64fw):
|
|
*
|
|
* SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
|
|
@@ -454,26 +504,55 @@ static int stm_is_locked_sr(struct spi_n
|
|
* 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
|
|
* 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
|
|
* X | X | 1 | 1 | 1 | 8 MB | ALL
|
|
+ * ------|-------|-------|-------|-------|---------------|-------------------
|
|
+ * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
|
|
+ * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
|
|
+ * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
|
|
+ * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
|
|
+ * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
|
|
+ * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
|
|
*
|
|
* Returns negative on errors, 0 on success.
|
|
*/
|
|
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
|
|
{
|
|
struct mtd_info *mtd = &nor->mtd;
|
|
- u8 status_old, status_new;
|
|
+ int status_old, status_new;
|
|
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
|
|
u8 shift = ffs(mask) - 1, pow, val;
|
|
+ loff_t lock_len;
|
|
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
|
|
+ bool use_top;
|
|
+ int ret;
|
|
|
|
status_old = read_sr(nor);
|
|
+ if (status_old < 0)
|
|
+ return status_old;
|
|
|
|
- /* SPI NOR always locks to the end */
|
|
- if (ofs + len != mtd->size) {
|
|
- /* Does combined region extend to end? */
|
|
- if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len,
|
|
- status_old))
|
|
- return -EINVAL;
|
|
- len = mtd->size - ofs;
|
|
- }
|
|
+ /* If nothing in our range is unlocked, we don't need to do anything */
|
|
+ if (stm_is_locked_sr(nor, ofs, len, status_old))
|
|
+ return 0;
|
|
+
|
|
+ /* If anything below us is unlocked, we can't use 'bottom' protection */
|
|
+ if (!stm_is_locked_sr(nor, 0, ofs, status_old))
|
|
+ can_be_bottom = false;
|
|
+
|
|
+ /* If anything above us is unlocked, we can't use 'top' protection */
|
|
+ if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
|
|
+ status_old))
|
|
+ can_be_top = false;
|
|
+
|
|
+ if (!can_be_bottom && !can_be_top)
|
|
+ return -EINVAL;
|
|
+
|
|
+ /* Prefer top, if both are valid */
|
|
+ use_top = can_be_top;
|
|
+
|
|
+ /* lock_len: length of region that should end up locked */
|
|
+ if (use_top)
|
|
+ lock_len = mtd->size - ofs;
|
|
+ else
|
|
+ lock_len = ofs + len;
|
|
|
|
/*
|
|
* Need smallest pow such that:
|
|
@@ -484,7 +563,7 @@ static int stm_lock(struct spi_nor *nor,
|
|
*
|
|
* pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
|
|
*/
|
|
- pow = ilog2(mtd->size) - ilog2(len);
|
|
+ pow = ilog2(mtd->size) - ilog2(lock_len);
|
|
val = mask - (pow << shift);
|
|
if (val & ~mask)
|
|
return -EINVAL;
|
|
@@ -492,14 +571,27 @@ static int stm_lock(struct spi_nor *nor,
|
|
if (!(val & mask))
|
|
return -EINVAL;
|
|
|
|
- status_new = (status_old & ~mask) | val;
|
|
+ status_new = (status_old & ~mask & ~SR_TB) | val;
|
|
+
|
|
+ /* Disallow further writes if WP pin is asserted */
|
|
+ status_new |= SR_SRWD;
|
|
+
|
|
+ if (!use_top)
|
|
+ status_new |= SR_TB;
|
|
+
|
|
+ /* Don't bother if they're the same */
|
|
+ if (status_new == status_old)
|
|
+ return 0;
|
|
|
|
/* Only modify protection if it will not unlock other areas */
|
|
- if ((status_new & mask) <= (status_old & mask))
|
|
+ if ((status_new & mask) < (status_old & mask))
|
|
return -EINVAL;
|
|
|
|
write_enable(nor);
|
|
- return write_sr(nor, status_new);
|
|
+ ret = write_sr(nor, status_new);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ return spi_nor_wait_till_ready(nor);
|
|
}
|
|
|
|
/*
|
|
@@ -510,17 +602,43 @@ static int stm_lock(struct spi_nor *nor,
|
|
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
|
|
{
|
|
struct mtd_info *mtd = &nor->mtd;
|
|
- uint8_t status_old, status_new;
|
|
+ int status_old, status_new;
|
|
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
|
|
u8 shift = ffs(mask) - 1, pow, val;
|
|
+ loff_t lock_len;
|
|
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
|
|
+ bool use_top;
|
|
+ int ret;
|
|
|
|
status_old = read_sr(nor);
|
|
+ if (status_old < 0)
|
|
+ return status_old;
|
|
+
|
|
+ /* If nothing in our range is locked, we don't need to do anything */
|
|
+ if (stm_is_unlocked_sr(nor, ofs, len, status_old))
|
|
+ return 0;
|
|
+
|
|
+ /* If anything below us is locked, we can't use 'top' protection */
|
|
+ if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
|
|
+ can_be_top = false;
|
|
+
|
|
+ /* If anything above us is locked, we can't use 'bottom' protection */
|
|
+ if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
|
|
+ status_old))
|
|
+ can_be_bottom = false;
|
|
|
|
- /* Cannot unlock; would unlock larger region than requested */
|
|
- if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize,
|
|
- status_old))
|
|
+ if (!can_be_bottom && !can_be_top)
|
|
return -EINVAL;
|
|
|
|
+ /* Prefer top, if both are valid */
|
|
+ use_top = can_be_top;
|
|
+
|
|
+ /* lock_len: length of region that should remain locked */
|
|
+ if (use_top)
|
|
+ lock_len = mtd->size - (ofs + len);
|
|
+ else
|
|
+ lock_len = ofs;
|
|
+
|
|
/*
|
|
* Need largest pow such that:
|
|
*
|
|
@@ -530,8 +648,8 @@ static int stm_unlock(struct spi_nor *no
|
|
*
|
|
* pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
|
|
*/
|
|
- pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len));
|
|
- if (ofs + len == mtd->size) {
|
|
+ pow = ilog2(mtd->size) - order_base_2(lock_len);
|
|
+ if (lock_len == 0) {
|
|
val = 0; /* fully unlocked */
|
|
} else {
|
|
val = mask - (pow << shift);
|
|
@@ -540,14 +658,28 @@ static int stm_unlock(struct spi_nor *no
|
|
return -EINVAL;
|
|
}
|
|
|
|
- status_new = (status_old & ~mask) | val;
|
|
+ status_new = (status_old & ~mask & ~SR_TB) | val;
|
|
+
|
|
+ /* Don't protect status register if we're fully unlocked */
|
|
+ if (lock_len == mtd->size)
|
|
+ status_new &= ~SR_SRWD;
|
|
+
|
|
+ if (!use_top)
|
|
+ status_new |= SR_TB;
|
|
+
|
|
+ /* Don't bother if they're the same */
|
|
+ if (status_new == status_old)
|
|
+ return 0;
|
|
|
|
/* Only modify protection if it will not lock other areas */
|
|
- if ((status_new & mask) >= (status_old & mask))
|
|
+ if ((status_new & mask) > (status_old & mask))
|
|
return -EINVAL;
|
|
|
|
write_enable(nor);
|
|
- return write_sr(nor, status_new);
|
|
+ ret = write_sr(nor, status_new);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ return spi_nor_wait_till_ready(nor);
|
|
}
|
|
|
|
/*
|
|
@@ -737,8 +869,8 @@ static const struct flash_info spi_nor_i
|
|
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
|
|
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
|
|
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
|
|
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
|
|
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
|
|
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
|
|
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
|
|
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
|
|
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
|
|
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
|
|
@@ -772,6 +904,7 @@ static const struct flash_info spi_nor_i
|
|
{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
|
|
{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
|
|
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
|
|
+ { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
|
|
{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
|
|
{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
|
|
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
|
|
@@ -835,11 +968,23 @@ static const struct flash_info spi_nor_i
|
|
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
|
|
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
|
|
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
|
|
- { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
|
|
+ {
|
|
+ "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
|
|
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
|
|
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
|
|
+ },
|
|
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
|
|
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
|
|
- { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
|
|
- { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
|
|
+ {
|
|
+ "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
|
|
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
|
|
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
|
|
+ },
|
|
+ {
|
|
+ "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
|
|
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
|
|
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
|
|
+ },
|
|
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
|
|
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
|
|
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
|
|
@@ -862,7 +1007,7 @@ static const struct flash_info *spi_nor_
|
|
|
|
tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
|
|
if (tmp < 0) {
|
|
- dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp);
|
|
+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
|
|
return ERR_PTR(tmp);
|
|
}
|
|
|
|
@@ -873,7 +1018,7 @@ static const struct flash_info *spi_nor_
|
|
return &spi_nor_ids[tmp];
|
|
}
|
|
}
|
|
- dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %2x, %2x\n",
|
|
+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
|
|
id[0], id[1], id[2]);
|
|
return ERR_PTR(-ENODEV);
|
|
}
|
|
@@ -1019,6 +1164,8 @@ static int macronix_quad_enable(struct s
|
|
int ret, val;
|
|
|
|
val = read_sr(nor);
|
|
+ if (val < 0)
|
|
+ return val;
|
|
write_enable(nor);
|
|
|
|
write_sr(nor, val | SR_QUAD_EN_MX);
|
|
@@ -1107,7 +1254,7 @@ static int set_quad_mode(struct spi_nor
|
|
static int spi_nor_check(struct spi_nor *nor)
|
|
{
|
|
if (!nor->dev || !nor->read || !nor->write ||
|
|
- !nor->read_reg || !nor->write_reg || !nor->erase) {
|
|
+ !nor->read_reg || !nor->write_reg) {
|
|
pr_err("spi-nor: please fill all the necessary fields!\n");
|
|
return -EINVAL;
|
|
}
|
|
@@ -1120,7 +1267,7 @@ int spi_nor_scan(struct spi_nor *nor, co
|
|
const struct flash_info *info = NULL;
|
|
struct device *dev = nor->dev;
|
|
struct mtd_info *mtd = &nor->mtd;
|
|
- struct device_node *np = nor->flash_node;
|
|
+ struct device_node *np = spi_nor_get_flash_node(nor);
|
|
int ret;
|
|
int i;
|
|
|
|
@@ -1174,6 +1321,7 @@ int spi_nor_scan(struct spi_nor *nor, co
|
|
info->flags & SPI_NOR_HAS_LOCK) {
|
|
write_enable(nor);
|
|
write_sr(nor, 0);
|
|
+ spi_nor_wait_till_ready(nor);
|
|
}
|
|
|
|
if (!mtd->name)
|
|
@@ -1208,6 +1356,8 @@ int spi_nor_scan(struct spi_nor *nor, co
|
|
|
|
if (info->flags & USE_FSR)
|
|
nor->flags |= SNOR_F_USE_FSR;
|
|
+ if (info->flags & SPI_NOR_HAS_TB)
|
|
+ nor->flags |= SNOR_F_HAS_SR_TB;
|
|
|
|
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
|
|
/* prefer "small sector" erase if possible */
|
|
@@ -1310,6 +1460,12 @@ int spi_nor_scan(struct spi_nor *nor, co
|
|
nor->addr_width = 3;
|
|
}
|
|
|
|
+ if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
|
|
+ dev_err(dev, "address width is too large: %u\n",
|
|
+ nor->addr_width);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
nor->read_dummy = spi_nor_read_dummy_cycles(nor);
|
|
|
|
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
|
|
--- a/drivers/mtd/tests/mtd_nandecctest.c
|
|
+++ b/drivers/mtd/tests/mtd_nandecctest.c
|
|
@@ -187,7 +187,7 @@ static int double_bit_error_detect(void
|
|
__nand_calculate_ecc(error_data, size, calc_ecc);
|
|
ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
|
|
|
|
- return (ret == -1) ? 0 : -EINVAL;
|
|
+ return (ret == -EBADMSG) ? 0 : -EINVAL;
|
|
}
|
|
|
|
static const struct nand_ecc_test nand_ecc_test[] = {
|
|
--- a/drivers/mtd/tests/oobtest.c
|
|
+++ b/drivers/mtd/tests/oobtest.c
|
|
@@ -215,19 +215,19 @@ static int verify_eraseblock(int ebnum)
|
|
pr_info("ignoring error as within bitflip_limit\n");
|
|
}
|
|
|
|
- if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
|
|
+ if (use_offset != 0 || use_len < mtd->oobavail) {
|
|
int k;
|
|
|
|
ops.mode = MTD_OPS_AUTO_OOB;
|
|
ops.len = 0;
|
|
ops.retlen = 0;
|
|
- ops.ooblen = mtd->ecclayout->oobavail;
|
|
+ ops.ooblen = mtd->oobavail;
|
|
ops.oobretlen = 0;
|
|
ops.ooboffs = 0;
|
|
ops.datbuf = NULL;
|
|
ops.oobbuf = readbuf;
|
|
err = mtd_read_oob(mtd, addr, &ops);
|
|
- if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
|
|
+ if (err || ops.oobretlen != mtd->oobavail) {
|
|
pr_err("error: readoob failed at %#llx\n",
|
|
(long long)addr);
|
|
errcnt += 1;
|
|
@@ -244,7 +244,7 @@ static int verify_eraseblock(int ebnum)
|
|
/* verify post-(use_offset + use_len) area for 0xff */
|
|
k = use_offset + use_len;
|
|
bitflips += memffshow(addr, k, readbuf + k,
|
|
- mtd->ecclayout->oobavail - k);
|
|
+ mtd->oobavail - k);
|
|
|
|
if (bitflips > bitflip_limit) {
|
|
pr_err("error: verify failed at %#llx\n",
|
|
@@ -269,8 +269,8 @@ static int verify_eraseblock_in_one_go(i
|
|
struct mtd_oob_ops ops;
|
|
int err = 0;
|
|
loff_t addr = (loff_t)ebnum * mtd->erasesize;
|
|
- size_t len = mtd->ecclayout->oobavail * pgcnt;
|
|
- size_t oobavail = mtd->ecclayout->oobavail;
|
|
+ size_t len = mtd->oobavail * pgcnt;
|
|
+ size_t oobavail = mtd->oobavail;
|
|
size_t bitflips;
|
|
int i;
|
|
|
|
@@ -394,8 +394,8 @@ static int __init mtd_oobtest_init(void)
|
|
goto out;
|
|
|
|
use_offset = 0;
|
|
- use_len = mtd->ecclayout->oobavail;
|
|
- use_len_max = mtd->ecclayout->oobavail;
|
|
+ use_len = mtd->oobavail;
|
|
+ use_len_max = mtd->oobavail;
|
|
vary_offset = 0;
|
|
|
|
/* First test: write all OOB, read it back and verify */
|
|
@@ -460,8 +460,8 @@ static int __init mtd_oobtest_init(void)
|
|
|
|
/* Write all eraseblocks */
|
|
use_offset = 0;
|
|
- use_len = mtd->ecclayout->oobavail;
|
|
- use_len_max = mtd->ecclayout->oobavail;
|
|
+ use_len = mtd->oobavail;
|
|
+ use_len_max = mtd->oobavail;
|
|
vary_offset = 1;
|
|
prandom_seed_state(&rnd_state, 5);
|
|
|
|
@@ -471,8 +471,8 @@ static int __init mtd_oobtest_init(void)
|
|
|
|
/* Check all eraseblocks */
|
|
use_offset = 0;
|
|
- use_len = mtd->ecclayout->oobavail;
|
|
- use_len_max = mtd->ecclayout->oobavail;
|
|
+ use_len = mtd->oobavail;
|
|
+ use_len_max = mtd->oobavail;
|
|
vary_offset = 1;
|
|
prandom_seed_state(&rnd_state, 5);
|
|
err = verify_all_eraseblocks();
|
|
@@ -480,8 +480,8 @@ static int __init mtd_oobtest_init(void)
|
|
goto out;
|
|
|
|
use_offset = 0;
|
|
- use_len = mtd->ecclayout->oobavail;
|
|
- use_len_max = mtd->ecclayout->oobavail;
|
|
+ use_len = mtd->oobavail;
|
|
+ use_len_max = mtd->oobavail;
|
|
vary_offset = 0;
|
|
|
|
/* Fourth test: try to write off end of device */
|
|
@@ -501,7 +501,7 @@ static int __init mtd_oobtest_init(void)
|
|
ops.retlen = 0;
|
|
ops.ooblen = 1;
|
|
ops.oobretlen = 0;
|
|
- ops.ooboffs = mtd->ecclayout->oobavail;
|
|
+ ops.ooboffs = mtd->oobavail;
|
|
ops.datbuf = NULL;
|
|
ops.oobbuf = writebuf;
|
|
pr_info("attempting to start write past end of OOB\n");
|
|
@@ -521,7 +521,7 @@ static int __init mtd_oobtest_init(void)
|
|
ops.retlen = 0;
|
|
ops.ooblen = 1;
|
|
ops.oobretlen = 0;
|
|
- ops.ooboffs = mtd->ecclayout->oobavail;
|
|
+ ops.ooboffs = mtd->oobavail;
|
|
ops.datbuf = NULL;
|
|
ops.oobbuf = readbuf;
|
|
pr_info("attempting to start read past end of OOB\n");
|
|
@@ -543,7 +543,7 @@ static int __init mtd_oobtest_init(void)
|
|
ops.mode = MTD_OPS_AUTO_OOB;
|
|
ops.len = 0;
|
|
ops.retlen = 0;
|
|
- ops.ooblen = mtd->ecclayout->oobavail + 1;
|
|
+ ops.ooblen = mtd->oobavail + 1;
|
|
ops.oobretlen = 0;
|
|
ops.ooboffs = 0;
|
|
ops.datbuf = NULL;
|
|
@@ -563,7 +563,7 @@ static int __init mtd_oobtest_init(void)
|
|
ops.mode = MTD_OPS_AUTO_OOB;
|
|
ops.len = 0;
|
|
ops.retlen = 0;
|
|
- ops.ooblen = mtd->ecclayout->oobavail + 1;
|
|
+ ops.ooblen = mtd->oobavail + 1;
|
|
ops.oobretlen = 0;
|
|
ops.ooboffs = 0;
|
|
ops.datbuf = NULL;
|
|
@@ -587,7 +587,7 @@ static int __init mtd_oobtest_init(void)
|
|
ops.mode = MTD_OPS_AUTO_OOB;
|
|
ops.len = 0;
|
|
ops.retlen = 0;
|
|
- ops.ooblen = mtd->ecclayout->oobavail;
|
|
+ ops.ooblen = mtd->oobavail;
|
|
ops.oobretlen = 0;
|
|
ops.ooboffs = 1;
|
|
ops.datbuf = NULL;
|
|
@@ -607,7 +607,7 @@ static int __init mtd_oobtest_init(void)
|
|
ops.mode = MTD_OPS_AUTO_OOB;
|
|
ops.len = 0;
|
|
ops.retlen = 0;
|
|
- ops.ooblen = mtd->ecclayout->oobavail;
|
|
+ ops.ooblen = mtd->oobavail;
|
|
ops.oobretlen = 0;
|
|
ops.ooboffs = 1;
|
|
ops.datbuf = NULL;
|
|
@@ -638,7 +638,7 @@ static int __init mtd_oobtest_init(void)
|
|
for (i = 0; i < ebcnt - 1; ++i) {
|
|
int cnt = 2;
|
|
int pg;
|
|
- size_t sz = mtd->ecclayout->oobavail;
|
|
+ size_t sz = mtd->oobavail;
|
|
if (bbt[i] || bbt[i + 1])
|
|
continue;
|
|
addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
|
|
@@ -673,13 +673,12 @@ static int __init mtd_oobtest_init(void)
|
|
for (i = 0; i < ebcnt - 1; ++i) {
|
|
if (bbt[i] || bbt[i + 1])
|
|
continue;
|
|
- prandom_bytes_state(&rnd_state, writebuf,
|
|
- mtd->ecclayout->oobavail * 2);
|
|
+ prandom_bytes_state(&rnd_state, writebuf, mtd->oobavail * 2);
|
|
addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
|
|
ops.mode = MTD_OPS_AUTO_OOB;
|
|
ops.len = 0;
|
|
ops.retlen = 0;
|
|
- ops.ooblen = mtd->ecclayout->oobavail * 2;
|
|
+ ops.ooblen = mtd->oobavail * 2;
|
|
ops.oobretlen = 0;
|
|
ops.ooboffs = 0;
|
|
ops.datbuf = NULL;
|
|
@@ -688,7 +687,7 @@ static int __init mtd_oobtest_init(void)
|
|
if (err)
|
|
goto out;
|
|
if (memcmpshow(addr, readbuf, writebuf,
|
|
- mtd->ecclayout->oobavail * 2)) {
|
|
+ mtd->oobavail * 2)) {
|
|
pr_err("error: verify failed at %#llx\n",
|
|
(long long)addr);
|
|
errcnt += 1;
|
|
--- a/drivers/mtd/tests/pagetest.c
|
|
+++ b/drivers/mtd/tests/pagetest.c
|
|
@@ -127,13 +127,12 @@ static int crosstest(void)
|
|
unsigned char *pp1, *pp2, *pp3, *pp4;
|
|
|
|
pr_info("crosstest\n");
|
|
- pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
|
|
+ pp1 = kzalloc(pgsize * 4, GFP_KERNEL);
|
|
if (!pp1)
|
|
return -ENOMEM;
|
|
pp2 = pp1 + pgsize;
|
|
pp3 = pp2 + pgsize;
|
|
pp4 = pp3 + pgsize;
|
|
- memset(pp1, 0, pgsize * 4);
|
|
|
|
addr0 = 0;
|
|
for (i = 0; i < ebcnt && bbt[i]; ++i)
|
|
--- a/include/linux/mtd/bbm.h
|
|
+++ b/include/linux/mtd/bbm.h
|
|
@@ -166,7 +166,6 @@ struct bbm_info {
|
|
};
|
|
|
|
/* OneNAND BBT interface */
|
|
-extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
|
|
extern int onenand_default_bbt(struct mtd_info *mtd);
|
|
|
|
#endif /* __LINUX_MTD_BBM_H */
|
|
--- a/include/linux/mtd/fsmc.h
|
|
+++ b/include/linux/mtd/fsmc.h
|
|
@@ -103,24 +103,6 @@
|
|
|
|
#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
|
|
|
|
-/*
|
|
- * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
|
|
- * and it has to be read consecutively and immediately after the 512
|
|
- * byte data block for hardware to generate the error bit offsets
|
|
- * Managing the ecc bytes in the following way is easier. This way is
|
|
- * similar to oobfree structure maintained already in u-boot nand driver
|
|
- */
|
|
-#define MAX_ECCPLACE_ENTRIES 32
|
|
-
|
|
-struct fsmc_nand_eccplace {
|
|
- uint8_t offset;
|
|
- uint8_t length;
|
|
-};
|
|
-
|
|
-struct fsmc_eccplace {
|
|
- struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES];
|
|
-};
|
|
-
|
|
struct fsmc_nand_timings {
|
|
uint8_t tclr;
|
|
uint8_t tar;
|
|
--- a/include/linux/mtd/inftl.h
|
|
+++ b/include/linux/mtd/inftl.h
|
|
@@ -44,7 +44,6 @@ struct INFTLrecord {
|
|
unsigned int nb_blocks; /* number of physical blocks */
|
|
unsigned int nb_boot_blocks; /* number of blocks used by the bios */
|
|
struct erase_info instr;
|
|
- struct nand_ecclayout oobinfo;
|
|
};
|
|
|
|
int INFTL_mount(struct INFTLrecord *s);
|
|
--- a/include/linux/mtd/map.h
|
|
+++ b/include/linux/mtd/map.h
|
|
@@ -137,7 +137,9 @@
|
|
#endif
|
|
|
|
#ifndef map_bankwidth
|
|
+#ifdef CONFIG_MTD
|
|
#warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work"
|
|
+#endif
|
|
static inline int map_bankwidth(void *map)
|
|
{
|
|
BUG();
|
|
@@ -233,8 +235,11 @@ struct map_info {
|
|
If there is no cache to care about this can be set to NULL. */
|
|
void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
|
|
|
|
- /* set_vpp() must handle being reentered -- enable, enable, disable
|
|
- must leave it enabled. */
|
|
+ /* This will be called with 1 as parameter when the first map user
|
|
+ * needs VPP, and called with 0 when the last user exits. The map
|
|
+ * core maintains a reference counter, and assumes that VPP is a
|
|
+ * global resource applying to all mapped flash chips on the system.
|
|
+ */
|
|
void (*set_vpp)(struct map_info *, int);
|
|
|
|
unsigned long pfow_base;
|
|
--- a/include/linux/mtd/mtd.h
|
|
+++ b/include/linux/mtd/mtd.h
|
|
@@ -100,17 +100,35 @@ struct mtd_oob_ops {
|
|
|
|
#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
|
|
#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
|
|
+/**
|
|
+ * struct mtd_oob_region - oob region definition
|
|
+ * @offset: region offset
|
|
+ * @length: region length
|
|
+ *
|
|
+ * This structure describes a region of the OOB area, and is used
|
|
+ * to retrieve ECC or free bytes sections.
|
|
+ * Each section is defined by an offset within the OOB area and a
|
|
+ * length.
|
|
+ */
|
|
+struct mtd_oob_region {
|
|
+ u32 offset;
|
|
+ u32 length;
|
|
+};
|
|
+
|
|
/*
|
|
- * Internal ECC layout control structure. For historical reasons, there is a
|
|
- * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
|
|
- * for export to user-space via the ECCGETLAYOUT ioctl.
|
|
- * nand_ecclayout should be expandable in the future simply by the above macros.
|
|
+ * struct mtd_ooblayout_ops - NAND OOB layout operations
|
|
+ * @ecc: function returning an ECC region in the OOB area.
|
|
+ * Should return -ERANGE if %section exceeds the total number of
|
|
+ * ECC sections.
|
|
+ * @free: function returning a free region in the OOB area.
|
|
+ * Should return -ERANGE if %section exceeds the total number of
|
|
+ * free sections.
|
|
*/
|
|
-struct nand_ecclayout {
|
|
- __u32 eccbytes;
|
|
- __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
|
|
- __u32 oobavail;
|
|
- struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
|
|
+struct mtd_ooblayout_ops {
|
|
+ int (*ecc)(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobecc);
|
|
+ int (*free)(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobfree);
|
|
};
|
|
|
|
struct module; /* only needed for owner field in mtd_info */
|
|
@@ -171,8 +189,8 @@ struct mtd_info {
|
|
const char *name;
|
|
int index;
|
|
|
|
- /* ECC layout structure pointer - read only! */
|
|
- struct nand_ecclayout *ecclayout;
|
|
+ /* OOB layout description */
|
|
+ const struct mtd_ooblayout_ops *ooblayout;
|
|
|
|
/* the ecc step size. */
|
|
unsigned int ecc_step_size;
|
|
@@ -258,6 +276,46 @@ struct mtd_info {
|
|
int usecount;
|
|
};
|
|
|
|
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobecc);
|
|
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
|
|
+ int *section,
|
|
+ struct mtd_oob_region *oobregion);
|
|
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
|
|
+ const u8 *oobbuf, int start, int nbytes);
|
|
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
|
|
+ u8 *oobbuf, int start, int nbytes);
|
|
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oobfree);
|
|
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
|
|
+ const u8 *oobbuf, int start, int nbytes);
|
|
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
|
|
+ u8 *oobbuf, int start, int nbytes);
|
|
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
|
|
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
|
|
+
|
|
+static inline void mtd_set_ooblayout(struct mtd_info *mtd,
|
|
+ const struct mtd_ooblayout_ops *ooblayout)
|
|
+{
|
|
+ mtd->ooblayout = ooblayout;
|
|
+}
|
|
+
|
|
+static inline void mtd_set_of_node(struct mtd_info *mtd,
|
|
+ struct device_node *np)
|
|
+{
|
|
+ mtd->dev.of_node = np;
|
|
+}
|
|
+
|
|
+static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
|
|
+{
|
|
+ return mtd->dev.of_node;
|
|
+}
|
|
+
|
|
+static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
|
|
+{
|
|
+ return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
|
|
+}
|
|
+
|
|
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
|
|
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
|
|
void **virt, resource_size_t *phys);
|
|
--- a/include/linux/mtd/nand.h
|
|
+++ b/include/linux/mtd/nand.h
|
|
@@ -119,6 +119,12 @@ typedef enum {
|
|
NAND_ECC_SOFT_BCH,
|
|
} nand_ecc_modes_t;
|
|
|
|
+enum nand_ecc_algo {
|
|
+ NAND_ECC_UNKNOWN,
|
|
+ NAND_ECC_HAMMING,
|
|
+ NAND_ECC_BCH,
|
|
+};
|
|
+
|
|
/*
|
|
* Constants for Hardware ECC
|
|
*/
|
|
@@ -129,6 +135,14 @@ typedef enum {
|
|
/* Enable Hardware ECC before syndrome is read back from flash */
|
|
#define NAND_ECC_READSYN 2
|
|
|
|
+/*
|
|
+ * Enable generic NAND 'page erased' check. This check is only done when
|
|
+ * ecc.correct() returns -EBADMSG.
|
|
+ * Set this flag if your implementation does not fix bitflips in erased
|
|
+ * pages and you want to rely on the default implementation.
|
|
+ */
|
|
+#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
|
|
+
|
|
/* Bit mask for flags passed to do_nand_read_ecc */
|
|
#define NAND_GET_DEVICE 0x80
|
|
|
|
@@ -160,6 +174,12 @@ typedef enum {
|
|
/* Device supports subpage reads */
|
|
#define NAND_SUBPAGE_READ 0x00001000
|
|
|
|
+/*
|
|
+ * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
|
|
+ * patterns.
|
|
+ */
|
|
+#define NAND_NEED_SCRAMBLING 0x00002000
|
|
+
|
|
/* Options valid for Samsung large page devices */
|
|
#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
|
|
|
|
@@ -276,15 +296,15 @@ struct nand_onfi_params {
|
|
__le16 t_r;
|
|
__le16 t_ccs;
|
|
__le16 src_sync_timing_mode;
|
|
- __le16 src_ssync_features;
|
|
+ u8 src_ssync_features;
|
|
__le16 clk_pin_capacitance_typ;
|
|
__le16 io_pin_capacitance_typ;
|
|
__le16 input_pin_capacitance_typ;
|
|
u8 input_pin_capacitance_max;
|
|
u8 driver_strength_support;
|
|
__le16 t_int_r;
|
|
- __le16 t_ald;
|
|
- u8 reserved4[7];
|
|
+ __le16 t_adl;
|
|
+ u8 reserved4[8];
|
|
|
|
/* vendor */
|
|
__le16 vendor_revision;
|
|
@@ -407,7 +427,7 @@ struct nand_jedec_params {
|
|
__le16 input_pin_capacitance_typ;
|
|
__le16 clk_pin_capacitance_typ;
|
|
u8 driver_strength_support;
|
|
- __le16 t_ald;
|
|
+ __le16 t_adl;
|
|
u8 reserved4[36];
|
|
|
|
/* ECC and endurance block */
|
|
@@ -444,6 +464,7 @@ struct nand_hw_control {
|
|
/**
|
|
* struct nand_ecc_ctrl - Control structure for ECC
|
|
* @mode: ECC mode
|
|
+ * @algo: ECC algorithm
|
|
* @steps: number of ECC steps per page
|
|
* @size: data bytes per ECC step
|
|
* @bytes: ECC bytes per step
|
|
@@ -451,12 +472,18 @@ struct nand_hw_control {
|
|
* @total: total number of ECC bytes per page
|
|
* @prepad: padding information for syndrome based ECC generators
|
|
* @postpad: padding information for syndrome based ECC generators
|
|
- * @layout: ECC layout control struct pointer
|
|
+ * @options: ECC specific options (see NAND_ECC_XXX flags defined above)
|
|
* @priv: pointer to private ECC control data
|
|
* @hwctl: function to control hardware ECC generator. Must only
|
|
* be provided if an hardware ECC is available
|
|
* @calculate: function for ECC calculation or readback from ECC hardware
|
|
- * @correct: function for ECC correction, matching to ECC generator (sw/hw)
|
|
+ * @correct: function for ECC correction, matching to ECC generator (sw/hw).
|
|
+ * Should return a positive number representing the number of
|
|
+ * corrected bitflips, -EBADMSG if the number of bitflips exceed
|
|
+ * ECC strength, or any other error code if the error is not
|
|
+ * directly related to correction.
|
|
+ * If -EBADMSG is returned the input buffers should be left
|
|
+ * untouched.
|
|
* @read_page_raw: function to read a raw page without ECC. This function
|
|
* should hide the specific layout used by the ECC
|
|
* controller and always return contiguous in-band and
|
|
@@ -487,6 +514,7 @@ struct nand_hw_control {
|
|
*/
|
|
struct nand_ecc_ctrl {
|
|
nand_ecc_modes_t mode;
|
|
+ enum nand_ecc_algo algo;
|
|
int steps;
|
|
int size;
|
|
int bytes;
|
|
@@ -494,7 +522,7 @@ struct nand_ecc_ctrl {
|
|
int strength;
|
|
int prepad;
|
|
int postpad;
|
|
- struct nand_ecclayout *layout;
|
|
+ unsigned int options;
|
|
void *priv;
|
|
void (*hwctl)(struct mtd_info *mtd, int mode);
|
|
int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
|
|
@@ -540,11 +568,11 @@ struct nand_buffers {
|
|
|
|
/**
|
|
* struct nand_chip - NAND Private Flash Chip Data
|
|
+ * @mtd: MTD device registered to the MTD framework
|
|
* @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
|
|
* flash device
|
|
* @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
|
|
* flash device.
|
|
- * @flash_node: [BOARDSPECIFIC] device node describing this instance
|
|
* @read_byte: [REPLACEABLE] read one byte from the chip
|
|
* @read_word: [REPLACEABLE] read one word from the chip
|
|
* @write_byte: [REPLACEABLE] write a single byte to the chip on the
|
|
@@ -640,18 +668,17 @@ struct nand_buffers {
|
|
*/
|
|
|
|
struct nand_chip {
|
|
+ struct mtd_info mtd;
|
|
void __iomem *IO_ADDR_R;
|
|
void __iomem *IO_ADDR_W;
|
|
|
|
- struct device_node *flash_node;
|
|
-
|
|
uint8_t (*read_byte)(struct mtd_info *mtd);
|
|
u16 (*read_word)(struct mtd_info *mtd);
|
|
void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
|
|
void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
|
|
void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
|
|
void (*select_chip)(struct mtd_info *mtd, int chip);
|
|
- int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
|
|
+ int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
|
|
int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
|
|
void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
|
|
int (*dev_ready)(struct mtd_info *mtd);
|
|
@@ -719,6 +746,40 @@ struct nand_chip {
|
|
void *priv;
|
|
};
|
|
|
|
+extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
|
|
+extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
|
|
+
|
|
+static inline void nand_set_flash_node(struct nand_chip *chip,
|
|
+ struct device_node *np)
|
|
+{
|
|
+ mtd_set_of_node(&chip->mtd, np);
|
|
+}
|
|
+
|
|
+static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
|
|
+{
|
|
+ return mtd_get_of_node(&chip->mtd);
|
|
+}
|
|
+
|
|
+static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
|
|
+{
|
|
+ return container_of(mtd, struct nand_chip, mtd);
|
|
+}
|
|
+
|
|
+static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
|
|
+{
|
|
+ return &chip->mtd;
|
|
+}
|
|
+
|
|
+static inline void *nand_get_controller_data(struct nand_chip *chip)
|
|
+{
|
|
+ return chip->priv;
|
|
+}
|
|
+
|
|
+static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
|
|
+{
|
|
+ chip->priv = priv;
|
|
+}
|
|
+
|
|
/*
|
|
* NAND Flash Manufacturer ID Codes
|
|
*/
|
|
@@ -850,7 +911,6 @@ extern int nand_do_read(struct mtd_info
|
|
* @chip_delay: R/B delay value in us
|
|
* @options: Option flags, e.g. 16bit buswidth
|
|
* @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
|
|
- * @ecclayout: ECC layout info structure
|
|
* @part_probe_types: NULL-terminated array of probe types
|
|
*/
|
|
struct platform_nand_chip {
|
|
@@ -858,7 +918,6 @@ struct platform_nand_chip {
|
|
int chip_offset;
|
|
int nr_partitions;
|
|
struct mtd_partition *partitions;
|
|
- struct nand_ecclayout *ecclayout;
|
|
int chip_delay;
|
|
unsigned int options;
|
|
unsigned int bbt_options;
|
|
@@ -908,15 +967,6 @@ struct platform_nand_data {
|
|
struct platform_nand_ctrl ctrl;
|
|
};
|
|
|
|
-/* Some helpers to access the data structures */
|
|
-static inline
|
|
-struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
|
|
-{
|
|
- struct nand_chip *chip = mtd->priv;
|
|
-
|
|
- return chip->priv;
|
|
-}
|
|
-
|
|
/* return the supported features. */
|
|
static inline int onfi_feature(struct nand_chip *chip)
|
|
{
|
|
--- a/include/linux/mtd/nand_bch.h
+++ b/include/linux/mtd/nand_bch.h
@@ -32,9 +32,7 @@ int nand_bch_correct_data(struct mtd_inf
/*
* Initialize BCH encoder/decoder
*/
-struct nand_bch_control *
-nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
- unsigned int eccbytes, struct nand_ecclayout **ecclayout);
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
/*
* Release BCH encoder/decoder resources
*/
@@ -55,12 +53,10 @@ static inline int
nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
- return -1;
+ return -ENOTSUPP;
}

-static inline struct nand_bch_control *
-nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
- unsigned int eccbytes, struct nand_ecclayout **ecclayout)
+static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
{
return NULL;
}
--- a/include/linux/mtd/nftl.h
+++ b/include/linux/mtd/nftl.h
@@ -50,7 +50,6 @@ struct NFTLrecord {
unsigned int nb_blocks; /* number of physical blocks */
unsigned int nb_boot_blocks; /* number of blocks used by the bios */
struct erase_info instr;
- struct nand_ecclayout oobinfo;
};

int NFTL_mount(struct NFTLrecord *s);
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -80,7 +80,6 @@ struct onenand_bufferram {
* @page_buf: [INTERN] page main data buffer
* @oob_buf: [INTERN] page oob data buffer
* @subpagesize: [INTERN] holds the subpagesize
- * @ecclayout: [REPLACEABLE] the default ecc placement scheme
* @bbm: [REPLACEABLE] pointer to Bad Block Management
* @priv: [OPTIONAL] pointer to private chip date
*/
@@ -134,7 +133,6 @@ struct onenand_chip {
#endif

int subpagesize;
- struct nand_ecclayout *ecclayout;

void *bbm;

--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -42,7 +42,6 @@ struct mtd_partition {
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
- struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */
};

#define MTDPART_OFS_RETAIN (-3)
@@ -56,11 +55,9 @@ struct device_node;
/**
* struct mtd_part_parser_data - used to pass data to MTD partition parsers.
* @origin: for RedBoot, start address of MTD device
- * @of_node: for OF parsers, device node containing partitioning information
*/
struct mtd_part_parser_data {
unsigned long origin;
- struct device_node *of_node;
};


@@ -78,14 +75,34 @@ struct mtd_part_parser {
struct list_head list;
struct module *owner;
const char *name;
- int (*parse_fn)(struct mtd_info *, struct mtd_partition **,
+ int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
struct mtd_part_parser_data *);
+ void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
enum mtd_parser_type type;
};

-extern void register_mtd_parser(struct mtd_part_parser *parser);
+/* Container for passing around a set of parsed partitions */
+struct mtd_partitions {
+ const struct mtd_partition *parts;
+ int nr_parts;
+ const struct mtd_part_parser *parser;
+};
+
+extern int __register_mtd_parser(struct mtd_part_parser *parser,
+ struct module *owner);
+#define register_mtd_parser(parser) __register_mtd_parser(parser, THIS_MODULE)
+
extern void deregister_mtd_parser(struct mtd_part_parser *parser);

+/*
+ * module_mtd_part_parser() - Helper macro for MTD partition parsers that don't
+ * do anything special in module init/exit. Each driver may only use this macro
+ * once, and calling it replaces module_init() and module_exit().
+ */
+#define module_mtd_part_parser(__mtd_part_parser) \
+ module_driver(__mtd_part_parser, register_mtd_parser, \
+ deregister_mtd_parser)
+
int mtd_is_partition(const struct mtd_info *mtd);
int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length);
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -143,11 +143,11 @@ enum flctl_ecc_res_t {
struct dma_chan;

struct sh_flctl {
- struct mtd_info mtd;
struct nand_chip chip;
struct platform_device *pdev;
struct dev_pm_qos_request pm_qos;
void __iomem *reg;
+ resource_size_t fifo;

uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
int read_bytes;
@@ -186,7 +186,7 @@ struct sh_flctl_platform_data {

static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
{
- return container_of(mtdinfo, struct sh_flctl, mtd);
+ return container_of(mtd_to_nand(mtdinfo), struct sh_flctl, chip);
}

#endif /* __SH_FLCTL_H__ */
--- a/include/linux/mtd/sharpsl.h
+++ b/include/linux/mtd/sharpsl.h
@@ -14,7 +14,7 @@

struct sharpsl_nand_platform_data {
struct nand_bbt_descr *badblock_pattern;
- struct nand_ecclayout *ecc_layout;
+ const struct mtd_ooblayout_ops *ecc_layout;
struct mtd_partition *partitions;
unsigned int nr_partitions;
};
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -228,7 +228,7 @@ struct nand_oobfree {
* complete set of ECC information. The ioctl truncates the larger internal
* structure to retain binary compatibility with the static declaration of the
* ioctl. Note that the "MTD_MAX_..._ENTRIES" macros represent the max size of
- * the user struct, not the MAX size of the internal struct nand_ecclayout.
+ * the user struct, not the MAX size of the internal OOB layout representation.
*/
struct nand_ecclayout_user {
__u32 eccbytes;
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1153,7 +1153,7 @@ static struct jffs2_sb_info *work_to_sb(
{
struct delayed_work *dwork;

- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
}

@@ -1183,22 +1183,20 @@ void jffs2_dirty_trigger(struct jffs2_sb

int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
{
- struct nand_ecclayout *oinfo = c->mtd->ecclayout;
-
if (!c->mtd->oobsize)
return 0;

/* Cleanmarker is out-of-band, so inline size zero */
c->cleanmarker_size = 0;

- if (!oinfo || oinfo->oobavail == 0) {
+ if (c->mtd->oobavail == 0) {
pr_err("inconsistent device description\n");
return -EINVAL;
}

jffs2_dbg(1, "using OOB on NAND\n");

- c->oobavail = oinfo->oobavail;
+ c->oobavail = c->mtd->oobavail;

/* Initialise write buffer */
init_rwsem(&c->wbuf_sem);
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -85,6 +85,7 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
+#define SR_TB BIT(5) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */

#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
@@ -116,6 +117,7 @@ enum spi_nor_ops {

enum spi_nor_option_flags {
SNOR_F_USE_FSR = BIT(0),
+ SNOR_F_HAS_SR_TB = BIT(1),
};

/**
@@ -123,7 +125,6 @@ enum spi_nor_option_flags {
* @mtd: point to a mtd_info structure
* @lock: the lock for the read/write/erase/lock/unlock operations
* @dev: point to a spi device, or a spi nor controller device.
- * @flash_node: point to a device node describing this flash instance.
* @page_size: the page size of the SPI NOR
* @addr_width: number of address bytes
* @erase_opcode: the opcode for erasing a sector
@@ -143,7 +144,8 @@ enum spi_nor_option_flags {
* @read: [DRIVER-SPECIFIC] read data from the SPI NOR
* @write: [DRIVER-SPECIFIC] write data to the SPI NOR
* @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
- * at the offset @offs
+ * at the offset @offs; if not provided by the driver,
+ * spi-nor will send the erase opcode via write_reg()
* @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
* @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
* @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
@@ -154,7 +156,6 @@ struct spi_nor {
struct mtd_info mtd;
struct mutex lock;
struct device *dev;
- struct device_node *flash_node;
u32 page_size;
u8 addr_width;
u8 erase_opcode;
@@ -184,6 +185,17 @@ struct spi_nor {
void *priv;
};

+static inline void spi_nor_set_flash_node(struct spi_nor *nor,
+ struct device_node *np)
+{
+ mtd_set_of_node(&nor->mtd, np);
+}
+
+static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
+{
+ return mtd_get_of_node(&nor->mtd);
+}
+
/**
* spi_nor_scan() - scan the SPI NOR
* @nor: the spi_nor structure