Mirror of https://github.com/openwrt/openwrt.git
Synced 2024-12-25 16:31:13 +00:00

Commit e8e2b88f5f
The 1st generation MediaTek PCIe host bridge cannot handle Message
Signaled Interrupts (MSIs). The core PCI code is not aware that MSI is
not available on this hardware, which results in warnings of the form:

WARNING: CPU: 2 PID: 112 at include/linux/msi.h:219 pci_msi_setup_msi_irqs.constprop.8+0x64/0x6c
Modules linked in: ahci(+) libahci libata sd_mod scsi_mod gpio_button_hotplug
CPU: 2 PID: 112 Comm: kmodloader Not tainted 5.10.52 #0
Hardware name: Mediatek Cortex-A7 (Device Tree)

Import patches that introduce the 'no_msi' attribute to signal missing
MSI support to the PCI core.

Refresh patches:
- 000-spi-fix-fifo.patch
- 330-mtk-bmt-support.patch
- 510-net-mediatek-add-flow-offload-for-mt7623.patch
- 601-PCI-mediatek-Use-regmap-to-get-shared-pcie-cfg-base.patch
- 610-pcie-mediatek-fix-clearing-interrupt-status.patch
- 700-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch
- 710-pci-pcie-mediatek-add-support-for-coherent-DMA.patch

Signed-off-by: Nick Hainke <vincent@systemli.org>
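For context, the imported series replaces the warning above with a clean failure: the host-bridge driver declares once that it cannot deliver MSIs, and the PCI core then refuses MSI setup for devices behind it. A minimal sketch, assuming the series adds the 'no_msi' flag to struct pci_host_bridge as the commit message suggests; the probe function below is illustrative, not the actual pcie-mediatek code:

/* Hedged sketch: flag missing MSI support to the PCI core.
 * 'no_msi' is the attribute named in the commit message; the
 * surrounding probe code is hypothetical. */
static int mtk_pcie_gen1_probe(struct platform_device *pdev)
{
	struct pci_host_bridge *host;

	host = devm_pci_alloc_host_bridge(&pdev->dev, 0);
	if (!host)
		return -ENOMEM;

	/* 1st gen controller: the core will now fail pci_enable_msi()
	 * cleanly instead of warning in pci_msi_setup_msi_irqs() */
	host->no_msi = 1;

	/* ... remaining controller setup, then pci_host_probe(host) ... */
	return 0;
}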
856 lines
20 KiB
Diff
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -15,6 +15,10 @@ config MTD_NAND_ECC
 	bool
 	depends on MTD_NAND_CORE
 
+config MTD_NAND_MTK_BMT
+	bool "Support MediaTek NAND Bad-block Management Table"
+	default n
+
 endmenu
 
 endmenu
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -2,6 +2,7 @@
 
 nandcore-objs := core.o bbt.o
 obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o
 
 obj-y += onenand/
 obj-y += raw/
--- /dev/null
+++ b/drivers/mtd/nand/mtk_bmt.c
@@ -0,0 +1,781 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
+ * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtk_bmt.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+#define MAIN_SIGNATURE_OFFSET	0
+#define OOB_SIGNATURE_OFFSET	1
+#define BBPOOL_RATIO		2
+
+#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
+
+/* Maximum 8k blocks */
+#define BB_TABLE_MAX	bmtd.table_size
+#define BMT_TABLE_MAX	(BB_TABLE_MAX * BBPOOL_RATIO / 100)
+#define BMT_TBL_DEF_VAL	0x0
+
+/*
+ * Burner Bad Block Table
+ * ---------- Only supports SLC NAND chips! ----------
+ */
+
+struct bbbt {
+	char signature[3];
+	/* This version is used to distinguish the legacy and new algorithm */
+#define BBMT_VERSION	2
+	unsigned char version;
+	/* Below 2 tables will be written in SLC */
+	u16 bb_tbl[];
+};
+
+struct bbmt {
+	u16 block;
+#define NO_MAPPED	0
+#define NORMAL_MAPPED	1
+#define BMT_MAPPED	2
+	u16 mapped;
+};
+
+static struct bmt_desc {
+	struct mtd_info *mtd;
+
+	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
+			  struct mtd_oob_ops *ops);
+	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
+			   struct mtd_oob_ops *ops);
+	const struct nand_ops *nand_ops;
+
+	struct bbbt *bbt;
+
+	struct dentry *debugfs_dir;
+
+	u32 table_size;
+	u32 pg_size;
+	u32 blk_size;
+	u16 pg_shift;
+	u16 blk_shift;
+	/* bbt logical address */
+	u16 pool_lba;
+	/* bbt physical address */
+	u16 pool_pba;
+	/* Maximum count of bad blocks that the vendor guaranteed */
+	u16 bb_max;
+	/* Total blocks of the NAND chip */
+	u16 total_blks;
+	/* The block(n) BMT is located at (bmt_tbl[n]) */
+	u16 bmt_blk_idx;
+	/* How many pages are needed to store 'struct bbbt' */
+	u32 bmt_pgs;
+
+	/* to compensate for driver level remapping */
+	u8 oob_offset;
+} bmtd = {0};
+
+static unsigned char *nand_bbt_buf;
+static unsigned char *nand_data_buf;
+
+/* -------- Unit conversions -------- */
+static inline u32 blk_pg(u16 block)
+{
+	return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
+}
+
+/* -------- NAND operations wrapper -------- */
+static inline int
+bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
+	      unsigned char *fdm, int fdm_len)
+{
+	struct mtd_oob_ops ops = {
+		.mode = MTD_OPS_PLACE_OOB,
+		.ooboffs = bmtd.oob_offset,
+		.oobbuf = fdm,
+		.ooblen = fdm_len,
+		.datbuf = dat,
+		.len = dat_len,
+	};
+
+	return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
+}
+
+static inline int bbt_nand_erase(u16 block)
+{
+	struct nand_device *nand = mtd_to_nanddev(bmtd.mtd);
+	loff_t addr = (loff_t)block << bmtd.blk_shift;
+	struct nand_pos pos;
+
+	nanddev_offs_to_pos(nand, addr, &pos);
+	return bmtd.nand_ops->erase(nand, &pos);
+}
+
+/* -------- Bad Blocks Management -------- */
+static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
+{
+	return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
+}
+
+static int
+read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
+{
+	u32 len = bmtd.bmt_pgs << bmtd.pg_shift;
+
+	return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
+}
+
+static int write_bmt(u16 block, unsigned char *dat)
+{
+	struct mtd_oob_ops ops = {
+		.mode = MTD_OPS_PLACE_OOB,
+		.ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
+		.oobbuf = "bmt",
+		.ooblen = 3,
+		.datbuf = dat,
+		.len = bmtd.bmt_pgs << bmtd.pg_shift,
+	};
+	loff_t addr = (loff_t)block << bmtd.blk_shift;
+
+	return bmtd._write_oob(bmtd.mtd, addr, &ops);
+}
+
+static u16 find_valid_block(u16 block)
+{
+	u8 fdm[4];
+	int ret;
+	int loop = 0;
+
+retry:
+	if (block >= bmtd.total_blks)
+		return 0;
+
+	ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
+			    fdm, sizeof(fdm));
+	/* Read the 1st byte of FDM to judge whether it's a bad block
+	 * or not
+	 */
+	if (ret || fdm[0] != 0xff) {
+		pr_info("nand: found bad block 0x%x\n", block);
+		if (loop >= bmtd.bb_max) {
+			pr_info("nand: FATAL ERR: too many bad blocks!!\n");
+			return 0;
+		}
+
+		loop++;
+		block++;
+		goto retry;
+	}
+
+	return block;
+}
+
+/* Find out all bad blocks, and fill in the mapping table */
+static int scan_bad_blocks(struct bbbt *bbt)
+{
+	int i;
+	u16 block = 0;
+
+	/* First time download, block 0 MUST NOT be a bad block;
+	 * this is guaranteed by the vendor
+	 */
+	bbt->bb_tbl[0] = 0;
+
+	/*
+	 * Construct the mapping table of the normal data area (non-PMT/BMTPOOL)
+	 * G - Good block; B - Bad block
+	 *          ---------------------------
+	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
+	 *          ---------------------------
+	 * What bb_tbl[i] looks like:
+	 * physical block(i):
+	 *   0 1 2 3 4 5 6 7 8 9 a b c
+	 * mapped block(bb_tbl[i]):
+	 *   0 1 3 6 7 8 9 b ......
+	 * ATTENTION:
+	 * If a new bad block(n) occurs, search bmt_tbl to find
+	 * an available block(x), and fill in bb_tbl[n] = x;
+	 */
+	for (i = 1; i < bmtd.pool_lba; i++) {
+		bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
+		BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
+		if (bbt->bb_tbl[i] == 0)
+			return -1;
+	}
+
+	/* Physical block start address of the BMT pool */
+	bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
+	if (bmtd.pool_pba >= bmtd.total_blks - 2) {
+		pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
+		return -1;
+	}
+
+	BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
+	i = 0;
+	block = bmtd.pool_pba;
+	/*
+	 * The bmt table is used for runtime bad block mapping
+	 * G - Good block; B - Bad block
+	 *          ---------------------------
+	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
+	 *          ---------------------------
+	 * block:   0 1 2 3 4 5 6 7 8 9 a b c
+	 * What bmt_tbl[i] looks like in the initial state:
+	 * i:
+	 *   0 1 2 3 4 5 6 7
+	 * bmt_tbl[i].block:
+	 *   0 1 3 6 7 8 9 b
+	 * bmt_tbl[i].mapped:
+	 *   N N N N N N N B
+	 *   N - Not mapped (available)
+	 *   M - Mapped
+	 *   B - BMT
+	 * ATTENTION:
+	 * The BMT is always in the last valid block of the pool
+	 */
+	while ((block = find_valid_block(block)) != 0) {
+		bmt_tbl(bbt)[i].block = block;
+		bmt_tbl(bbt)[i].mapped = NO_MAPPED;
+		BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
+		block++;
+		i++;
+	}
+
+	/* i - How many available blocks in the pool, which is the length of bmt_tbl[]
+	 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
+	 */
+	bmtd.bmt_blk_idx = i - 1;
+	bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
+
+	if (i < 1) {
+		pr_info("nand: FATAL ERR: no space to store BMT!!\n");
+		return -1;
+	}
+
+	pr_info("[BBT] %d available blocks in BMT pool\n", i);
+
+	return 0;
+}
+
+static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
+{
+	struct bbbt *bbt = (struct bbbt *)buf;
+	u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET;
+
+
+	if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
+	    memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
+		if (bbt->version == BBMT_VERSION)
+			return true;
+	}
+	BBT_LOG("[BBT] BMT version mismatch, please upgrade preloader and uboot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
+		sig[0], sig[1], sig[2],
+		fdm[1], fdm[2], fdm[3]);
+	return false;
+}
+
+static u16 get_bmt_index(struct bbmt *bmt)
+{
+	int i = 0;
+
+	while (bmt[i].block != BMT_TBL_DEF_VAL) {
+		if (bmt[i].mapped == BMT_MAPPED)
+			return i;
+		i++;
+	}
+	return 0;
+}
+
+static struct bbbt *scan_bmt(u16 block)
+{
+	u8 fdm[4];
+
+	if (block < bmtd.pool_lba)
+		return NULL;
+
+	if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
+		return scan_bmt(block - 1);
+
+	if (is_valid_bmt(nand_bbt_buf, fdm)) {
+		bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
+		if (bmtd.bmt_blk_idx == 0) {
+			pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
+			return NULL;
+		}
+		pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
+		return (struct bbbt *)nand_bbt_buf;
+	} else
+		return scan_bmt(block - 1);
+}
+
+/* Write the Burner Bad Block Table to NAND flash
+ * n - write BMT to bmt_tbl[n]
+ */
+static u16 upload_bmt(struct bbbt *bbt, int n)
+{
+	u16 block;
+
+retry:
+	if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
+		pr_info("nand: FATAL ERR: no space to store BMT!\n");
+		return (u16)-1;
+	}
+
+	block = bmt_tbl(bbt)[n].block;
+	BBT_LOG("n = 0x%x, block = 0x%x", n, block);
+	if (bbt_nand_erase(block)) {
+		bmt_tbl(bbt)[n].block = 0;
+		/* erase failed, try the previous block: bmt_tbl[n - 1].block */
+		n--;
+		goto retry;
+	}
+
+	/* The main signature offset is fixed at 0,
+	 * the OOB signature offset is fixed at 1
+	 */
+	memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
+	bbt->version = BBMT_VERSION;
+
+	if (write_bmt(block, (unsigned char *)bbt)) {
+		bmt_tbl(bbt)[n].block = 0;
+
+		/* write failed, try the previous block in bmt_tbl[n - 1] */
+		n--;
+		goto retry;
+	}
+
+	/* Return the current index(n) of the BMT pool (bmt_tbl[n]) */
+	return n;
+}
+
+static u16 find_valid_block_in_pool(struct bbbt *bbt)
+{
+	int i;
+
+	if (bmtd.bmt_blk_idx == 0)
+		goto error;
+
+	for (i = 0; i < bmtd.bmt_blk_idx; i++) {
+		if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
+			bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
+			return bmt_tbl(bbt)[i].block;
+		}
+	}
+
+error:
+	pr_info("nand: FATAL ERR: BMT pool has run out!\n");
+	return 0;
+}
+
+/* We met a bad block: mark it as bad and map it to a valid block in the pool.
+ * If it's a write failure, we need to write the data to the mapped block
+ */
+static bool update_bmt(u16 block)
+{
+	u16 mapped_blk;
+	struct bbbt *bbt;
+
+	bbt = bmtd.bbt;
+	mapped_blk = find_valid_block_in_pool(bbt);
+	if (mapped_blk == 0)
+		return false;
+
+	/* Map new bad block to available block in pool */
+	bbt->bb_tbl[block] = mapped_blk;
+	bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
+
+	return true;
+}
+
+u16 get_mapping_block_index(int block)
+{
+	int mapping_block;
+
+	if (block < bmtd.pool_lba)
+		mapping_block = bmtd.bbt->bb_tbl[block];
+	else
+		mapping_block = block;
+	BBT_LOG("0x%x mapped to 0x%x", block, mapping_block);
+
+	return mapping_block;
+}
+
+static int
+mtk_bmt_read(struct mtd_info *mtd, loff_t from,
+	     struct mtd_oob_ops *ops)
+{
+	struct mtd_oob_ops cur_ops = *ops;
+	int retry_count = 0;
+	loff_t cur_from;
+	int ret;
+
+	ops->retlen = 0;
+	ops->oobretlen = 0;
+
+	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
+		u32 offset = from & (bmtd.blk_size - 1);
+		u32 block = from >> bmtd.blk_shift;
+		u32 cur_block;
+
+		cur_block = get_mapping_block_index(block);
+		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
+
+		cur_ops.oobretlen = 0;
+		cur_ops.retlen = 0;
+		cur_ops.len = min_t(u32, mtd->erasesize - offset,
+				    ops->len - ops->retlen);
+		ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
+		if (ret < 0) {
+			update_bmt(block);
+			if (retry_count++ < 10)
+				continue;
+
+			return ret;
+		}
+
+		ops->retlen += cur_ops.retlen;
+		ops->oobretlen += cur_ops.oobretlen;
+
+		cur_ops.ooboffs = 0;
+		cur_ops.datbuf += cur_ops.retlen;
+		cur_ops.oobbuf += cur_ops.oobretlen;
+		cur_ops.ooblen -= cur_ops.oobretlen;
+
+		if (!cur_ops.len)
+			cur_ops.len = mtd->erasesize - offset;
+
+		from += cur_ops.len;
+		retry_count = 0;
+	}
+
+	return 0;
+}
+
+static int
+mtk_bmt_write(struct mtd_info *mtd, loff_t to,
+	      struct mtd_oob_ops *ops)
+{
+	struct mtd_oob_ops cur_ops = *ops;
+	int retry_count = 0;
+	loff_t cur_to;
+	int ret;
+
+	ops->retlen = 0;
+	ops->oobretlen = 0;
+
+	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
+		u32 offset = to & (bmtd.blk_size - 1);
+		u32 block = to >> bmtd.blk_shift;
+		u32 cur_block;
+
+		cur_block = get_mapping_block_index(block);
+		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
+
+		cur_ops.oobretlen = 0;
+		cur_ops.retlen = 0;
+		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
+				    ops->len - ops->retlen);
+		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
+		if (ret < 0) {
+			update_bmt(block);
+			if (retry_count++ < 10)
+				continue;
+
+			return ret;
+		}
+
+		ops->retlen += cur_ops.retlen;
+		ops->oobretlen += cur_ops.oobretlen;
+
+		cur_ops.ooboffs = 0;
+		cur_ops.datbuf += cur_ops.retlen;
+		cur_ops.oobbuf += cur_ops.oobretlen;
+		cur_ops.ooblen -= cur_ops.oobretlen;
+
+		if (!cur_ops.len)
+			cur_ops.len = mtd->erasesize - offset;
+
+		to += cur_ops.len;
+		retry_count = 0;
+	}
+
+	return 0;
+}
+
+
+
+static int
+mtk_bmt_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+	struct nand_pos new_pos = *pos;
+	int retry_count = 0;
+	int ret;
+
+retry:
+	new_pos.eraseblock = get_mapping_block_index(pos->eraseblock);
+
+	ret = bmtd.nand_ops->erase(nand, &new_pos);
+	if (ret) {
+		update_bmt(pos->eraseblock);
+		if (retry_count++ < 10)
+			goto retry;
+	}
+
+	return ret;
+}
+
+static bool
+mtk_bmt_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+	struct nand_pos new_pos = *pos;
+	int retry_count = 0;
+	bool ret;
+
+retry:
+	new_pos.eraseblock = get_mapping_block_index(pos->eraseblock);
+
+	ret = bmtd.nand_ops->isbad(nand, &new_pos);
+	if (ret) {
+		update_bmt(pos->eraseblock);
+		if (retry_count++ < 10)
+			goto retry;
+	}
+
+	return ret;
+}
+
+static int
+mtk_bmt_markbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+	struct nand_pos new_pos = *pos;
+
+	new_pos.eraseblock = get_mapping_block_index(new_pos.eraseblock);
+	update_bmt(pos->eraseblock);
+
+	return bmtd.nand_ops->markbad(nand, &new_pos);
+}
+
+static void
+mtk_bmt_replace_ops(struct mtd_info *mtd)
+{
+	static const struct nand_ops mtk_bmt_nand_ops = {
+		.erase = mtk_bmt_erase,
+		.isbad = mtk_bmt_isbad,
+		.markbad = mtk_bmt_markbad,
+	};
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+
+	bmtd.nand_ops = nand->ops;
+	bmtd._read_oob = mtd->_read_oob;
+	bmtd._write_oob = mtd->_write_oob;
+
+	mtd->_read_oob = mtk_bmt_read;
+	mtd->_write_oob = mtk_bmt_write;
+	nand->ops = &mtk_bmt_nand_ops;
+}
+
+static int mtk_bmt_debug_mark_good(void *data, u64 val)
+{
+	u32 block = val >> bmtd.blk_shift;
+
+	bmtd.bbt->bb_tbl[block] = block;
+	bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
+
+	return 0;
+}
+
+static int mtk_bmt_debug_mark_bad(void *data, u64 val)
+{
+	u32 block = val >> bmtd.blk_shift;
+
+	update_bmt(block);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
+
+static void
+mtk_bmt_add_debugfs(void)
+{
+	struct dentry *dir;
+
+	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
+	if (!dir)
+		return;
+
+	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
+	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
+}
+
+void mtk_bmt_detach(struct mtd_info *mtd)
+{
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+
+	if (bmtd.mtd != mtd)
+		return;
+
+	if (bmtd.debugfs_dir)
+		debugfs_remove_recursive(bmtd.debugfs_dir);
+	bmtd.debugfs_dir = NULL;
+
+	kfree(nand_bbt_buf);
+	kfree(nand_data_buf);
+
+	mtd->_read_oob = bmtd._read_oob;
+	mtd->_write_oob = bmtd._write_oob;
+	mtd->size = bmtd.total_blks << bmtd.blk_shift;
+	nand->ops = bmtd.nand_ops;
+
+	memset(&bmtd, 0, sizeof(bmtd));
+}
+
+/* total_blocks - The total count of blocks that the NAND chip has */
+int mtk_bmt_attach(struct mtd_info *mtd)
+{
+	struct device_node *np;
+	struct bbbt *bbt;
+	u32 bufsz;
+	u32 block;
+	u16 total_blocks, pmt_block;
+	int ret = 0;
+	u32 bmt_pool_size, bmt_table_size;
+
+	if (bmtd.mtd)
+		return -ENOSPC;
+
+	np = mtd_get_of_node(mtd);
+	if (!np)
+		return 0;
+
+	if (!of_property_read_bool(np, "mediatek,bmt-v2"))
+		return 0;
+
+	if (of_property_read_u32(np, "mediatek,bmt-pool-size",
+				 &bmt_pool_size) != 0)
+		bmt_pool_size = 80;
+
+	if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
+				&bmtd.oob_offset) != 0)
+		bmtd.oob_offset = 8;
+
+	if (of_property_read_u32(np, "mediatek,bmt-table-size",
+				 &bmt_table_size) != 0)
+		bmt_table_size = 0x2000U;
+
+	bmtd.mtd = mtd;
+	mtk_bmt_replace_ops(mtd);
+
+	bmtd.table_size = bmt_table_size;
+	bmtd.blk_size = mtd->erasesize;
+	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
+	bmtd.pg_size = mtd->writesize;
+	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
+	total_blocks = mtd->size >> bmtd.blk_shift;
+	pmt_block = total_blocks - bmt_pool_size - 2;
+
+	mtd->size = pmt_block << bmtd.blk_shift;
+
+	/*
+	 * -----------------------------------------
+	 * | PMT(2blks) | BMT POOL(totalblks * 2%) |
+	 * -----------------------------------------
+	 * ^            ^
+	 * |            |
+	 * pmt_block    pmt_block + 2blocks(pool_lba)
+	 *
+	 * ATTENTION:
+	 * The blocks ahead of the boundary block are stored in bb_tbl
+	 * and the blocks behind are stored in bmt_tbl
+	 */
+
+	bmtd.pool_lba = (u16)(pmt_block + 2);
+	bmtd.total_blks = total_blocks;
+	bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
+
+	/* 2 buffers we need */
+	bufsz = round_up(sizeof(struct bbbt) +
+			 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
+	bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
+
+	nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
+	nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
+
+	if (!nand_bbt_buf || !nand_data_buf) {
+		pr_info("nand: FATAL ERR: allocate buffer failed!\n");
+		ret = -1;
+		goto error;
+	}
+
+	memset(nand_bbt_buf, 0xff, bufsz);
+	memset(nand_data_buf, 0xff, bmtd.pg_size);
+
+	BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)",
+		nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size);
+	BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x",
+		bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max);
+
+	/* Scanning starts from the first page of the last block
+	 * of the whole flash
+	 */
+	bbt = scan_bmt(bmtd.total_blks - 1);
+	if (!bbt) {
+		/* BMT not found */
+		if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
+			pr_info("nand: FATAL: Too many blocks, can not support!\n");
+			ret = -1;
+			goto error;
+		}
+
+		bbt = (struct bbbt *)nand_bbt_buf;
+		memset(bmt_tbl(bbt), BMT_TBL_DEF_VAL, bmtd.table_size * sizeof(struct bbmt));
+
+		if (scan_bad_blocks(bbt)) {
+			ret = -1;
+			goto error;
+		}
+
+		/* The BMT is always in the last valid block of the pool */
+		bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
+		block = bmt_tbl(bbt)[bmtd.bmt_blk_idx].block;
+		pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
+
+		if (bmtd.bmt_blk_idx == 0)
+			pr_info("nand: Warning: no available block in BMT pool!\n");
+		else if (bmtd.bmt_blk_idx == (u16)-1) {
+			ret = -1;
+			goto error;
+		}
+	}
+	mtk_bmt_add_debugfs();
+
+	bmtd.bbt = bbt;
+	return 0;
+
+error:
+	mtk_bmt_detach(mtd);
+	return ret;
+}
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
+MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
+
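The read, write, erase, isbad and markbad wrappers above all hinge on the same translation: split the logical offset into a block index and an in-block offset using blk_shift, remap the block through bb_tbl via get_mapping_block_index(), then recombine. A standalone sketch of that arithmetic with hypothetical values (not part of the patch):

/* Userspace illustration of the offset remapping performed by
 * mtk_bmt_read()/mtk_bmt_write(); the table contents are made up. */
#include <stdint.h>
#include <stdio.h>

static const uint16_t bb_tbl_demo[] = { 0, 1, 3, 6 }; /* logical -> physical */

int main(void)
{
	const unsigned blk_shift = 17;                      /* 128 KiB eraseblocks */
	uint64_t from = ((uint64_t)2 << blk_shift) + 0x200; /* logical block 2 */
	uint32_t offset = from & ((1u << blk_shift) - 1);
	uint32_t block = from >> blk_shift;
	uint64_t cur_from = ((uint64_t)bb_tbl_demo[block] << blk_shift) + offset;

	/* prints: logical 0x40200 -> physical 0x60200 */
	printf("logical 0x%jx -> physical 0x%jx\n",
	       (uintmax_t)from, (uintmax_t)cur_from);
	return 0;
}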
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -19,6 +19,7 @@
 #include <linux/string.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/spi-mem.h>
+#include <linux/mtd/mtk_bmt.h>
 
 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
 {
@@ -1140,6 +1141,8 @@ static int spinand_probe(struct spi_mem
 	if (ret)
 		return ret;
 
+	mtk_bmt_attach(mtd);
+
 	ret = mtd_device_register(mtd, NULL, 0);
 	if (ret)
 		goto err_spinand_cleanup;
@@ -1165,6 +1168,7 @@ static int spinand_remove(struct spi_mem
 	if (ret)
 		return ret;
 
+	mtk_bmt_detach(mtd);
 	spinand_cleanup(spinand);
 
 	return 0;
--- /dev/null
+++ b/include/linux/mtd/mtk_bmt.h
@@ -0,0 +1,18 @@
+#ifndef __MTK_BMT_H
+#define __MTK_BMT_H
+
+#ifdef CONFIG_MTD_NAND_MTK_BMT
+int mtk_bmt_attach(struct mtd_info *mtd);
+void mtk_bmt_detach(struct mtd_info *mtd);
+#else
+static inline int mtk_bmt_attach(struct mtd_info *mtd)
+{
+	return 0;
+}
+
+static inline void mtk_bmt_detach(struct mtd_info *mtd)
+{
+}
+#endif
+
+#endif
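Finally, a minimal integration sketch mirroring the spi/core.c hunk above: a flash driver attaches the BMT before registering its MTD device and detaches it on removal. The wrapper function below is illustrative, not part of the patch; the DT property names and defaults come straight from mtk_bmt_attach():

/* Illustrative only: wiring mtk_bmt into a NAND driver, following
 * the spinand_probe()/spinand_remove() changes above. */
#include <linux/mtd/mtd.h>
#include <linux/mtd/mtk_bmt.h>

static int example_register_with_bmt(struct mtd_info *mtd)
{
	int ret;

	/*
	 * No-op unless the MTD's DT node sets "mediatek,bmt-v2".
	 * Optional properties: "mediatek,bmt-pool-size" (default 80),
	 * "mediatek,bmt-oob-offset" (default 8),
	 * "mediatek,bmt-table-size" (default 0x2000).
	 */
	mtk_bmt_attach(mtd);

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		mtk_bmt_detach(mtd);

	return ret;
}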