[PATCH 9/9] driver: mtd: ubi: add bakvol module in UBI layer

From: Bean Huo 霍斌斌 (beanhuo)
Date: Mon Sep 28 2015 - 03:05:40 EST


Add the bakvol module to the UBI layer.
This patch is based on the NAND dual-plane program operation.
Because different NAND vendors use different paired-page sequences,
this patch currently supports only Micron 70s/80s/90s MLC NAND.


Signed-off-by: Bean Huo <beanhuo@xxxxxxxxxx>
---
drivers/mtd/ubi/Kconfig | 15 +
drivers/mtd/ubi/Makefile | 2 +
drivers/mtd/ubi/bakvol.c | 1106 ++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 1123 insertions(+)
create mode 100644 drivers/mtd/ubi/bakvol.c

diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index f0855ce..d9f01fa 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -77,6 +77,21 @@ config MTD_UBI_FASTMAP

If in doubt, say "N".

+config MTD_UBI_MLC_NAND_BAKVOL
+ bool "UBI solution for NAND pair page (Experimental feature)"
+ default n
+ help
+ This option enables MLC NAND paired-page power-cut protection.
+
+ Paired-page corruption on power loss is a known MLC NAND issue:
+ MLC NAND pages are coupled in the sense that if you cut power
+ while writing to a page, you corrupt not only that page but also
+ one of the previously written pages paired with it. For more
+ detailed information, please refer to the following link:
+ http://www.linux-mtd.infradead.org/doc/ubifs.html
+
+ If in doubt, say "N".
+
config MTD_UBI_GLUEBI
tristate "MTD devices emulation driver (gluebi)"
help
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index 4e3c3d7..f8cf01c 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -5,4 +5,6 @@ ubi-y += misc.o debug.o
ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o

+ubi-$(CONFIG_MTD_UBI_MLC_NAND_BAKVOL) += bakvol.o
+
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/bakvol.c b/drivers/mtd/ubi/bakvol.c
new file mode 100644
index 0000000..c2c99fd
--- /dev/null
+++ b/drivers/mtd/ubi/bakvol.c
@@ -0,0 +1,1106 @@
+/* This version ported to the Linux-UBI system by Micron
+ *
+ * Based on: Micron APPARATUSES AND METHODS FOR MEMORY MANAGEMENT
+ *
+ */
+
+/*===========================================
+ A UBI solution for MLC NAND power loss
+
+ This driver backs up lower-page data to a log (backup) volume.
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may obtain a copy of
+ the License at http://www.mozilla.org/MPL/. However, you may use
+ this file whether or not you are in compliance with the License.
+
+ The initial developer of the original code is Bean Huo
+ <beanhuo@xxxxxxxxxx>. Portions created by Micron.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+===========================================*/
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <asm/div64.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include "ubi.h"
+
+u8 corruption;
+struct mtd_oob_ops *oob_ops_bak, *oob_ops_src;
+struct mtd_oob_ops *oob_ops; /* Global read/write buffer */
+static u8 BK_free = UBI_BACKUP_VOLUME_EBS;
+
+struct recover_data_info {
+ uint8_t perch; /* 1: used, 0: free */
+ loff_t original_data_addr; /* Original corrupted page address */
+ u32 original_peb;
+ u32 original_page;
+ u32 bk_pb; /* Backup block physical number */
+ uint8_t *datbuf; /* Contains the corrupted page data */
+ uint8_t *oobbuf;
+};
+
+struct recover_data_info BK[UBI_BACKUP_VOLUME_EBS]; /* Backup data buffers */
+
+/* Temporary variables used during scanning */
+struct ubi_ec_hdr *ech;
+struct ubi_vid_hdr *vidh;
+
+/* The following select the Micron NAND version */
+#define MICRON_NAND_VERSION_L7X_CONFIG 1
+#define MICRON_NAND_VERSION_L8X_CONFIG 0
+#define MICRON_NAND_VERSION_L9X_CONFIG 0
+#undef DEBUG
+
+/**
+ * is_lowerpage - classify physical page @phy_page_num.
+ *
+ * Returns 1 if @phy_page_num is a lower page, 0 if it is an upper page,
+ * and -1 if it is an SLC page.
+ */
+static int is_lowerpage(int phy_page_num)
+{
+
+ int ret;
+#if MICRON_NAND_VERSION_L8X_CONFIG
+ /* Used for Micron L8x parallel NAND */
+ switch (phy_page_num) {
+ case 2:
+ case 3:
+ case 248:
+ case 249:
+ case 252:
+ case 253:
+ case 254:
+ case 255:
+ ret = -1; /* These pages are SLC pages. */
+ break;
+ default:
+ if (phy_page_num % 4 < 2) {
+ /* A remainder of 0 or 1 means a lower page,
+ * 2 or 3 means an upper page. */
+ ret = 1; /* Lower page */
+ } else {
+ ret = 0; /* Upper page */
+ }
+ break;
+ }
+#elif MICRON_NAND_VERSION_L7X_CONFIG
+ /* Used for Micron L7x NAND */
+ switch (phy_page_num) {
+ case 0:
+ case 1:
+ ret = 1;
+ break;
+ case 4:
+ case 5:
+ case 254:
+ case 255: /* Pages 4, 5, 254 and 255 are upper pages */
+ ret = 0;
+ break;
+ default:
+ if (phy_page_num % 4 > 1)
+ /* 2, 3 are lower pages; 0, 1 are upper pages */
+ ret = 1; /* Lower page */
+ else
+ ret = 0;
+ break;
+ }
+#endif
+ return ret;
+}
+
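+/**
+ * get_next_lowerpage - return the next lower-page number after @page.
+ *
+ * Follows the same Micron L7x/L8x paired-page tables as is_lowerpage().
+ * Returns -1 when there is no further lower page in the block.
+ */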
+static int get_next_lowerpage(int page)
+{
+ int ret;
+
+#if MICRON_NAND_VERSION_L8X_CONFIG
+ if (page == 254)
+ ret = 255;
+ if (page >= 255)
+ return -1;
+ /* Used for L8x Micron NAND */
+ if (page % 4 < 2) {
+ if (page % 2 == 0)
+ ret = page + 1;
+ else {
+ if ((page == 1) || (page == 3))
+ ret = page + 1;
+ else
+ ret = page + 3;
+ }
+
+ } else {
+ if (page % 2 == 0)
+ ret = page + 2;
+ else
+ ret = page + 1;
+ }
+
+#elif MICRON_NAND_VERSION_L7X_CONFIG
+
+ /* Used for Micron L7x NAND */
+ switch (page) {
+ case 0:
+ case 1:
+ case 2:
+ ret = page + 1;
+ break;
+ case 4:
+ case 5:
+ ret = page + 2;
+ break;
+ case 254:
+ case 255: /* 254 and 255 are upper pages */
+ ret = -1;
+ break;
+ default:
+ if (page % 4 > 1) { /* 2, 3 are lower pages; 0, 1 are upper pages */
+ if (page % 2 == 0)
+ ret = page + 1;
+ else
+ ret = page + 3;
+ } else {
+ if (page % 2 == 0)
+ ret = page + 2;
+ else
+ ret = page + 1;
+
+ }
+ break;
+ }
+#endif
+ return ret;
+}
+
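+/**
+ * get_oppo_plane_seq - return the plane opposite to the one @addr sits on.
+ *
+ * Even-numbered physical eraseblocks belong to plane 0 and odd-numbered
+ * ones to plane 1, so the backup copy can be placed on the other plane
+ * for a dual-plane program operation.
+ */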
+static int get_oppo_plane_seq(struct ubi_device *ubi, loff_t addr)
+{
+ int block_num;
+
+ block_num = (int)(addr >> ubi->mtd->erasesize_shift);
+
+ if (block_num % 2)
+ return 0;
+ else
+ return 1;
+}
+
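+/**
+ * check_original_data - compare a backup page with its original page.
+ *
+ * @bak_addr is the flash address of the backup copy; @oob_ops carries the
+ * backup page data and, in its OOB buffer, the address of the original
+ * page. Returns 1 if the original page differs from the backup (it was
+ * corrupted), 0 if both match or the original block has been erased, and
+ * a negative value on read or allocation errors.
+ */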
+static int check_original_data(struct ubi_device *ubi, loff_t bak_addr,
+ struct mtd_oob_ops *oob_ops)
+{
+ loff_t addr = *(loff_t *)(oob_ops->oobbuf);
+ void *buf_back = oob_ops->datbuf;
+ int ret = -1, i;
+
+ struct mtd_oob_ops *temp_oob_ops = kmalloc(sizeof(struct mtd_oob_ops),
+ GFP_KERNEL);
+
+ if (!temp_oob_ops) {
+ ubi_err(ubi, "[%d]Error, kmalloc failed!\n", __LINE__);
+ goto out0;
+ }
+
+ temp_oob_ops->datbuf = kmalloc(ubi->min_io_size, GFP_KERNEL);
+ if (!temp_oob_ops->datbuf) {
+ ubi_err(ubi, "[%d]Error, kmalloc failed!\n", __LINE__);
+ goto out1;
+ }
+
+ temp_oob_ops->oobbuf = kmalloc(ubi->mtd->oobsize, GFP_KERNEL);
+ if (!temp_oob_ops->oobbuf) {
+ ubi_err(ubi, "[%d]Error, kmalloc failed!\n", __LINE__);
+ goto out2;
+ }
+
+ temp_oob_ops->mode = MTD_OPS_AUTO_OOB;
+ temp_oob_ops->ooblen = sizeof(loff_t);
+ temp_oob_ops->len = ubi->min_io_size;
+ temp_oob_ops->ooboffs = 0;
+ temp_oob_ops->retlen = 0;
+ temp_oob_ops->oobretlen = 0;
+
+ dbg_gen("%s:Source page addr = 0x%llx\n", __func__, addr);
+
+ ret = ubi->mtd->_read_oob(ubi->mtd, addr, temp_oob_ops);
+
+ if (ret < 0) /* Refresh this block */
+ goto out3;
+
+ if (*(loff_t *)(temp_oob_ops->oobbuf) != bak_addr) {
+ ubi_err(ubi, "[%d] source block has been erased.\n", __LINE__);
+ ret = 0;
+ goto out3;
+ }
+
+ for (i = 0; i < ubi->min_io_size; i += 4) {
+ if (*(u32 *)(temp_oob_ops->datbuf + i) !=
+ *(u32 *)((u8 *)buf_back + i)) {
+ ret = 1;
+ goto out3;
+ }
+ }
+ ret = 0;
+ dbg_gen("Backup data is all the same with original data\n");
+
+out3:
+ kfree(temp_oob_ops->oobbuf);
+
+out2:
+ kfree(temp_oob_ops->datbuf);
+
+out1:
+ kfree(temp_oob_ops);
+out0:
+ return ret;
+
+}
+
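+/**
+ * find_last_unempty_page - find the last programmed page of backup PEB @pnum.
+ *
+ * Pages are scanned backwards from the end of the eraseblock; a page whose
+ * OOB area is still all 0xFF counts as empty. Returns the page number,
+ * 0 if no data page has been programmed, or -1 on an uncorrectable
+ * read error.
+ */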
+static int find_last_unempty_page(struct ubi_device *ubi, int pnum,
+ struct mtd_oob_ops *oob_ops)
+{
+ struct mtd_info *mtd_back = ubi->mtd;
+ int page, start_page, err;
+
+ oob_ops->mode = MTD_OPS_AUTO_OOB;
+ oob_ops->ooblen = sizeof(loff_t);
+ oob_ops->len = ubi->min_io_size;
+ oob_ops->ooboffs = 0;
+ oob_ops->retlen = 0;
+ oob_ops->oobretlen = 0;
+
+ page = (mtd_back->erasesize - 1) >> mtd_back->writesize_shift;
+ start_page = ubi->leb_start >> mtd_back->writesize_shift;
+ for (; page >= start_page; page--) {
+ dbg_gen("[%s][%d]check page [%d]\n", __func__, __LINE__, page);
+ err = ubi->mtd->_read_oob(ubi->mtd,
+ (pnum << mtd_back->erasesize_shift) |
+ (page << mtd_back->writesize_shift), oob_ops);
+ if (err < 0 && err != -EUCLEAN)
+ return -1;
+
+ if (!ubi_check_pattern(oob_ops->oobbuf, 0xFF,
+ oob_ops->ooblen))
+ return page;
+ }
+ return 0;
+}
+
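+/**
+ * get_free_perch - grab a free slot in the BK[] recovery buffer array.
+ *
+ * Returns the index of a free slot and decrements the free-slot counter;
+ * the caller marks the slot used via BK[i].perch. Returns 0xFF when all
+ * UBI_BACKUP_VOLUME_EBS slots are taken.
+ */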
+static int get_free_perch(void)
+{
+ int i;
+
+ if (BK_free == 0)
+ dbg_gen("[%s]:No more BK for save backup info\n", __func__);
+ else {
+ for (i = 0; i < UBI_BACKUP_VOLUME_EBS; i++)
+ if (BK[i].perch == 0) {
+ BK_free -= 1;
+ return i;
+ }
+ }
+
+ return 0xFF;
+}
+
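+/**
+ * put_unused_perch - release BK[] slot @count and free its data/OOB buffers.
+ */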
+static int put_unused_perch(int count)
+{
+ BK[count].perch = 0;
+ BK_free++;
+ kfree(BK[count].datbuf);
+ kfree(BK[count].oobbuf);
+ return 0;
+}
+
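+/**
+ * get_block_from_bakvol - pick a backup block on @plane for @phy_page_num.
+ *
+ * Returns the tracked backup block whose next free lower page is exactly
+ * @phy_page_num, or failing that, the one with the lowest next free page
+ * below it. Returns NULL if no suitable block is in the backup list.
+ */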
+static struct ubi_bkblk_info *get_block_from_bakvol(struct ubi_device *ubi,
+ char plane, int phy_page_num)
+{
+ struct ubi_bkblk_tbl *backup_info = ubi->bkblk_tbl;
+ struct list_head *head = &backup_info->head;
+ struct ubi_bkblk_info *bbi, *tempbbi = NULL;
+
+ if (list_empty(head)) {
+ dbg_gen("%s:Backup info list is empty.\n", __func__);
+ return NULL;
+ }
+
+ list_for_each_entry(bbi, head, node) {
+ if (bbi->plane == plane) {
+ if (bbi->pgnum == phy_page_num) /* Perfect match */
+ return bbi;
+ else if (bbi->pgnum < phy_page_num) {
+ /* Make sure this page has not been programmed yet */
+ if (tempbbi == NULL)
+ tempbbi = bbi;
+ else {
+ if (tempbbi->pgnum > bbi->pgnum)
+ tempbbi = bbi;
+ }
+ }
+ }
+ }
+
+ return tempbbi;
+
+}
+
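+/**
+ * find_min_space_block - return the backup block with the least free space,
+ * i.e. the one with the highest next-free-page number.
+ */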
+static struct
+ubi_bkblk_info *find_min_space_block(struct ubi_bkblk_tbl *backup_info)
+{
+ struct list_head *head = &backup_info->head;
+ struct ubi_bkblk_info *bbi, *retbbi;
+
+ retbbi = list_first_entry(head, struct ubi_bkblk_info, node);
+
+ list_for_each_entry(bbi, head, node) {
+ if (bbi->pgnum > retbbi->pgnum)
+ retbbi = bbi;
+ }
+
+ return retbbi;
+}
+
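+/**
+ * allo_new_block_for_bakvol - allocate and map a fresh PEB on @plane for
+ * the backup volume and write its VID header.
+ *
+ * If all reserved PEBs of the backup volume are already in use, the
+ * fullest backup block is recycled first. Returns the new backup-block
+ * descriptor or NULL on failure.
+ */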
+static struct
+ubi_bkblk_info *allo_new_block_for_bakvol(struct ubi_device *ubi, u8 plane)
+{
+
+ struct ubi_vid_hdr *vid_hdr;
+ struct ubi_volume *vol;
+ struct ubi_bkblk_tbl *backup_info = ubi->bkblk_tbl;
+ struct ubi_bkblk_info *bbin, *newbbin;
+ int bc_plane0, bc_plane1;
+ int pnum;
+ int err;
+ int tries = 0;
+
+ vol = ubi->volumes[vol_id2idx(ubi, UBI_BACKUP_VOLUME_ID)];
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return NULL;
+
+ newbbin = kmalloc(sizeof(*newbbin), GFP_ATOMIC);
+ if (!newbbin) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(UBI_BACKUP_VOLUME_ID);
+ vid_hdr->compat = UBI_BACKUP_VOLUME_COMPAT;
+ vid_hdr->data_pad = cpu_to_be32(0);
+ vid_hdr->data_size = vid_hdr->used_ebs = vid_hdr->data_pad;
+
+retry:
+ pnum = ubi_wl_get_plane_peb(ubi, plane);
+
+ if (pnum < 0) {
+ err = pnum;
+ goto free_mem;
+ }
+
+ bc_plane0 = backup_info->bcount_of_plane[0];
+ bc_plane1 = backup_info->bcount_of_plane[1];
+
+ if (bc_plane0 + bc_plane1 >= vol->reserved_pebs) {
+ bbin = find_min_space_block(backup_info);
+ dbg_gen("%s:Log volume overfloat,move min space block.\n",
+ __func__);
+ if (vol->eba_tbl[bbin->lbn] >= 0) {
+ dbg_gen("%s:put this block.\n", __func__);
+ err = ubi_wl_put_peb(ubi, vol->vol_id, bbin->lbn,
+ bbin->pbn, 0);
+ if (err)
+ goto free_mem;
+ }
+
+ vol->eba_tbl[bbin->lbn] = pnum;
+ backup_info->bcount_of_plane[bbin->plane]--;
+ newbbin->lbn = bbin->lbn;
+
+ list_del(&bbin->node);
+
+ } else {
+ int i = 0;
+
+ for (i = 0; i < vol->reserved_pebs; i++)
+ if (vol->eba_tbl[i] == UBI_LEB_UNMAPPED) {
+ vol->eba_tbl[i] = pnum;
+ newbbin->lbn = i;
+ break;
+ }
+ }
+
+ vid_hdr->lnum = cpu_to_be32(newbbin->lbn);
+ if ((newbbin->lbn >= vol->reserved_pebs) || (newbbin->lbn < 0)) {
+ ubi_err(ubi, "BUG: logical block number out of range.\n");
+ panic("BUG!");
+
+ }
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+
+ if (err) {
+ ubi_err(ubi, "Failed to write VID header to PEB %d.\n", pnum);
+ goto write_error;
+ }
+
+ newbbin->pbn = pnum;
+ newbbin->plane = plane;
+ newbbin->pgnum = get_next_lowerpage(1); /* Skip page0 and page1 */
+
+ /* Init backup variable */
+ backup_info->bcount_of_plane[plane]++;
+ list_add(&newbbin->node, &backup_info->head);
+ corruption = 0;
+
+out:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return newbbin;
+
+write_error:
+ /*
+ * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
+ * get another one.
+ */
+ ubi_err(ubi, "Failed to write to PEB %d", pnum);
+ ubi_wl_put_peb(ubi, vol->vol_id, newbbin->lbn, newbbin->pbn, 1);
+ if (++tries > UBI_IO_RETRIES)
+ goto free_mem;
+
+ ubi_err(ubi, "Try again");
+ goto retry;
+
+free_mem:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ kfree(newbbin);
+ ubi_err(ubi, "%d failed to alloc a new block!\n", __LINE__);
+ return NULL;
+
+}
+
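+/**
+ * search_backup_page - find a BK[] slot holding a backup of page @page
+ * of PEB @peb. Returns the slot index or -1 if there is none.
+ */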
+int search_backup_page(int peb, int page)
+{
+ int i;
+
+ for (i = 0; i < UBI_BACKUP_VOLUME_EBS; i++) {
+
+ if (BK[i].perch == 1) {
+ if (BK[i].original_peb == peb &&
+ BK[i].original_page == page)
+ return i;
+ }
+
+ }
+ return -1;
+
+}
+
+#ifdef CONFIG_MTD_UBI_MLC_NAND_BAKVOL
+/**
+ * is_backup_need - decide whether the page at @addr needs a backup copy.
+ *
+ * Returns 1 if the page data should be backed up and 0 if it should not.
+ */
+int is_backup_need(struct ubi_device *ubi, loff_t addr)
+{
+ int phy_page_num, ret;
+
+ phy_page_num = (addr & ubi->mtd->erasesize_mask) >>
+ ubi->mtd->writesize_shift;
+ if (phy_page_num == 0 || phy_page_num == 1)
+ /* Currently we don't back up the EC and VID headers */
+ return 0;
+
+ ret = is_lowerpage(phy_page_num);
+
+ if (ret == -1)
+ ret = 0;
+
+ return ret;
+}
+
+/**
+ * ubi_check_backup_volume - check whether the backup volume has been built.
+ * @ubi: UBI device description object
+ *
+ * This function returns 1 if the backup volume has been built and is
+ * running, and 0 if it has not been found.
+ */
+int ubi_check_backup_volume(struct ubi_device *ubi)
+{
+
+ return (ubi->bkblk_tbl->volume_built == UBI_BAKVOL_RUN);
+}
+
+/**
+ * ubi_backup_data_to_backup_volume - back up lower-page data and program
+ * the original page in one dual-plane operation.
+ * @ubi: UBI device description object
+ * @addr: address that will be programmed
+ * @len: number of bytes to write
+ * @retlen: where the number of bytes actually written is stored
+ * @buf: buffer with the data to write
+ */
+int ubi_backup_data_to_backup_volume(struct ubi_device *ubi, loff_t addr,
+ size_t len, size_t *retlen, const void *buf)
+{
+ struct ubi_bkblk_tbl *backup_info = ubi->bkblk_tbl;
+ struct mtd_info *mtd_back = ubi->mtd;
+ int err = 0, nobak = 0;
+ int pnum;
+ u8 oppe_plane;
+ loff_t lpage_addr = 0x55555555; /* Lower page byte address */
+ struct ubi_bkblk_info *pbk;
+ int phy_page_num;
+
+ if (len > ubi->min_io_size) {
+ ubi_err(ubi, "%d: Write data len overflow [%d]\n",
+ __LINE__, len);
+ return -EROFS;
+ }
+
+ if (!buf) {
+ ubi_err(ubi, "%d: Write buf is NULL!\n", __LINE__);
+ return -EROFS;
+
+ }
+
+ oppe_plane = get_oppo_plane_seq(ubi, addr);
+ phy_page_num = (int)(addr & ubi->mtd->erasesize_mask) >>
+ ubi->mtd->writesize_shift;
+ pnum = (int)(addr >> ubi->mtd->erasesize_shift);
+
+ if (backup_info->bcount_of_plane[oppe_plane] == 0)
+ pbk = NULL;
+ else
+ pbk = get_block_from_bakvol(ubi, oppe_plane, phy_page_num);
+
+ if (pbk == NULL) {
+ /* No suitable backup block is cached, allocate a new one */
+ pbk = allo_new_block_for_bakvol(ubi, oppe_plane);
+
+ if (!pbk) {
+ ubi_err(ubi, "Allocate new block failed,only program source data.\n");
+ nobak = 1;
+ goto Only_source;
+ }
+ }
+
+ lpage_addr = (pbk->pbn << mtd_back->erasesize_shift) |
+ (phy_page_num << mtd_back->writesize_shift);
+
+ oob_ops_bak->datbuf = (uint8_t *)buf;
+ oob_ops_bak->mode = MTD_OPS_AUTO_OOB;
+ oob_ops_bak->ooblen = sizeof(loff_t);
+ *(loff_t *)(oob_ops_bak->oobbuf) = addr; /* Original page addr */
+ oob_ops_bak->len = len;
+ oob_ops_bak->ooboffs = 0;
+ oob_ops_bak->retlen = 0;
+ oob_ops_bak->oobretlen = 0;
+
+Only_source:
+ oob_ops_src->datbuf = (uint8_t *)buf;
+ oob_ops_src->mode = MTD_OPS_AUTO_OOB;
+ oob_ops_src->ooblen = sizeof(loff_t);
+ *(loff_t *)(oob_ops_src->oobbuf) = lpage_addr; /* backup page addr */
+ oob_ops_src->len = len;
+ oob_ops_src->ooboffs = 0;
+ oob_ops_src->retlen = 0;
+ oob_ops_src->oobretlen = 0;
+
+ if ((!oob_ops_bak) || (!oob_ops_src) ||
+ (!oob_ops_src->datbuf) || (!oob_ops_bak->datbuf))
+ ubi_err(ubi, "There is a NULL point.\n");
+
+ if (!nobak)
+ err = mtd_write_dual_plane_oob(ubi->mtd, lpage_addr,
+ oob_ops_bak, addr, oob_ops_src);
+
+ if ((err == -EOPNOTSUPP) || (nobak == 1)) {
+ ubi_err(ubi, "Warning:Dual plane program failed!There is an error...\n");
+ err = mtd_write_oob(ubi->mtd, addr, oob_ops_src);
+ goto out_unlock;
+ }
+
+ pbk->pgnum = get_next_lowerpage(phy_page_num);
+
+ /* FIXME, need to add bad block management code. */
+
+out_unlock:
+ if (err)
+ *retlen = 0;
+ else
+ *retlen = len;
+
+ return err;
+}
+
+
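+/**
+ * ubi_backup_volume_init - allocate the in-memory structures used by bakvol.
+ * @ubi: UBI device description object
+ *
+ * Backup is only supported on MLC NAND with at least 4 available OOB
+ * bytes; other devices leave the backup volume disabled. Returns zero
+ * in case of success and -1 if an allocation fails.
+ */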
+int ubi_backup_volume_init(struct ubi_device *ubi)
+{
+
+ if ((ubi->mtd->type != MTD_MLCNANDFLASH) || (ubi->mtd->oobavail < 4)) {
+ ubi->bkblk_tbl->volume_built = UBI_BAKVOL_UNONE;
+ return 0;
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ goto out_init;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
+
+ ubi->bkblk_tbl = kzalloc(sizeof(struct ubi_bkblk_tbl), GFP_KERNEL);
+ if (!ubi->bkblk_tbl)
+ goto out_vidh;
+
+ ubi->bkblk_tbl->bcount_of_plane[0] = 0;
+ ubi->bkblk_tbl->bcount_of_plane[1] = 0;
+ INIT_LIST_HEAD(&ubi->bkblk_tbl->head);
+
+ corruption = 0;
+
+ oob_ops_bak = kzalloc(sizeof(struct mtd_oob_ops), GFP_KERNEL);
+ if (!oob_ops_bak)
+ goto out_backup_info;
+
+ oob_ops_bak->oobbuf = kzalloc(ubi->mtd->oobsize, GFP_KERNEL);
+ if (!oob_ops_bak->oobbuf)
+ goto out_back_oob_ops;
+
+ oob_ops_src = kzalloc(sizeof(struct mtd_oob_ops), GFP_KERNEL);
+ if (!oob_ops_src)
+ goto out_obs_src;
+
+ oob_ops_src->oobbuf = kzalloc(ubi->mtd->oobsize, GFP_KERNEL);
+ if (!oob_ops_src->oobbuf)
+ goto out_oob_ops;
+
+ oob_ops = kmalloc(sizeof(struct mtd_oob_ops), GFP_KERNEL);
+ if (!oob_ops)
+ goto out0;
+
+ oob_ops->datbuf = kmalloc(ubi->min_io_size, GFP_KERNEL);
+ if (!oob_ops->datbuf)
+ goto out1;
+
+ oob_ops->oobbuf = kmalloc(ubi->mtd->oobsize, GFP_KERNEL);
+ if (!oob_ops->oobbuf)
+ goto out2;
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+ ubi->bkblk_tbl->volume_built = UBI_BAKVOL_INIT_INFO;
+ BK_free = UBI_BACKUP_VOLUME_EBS;
+ memset(BK, 0, sizeof(BK));
+ return 0;
+
+out2:
+ kfree(oob_ops->datbuf);
+out1:
+ kfree(oob_ops);
+out0:
+ kfree(oob_ops_src->oobbuf);
+out_oob_ops:
+ kfree(oob_ops_src);
+out_obs_src:
+ kfree(oob_ops_bak->oobbuf);
+out_back_oob_ops:
+ kfree(oob_ops_bak);
+out_backup_info:
+ kfree(ubi->bkblk_tbl);
+out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+ kfree(ech);
+out_init:
+ ubi_err(ubi, "Kmalloc error!\n");
+ return -1;
+}
+
+/**
+ * ubi_backup_volume_scan - check PEB @pnum during attach and record it if it
+ * belongs to the backup volume.
+ * @ubi: UBI device description object
+ * @vidh: VID header already read from the PEB
+ * @pnum: physical eraseblock number
+ *
+ * This function returns 1 if the PEB does not belong to the backup volume,
+ * 0 if it was processed as a backup block, and -1 in case of a resource
+ * allocation or compatibility error.
+ */
+int ubi_backup_volume_scan(struct ubi_device *ubi, struct ubi_vid_hdr *vidh,
+ int pnum)
+{
+ int page;
+ int vol_id;
+ uint8_t count;
+ struct ubi_bkblk_info *bbi;
+ loff_t bakpage_addr;
+ int lnum;
+ int ret;
+
+ /*
+ * Scan NAND flash to find the backup volume
+ */
+ vol_id = be32_to_cpu(vidh->vol_id);
+ if (vol_id == UBI_BACKUP_VOLUME_ID) {
+ /* Found backup volume */
+ lnum = be32_to_cpu(vidh->lnum);
+ dbg_gen("%s:Found backup volume,pnum is [%d],lnum is[%d]\n",
+ __func__, pnum, lnum);
+
+ /* Unsupported internal volume */
+ if (vidh->compat != UBI_COMPAT_REJECT) {
+ ubi_err(ubi, "Error,compat != UBI_COMPAT_REJECT\n");
+ return -1;
+ }
+ } else {
+ return 1;
+ }
+ /* find a backup block */
+ page = find_last_unempty_page(ubi, pnum, oob_ops);
+
+ if (page > 0) {
+ bakpage_addr = (pnum << ubi->mtd->erasesize_shift) |
+ (page << ubi->mtd->writesize_shift);
+ ret = check_original_data(ubi, bakpage_addr, oob_ops);
+ if (ret == 1) {
+ /* The original data is no longer intact;
+ * it has been damaged. */
+ corruption = 1;
+ count = get_free_perch();
+ if (count == 0xff) {
+ ubi_err(ubi, "Warning No free perch.\n");
+ return -1;
+ }
+
+ BK[count].datbuf = kmalloc(ubi->min_io_size,
+ GFP_KERNEL);
+
+ if (!BK[count].datbuf) {
+ ubi_err(ubi, "Kmallock error!\n");
+ goto out;
+ }
+ BK[count].original_data_addr =
+ *(loff_t *)(oob_ops->oobbuf);
+ BK[count].bk_pb = pnum;
+ BK[count].perch = 1;
+ memcpy(BK[count].datbuf, oob_ops->datbuf,
+ ubi->min_io_size);
+ }
+
+ bbi = kmalloc(sizeof(*bbi), GFP_ATOMIC);
+ if (!bbi)
+ goto out;
+ bbi->pbn = pnum;
+ bbi->lbn = lnum;
+ bbi->pgnum = get_next_lowerpage(page);
+ bbi->plane = pnum % 2;
+ bbi->plane ? (ubi->bkblk_tbl->bcount_of_plane[1]++) :
+ (ubi->bkblk_tbl->bcount_of_plane[0]++);
+ list_add(&bbi->node, &ubi->bkblk_tbl->head);
+ } else if (page == 0) {/* This backup block has not been programmed. */
+
+ bbi = kmalloc(sizeof(*bbi), GFP_ATOMIC);
+ if (!bbi)
+ goto out;
+ bbi->pbn = pnum;
+ bbi->lbn = lnum;
+ bbi->pgnum = get_next_lowerpage(1);
+ bbi->plane = pnum % 2;
+ bbi->plane ? (ubi->bkblk_tbl->bcount_of_plane[1]++) :
+ (ubi->bkblk_tbl->bcount_of_plane[0]++);
+ list_add(&bbi->node, &ubi->bkblk_tbl->head);
+ } else {
+
+ ubi_err(ubi, "[%d]:Read error,this block will be remove from backup volume.\n",
+ __LINE__);
+
+ bbi = kmalloc(sizeof(*bbi), GFP_ATOMIC);
+ if (!bbi)
+ goto out;
+ bbi->pbn = pnum;
+ bbi->lbn = lnum;
+ bbi->pgnum = ubi->leb_start >> ubi->mtd->writesize_shift;
+ bbi->plane = pnum % 2;
+ bbi->plane ? (ubi->bkblk_tbl->bcount_of_plane[1]++) :
+ (ubi->bkblk_tbl->bcount_of_plane[0]++);
+ list_add(&bbi->node, &ubi->bkblk_tbl->head);
+ }
+
+ return 0;
+
+out:
+ return -1;
+
+}
+
+/**
+ * ubi_backup_volume_init_tail - finish backup volume initialization.
+ * @ubi: UBI device description object
+ * @si: attaching/scanning information
+ *
+ * This function registers the backup volume with the UBI device and
+ * reserves its PEBs. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_backup_volume_init_tail(struct ubi_device *ubi,
+ struct ubi_attach_info *si)
+{
+ int reserved_pebs = 0;
+ struct ubi_volume *vol;
+ struct ubi_bkblk_info *bbi;
+
+ if (ubi->bkblk_tbl->volume_built == UBI_BAKVOL_UNONE)
+ return 0;
+
+ if (ubi->bkblk_tbl->volume_built != UBI_BAKVOL_INIT_INFO_DONE) {
+
+ /* Create and register the backup volume */
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ vol->reserved_pebs = UBI_BACKUP_VOLUME_EBS;
+ vol->alignment = 1;
+ vol->vol_type = UBI_DYNAMIC_VOLUME;
+ vol->name_len = sizeof(UBI_BACKUP_VOLUME_NAME) - 1;
+ vol->data_pad = 0;
+ memcpy(vol->name, UBI_BACKUP_VOLUME_NAME, vol->name_len + 1);
+
+ vol->usable_leb_size = ubi->leb_size;
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->reserved_pebs;
+ vol->used_bytes =
+ (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
+ vol->vol_id = UBI_BACKUP_VOLUME_ID;
+ vol->ref_count = UBI_BACKUP_VOLUME_EBS;
+ ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
+ reserved_pebs += vol->reserved_pebs;
+ ubi->vol_count += 1;
+ vol->ubi = ubi;
+ if (reserved_pebs > ubi->avail_pebs) {
+ ubi_err(ubi, "No enough PEBs, required %d, available %d",
+ reserved_pebs, ubi->avail_pebs);
+ return -1;
+ }
+ ubi->rsvd_pebs += reserved_pebs;
+ ubi->avail_pebs -= reserved_pebs;
+
+ ubi->bkblk_tbl->volume_built = UBI_BAKVOL_INIT_INFO_DONE;
+
+ list_for_each_entry(bbi, &(ubi->bkblk_tbl->head), node) {
+ dbg_gen("bkblk_tbl:pbn %d,pgnum %d,plane %d,lbn %d.\n",
+ bbi->pbn, bbi->pgnum, bbi->plane, bbi->lbn);
+ }
+ }
+ return 0;
+}
+
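+/**
+ * ubi_bad_data_recovery - repair LEBs corrupted by a power cut using the
+ * copies kept in the backup volume.
+ * @ubi: UBI device description object
+ *
+ * Each corrupted PEB recorded during scanning is read page by page; pages
+ * failing with -EBADMSG or -EIO are replaced by their backup copies and
+ * the repaired contents are written back with ubi_leb_change().
+ */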
+int ubi_bad_data_recovery(struct ubi_device *ubi)
+{
+ u32 base_offset, buff_offset, pnum, offset, correct, uncorrect, page;
+ int ret;
+ struct mtd_info *mtd_back = ubi->mtd;
+ struct ubi_volume *vol = ubi->volumes[vol_id2idx(ubi,
+ UBI_BACKUP_VOLUME_ID)];
+ unsigned char *buf = ubi->peb_buf;
+ struct ubi_volume_desc volume_desc;
+ int c, index;
+
+ if (!corruption) {
+ dbg_gen("[%s][%d] No corruptted block to be recovered!\n",
+ __func__, __LINE__);
+ return 0;
+ }
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ return -1;
+
+ correct = 0;
+ uncorrect = 0;
+ for (c = 0; c < vol->reserved_pebs; c++) {
+ if (BK[c].perch == 0)
+ continue;
+
+ pnum = BK[c].original_data_addr >> mtd_back->erasesize_shift;
+ offset = BK[c].original_data_addr & mtd_back->erasesize_mask;
+ buff_offset = 0;
+
+ dbg_gen("PEB:%d, corrupted offset 0x%x ", pnum, offset);
+
+ for (base_offset = ubi->leb_start; base_offset < ubi->peb_size;
+ base_offset += mtd_back->writesize) {
+ ret = ubi_io_read(ubi, buf + buff_offset, pnum,
+ base_offset, mtd_back->writesize);
+ if (ret == -EBADMSG || ret == -EIO) {
+ if (base_offset == offset) {
+ dbg_gen("Recovery.....\n");
+ memcpy(buf + buff_offset, BK[c].datbuf,
+ mtd_back->writesize);
+ put_unused_perch(c);
+ correct++;
+ } else {
+ page = base_offset >>
+ mtd_back->writesize_shift;
+ if (is_lowerpage(page)) {
+ /* Another lower page was
+ * also corrupted */
+ index = search_backup_page(pnum,
+ page);
+ if (index >= 0) {
+ /* There is a backup copy
+ * of this page */
+ memcpy(buf + buff_offset,
+ BK[index].datbuf,
+ mtd_back->writesize);
+ put_unused_perch(index);
+ correct++;
+ } else
+ /* A lower page without a
+ * backup was also corrupted */
+ uncorrect++;
+ } else
+ /* An upper page (never backed
+ * up) was also corrupted */
+ uncorrect++;
+ }
+
+ /* FIXME: how to handle unexpected
+ * errors on other pages. */
+ }
+
+ ret = ubi_check_pattern(buf + buff_offset, 0xFF,
+ mtd_back->writesize);
+ if (ret)
+ /* This page is empty, so the remaining
+ * pages are empty too; no need to read. */
+ break;
+
+ buff_offset += mtd_back->writesize;
+ }
+
+ if ((correct == 0) || (uncorrect > 0)) {
+ dbg_gen("Can not be recoverred.\n");
+ uncorrect = 0;
+ continue;
+ }
+
+ ret = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
+
+ volume_desc.vol = ubi->volumes[be32_to_cpu(vidh->vol_id)];
+ volume_desc.mode = UBI_READWRITE;
+ ret = ubi_leb_change(&volume_desc, be32_to_cpu(vidh->lnum),
+ buf, buff_offset);
+ correct = 0;
+ uncorrect = 0;
+ if (ret) {
+ ubi_err(ubi, "Changing %d bytes in LEB %d failed",
+ buff_offset, vidh->lnum);
+ ubi_err(ubi, "Error:%d.", ret);
+ dump_stack();
+ } else
+ dbg_gen(".....[Done]\n");
+ }
+
+ ubi_free_vid_hdr(ubi, vidh);
+ return 0;
+
+}
+
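+/**
+ * clear_bakvol - free the buffers allocated by ubi_backup_volume_init().
+ * @ubi: UBI device description object
+ */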
+void clear_bakvol(struct ubi_device *ubi)
+{
+ kfree(oob_ops->datbuf);
+ kfree(oob_ops);
+ kfree(oob_ops_src->oobbuf);
+ kfree(oob_ops_src);
+ kfree(oob_ops_bak->oobbuf);
+ kfree(oob_ops_bak);
+ kfree(ubi->bkblk_tbl);
+ corruption = 0;
+}
+
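+/**
+ * init_bakvol - enable or disable the backup mechanism at runtime.
+ * @desc: UBI volume descriptor
+ * @choice: non-zero enables backup operation, zero disables it
+ */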
+void init_bakvol(struct ubi_volume_desc *desc, uint8_t choice)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ if (choice) {
+
+ ubi->bkblk_tbl->volume_built = UBI_BAKVOL_RUN;
+ dbg_gen("[%s][%d] Enable backup operation!\n",
+ __func__, __LINE__);
+
+ } else {
+
+ ubi->bkblk_tbl->volume_built = UBI_BAKVOL_INIT_INFO_DONE;
+ dbg_gen("[%s][%d] Disable backup operation!\n",
+ __func__, __LINE__);
+ }
+}
+EXPORT_SYMBOL_GPL(init_bakvol);
+#else
+int is_backup_need(struct ubi_device *ubi, loff_t addr)
+{
+ return 0;
+}
+
+int ubi_check_backup_volume(struct ubi_device *ubi)
+{
+
+ return 0;
+}
+
+int ubi_backup_data_to_backup_volume(struct ubi_device *ubi, loff_t addr,
+ size_t len, size_t *retlen, const void *buf)
+{
+ return 0;
+}
+int ubi_backup_volume_init(struct ubi_device *ubi)
+{
+ return 0;
+
+}
+
+int ubi_backup_volume_scan(struct ubi_device *ubi, struct ubi_vid_hdr *vidh,
+ int pnum)
+{
+ return 0;
+}
+
+int ubi_backup_volume_init_tail(struct ubi_device *ubi,
+ struct ubi_attach_info *si)
+{
+ return 0;
+}
+
+int ubi_bad_data_recovery(struct ubi_device *ubi)
+{
+ return 0;
+
+}
+
+void init_bakvol(struct ubi_volume_desc *desc, uint8_t choice)
+{
+
+
+}
+EXPORT_SYMBOL_GPL(init_bakvol);
+#endif
+
+MODULE_LICENSE("Dual MPL/GPL");
+MODULE_AUTHOR("Bean Huo <jackyard88@xxxxxxxxx>");
+MODULE_DESCRIPTION("Support code for MLC NAND pair page powerloss protection");
--
1.9.1