mirror of https://github.com/hak5/openwrt-owl.git
From d2981ca1343b837fc574c4e46806d041b258720d Mon Sep 17 00:00:00 2001
From: Andy Gross <agross@codeaurora.org>
Date: Mon, 16 Jun 2014 17:13:22 -0500
Subject: [PATCH 150/182] mtd: nand: Add Qualcomm NAND controller

This patch adds the Qualcomm NAND controller and required infrastructure.

Signed-off-by: Andy Gross <agross@codeaurora.org>
---
 drivers/mtd/nand/Kconfig | 18 +
 drivers/mtd/nand/Makefile | 2 +
 drivers/mtd/nand/qcom_adm_dma.c | 797 +++++
 drivers/mtd/nand/qcom_adm_dma.h | 268 ++
 drivers/mtd/nand/qcom_nand.c | 7455 +++++++++++++++++++++++++++++++++++++++
 drivers/mtd/nand/qcom_nand.h | 196 +
 6 files changed, 8736 insertions(+)
 create mode 100644 drivers/mtd/nand/qcom_adm_dma.c
 create mode 100644 drivers/mtd/nand/qcom_adm_dma.h
 create mode 100644 drivers/mtd/nand/qcom_nand.c
 create mode 100644 drivers/mtd/nand/qcom_nand.h

--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -510,4 +510,22 @@ config MTD_NAND_XWAY
 	  Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
 	  to the External Bus Unit (EBU).
 
+config MTD_QCOM_DMA
+	tristate "QCOM NAND DMA Support"
+	depends on ARCH_QCOM && MTD_QCOM_NAND
+	default n
+	help
+	  DMA support for QCOM NAND
+
+config MTD_QCOM_NAND
+	tristate "QCOM NAND Device Support"
+	depends on MTD && ARCH_QCOM
+	select CRC16
+	select BITREVERSE
+	select MTD_NAND_IDS
+	select MTD_QCOM_DMA
+	default y
+	help
+	  Support for some NAND chips connected to the QCOM NAND controller.
+
 endif # MTD_NAND
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -49,5 +49,7 @@ obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740
 obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
 obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
 obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
+obj-$(CONFIG_MTD_QCOM_NAND) += qcom_nand.o
+obj-$(CONFIG_MTD_QCOM_DMA) += qcom_adm_dma.o
 
 nand-objs := nand_base.o nand_bbt.o
--- /dev/null
|
|
+++ b/drivers/mtd/nand/qcom_adm_dma.c
|
|
@@ -0,0 +1,797 @@
|
|
+/* * Copyright (c) 2012 The Linux Foundation. All rights reserved.* */
|
|
+/* linux/arch/arm/mach-msm/dma.c
|
|
+ *
|
|
+ * Copyright (C) 2007 Google, Inc.
|
|
+ * Copyright (c) 2008-2010, 2012 The Linux Foundation. All rights reserved.
|
|
+ *
|
|
+ * This software is licensed under the terms of the GNU General Public
|
|
+ * License version 2, as published by the Free Software Foundation, and
|
|
+ * may be copied, distributed, and modified under those terms.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include <linux/clk.h>
|
|
+#include <linux/err.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/pm_runtime.h>
|
|
+#include <linux/reset.h>
|
|
+#include <linux/reset-controller.h>
|
|
+#include "qcom_adm_dma.h"
|
|
+
|
|
+#define MODULE_NAME "msm_dmov"
|
|
+
|
|
+#define MSM_DMOV_CHANNEL_COUNT 16
|
|
+#define MSM_DMOV_CRCI_COUNT 16
|
|
+
|
|
+enum {
|
|
+ CLK_DIS,
|
|
+ CLK_TO_BE_DIS,
|
|
+ CLK_EN
|
|
+};
|
|
+
|
|
+struct msm_dmov_ci_conf {
|
|
+ int start;
|
|
+ int end;
|
|
+ int burst;
|
|
+};
|
|
+
|
|
+struct msm_dmov_crci_conf {
|
|
+ int sd;
|
|
+ int blk_size;
|
|
+};
|
|
+
|
|
+struct msm_dmov_chan_conf {
|
|
+ int sd;
|
|
+ int block;
|
|
+ int priority;
|
|
+};
|
|
+
|
|
+struct msm_dmov_conf {
|
|
+ void *base;
|
|
+ struct msm_dmov_crci_conf *crci_conf;
|
|
+ struct msm_dmov_chan_conf *chan_conf;
|
|
+ int channel_active;
|
|
+ int sd;
|
|
+ size_t sd_size;
|
|
+ struct list_head staged_commands[MSM_DMOV_CHANNEL_COUNT];
|
|
+ struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
|
|
+ struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
|
|
+ struct mutex lock;
|
|
+ spinlock_t list_lock;
|
|
+ unsigned int irq;
|
|
+ struct clk *clk;
|
|
+ struct clk *pclk;
|
|
+ struct clk *ebiclk;
|
|
+ unsigned int clk_ctl;
|
|
+ struct delayed_work work;
|
|
+ struct workqueue_struct *cmd_wq;
|
|
+
|
|
+ struct reset_control *adm_reset;
|
|
+ struct reset_control *pbus_reset;
|
|
+ struct reset_control *c0_reset;
|
|
+ struct reset_control *c1_reset;
|
|
+ struct reset_control *c2_reset;
|
|
+
|
|
+};
|
|
+
|
|
+static void msm_dmov_clock_work(struct work_struct *);
|
|
+
|
|
+#define DMOV_CRCI_DEFAULT_CONF { .sd = 0, .blk_size = 0 }
|
|
+#define DMOV_CRCI_CONF(secd, blk) { .sd = secd, .blk_size = blk }
|
|
+
|
|
+static struct msm_dmov_crci_conf adm_crci_conf[] = {
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_CONF(0, 1),
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+ DMOV_CRCI_DEFAULT_CONF,
|
|
+};
|
|
+
|
|
+#define DMOV_CHANNEL_DEFAULT_CONF { .sd = 0, .block = 0, .priority = 1 }
|
|
+
|
|
+static struct msm_dmov_chan_conf adm_chan_conf[] = {
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+ DMOV_CHANNEL_DEFAULT_CONF,
|
|
+};
|
|
+
|
|
+#define DMOV_IRQ_TO_ADM(irq) 0
|
|
+
|
|
+static struct msm_dmov_conf dmov_conf[] = {
|
|
+ {
|
|
+ .crci_conf = adm_crci_conf,
|
|
+ .chan_conf = adm_chan_conf,
|
|
+ .lock = __MUTEX_INITIALIZER(dmov_conf[0].lock),
|
|
+ .list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
|
|
+ .clk_ctl = CLK_EN,
|
|
+ .work = __DELAYED_WORK_INITIALIZER(dmov_conf[0].work,
|
|
+ msm_dmov_clock_work,0),
|
|
+ }
|
|
+};
|
|
+
|
|
+#define MSM_DMOV_ID_COUNT (MSM_DMOV_CHANNEL_COUNT * ARRAY_SIZE(dmov_conf))
|
|
+#define DMOV_REG(name, adm) ((name) + (dmov_conf[adm].base) +\
|
|
+ (dmov_conf[adm].sd * dmov_conf[adm].sd_size))
|
|
+#define DMOV_ID_TO_ADM(id) ((id) / MSM_DMOV_CHANNEL_COUNT)
|
|
+#define DMOV_ID_TO_CHAN(id) ((id) % MSM_DMOV_CHANNEL_COUNT)
|
|
+#define DMOV_CHAN_ADM_TO_ID(ch, adm) ((ch) + (adm) * MSM_DMOV_CHANNEL_COUNT)
|
|
+
|
|
+enum {
|
|
+ MSM_DMOV_PRINT_ERRORS = 1,
|
|
+ MSM_DMOV_PRINT_IO = 2,
|
|
+ MSM_DMOV_PRINT_FLOW = 4
|
|
+};
|
|
+
|
|
+unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS;
|
|
+
|
|
+#define MSM_DMOV_DPRINTF(mask, format, args...) \
|
|
+ do { \
|
|
+ if ((mask) & msm_dmov_print_mask) \
|
|
+ printk(KERN_ERR format, args); \
|
|
+ } while (0)
|
|
+#define PRINT_ERROR(format, args...) \
|
|
+ MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_ERRORS, format, args);
|
|
+#define PRINT_IO(format, args...) \
|
|
+ MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_IO, format, args);
|
|
+#define PRINT_FLOW(format, args...) \
|
|
+ MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_FLOW, format, args);
|
|
+
|
|
+static int msm_dmov_clk_on(int adm)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+return 0;
|
|
+ ret = clk_prepare_enable(dmov_conf[adm].clk);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ if (dmov_conf[adm].pclk) {
|
|
+ ret = clk_prepare_enable(dmov_conf[adm].pclk);
|
|
+ if (ret) {
|
|
+ clk_disable_unprepare(dmov_conf[adm].clk);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ if (dmov_conf[adm].ebiclk) {
|
|
+ ret = clk_prepare_enable(dmov_conf[adm].ebiclk);
|
|
+ if (ret) {
|
|
+ if (dmov_conf[adm].pclk)
|
|
+ clk_disable_unprepare(dmov_conf[adm].pclk);
|
|
+ clk_disable_unprepare(dmov_conf[adm].clk);
|
|
+ }
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void msm_dmov_clk_off(int adm)
|
|
+{
|
|
+#if 0
|
|
+ if (dmov_conf[adm].ebiclk)
|
|
+ clk_disable_unprepare(dmov_conf[adm].ebiclk);
|
|
+ if (dmov_conf[adm].pclk)
|
|
+ clk_disable_unprepare(dmov_conf[adm].pclk);
|
|
+ clk_disable_unprepare(dmov_conf[adm].clk);
|
|
+#endif
|
|
+}
|
|
+
|
|
+static void msm_dmov_clock_work(struct work_struct *work)
|
|
+{
|
|
+ struct msm_dmov_conf *conf =
|
|
+ container_of(to_delayed_work(work), struct msm_dmov_conf, work);
|
|
+ int adm = DMOV_IRQ_TO_ADM(conf->irq);
|
|
+ mutex_lock(&conf->lock);
|
|
+ if (conf->clk_ctl == CLK_TO_BE_DIS) {
|
|
+ BUG_ON(conf->channel_active);
|
|
+ msm_dmov_clk_off(adm);
|
|
+ conf->clk_ctl = CLK_DIS;
|
|
+ }
|
|
+ mutex_unlock(&conf->lock);
|
|
+}
|
|
+
|
|
+enum {
|
|
+ NOFLUSH = 0,
|
|
+ GRACEFUL,
|
|
+ NONGRACEFUL,
|
|
+};
|
|
+
|
|
+/* Caller must hold the list lock */
|
|
+static struct msm_dmov_cmd *start_ready_cmd(unsigned ch, int adm)
|
|
+{
|
|
+ struct msm_dmov_cmd *cmd;
|
|
+
|
|
+ if (list_empty(&dmov_conf[adm].ready_commands[ch])) {
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ cmd = list_entry(dmov_conf[adm].ready_commands[ch].next, typeof(*cmd),
|
|
+ list);
|
|
+ list_del(&cmd->list);
|
|
+ if (cmd->exec_func)
|
|
+ cmd->exec_func(cmd);
|
|
+ list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
|
|
+ if (!dmov_conf[adm].channel_active) {
|
|
+ enable_irq(dmov_conf[adm].irq);
|
|
+ }
|
|
+ dmov_conf[adm].channel_active |= BIT(ch);
|
|
+ PRINT_IO("msm dmov enqueue command, %x, ch %d\n", cmd->cmdptr, ch);
|
|
+ writel_relaxed(cmd->cmdptr, DMOV_REG(DMOV_CMD_PTR(ch), adm));
|
|
+
|
|
+ return cmd;
|
|
+}
|
|
+
|
|
+static void msm_dmov_enqueue_cmd_ext_work(struct work_struct *work)
|
|
+{
|
|
+ struct msm_dmov_cmd *cmd =
|
|
+ container_of(work, struct msm_dmov_cmd, work);
|
|
+ unsigned id = cmd->id;
|
|
+ unsigned status;
|
|
+ unsigned long flags;
|
|
+ int adm = DMOV_ID_TO_ADM(id);
|
|
+ int ch = DMOV_ID_TO_CHAN(id);
|
|
+
|
|
+ mutex_lock(&dmov_conf[adm].lock);
|
|
+ if (dmov_conf[adm].clk_ctl == CLK_DIS) {
|
|
+ status = msm_dmov_clk_on(adm);
|
|
+ if (status != 0)
|
|
+ goto error;
|
|
+ }
|
|
+ dmov_conf[adm].clk_ctl = CLK_EN;
|
|
+
|
|
+ spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);
|
|
+
|
|
+ cmd = list_entry(dmov_conf[adm].staged_commands[ch].next, typeof(*cmd),
|
|
+ list);
|
|
+ list_del(&cmd->list);
|
|
+ list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
|
|
+ status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
|
|
+ if (status & DMOV_STATUS_CMD_PTR_RDY) {
|
|
+ PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n",
|
|
+ id, status);
|
|
+ cmd = start_ready_cmd(ch, adm);
|
|
+ /*
|
|
+ * We added something to the ready list, and still hold the
|
|
+ * list lock. Thus, no need to check for cmd == NULL
|
|
+ */
|
|
+ if (cmd->toflush) {
|
|
+ int flush = (cmd->toflush == GRACEFUL) ? 1 << 31 : 0;
|
|
+ writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
|
|
+ }
|
|
+ } else {
|
|
+ cmd->toflush = 0;
|
|
+ if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
|
|
+ !list_empty(&dmov_conf[adm].ready_commands[ch]))
|
|
+ PRINT_ERROR("msm_dmov_enqueue_cmd_ext(%d), stalled, "
|
|
+ "status %x\n", id, status);
|
|
+ PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status "
|
|
+ "%x\n", id, status);
|
|
+ }
|
|
+ if (!dmov_conf[adm].channel_active) {
|
|
+ dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
|
|
+ schedule_delayed_work(&dmov_conf[adm].work, (HZ/10));
|
|
+ }
|
|
+ spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
|
|
+error:
|
|
+ mutex_unlock(&dmov_conf[adm].lock);
|
|
+}
|
|
+
|
|
+static void __msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
|
|
+{
|
|
+ int adm = DMOV_ID_TO_ADM(id);
|
|
+ int ch = DMOV_ID_TO_CHAN(id);
|
|
+ unsigned long flags;
|
|
+ cmd->id = id;
|
|
+ cmd->toflush = 0;
|
|
+
|
|
+ spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);
|
|
+ list_add_tail(&cmd->list, &dmov_conf[adm].staged_commands[ch]);
|
|
+ spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
|
|
+
|
|
+ queue_work(dmov_conf[adm].cmd_wq, &cmd->work);
|
|
+}
|
|
+
|
|
+void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
|
|
+{
|
|
+ INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
|
|
+ __msm_dmov_enqueue_cmd_ext(id, cmd);
|
|
+}
|
|
+EXPORT_SYMBOL(msm_dmov_enqueue_cmd_ext);
|
|
+
|
|
+void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
|
|
+{
|
|
+ /* Disable callback function (for backwards compatibility) */
|
|
+ cmd->exec_func = NULL;
|
|
+ INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
|
|
+ __msm_dmov_enqueue_cmd_ext(id, cmd);
|
|
+}
|
|
+EXPORT_SYMBOL(msm_dmov_enqueue_cmd);
|
|
+
|
|
+void msm_dmov_flush(unsigned int id, int graceful)
|
|
+{
|
|
+ unsigned long irq_flags;
|
|
+ int ch = DMOV_ID_TO_CHAN(id);
|
|
+ int adm = DMOV_ID_TO_ADM(id);
|
|
+ int flush = graceful ? DMOV_FLUSH_TYPE : 0;
|
|
+ struct msm_dmov_cmd *cmd;
|
|
+
|
|
+ spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
|
|
+ /* XXX not checking if flush cmd sent already */
|
|
+ if (!list_empty(&dmov_conf[adm].active_commands[ch])) {
|
|
+ PRINT_IO("msm_dmov_flush(%d), send flush cmd\n", id);
|
|
+ writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
|
|
+ }
|
|
+ list_for_each_entry(cmd, &dmov_conf[adm].staged_commands[ch], list)
|
|
+ cmd->toflush = graceful ? GRACEFUL : NONGRACEFUL;
|
|
+ /* spin_unlock_irqrestore has the necessary barrier */
|
|
+ spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);
|
|
+}
|
|
+EXPORT_SYMBOL(msm_dmov_flush);
|
|
+
|
|
+struct msm_dmov_exec_cmdptr_cmd {
|
|
+ struct msm_dmov_cmd dmov_cmd;
|
|
+ struct completion complete;
|
|
+ unsigned id;
|
|
+ unsigned int result;
|
|
+ struct msm_dmov_errdata err;
|
|
+};
|
|
+
|
|
+static void
|
|
+dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd,
|
|
+ unsigned int result,
|
|
+ struct msm_dmov_errdata *err)
|
|
+{
|
|
+ struct msm_dmov_exec_cmdptr_cmd *cmd = container_of(_cmd, struct msm_dmov_exec_cmdptr_cmd, dmov_cmd);
|
|
+ cmd->result = result;
|
|
+ if (result != 0x80000002 && err)
|
|
+ memcpy(&cmd->err, err, sizeof(struct msm_dmov_errdata));
|
|
+
|
|
+ complete(&cmd->complete);
|
|
+}
|
|
+
|
|
+int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
|
|
+{
|
|
+ struct msm_dmov_exec_cmdptr_cmd cmd;
|
|
+
|
|
+ PRINT_FLOW("dmov_exec_cmdptr(%d, %x)\n", id, cmdptr);
|
|
+
|
|
+ cmd.dmov_cmd.cmdptr = cmdptr;
|
|
+ cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
|
|
+ cmd.dmov_cmd.exec_func = NULL;
|
|
+ cmd.id = id;
|
|
+ cmd.result = 0;
|
|
+ INIT_WORK_ONSTACK(&cmd.dmov_cmd.work, msm_dmov_enqueue_cmd_ext_work);
|
|
+ init_completion(&cmd.complete);
|
|
+
|
|
+ __msm_dmov_enqueue_cmd_ext(id, &cmd.dmov_cmd);
|
|
+ wait_for_completion_timeout(&cmd.complete, msecs_to_jiffies(1000));
|
|
+
|
|
+ if (cmd.result != 0x80000002) {
|
|
+ PRINT_ERROR("dmov_exec_cmdptr(%d): ERROR, result: %x\n", id, cmd.result);
|
|
+ PRINT_ERROR("dmov_exec_cmdptr(%d): flush: %x %x %x %x\n",
|
|
+ id, cmd.err.flush[0], cmd.err.flush[1], cmd.err.flush[2], cmd.err.flush[3]);
|
|
+ return -EIO;
|
|
+ }
|
|
+ PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr);
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(msm_dmov_exec_cmd);
|
|
+
|
|
+static void fill_errdata(struct msm_dmov_errdata *errdata, int ch, int adm)
|
|
+{
|
|
+ errdata->flush[0] = readl_relaxed(DMOV_REG(DMOV_FLUSH0(ch), adm));
|
|
+ errdata->flush[1] = readl_relaxed(DMOV_REG(DMOV_FLUSH1(ch), adm));
|
|
+ errdata->flush[2] = 0;
|
|
+ errdata->flush[3] = readl_relaxed(DMOV_REG(DMOV_FLUSH3(ch), adm));
|
|
+ errdata->flush[4] = readl_relaxed(DMOV_REG(DMOV_FLUSH4(ch), adm));
|
|
+ errdata->flush[5] = readl_relaxed(DMOV_REG(DMOV_FLUSH5(ch), adm));
|
|
+}
|
|
+
|
|
+static irqreturn_t msm_dmov_isr(int irq, void *dev_id)
|
|
+{
|
|
+ unsigned int int_status;
|
|
+ unsigned int mask;
|
|
+ unsigned int id;
|
|
+ unsigned int ch;
|
|
+ unsigned long irq_flags;
|
|
+ unsigned int ch_status;
|
|
+ unsigned int ch_result;
|
|
+ unsigned int valid = 0;
|
|
+ struct msm_dmov_cmd *cmd;
|
|
+ int adm = DMOV_IRQ_TO_ADM(irq);
|
|
+
|
|
+ mutex_lock(&dmov_conf[adm].lock);
|
|
+ /* read and clear isr */
|
|
+ int_status = readl_relaxed(DMOV_REG(DMOV_ISR, adm));
|
|
+ PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);
|
|
+
|
|
+ spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
|
|
+ while (int_status) {
|
|
+ mask = int_status & -int_status;
|
|
+ ch = fls(mask) - 1;
|
|
+ id = DMOV_CHAN_ADM_TO_ID(ch, adm);
|
|
+ PRINT_FLOW("msm_datamover_irq_handler %08x %08x id %d\n", int_status, mask, id);
|
|
+ int_status &= ~mask;
|
|
+ ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
|
|
+ if (!(ch_status & DMOV_STATUS_RSLT_VALID)) {
|
|
+ PRINT_FLOW("msm_datamover_irq_handler id %d, "
|
|
+ "result not valid %x\n", id, ch_status);
|
|
+ continue;
|
|
+ }
|
|
+ do {
|
|
+ valid = 1;
|
|
+ ch_result = readl_relaxed(DMOV_REG(DMOV_RSLT(ch), adm));
|
|
+ if (list_empty(&dmov_conf[adm].active_commands[ch])) {
|
|
+ PRINT_ERROR("msm_datamover_irq_handler id %d, got result "
|
|
+ "with no active command, status %x, result %x\n",
|
|
+ id, ch_status, ch_result);
|
|
+ cmd = NULL;
|
|
+ } else {
|
|
+ cmd = list_entry(dmov_conf[adm].
|
|
+ active_commands[ch].next, typeof(*cmd),
|
|
+ list);
|
|
+ }
|
|
+ PRINT_FLOW("msm_datamover_irq_handler id %d, status %x, result %x\n", id, ch_status, ch_result);
|
|
+ if (ch_result & DMOV_RSLT_DONE) {
|
|
+ PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
|
|
+ id, ch_status);
|
|
+ PRINT_IO("msm_datamover_irq_handler id %d, got result "
|
|
+ "for %p, result %x\n", id, cmd, ch_result);
|
|
+ if (cmd) {
|
|
+ list_del(&cmd->list);
|
|
+ cmd->complete_func(cmd, ch_result, NULL);
|
|
+ }
|
|
+ }
|
|
+ if (ch_result & DMOV_RSLT_FLUSH) {
|
|
+ struct msm_dmov_errdata errdata;
|
|
+
|
|
+ fill_errdata(&errdata, ch, adm);
|
|
+ PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
|
|
+ PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
|
|
+ if (cmd) {
|
|
+ list_del(&cmd->list);
|
|
+ cmd->complete_func(cmd, ch_result, &errdata);
|
|
+ }
|
|
+ }
|
|
+ if (ch_result & DMOV_RSLT_ERROR) {
|
|
+ struct msm_dmov_errdata errdata;
|
|
+
|
|
+ fill_errdata(&errdata, ch, adm);
|
|
+
|
|
+ PRINT_ERROR("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
|
|
+ PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
|
|
+ if (cmd) {
|
|
+ list_del(&cmd->list);
|
|
+ cmd->complete_func(cmd, ch_result, &errdata);
|
|
+ }
|
|
+ /* this does not seem to work, once we get an error */
|
|
+ /* the datamover will no longer accept commands */
|
|
+ writel_relaxed(0, DMOV_REG(DMOV_FLUSH0(ch),
|
|
+ adm));
|
|
+ }
|
|
+ rmb();
|
|
+ ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch),
|
|
+ adm));
|
|
+ PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
|
|
+ if (ch_status & DMOV_STATUS_CMD_PTR_RDY)
|
|
+ start_ready_cmd(ch, adm);
|
|
+ } while (ch_status & DMOV_STATUS_RSLT_VALID);
|
|
+ if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
|
|
+ list_empty(&dmov_conf[adm].ready_commands[ch]))
|
|
+ dmov_conf[adm].channel_active &= ~(1U << ch);
|
|
+ PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
|
|
+ }
|
|
+ spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);
|
|
+
|
|
+ if (!dmov_conf[adm].channel_active && valid) {
|
|
+ disable_irq_nosync(dmov_conf[adm].irq);
|
|
+ dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
|
|
+ schedule_delayed_work(&dmov_conf[adm].work, (HZ/10));
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&dmov_conf[adm].lock);
|
|
+
|
|
+ return valid ? IRQ_HANDLED : IRQ_NONE;
|
|
+}
|
|
+
|
|
+static int msm_dmov_suspend_late(struct device *dev)
|
|
+{
|
|
+ struct platform_device *pdev = to_platform_device(dev);
|
|
+ int adm = (pdev->id >= 0) ? pdev->id : 0;
|
|
+ mutex_lock(&dmov_conf[adm].lock);
|
|
+ if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
|
|
+ BUG_ON(dmov_conf[adm].channel_active);
|
|
+ msm_dmov_clk_off(adm);
|
|
+ dmov_conf[adm].clk_ctl = CLK_DIS;
|
|
+ }
|
|
+ mutex_unlock(&dmov_conf[adm].lock);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int msm_dmov_runtime_suspend(struct device *dev)
|
|
+{
|
|
+ dev_dbg(dev, "pm_runtime: suspending...\n");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int msm_dmov_runtime_resume(struct device *dev)
|
|
+{
|
|
+ dev_dbg(dev, "pm_runtime: resuming...\n");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int msm_dmov_runtime_idle(struct device *dev)
|
|
+{
|
|
+ dev_dbg(dev, "pm_runtime: idling...\n");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct dev_pm_ops msm_dmov_dev_pm_ops = {
|
|
+ .runtime_suspend = msm_dmov_runtime_suspend,
|
|
+ .runtime_resume = msm_dmov_runtime_resume,
|
|
+ .runtime_idle = msm_dmov_runtime_idle,
|
|
+ .suspend = msm_dmov_suspend_late,
|
|
+};
|
|
+
|
|
+static int msm_dmov_init_clocks(struct platform_device *pdev)
|
|
+{
|
|
+ int adm = (pdev->id >= 0) ? pdev->id : 0;
|
|
+ int ret;
|
|
+
|
|
+ dmov_conf[adm].clk = devm_clk_get(&pdev->dev, "core_clk");
|
|
+ if (IS_ERR(dmov_conf[adm].clk)) {
|
|
+ printk(KERN_ERR "%s: Error getting adm_clk\n", __func__);
|
|
+ dmov_conf[adm].clk = NULL;
|
|
+ return -ENOENT;
|
|
+ }
|
|
+
|
|
+ dmov_conf[adm].pclk = devm_clk_get(&pdev->dev, "iface_clk");
|
|
+ if (IS_ERR(dmov_conf[adm].pclk)) {
|
|
+ dmov_conf[adm].pclk = NULL;
|
|
+ /* pclk not present on all SoCs, don't bail on failure */
|
|
+ }
|
|
+
|
|
+ dmov_conf[adm].ebiclk = devm_clk_get(&pdev->dev, "mem_clk");
|
|
+ if (IS_ERR(dmov_conf[adm].ebiclk)) {
|
|
+ dmov_conf[adm].ebiclk = NULL;
|
|
+ /* ebiclk not present on all SoCs, don't bail on failure */
|
|
+ } else {
|
|
+ ret = clk_set_rate(dmov_conf[adm].ebiclk, 27000000);
|
|
+ if (ret)
|
|
+ return -ENOENT;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void config_datamover(int adm)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ /* Reset the ADM */
|
|
+ reset_control_assert(dmov_conf[adm].adm_reset);
|
|
+ reset_control_assert(dmov_conf[adm].c0_reset);
|
|
+ reset_control_assert(dmov_conf[adm].c1_reset);
|
|
+ reset_control_assert(dmov_conf[adm].c2_reset);
|
|
+
|
|
+ reset_control_deassert(dmov_conf[adm].c2_reset);
|
|
+ reset_control_deassert(dmov_conf[adm].c1_reset);
|
|
+ reset_control_deassert(dmov_conf[adm].c0_reset);
|
|
+ reset_control_deassert(dmov_conf[adm].adm_reset);
|
|
+
|
|
+ for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
|
|
+ struct msm_dmov_chan_conf *chan_conf =
|
|
+ dmov_conf[adm].chan_conf;
|
|
+ unsigned conf;
|
|
+ /* Only configure scorpion channels */
|
|
+ if (chan_conf[i].sd <= 1) {
|
|
+ conf = readl_relaxed(DMOV_REG(DMOV_CONF(i), adm));
|
|
+ conf |= DMOV_CONF_MPU_DISABLE |
|
|
+ DMOV_CONF_PERM_MPU_CONF |
|
|
+ DMOV_CONF_FLUSH_RSLT_EN |
|
|
+ DMOV_CONF_FORCE_RSLT_EN |
|
|
+ DMOV_CONF_IRQ_EN |
|
|
+ DMOV_CONF_PRIORITY(chan_conf[i].priority);
|
|
+
|
|
+ conf &= ~DMOV_CONF_SD(7);
|
|
+ conf |= DMOV_CONF_SD(chan_conf[i].sd);
|
|
+ writel_relaxed(conf, DMOV_REG(DMOV_CONF(i), adm));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < MSM_DMOV_CRCI_COUNT; i++) {
|
|
+ writel_relaxed(DMOV_CRCI_CTL_RST,
|
|
+ DMOV_REG(DMOV_CRCI_CTL(i), adm));
|
|
+ }
|
|
+
|
|
+ /* NAND CRCI Enable */
|
|
+ writel_relaxed(0, DMOV_REG(DMOV_CRCI_CTL(DMOV_NAND_CRCI_DATA), adm));
|
|
+ writel_relaxed(0, DMOV_REG(DMOV_CRCI_CTL(DMOV_NAND_CRCI_CMD), adm));
|
|
+
|
|
+ /* GSBI5 CRCI Enable */
|
|
+ writel_relaxed(0, DMOV_REG(DMOV_CRCI_CTL(DMOV_SPI_GSBI5_RX_CRCI), adm));
|
|
+ writel_relaxed(0, DMOV_REG(DMOV_CRCI_CTL(DMOV_SPI_GSBI5_TX_CRCI), adm));
|
|
+
|
|
+ writel_relaxed(DMOV_CI_CONF_RANGE_START(0x40) | /* EBI1 */
|
|
+ DMOV_CI_CONF_RANGE_END(0xb0) |
|
|
+ DMOV_CI_CONF_MAX_BURST(0x8),
|
|
+ DMOV_REG(DMOV_CI_CONF(0), adm));
|
|
+
|
|
+ writel_relaxed(DMOV_CI_CONF_RANGE_START(0x2a) | /* IMEM */
|
|
+ DMOV_CI_CONF_RANGE_END(0x2c) |
|
|
+ DMOV_CI_CONF_MAX_BURST(0x8),
|
|
+ DMOV_REG(DMOV_CI_CONF(1), adm));
|
|
+
|
|
+ writel_relaxed(DMOV_CI_CONF_RANGE_START(0x12) | /* CPSS/SPS */
|
|
+ DMOV_CI_CONF_RANGE_END(0x28) |
|
|
+ DMOV_CI_CONF_MAX_BURST(0x8),
|
|
+ DMOV_REG(DMOV_CI_CONF(2), adm));
|
|
+
|
|
+ writel_relaxed(DMOV_HI_GP_CTL_CORE_CLK_LP_EN | /* will disable LP */
|
|
+ DMOV_HI_GP_CTL_LP_CNT(0xf),
|
|
+ DMOV_REG(DMOV_HI_GP_CTL, adm));
|
|
+
|
|
+}
|
|
+
|
|
+static int msm_dmov_probe(struct platform_device *pdev)
|
|
+{
|
|
+
|
|
+ int adm = (pdev->id >= 0) ? pdev->id : 0;
|
|
+ int i;
|
|
+ int ret;
|
|
+ struct resource *irqres =
|
|
+ platform_get_resource(pdev, IORESOURCE_IRQ, 0);
|
|
+ struct resource *mres =
|
|
+ platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
+
|
|
+ dmov_conf[adm].sd=0;
|
|
+ dmov_conf[adm].sd_size=0x800;
|
|
+
|
|
+ dmov_conf[adm].irq = irqres->start;
|
|
+
|
|
+ dmov_conf[adm].base = devm_ioremap_resource(&pdev->dev, mres);
|
|
+ if (!dmov_conf[adm].base)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ dmov_conf[adm].cmd_wq = alloc_ordered_workqueue("dmov%d_wq", 0, adm);
|
|
+ if (!dmov_conf[adm].cmd_wq) {
|
|
+ PRINT_ERROR("Couldn't allocate ADM%d workqueue.\n", adm);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ /* get resets */
|
|
+ dmov_conf[adm].adm_reset = devm_reset_control_get(&pdev->dev, "adm");
|
|
+ if (IS_ERR(dmov_conf[adm].adm_reset)) {
|
|
+ dev_err(&pdev->dev, "failed to get adm reset\n");
|
|
+ ret = PTR_ERR(dmov_conf[adm].adm_reset);
|
|
+ goto out_wq;
|
|
+ }
|
|
+
|
|
+ dmov_conf[adm].pbus_reset = devm_reset_control_get(&pdev->dev, "pbus");
|
|
+ if (IS_ERR(dmov_conf[adm].pbus_reset)) {
|
|
+ dev_err(&pdev->dev, "failed to get pbus reset\n");
|
|
+ ret = PTR_ERR(dmov_conf[adm].pbus_reset);
|
|
+ goto out_wq;
|
|
+ }
|
|
+
|
|
+ dmov_conf[adm].c0_reset = devm_reset_control_get(&pdev->dev, "c0");
|
|
+ if (IS_ERR(dmov_conf[adm].c0_reset)) {
|
|
+ dev_err(&pdev->dev, "failed to get c0 reset\n");
|
|
+ ret = PTR_ERR(dmov_conf[adm].c0_reset);
|
|
+ goto out_wq;
|
|
+ }
|
|
+
|
|
+ dmov_conf[adm].c1_reset = devm_reset_control_get(&pdev->dev, "c1");
|
|
+ if (IS_ERR(dmov_conf[adm].c1_reset)) {
|
|
+ dev_err(&pdev->dev, "failed to get c1 reset\n");
|
|
+ ret = PTR_ERR(dmov_conf[adm].c1_reset);
|
|
+ goto out_wq;
|
|
+ }
|
|
+
|
|
+ dmov_conf[adm].c2_reset = devm_reset_control_get(&pdev->dev, "c2");
|
|
+ if (IS_ERR(dmov_conf[adm].c2_reset)) {
|
|
+ dev_err(&pdev->dev, "failed to get c2 reset\n");
|
|
+ ret = PTR_ERR(dmov_conf[adm].c2_reset);
|
|
+ goto out_wq;
|
|
+ }
|
|
+
|
|
+ ret = devm_request_threaded_irq(&pdev->dev, dmov_conf[adm].irq, NULL,
|
|
+ msm_dmov_isr, IRQF_ONESHOT, "msmdatamover", NULL);
|
|
+
|
|
+ if (ret) {
|
|
+ PRINT_ERROR("Requesting ADM%d irq %d failed\n", adm,
|
|
+ dmov_conf[adm].irq);
|
|
+ goto out_wq;
|
|
+ }
|
|
+
|
|
+ disable_irq(dmov_conf[adm].irq);
|
|
+ ret = msm_dmov_init_clocks(pdev);
|
|
+ if (ret) {
|
|
+ PRINT_ERROR("Requesting ADM%d clocks failed\n", adm);
|
|
+ goto out_wq;
|
|
+ }
|
|
+ clk_prepare_enable(dmov_conf[adm].clk);
|
|
+ clk_prepare_enable(dmov_conf[adm].pclk);
|
|
+
|
|
+// ret = msm_dmov_clk_on(adm);
|
|
+// if (ret) {
|
|
+// PRINT_ERROR("Enabling ADM%d clocks failed\n", adm);
|
|
+// goto out_wq;
|
|
+// }
|
|
+
|
|
+ config_datamover(adm);
|
|
+ for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
|
|
+ INIT_LIST_HEAD(&dmov_conf[adm].staged_commands[i]);
|
|
+ INIT_LIST_HEAD(&dmov_conf[adm].ready_commands[i]);
|
|
+ INIT_LIST_HEAD(&dmov_conf[adm].active_commands[i]);
|
|
+
|
|
+ writel_relaxed(DMOV_RSLT_CONF_IRQ_EN
|
|
+ | DMOV_RSLT_CONF_FORCE_FLUSH_RSLT,
|
|
+ DMOV_REG(DMOV_RSLT_CONF(i), adm));
|
|
+ }
|
|
+ wmb();
|
|
+// msm_dmov_clk_off(adm);
|
|
+ return ret;
|
|
+out_wq:
|
|
+ destroy_workqueue(dmov_conf[adm].cmd_wq);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_OF
|
|
+static const struct of_device_id adm_of_match[] = {
|
|
+ { .compatible = "qcom,adm", },
|
|
+ {},
|
|
+};
|
|
+MODULE_DEVICE_TABLE(of, adm_of_match);
|
|
+#endif
|
|
+
|
|
+static struct platform_driver msm_dmov_driver = {
|
|
+ .probe = msm_dmov_probe,
|
|
+ .driver = {
|
|
+ .name = MODULE_NAME,
|
|
+ .owner = THIS_MODULE,
|
|
+ .of_match_table = adm_of_match,
|
|
+ .pm = &msm_dmov_dev_pm_ops,
|
|
+ },
|
|
+};
|
|
+
|
|
+/* static int __init */
|
|
+static int __init msm_init_datamover(void)
|
|
+{
|
|
+ int ret;
|
|
+ ret = platform_driver_register(&msm_dmov_driver);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ return 0;
|
|
+}
|
|
+arch_initcall(msm_init_datamover);
|
|
--- /dev/null
|
|
+++ b/drivers/mtd/nand/qcom_adm_dma.h
|
|
@@ -0,0 +1,268 @@
|
|
+/* * Copyright (c) 2012 The Linux Foundation. All rights reserved.* */
|
|
+/* linux/include/asm-arm/arch-msm/dma.h
|
|
+ *
|
|
+ * Copyright (C) 2007 Google, Inc.
|
|
+ * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
|
|
+ *
|
|
+ * This software is licensed under the terms of the GNU General Public
|
|
+ * License version 2, as published by the Free Software Foundation, and
|
|
+ * may be copied, distributed, and modified under those terms.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef __ASM_ARCH_MSM_DMA_H
|
|
+#define __ASM_ARCH_MSM_DMA_H
|
|
+#include <linux/list.h>
|
|
+
|
|
+struct msm_dmov_errdata {
|
|
+ uint32_t flush[6];
|
|
+};
|
|
+
|
|
+struct msm_dmov_cmd {
|
|
+ struct list_head list;
|
|
+ unsigned int cmdptr;
|
|
+ void (*complete_func)(struct msm_dmov_cmd *cmd,
|
|
+ unsigned int result,
|
|
+ struct msm_dmov_errdata *err);
|
|
+ void (*exec_func)(struct msm_dmov_cmd *cmd);
|
|
+ struct work_struct work;
|
|
+ unsigned id; /* For internal use */
|
|
+ void *user; /* Pointer for caller's reference */
|
|
+ u8 toflush;
|
|
+};
|
|
+
|
|
+struct msm_dmov_pdata {
|
|
+ int sd;
|
|
+ size_t sd_size;
|
|
+};
|
|
+
|
|
+void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd);
|
|
+void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd);
|
|
+void msm_dmov_flush(unsigned int id, int graceful);
|
|
+int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr);
|
|
+
|
|
+#define DMOV_CRCIS_PER_CONF 10
|
|
+
|
|
+#define DMOV_ADDR(off, ch) ((off) + ((ch) << 2))
|
|
+
|
|
+#define DMOV_CMD_PTR(ch) DMOV_ADDR(0x000, ch)
|
|
+#define DMOV_CMD_LIST (0 << 29) /* does not work */
|
|
+#define DMOV_CMD_PTR_LIST (1 << 29) /* works */
|
|
+#define DMOV_CMD_INPUT_CFG (2 << 29) /* untested */
|
|
+#define DMOV_CMD_OUTPUT_CFG (3 << 29) /* untested */
|
|
+#define DMOV_CMD_ADDR(addr) ((addr) >> 3)
|
|
+
|
|
+#define DMOV_RSLT(ch) DMOV_ADDR(0x040, ch)
|
|
+#define DMOV_RSLT_VALID (1 << 31) /* 0 == host has emptied result fifo */
|
|
+#define DMOV_RSLT_ERROR (1 << 3)
|
|
+#define DMOV_RSLT_FLUSH (1 << 2)
|
|
+#define DMOV_RSLT_DONE (1 << 1) /* top pointer done */
|
|
+#define DMOV_RSLT_USER (1 << 0) /* command with FR force result */
|
|
+
|
|
+#define DMOV_FLUSH0(ch) DMOV_ADDR(0x080, ch)
|
|
+#define DMOV_FLUSH1(ch) DMOV_ADDR(0x0C0, ch)
|
|
+#define DMOV_FLUSH2(ch) DMOV_ADDR(0x100, ch)
|
|
+#define DMOV_FLUSH3(ch) DMOV_ADDR(0x140, ch)
|
|
+#define DMOV_FLUSH4(ch) DMOV_ADDR(0x180, ch)
|
|
+#define DMOV_FLUSH5(ch) DMOV_ADDR(0x1C0, ch)
|
|
+#define DMOV_FLUSH_TYPE (1 << 31)
|
|
+
|
|
+#define DMOV_STATUS(ch) DMOV_ADDR(0x200, ch)
|
|
+#define DMOV_STATUS_RSLT_COUNT(n) (((n) >> 29))
|
|
+#define DMOV_STATUS_CMD_COUNT(n) (((n) >> 27) & 3)
|
|
+#define DMOV_STATUS_RSLT_VALID (1 << 1)
|
|
+#define DMOV_STATUS_CMD_PTR_RDY (1 << 0)
|
|
+
|
|
+#define DMOV_CONF(ch) DMOV_ADDR(0x240, ch)
|
|
+#define DMOV_CONF_SD(sd) (((sd & 4) << 11) | ((sd & 3) << 4))
|
|
+#define DMOV_CONF_OTHER_CH_BLK_MASK(m) ((m << 0x10) & 0xffff0000)
|
|
+#define DMOV_CONF_SHADOW_EN (1 << 12)
|
|
+#define DMOV_CONF_MPU_DISABLE (1 << 11)
|
|
+#define DMOV_CONF_PERM_MPU_CONF (1 << 9)
|
|
+#define DMOV_CONF_FLUSH_RSLT_EN (1 << 8)
|
|
+#define DMOV_CONF_IRQ_EN (1 << 6)
|
|
+#define DMOV_CONF_FORCE_RSLT_EN (1 << 7)
|
|
+#define DMOV_CONF_PRIORITY(n) (n << 0)
|
|
+
|
|
+#define DMOV_DBG_ERR(ci) DMOV_ADDR(0x280, ci)
|
|
+
|
|
+#define DMOV_RSLT_CONF(ch) DMOV_ADDR(0x300, ch)
|
|
+#define DMOV_RSLT_CONF_FORCE_TOP_PTR_RSLT (1 << 2)
|
|
+#define DMOV_RSLT_CONF_FORCE_FLUSH_RSLT (1 << 1)
|
|
+#define DMOV_RSLT_CONF_IRQ_EN (1 << 0)
|
|
+
|
|
+#define DMOV_ISR DMOV_ADDR(0x380, 0)
|
|
+
|
|
+#define DMOV_CI_CONF(ci) DMOV_ADDR(0x390, ci)
|
|
+#define DMOV_CI_CONF_RANGE_END(n) ((n) << 24)
|
|
+#define DMOV_CI_CONF_RANGE_START(n) ((n) << 16)
|
|
+#define DMOV_CI_CONF_MAX_BURST(n) ((n) << 0)
|
|
+
|
|
+#define DMOV_CI_DBG_ERR(ci) DMOV_ADDR(0x3B0, ci)
|
|
+
|
|
+#define DMOV_CRCI_CONF0 DMOV_ADDR(0x3D0, 0)
|
|
+#define DMOV_CRCI_CONF0_CRCI9_SD (2 << 0x1b)
|
|
+
|
|
+#define DMOV_CRCI_CONF1 DMOV_ADDR(0x3D4, 0)
|
|
+#define DMOV_CRCI_CONF0_SD(crci, sd) (sd << (crci*3))
|
|
+#define DMOV_CRCI_CONF1_SD(crci, sd) (sd << ((crci-DMOV_CRCIS_PER_CONF)*3))
|
|
+
|
|
+#define DMOV_HI_GP_CTL DMOV_ADDR(0x3D8, 0)
|
|
+#define DMOV_HI_GP_CTL_CORE_CLK_LP_EN (1 << 12)
|
|
+#define DMOV_HI_GP_CTL_LP_CNT(x) (((x) & 0xf) << 8)
|
|
+#define DMOV_HI_GP_CTL_CI3_CLK_LP_EN (1 << 7)
|
|
+#define DMOV_HI_GP_CTL_CI2_CLK_LP_EN (1 << 6)
|
|
+#define DMOV_HI_GP_CTL_CI1_CLK_LP_EN (1 << 5)
|
|
+#define DMOV_HI_GP_CTL_CI0_CLK_LP_EN (1 << 4)
|
|
+
|
|
+#define DMOV_CRCI_CTL(crci) DMOV_ADDR(0x400, crci)
|
|
+#define DMOV_CRCI_CTL_BLK_SZ(n) ((n) << 0)
|
|
+#define DMOV_CRCI_CTL_RST (1 << 17)
|
|
+#define DMOV_CRCI_MUX (1 << 18)
|
|
+
|
|
+/* channel assignments */
|
|
+
|
|
+/*
|
|
+ * Format of CRCI numbers: crci number + (muxsel << 4)
|
|
+ */
|
|
+
|
|
+#define DMOV_GP_CHAN 9
|
|
+
|
|
+#define DMOV_CE_IN_CHAN 0
|
|
+#define DMOV_CE_IN_CRCI 2
|
|
+
|
|
+#define DMOV_CE_OUT_CHAN 1
|
|
+#define DMOV_CE_OUT_CRCI 3
|
|
+
|
|
+#define DMOV_TSIF_CHAN 2
|
|
+#define DMOV_TSIF_CRCI 11
|
|
+
|
|
+#define DMOV_HSUART_GSBI6_TX_CHAN 7
|
|
+#define DMOV_HSUART_GSBI6_TX_CRCI 6
|
|
+
|
|
+#define DMOV_HSUART_GSBI6_RX_CHAN 8
|
|
+#define DMOV_HSUART_GSBI6_RX_CRCI 11
|
|
+
|
|
+#define DMOV_HSUART_GSBI8_TX_CHAN 7
|
|
+#define DMOV_HSUART_GSBI8_TX_CRCI 10
|
|
+
|
|
+#define DMOV_HSUART_GSBI8_RX_CHAN 8
|
|
+#define DMOV_HSUART_GSBI8_RX_CRCI 9
|
|
+
|
|
+#define DMOV_HSUART_GSBI9_TX_CHAN 4
|
|
+#define DMOV_HSUART_GSBI9_TX_CRCI 13
|
|
+
|
|
+#define DMOV_HSUART_GSBI9_RX_CHAN 3
|
|
+#define DMOV_HSUART_GSBI9_RX_CRCI 12
|
|
+
|
|
+#define DMOV_NAND_CHAN 3
|
|
+#define DMOV_NAND_CRCI_CMD 15
|
|
+#define DMOV_NAND_CRCI_DATA 3
|
|
+
|
|
+#define DMOV_SPI_GSBI5_RX_CRCI 9
|
|
+#define DMOV_SPI_GSBI5_TX_CRCI 10
|
|
+#define DMOV_SPI_GSBI5_RX_CHAN 6
|
|
+#define DMOV_SPI_GSBI5_TX_CHAN 5
|
|
+
|
|
+/* channels for APQ8064 */
|
|
+#define DMOV8064_CE_IN_CHAN 0
|
|
+#define DMOV8064_CE_IN_CRCI 14
|
|
+
|
|
+#define DMOV8064_CE_OUT_CHAN 1
|
|
+#define DMOV8064_CE_OUT_CRCI 15
|
|
+
|
|
+#define DMOV8064_TSIF_CHAN 2
|
|
+#define DMOV8064_TSIF_CRCI 1
|
|
+
|
|
+/* channels for APQ8064 SGLTE*/
|
|
+#define DMOV_APQ8064_HSUART_GSBI4_TX_CHAN 11
|
|
+#define DMOV_APQ8064_HSUART_GSBI4_TX_CRCI 8
|
|
+
|
|
+#define DMOV_APQ8064_HSUART_GSBI4_RX_CHAN 10
|
|
+#define DMOV_APQ8064_HSUART_GSBI4_RX_CRCI 7
|
|
+
|
|
+/* channels for MPQ8064 */
|
|
+#define DMOV_MPQ8064_HSUART_GSBI6_TX_CHAN 7
|
|
+#define DMOV_MPQ8064_HSUART_GSBI6_TX_CRCI 6
|
|
+
|
|
+#define DMOV_MPQ8064_HSUART_GSBI6_RX_CHAN 6
|
|
+#define DMOV_MPQ8064_HSUART_GSBI6_RX_CRCI 11
|
|
+
|
|
+#define DMOV_IPQ806X_HSUART_GSBI6_TX_CHAN DMOV_MPQ8064_HSUART_GSBI6_TX_CHAN
|
|
+#define DMOV_IPQ806X_HSUART_GSBI6_TX_CRCI DMOV_MPQ8064_HSUART_GSBI6_TX_CRCI
|
|
+
|
|
+#define DMOV_IPQ806X_HSUART_GSBI6_RX_CHAN DMOV_MPQ8064_HSUART_GSBI6_RX_CHAN
|
|
+#define DMOV_IPQ806X_HSUART_GSBI6_RX_CRCI DMOV_MPQ8064_HSUART_GSBI6_RX_CRCI
|
|
+
|
|
+/* no client rate control ifc (eg, ram) */
|
|
+#define DMOV_NONE_CRCI 0
|
|
+
|
|
+
|
|
+/* If the CMD_PTR register has CMD_PTR_LIST selected, the data mover
|
|
+ * is going to walk a list of 32bit pointers as described below. Each
|
|
+ * pointer points to a *array* of dmov_s, etc structs. The last pointer
|
|
+ * in the list is marked with CMD_PTR_LP. The last struct in each array
|
|
+ * is marked with CMD_LC (see below).
|
|
+ */
|
|
+#define CMD_PTR_ADDR(addr) ((addr) >> 3)
|
|
+#define CMD_PTR_LP (1 << 31) /* last pointer */
|
|
+#define CMD_PTR_PT (3 << 29) /* ? */
|
|
+
|
|
+/* Single Item Mode */
|
|
+typedef struct {
|
|
+ unsigned cmd;
|
|
+ unsigned src;
|
|
+ unsigned dst;
|
|
+ unsigned len;
|
|
+} dmov_s;
|
|
+
|
|
+/* Scatter/Gather Mode */
|
|
+typedef struct {
|
|
+ unsigned cmd;
|
|
+ unsigned src_dscr;
|
|
+ unsigned dst_dscr;
|
|
+ unsigned _reserved;
|
|
+} dmov_sg;
|
|
+
|
|
+/* Box mode */
|
|
+typedef struct {
|
|
+ uint32_t cmd;
|
|
+ uint32_t src_row_addr;
|
|
+ uint32_t dst_row_addr;
|
|
+ uint32_t src_dst_len;
|
|
+ uint32_t num_rows;
|
|
+ uint32_t row_offset;
|
|
+} dmov_box;
|
|
+
|
|
+/* bits for the cmd field of the above structures */
|
|
+
|
|
+#define CMD_LC (1 << 31) /* last command */
|
|
+#define CMD_FR (1 << 22) /* force result -- does not work? */
|
|
+#define CMD_OCU (1 << 21) /* other channel unblock */
|
|
+#define CMD_OCB (1 << 20) /* other channel block */
|
|
+#define CMD_TCB (1 << 19) /* ? */
|
|
+#define CMD_DAH (1 << 18) /* destination address hold -- does not work?*/
|
|
+#define CMD_SAH (1 << 17) /* source address hold -- does not work? */
|
|
+
|
|
+#define CMD_MODE_SINGLE (0 << 0) /* dmov_s structure used */
|
|
+#define CMD_MODE_SG (1 << 0) /* untested */
|
|
+#define CMD_MODE_IND_SG (2 << 0) /* untested */
|
|
+#define CMD_MODE_BOX (3 << 0) /* untested */
|
|
+
|
|
+#define CMD_DST_SWAP_BYTES (1 << 14) /* exchange each byte n with byte n+1 */
|
|
+#define CMD_DST_SWAP_SHORTS (1 << 15) /* exchange each short n with short n+1 */
|
|
+#define CMD_DST_SWAP_WORDS (1 << 16) /* exchange each word n with word n+1 */
|
|
+
|
|
+#define CMD_SRC_SWAP_BYTES (1 << 11) /* exchange each byte n with byte n+1 */
|
|
+#define CMD_SRC_SWAP_SHORTS (1 << 12) /* exchange each short n with short n+1 */
|
|
+#define CMD_SRC_SWAP_WORDS (1 << 13) /* exchange each word n with word n+1 */
|
|
+
|
|
+#define CMD_DST_CRCI(n) (((n) & 15) << 7)
|
|
+#define CMD_SRC_CRCI(n) (((n) & 15) << 3)
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/mtd/nand/qcom_nand.c
|
|
@@ -0,0 +1,7455 @@
|
|
+/*
|
|
+ * Copyright (C) 2007 Google, Inc.
|
|
+ * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
|
|
+ *
|
|
+ * This software is licensed under the terms of the GNU General Public
|
|
+ * License version 2, as published by the Free Software Foundation, and
|
|
+ * may be copied, distributed, and modified under those terms.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include <linux/slab.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mtd/mtd.h>
|
|
+#include <linux/mtd/nand.h>
|
|
+#include <linux/mtd/partitions.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/crc16.h>
|
|
+#include <linux/bitrev.h>
|
|
+#include <linux/clk.h>
|
|
+
|
|
+#include <asm/dma.h>
|
|
+#include <asm/mach/flash.h>
|
|
+
|
|
+#include "qcom_adm_dma.h"
|
|
+
|
|
+#include "qcom_nand.h"
|
|
+unsigned long msm_nand_phys = 0;
|
|
+unsigned long msm_nandc01_phys = 0;
|
|
+unsigned long msm_nandc10_phys = 0;
|
|
+unsigned long msm_nandc11_phys = 0;
|
|
+unsigned long ebi2_register_base = 0;
|
|
+static uint32_t dual_nand_ctlr_present;
|
|
+static uint32_t interleave_enable;
|
|
+static uint32_t enable_bch_ecc;
|
|
+static uint32_t boot_layout;
|
|
+
|
|
+
|
|
+#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
|
|
+#define MSM_NAND_DMA_BUFFER_SLOTS \
|
|
+ (MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
|
|
+
|
|
+#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
|
|
+#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
|
|
+#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
|
|
+#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
|
|
+
|
|
+#define ONFI_IDENTIFIER_LENGTH 0x0004
|
|
+#define ONFI_PARAM_INFO_LENGTH 0x0200
|
|
+#define ONFI_PARAM_PAGE_LENGTH 0x0100
|
|
+
|
|
+#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
|
|
+
|
|
+#define FLASH_READ_ONFI_IDENTIFIER_COMMAND 0x90
|
|
+#define FLASH_READ_ONFI_IDENTIFIER_ADDRESS 0x20
|
|
+#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
|
|
+#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
|
|
+
|
|
+#define UD_SIZE_BYTES_MASK (0x3FF << 9)
|
|
+#define SPARE_SIZE_BYTES_MASK (0xF << 23)
|
|
+#define ECC_NUM_DATA_BYTES_MASK (0x3FF << 16)
|
|
+
|
|
+#define VERBOSE 0
|
|
+
|
|
+struct msm_nand_chip {
|
|
+ struct device *dev;
|
|
+ wait_queue_head_t wait_queue;
|
|
+ atomic_t dma_buffer_busy;
|
|
+ unsigned dma_channel;
|
|
+ uint8_t *dma_buffer;
|
|
+ dma_addr_t dma_addr;
|
|
+ unsigned CFG0, CFG1, CFG0_RAW, CFG1_RAW;
|
|
+ uint32_t ecc_buf_cfg;
|
|
+ uint32_t ecc_bch_cfg;
|
|
+ uint32_t ecc_parity_bytes;
|
|
+ unsigned cw_size;
|
|
+ unsigned int uncorrectable_bit_mask;
|
|
+ unsigned int num_err_mask;
|
|
+};
|
|
+
|
|
+#define CFG1_WIDE_FLASH (1U << 1)
|
|
+
|
|
+/* TODO: move datamover code out */
|
|
+
|
|
+#define SRC_CRCI_NAND_CMD CMD_SRC_CRCI(DMOV_NAND_CRCI_CMD)
|
|
+#define DST_CRCI_NAND_CMD CMD_DST_CRCI(DMOV_NAND_CRCI_CMD)
|
|
+#define SRC_CRCI_NAND_DATA CMD_SRC_CRCI(DMOV_NAND_CRCI_DATA)
|
|
+#define DST_CRCI_NAND_DATA CMD_DST_CRCI(DMOV_NAND_CRCI_DATA)
|
|
+
|
|
+#define msm_virt_to_dma(chip, vaddr) \
|
|
+ ((chip)->dma_addr + \
|
|
+ ((uint8_t *)(vaddr) - (chip)->dma_buffer))
|
|
+
|
|
+/**
|
|
+ * msm_nand_oob_64 - oob info for 2KB page
|
|
+ */
|
|
+static struct nand_ecclayout msm_nand_oob_64 = {
|
|
+ .eccbytes = 40,
|
|
+ .eccpos = {
|
|
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
|
|
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
|
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
|
|
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
|
|
+ },
|
|
+ .oobavail = 16,
|
|
+ .oobfree = {
|
|
+ {30, 16},
|
|
+ }
|
|
+};
|
|
+
|
|
+/**
|
|
+ * msm_nand_oob_128 - oob info for 4KB page
|
|
+ */
|
|
+static struct nand_ecclayout msm_nand_oob_128 = {
|
|
+ .eccbytes = 80,
|
|
+ .eccpos = {
|
|
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
|
|
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
|
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
|
|
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
|
|
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
|
|
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
|
|
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
|
|
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
|
|
+ },
|
|
+ .oobavail = 32,
|
|
+ .oobfree = {
|
|
+ {70, 32},
|
|
+ }
|
|
+};
|
|
+
|
|
+/**
|
|
+ * msm_nand_oob_224_x8 - oob info for 4KB page, 8-bit interface
|
|
+ */
|
|
+static struct nand_ecclayout msm_nand_oob_224_x8 = {
|
|
+ .eccbytes = 104,
|
|
+ .eccpos = {
|
|
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
|
|
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
|
|
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
|
|
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
|
|
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
|
|
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
|
|
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
|
|
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
|
|
+ },
|
|
+ .oobavail = 32,
|
|
+ .oobfree = {
|
|
+ {91, 32},
|
|
+ }
|
|
+};
|
|
+
|
|
+/**
|
|
+ * msm_nand_oob_224_x16 - oob info for 4KB page, 16-bit interface
|
|
+ */
|
|
+static struct nand_ecclayout msm_nand_oob_224_x16 = {
|
|
+ .eccbytes = 112,
|
|
+ .eccpos = {
|
|
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
|
|
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
|
|
+ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
|
|
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
|
|
+ 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
|
|
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
|
|
+ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
|
|
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
|
|
+ },
|
|
+ .oobavail = 32,
|
|
+ .oobfree = {
|
|
+ {98, 32},
|
|
+ }
|
|
+};
|
|
+
|
|
+/**
|
|
+ * msm_nand_oob_256 - oob info for 8KB page
|
|
+ */
|
|
+static struct nand_ecclayout msm_nand_oob_256 = {
|
|
+ .eccbytes = 160,
|
|
+ .eccpos = {
|
|
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
|
|
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
|
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
|
|
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
|
|
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
|
|
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
|
|
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
|
|
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
|
|
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
|
|
+ 90, 91, 92, 93, 94, 96, 97, 98 , 99, 100,
|
|
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
|
|
+ 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
|
|
+ 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
|
|
+ 131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
|
|
+ 141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
|
|
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
|
|
+ },
|
|
+ .oobavail = 64,
|
|
+ .oobfree = {
|
|
+ {151, 64},
|
|
+ }
|
|
+};
|
|
+
|
|
+/**
|
|
+ * msm_onenand_oob_64 - oob info for large (2KB) page
|
|
+ */
|
|
+static struct nand_ecclayout msm_onenand_oob_64 = {
|
|
+ .eccbytes = 20,
|
|
+ .eccpos = {
|
|
+ 8, 9, 10, 11, 12,
|
|
+ 24, 25, 26, 27, 28,
|
|
+ 40, 41, 42, 43, 44,
|
|
+ 56, 57, 58, 59, 60,
|
|
+ },
|
|
+ .oobavail = 20,
|
|
+ .oobfree = {
|
|
+ {2, 3}, {14, 2}, {18, 3}, {30, 2},
|
|
+ {34, 3}, {46, 2}, {50, 3}, {62, 2}
|
|
+ }
|
|
+};
|
|
+
|
|
+static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
|
|
+{
|
|
+ unsigned int bitmask, free_bitmask, old_bitmask;
|
|
+ unsigned int need_mask, current_need_mask;
|
|
+ int free_index;
|
|
+
|
|
+ need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
|
|
+ bitmask = atomic_read(&chip->dma_buffer_busy);
|
|
+ free_bitmask = ~bitmask;
|
|
+ while (free_bitmask) {
|
|
+ free_index = __ffs(free_bitmask);
|
|
+ current_need_mask = need_mask << free_index;
|
|
+
|
|
+ if (size + free_index * MSM_NAND_DMA_BUFFER_SLOTS >=
|
|
+ MSM_NAND_DMA_BUFFER_SIZE)
|
|
+ return NULL;
|
|
+
|
|
+ if ((bitmask & current_need_mask) == 0) {
|
|
+ old_bitmask =
|
|
+ atomic_cmpxchg(&chip->dma_buffer_busy,
|
|
+ bitmask,
|
|
+ bitmask | current_need_mask);
|
|
+ if (old_bitmask == bitmask)
|
|
+ return chip->dma_buffer +
|
|
+ free_index * MSM_NAND_DMA_BUFFER_SLOTS;
|
|
+ free_bitmask = 0; /* force return */
|
|
+ }
|
|
+ /* current free range was too small, clear all free bits */
|
|
+ /* below the top busy bit within current_need_mask */
|
|
+ free_bitmask &=
|
|
+ ~(~0U >> (32 - fls(bitmask & current_need_mask)));
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
|
|
+ void *buffer, size_t size)
|
|
+{
|
|
+ int index;
|
|
+ unsigned int used_mask;
|
|
+
|
|
+ used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
|
|
+ index = ((uint8_t *)buffer - chip->dma_buffer) /
|
|
+ MSM_NAND_DMA_BUFFER_SLOTS;
|
|
+ atomic_sub(used_mask << index, &chip->dma_buffer_busy);
|
|
+
|
|
+ wake_up(&chip->wait_queue);
|
|
+}
|
|
+
|
|
+
|
|
+unsigned flash_rd_reg(struct msm_nand_chip *chip, unsigned addr)
|
|
+{
|
|
+ struct {
|
|
+ dmov_s cmd;
|
|
+ unsigned cmdptr;
|
|
+ unsigned data;
|
|
+ } *dma_buffer;
|
|
+ unsigned rv;
|
|
+
|
|
+ wait_event(chip->wait_queue,
|
|
+ (dma_buffer = msm_nand_get_dma_buffer(
|
|
+ chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
|
|
+ dma_buffer->cmd.src = addr;
|
|
+ dma_buffer->cmd.dst = msm_virt_to_dma(chip, &dma_buffer->data);
|
|
+ dma_buffer->cmd.len = 4;
|
|
+
|
|
+ dma_buffer->cmdptr =
|
|
+ (msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
|
|
+ dma_buffer->data = 0xeeeeeeee;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(
|
|
+ chip->dma_channel, DMOV_CMD_PTR_LIST |
|
|
+ DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ rv = dma_buffer->data;
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+
|
|
+ return rv;
|
|
+}
|
|
+
|
|
+void flash_wr_reg(struct msm_nand_chip *chip, unsigned addr, unsigned val)
|
|
+{
|
|
+ struct {
|
|
+ dmov_s cmd;
|
|
+ unsigned cmdptr;
|
|
+ unsigned data;
|
|
+ } *dma_buffer;
|
|
+
|
|
+ wait_event(chip->wait_queue,
|
|
+ (dma_buffer = msm_nand_get_dma_buffer(
|
|
+ chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
|
|
+ dma_buffer->cmd.src = msm_virt_to_dma(chip, &dma_buffer->data);
|
|
+ dma_buffer->cmd.dst = addr;
|
|
+ dma_buffer->cmd.len = 4;
|
|
+
|
|
+ dma_buffer->cmdptr =
|
|
+ (msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
|
|
+ dma_buffer->data = val;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(
|
|
+ chip->dma_channel, DMOV_CMD_PTR_LIST |
|
|
+ DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Allocates a bounce buffer, and stores the buffer address in
|
|
+ * variable pointed to by bounce_buf. bounce_buf should point to a
|
|
+ * stack variable, to avoid SMP issues.
|
|
+ */
|
|
+static int msm_nand_alloc_bounce(void *addr, size_t size,
|
|
+ enum dma_data_direction dir,
|
|
+ uint8_t **bounce_buf)
|
|
+{
|
|
+ if (bounce_buf == NULL) {
|
|
+ printk(KERN_ERR "not allocating bounce buffer\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ *bounce_buf = kmalloc(size, GFP_KERNEL | GFP_NOFS | GFP_DMA);
|
|
+ if (*bounce_buf == NULL) {
|
|
+ printk(KERN_ERR "error alloc bounce buffer %zu\n", size);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
|
|
+ memcpy(*bounce_buf, addr, size);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Maps the user buffer for DMA. If the buffer is vmalloced and the
|
|
+ * buffer crosses a page boundary, then we kmalloc a bounce buffer and
|
|
+ * copy the data into it. The bounce buffer is stored in the variable
|
|
+ * pointed to by bounce_buf, for freeing up later on. The bounce_buf
|
|
+ * should point to a stack variable, to avoid SMP issues.
|
|
+ */
|
|
+static dma_addr_t
|
|
+msm_nand_dma_map(struct device *dev, void *addr, size_t size,
|
|
+ enum dma_data_direction dir, uint8_t **bounce_buf)
|
|
+{
|
|
+ int ret;
|
|
+ struct page *page;
|
|
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
|
|
+
|
|
+ if (virt_addr_valid(addr)) {
|
|
+ page = virt_to_page(addr);
|
|
+ } else {
|
|
+ if (size + offset > PAGE_SIZE) {
|
|
+ ret = msm_nand_alloc_bounce(addr, size, dir, bounce_buf);
|
|
+ if (ret < 0)
|
|
+ return DMA_ERROR_CODE;
|
|
+
|
|
+ offset = (unsigned long)*bounce_buf & ~PAGE_MASK;
|
|
+ page = virt_to_page(*bounce_buf);
|
|
+ } else {
|
|
+ page = vmalloc_to_page(addr);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return dma_map_page(dev, page, offset, size, dir);
|
|
+}
|
|
+
|
|
+static void msm_nand_dma_unmap(struct device *dev, dma_addr_t addr, size_t size,
|
|
+ enum dma_data_direction dir,
|
|
+ void *orig_buf, void *bounce_buf)
|
|
+{
|
|
+ dma_unmap_page(dev, addr, size, dir);
|
|
+
|
|
+ if (bounce_buf != NULL) {
|
|
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
|
|
+ memcpy(orig_buf, bounce_buf, size);
|
|
+
|
|
+ kfree(bounce_buf);
|
|
+ }
|
|
+}
|
|
+
|
|
+uint32_t flash_read_id(struct msm_nand_chip *chip)
|
|
+{
|
|
+ struct {
|
|
+ dmov_s cmd[9];
|
|
+ unsigned cmdptr;
|
|
+ unsigned data[7];
|
|
+ } *dma_buffer;
|
|
+ uint32_t rv;
|
|
+ dmov_s *cmd;
|
|
+
|
|
+ wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
|
|
+ (chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ dma_buffer->data[0] = 0 | 4;
|
|
+ dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
|
|
+ dma_buffer->data[2] = 1;
|
|
+ dma_buffer->data[3] = 0xeeeeeeee;
|
|
+ dma_buffer->data[4] = 0xeeeeeeee;
|
|
+ dma_buffer->data[5] = flash_rd_reg(chip, MSM_NAND_SFLASHC_BURST_CFG);
|
|
+ dma_buffer->data[6] = 0x00000000;
|
|
+ BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->data) - 1);
|
|
+
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ cmd->cmd = 0 | CMD_OCB;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[6]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[6]);
|
|
+ cmd->dst = MSM_NAND_ADDR0;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[6]);
|
|
+ cmd->dst = MSM_NAND_ADDR1;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[0]);
|
|
+ cmd->dst = MSM_NAND_FLASH_CHIP_SELECT;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[1]);
|
|
+ cmd->dst = MSM_NAND_FLASH_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[2]);
|
|
+ cmd->dst = MSM_NAND_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_FLASH_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data[3]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_READ_ID;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data[4]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = CMD_OCU | CMD_LC;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[5]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->cmd) - 1);
|
|
+
|
|
+ dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3
|
|
+ ) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
|
|
+ DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ pr_info("status: %x\n", dma_buffer->data[3]);
|
|
+ pr_info("nandid: %x maker %02x device %02x\n",
|
|
+ dma_buffer->data[4], dma_buffer->data[4] & 0xff,
|
|
+ (dma_buffer->data[4] >> 8) & 0xff);
|
|
+ rv = dma_buffer->data[4];
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+ return rv;
|
|
+}
|
|
+
|
|
+struct flash_identification {
|
|
+ uint32_t flash_id;
|
|
+ uint32_t density;
|
|
+ uint32_t widebus;
|
|
+ uint32_t pagesize;
|
|
+ uint32_t blksize;
|
|
+ uint32_t oobsize;
|
|
+ uint32_t ecc_correctability;
|
|
+} supported_flash;
|
|
+
|
|
+uint16_t flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
|
|
+{
|
|
+ int i;
|
|
+ uint16_t result;
|
|
+
|
|
+ for (i = 0; i < count; i++)
|
|
+ buffer[i] = bitrev8(buffer[i]);
|
|
+
|
|
+ result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
|
|
+
|
|
+ for (i = 0; i < count; i++)
|
|
+ buffer[i] = bitrev8(buffer[i]);
|
|
+
|
|
+ return result;
|
|
+}
|
|
+
|
|
+static void flash_reset(struct msm_nand_chip *chip)
|
|
+{
|
|
+ struct {
|
|
+ dmov_s cmd[6];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t cmd;
|
|
+ uint32_t exec;
|
|
+ uint32_t flash_status;
|
|
+ uint32_t sflash_bcfg_orig;
|
|
+ uint32_t sflash_bcfg_mod;
|
|
+ uint32_t chip_select;
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+ dma_addr_t dma_cmd;
|
|
+ dma_addr_t dma_cmdptr;
|
|
+
|
|
+ wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
|
|
+ (chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ dma_buffer->data.sflash_bcfg_orig
|
|
+ = flash_rd_reg(chip, MSM_NAND_SFLASHC_BURST_CFG);
|
|
+ dma_buffer->data.sflash_bcfg_mod = 0x00000000;
|
|
+ dma_buffer->data.chip_select = 4;
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_RESET;
|
|
+ dma_buffer->data.exec = 1;
|
|
+ dma_buffer->data.flash_status = 0xeeeeeeee;
|
|
+
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ /* Put the Nand ctlr in Async mode and disable SFlash ctlr */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sflash_bcfg_mod);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chip_select);
|
|
+ cmd->dst = MSM_NAND_FLASH_CHIP_SELECT;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on cmd ready, & write Reset command */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
|
|
+ cmd->dst = MSM_NAND_FLASH_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
|
|
+ cmd->dst = MSM_NAND_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_FLASH_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Restore the SFLASH_BURST_CONFIG register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sflash_bcfg_orig);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_cmd = msm_virt_to_dma(chip, dma_buffer->cmd);
|
|
+ dma_buffer->cmdptr = (dma_cmd >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ dma_cmdptr = msm_virt_to_dma(chip, &dma_buffer->cmdptr);
|
|
+ msm_dmov_exec_cmd(chip->dma_channel,
|
|
+ DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(dma_cmdptr));
|
|
+ mb();
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+}
|
|
+
|
|
+uint32_t flash_onfi_probe(struct msm_nand_chip *chip)
|
|
+{
|
|
+
|
|
+
|
|
+ struct onfi_param_page {
|
|
+ uint32_t parameter_page_signature;
|
|
+ uint16_t revision_number;
|
|
+ uint16_t features_supported;
|
|
+ uint16_t optional_commands_supported;
|
|
+ uint8_t reserved0[22];
|
|
+ uint8_t device_manufacturer[12];
|
|
+ uint8_t device_model[20];
|
|
+ uint8_t jedec_manufacturer_id;
|
|
+ uint16_t date_code;
|
|
+ uint8_t reserved1[13];
|
|
+ uint32_t number_of_data_bytes_per_page;
|
|
+ uint16_t number_of_spare_bytes_per_page;
|
|
+ uint32_t number_of_data_bytes_per_partial_page;
|
|
+ uint16_t number_of_spare_bytes_per_partial_page;
|
|
+ uint32_t number_of_pages_per_block;
|
|
+ uint32_t number_of_blocks_per_logical_unit;
|
|
+ uint8_t number_of_logical_units;
|
|
+ uint8_t number_of_address_cycles;
|
|
+ uint8_t number_of_bits_per_cell;
|
|
+ uint16_t maximum_bad_blocks_per_logical_unit;
|
|
+ uint16_t block_endurance;
|
|
+ uint8_t guaranteed_valid_begin_blocks;
|
|
+ uint16_t guaranteed_valid_begin_blocks_endurance;
|
|
+ uint8_t number_of_programs_per_page;
|
|
+ uint8_t partial_program_attributes;
|
|
+ uint8_t number_of_bits_ecc_correctability;
|
|
+ uint8_t number_of_interleaved_address_bits;
|
|
+ uint8_t interleaved_operation_attributes;
|
|
+ uint8_t reserved2[13];
|
|
+ uint8_t io_pin_capacitance;
|
|
+ uint16_t timing_mode_support;
|
|
+ uint16_t program_cache_timing_mode_support;
|
|
+ uint16_t maximum_page_programming_time;
|
|
+ uint16_t maximum_block_erase_time;
|
|
+ uint16_t maximum_page_read_time;
|
|
+ uint16_t maximum_change_column_setup_time;
|
|
+ uint8_t reserved3[23];
|
|
+ uint16_t vendor_specific_revision_number;
|
|
+ uint8_t vendor_specific[88];
|
|
+ uint16_t integrity_crc;
|
|
+
|
|
+ } __attribute__((__packed__));
|
|
+
|
|
+ struct onfi_param_page *onfi_param_page_ptr;
|
|
+ uint8_t *onfi_identifier_buf = NULL;
|
|
+ uint8_t *onfi_param_info_buf = NULL;
|
|
+
|
|
+ struct {
|
|
+ dmov_s cmd[12];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t cmd;
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t cfg0;
|
|
+ uint32_t cfg1;
|
|
+ uint32_t exec;
|
|
+ uint32_t flash_status;
|
|
+ uint32_t devcmd1_orig;
|
|
+ uint32_t devcmdvld_orig;
|
|
+ uint32_t devcmd1_mod;
|
|
+ uint32_t devcmdvld_mod;
|
|
+ uint32_t sflash_bcfg_orig;
|
|
+ uint32_t sflash_bcfg_mod;
|
|
+ uint32_t chip_select;
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+
|
|
+ unsigned page_address = 0;
|
|
+ int err = 0;
|
|
+ dma_addr_t dma_addr_param_info = 0;
|
|
+ dma_addr_t dma_addr_identifier = 0;
|
|
+ unsigned cmd_set_count = 2;
|
|
+ unsigned crc_chk_count = 0;
|
|
+
|
|
+ /*if (msm_nand_data.nr_parts) {
|
|
+ page_address = ((msm_nand_data.parts[0]).offset << 6);
|
|
+ } else {
|
|
+ pr_err("flash_onfi_probe: "
|
|
+ "No partition info available\n");
|
|
+ err = -EIO;
|
|
+ return err;
|
|
+ }*/
|
|
+
|
|
+ wait_event(chip->wait_queue, (onfi_identifier_buf =
|
|
+ msm_nand_get_dma_buffer(chip, ONFI_IDENTIFIER_LENGTH)));
|
|
+ dma_addr_identifier = msm_virt_to_dma(chip, onfi_identifier_buf);
|
|
+
|
|
+ wait_event(chip->wait_queue, (onfi_param_info_buf =
|
|
+ msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
|
|
+ dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);
|
|
+
|
|
+ wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
|
|
+ (chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ dma_buffer->data.sflash_bcfg_orig = flash_rd_reg
|
|
+ (chip, MSM_NAND_SFLASHC_BURST_CFG);
|
|
+ dma_buffer->data.devcmd1_orig = flash_rd_reg(chip, MSM_NAND_DEV_CMD1);
|
|
+ dma_buffer->data.devcmdvld_orig = flash_rd_reg(chip,
|
|
+ MSM_NAND_DEV_CMD_VLD);
|
|
+ dma_buffer->data.chip_select = 4;
|
|
+
|
|
+ while (cmd_set_count-- > 0) {
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ dma_buffer->data.devcmd1_mod = (dma_buffer->data.devcmd1_orig &
|
|
+ 0xFFFFFF00) | (cmd_set_count
|
|
+ ? FLASH_READ_ONFI_IDENTIFIER_COMMAND
|
|
+ : FLASH_READ_ONFI_PARAMETERS_COMMAND);
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
|
|
+ dma_buffer->data.addr0 = (page_address << 16) | (cmd_set_count
|
|
+ ? FLASH_READ_ONFI_IDENTIFIER_ADDRESS
|
|
+ : FLASH_READ_ONFI_PARAMETERS_ADDRESS);
|
|
+ dma_buffer->data.addr1 = (page_address >> 16) & 0xFF;
|
|
+ dma_buffer->data.cfg0 = (cmd_set_count
|
|
+ ? MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER
|
|
+ : MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO);
|
|
+ dma_buffer->data.cfg1 = (cmd_set_count
|
|
+ ? MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER
|
|
+ : MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO);
|
|
+ dma_buffer->data.sflash_bcfg_mod = 0x00000000;
|
|
+ dma_buffer->data.devcmdvld_mod = (dma_buffer->
|
|
+ data.devcmdvld_orig & 0xFFFFFFFE);
|
|
+ dma_buffer->data.exec = 1;
|
|
+ dma_buffer->data.flash_status = 0xeeeeeeee;
|
|
+
|
|
+ /* Put the Nand ctlr in Async mode and disable SFlash ctlr */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.sflash_bcfg_mod);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chip_select);
|
|
+ cmd->dst = MSM_NAND_FLASH_CHIP_SELECT;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on cmd ready, & write CMD,ADDR0,ADDR1,CHIPSEL regs */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
|
|
+ cmd->dst = MSM_NAND_FLASH_CMD;
|
|
+ cmd->len = 12;
|
|
+ cmd++;
|
|
+
|
|
+ /* Configure the CFG0 and CFG1 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cfg0);
|
|
+ cmd->dst = MSM_NAND_DEV0_CFG0;
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* Configure the DEV_CMD_VLD register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.devcmdvld_mod);
|
|
+ cmd->dst = MSM_NAND_DEV_CMD_VLD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Configure the DEV_CMD1 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.devcmd1_mod);
|
|
+ cmd->dst = MSM_NAND_DEV_CMD1;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.exec);
|
|
+ cmd->dst = MSM_NAND_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the two status registers */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_FLASH_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.flash_status);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read data block - valid only if status says success */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_FLASH_BUFFER;
|
|
+ cmd->dst = (cmd_set_count ? dma_addr_identifier :
|
|
+ dma_addr_param_info);
|
|
+ cmd->len = (cmd_set_count ? ONFI_IDENTIFIER_LENGTH :
|
|
+ ONFI_PARAM_INFO_LENGTH);
|
|
+ cmd++;
|
|
+
|
|
+ /* Restore the DEV_CMD1 register */
|
|
+ cmd->cmd = 0 ;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.devcmd1_orig);
|
|
+ cmd->dst = MSM_NAND_DEV_CMD1;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Restore the DEV_CMD_VLD register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.devcmdvld_orig);
|
|
+ cmd->dst = MSM_NAND_DEV_CMD_VLD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Restore the SFLASH_BURST_CONFIG register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.sflash_bcfg_orig);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ BUILD_BUG_ON(12 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
|
|
+ >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel,
|
|
+ DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
|
|
+ &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ /* Check for errors, protection violations etc */
|
|
+ if (dma_buffer->data.flash_status & 0x110) {
|
|
+ pr_info("MPU/OP error (0x%x) during "
|
|
+ "ONFI probe\n",
|
|
+ dma_buffer->data.flash_status);
|
|
+ err = -EIO;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (cmd_set_count) {
|
|
+ onfi_param_page_ptr = (struct onfi_param_page *)
|
|
+ (&(onfi_identifier_buf[0]));
|
|
+ if (onfi_param_page_ptr->parameter_page_signature !=
|
|
+ ONFI_PARAMETER_PAGE_SIGNATURE) {
|
|
+ pr_info("ONFI probe : Found a non-ONFI"
+ " compliant device\n");
|
|
+ err = -EIO;
|
|
+ break;
|
|
+ }
|
|
+ } else {
|
|
+ for (crc_chk_count = 0; crc_chk_count <
|
|
+ ONFI_PARAM_INFO_LENGTH
|
|
+ / ONFI_PARAM_PAGE_LENGTH;
|
|
+ crc_chk_count++) {
|
|
+ onfi_param_page_ptr =
|
|
+ (struct onfi_param_page *)
|
|
+ (&(onfi_param_info_buf
|
|
+ [ONFI_PARAM_PAGE_LENGTH *
|
|
+ crc_chk_count]));
|
|
+ if (flash_onfi_crc_check(
|
|
+ (uint8_t *)onfi_param_page_ptr,
|
|
+ ONFI_PARAM_PAGE_LENGTH - 2) ==
|
|
+ onfi_param_page_ptr->integrity_crc) {
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
|
|
+ / ONFI_PARAM_PAGE_LENGTH) {
|
|
+ pr_info("ONFI probe : CRC Check "
|
|
+ "failed on ONFI Parameter "
|
|
+ "data \n");
|
|
+ err = -EIO;
|
|
+ break;
|
|
+ } else {
|
|
+ supported_flash.flash_id =
|
|
+ flash_read_id(chip);
|
|
+ supported_flash.widebus =
|
|
+ onfi_param_page_ptr->
|
|
+ features_supported & 0x01;
|
|
+ supported_flash.pagesize =
|
|
+ onfi_param_page_ptr->
|
|
+ number_of_data_bytes_per_page;
|
|
+ supported_flash.blksize =
|
|
+ onfi_param_page_ptr->
|
|
+ number_of_pages_per_block *
|
|
+ supported_flash.pagesize;
|
|
+ supported_flash.oobsize =
|
|
+ onfi_param_page_ptr->
|
|
+ number_of_spare_bytes_per_page;
|
|
+ supported_flash.density =
|
|
+ onfi_param_page_ptr->
|
|
+ number_of_blocks_per_logical_unit
|
|
+ * supported_flash.blksize;
|
|
+ supported_flash.ecc_correctability =
|
|
+ onfi_param_page_ptr->
|
|
+ number_of_bits_ecc_correctability;
|
|
+
|
|
+ pr_info("ONFI probe : Found an ONFI "
|
|
+ "compliant device %s\n",
|
|
+ onfi_param_page_ptr->device_model);
|
|
+
|
|
+ /* Temporary hack for the MT29F4G08ABC device.
+ * Since the device does not properly adhere to
+ * the ONFI specification, it reports itself as
+ * a 16-bit device even though it is an 8-bit device.
+ */
|
|
+ if (!strncmp(onfi_param_page_ptr->device_model,
|
|
+ "MT29F4G08ABC", 12))
|
|
+ supported_flash.widebus = 0;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+ msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
|
|
+ ONFI_PARAM_INFO_LENGTH);
|
|
+ msm_nand_release_dma_buffer(chip, onfi_identifier_buf,
|
|
+ ONFI_IDENTIFIER_LENGTH);
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
|
|
+ struct mtd_oob_ops *ops)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+
|
|
+ struct {
|
|
+ dmov_s cmd[8 * 5 + 2];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t cmd;
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t chipsel;
|
|
+ uint32_t cfg0;
|
|
+ uint32_t cfg1;
|
|
+ uint32_t eccbchcfg;
|
|
+ uint32_t exec;
|
|
+ uint32_t ecccfg;
|
|
+ struct {
|
|
+ uint32_t flash_status;
|
|
+ uint32_t buffer_status;
|
|
+ } result[8];
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+ unsigned n;
|
|
+ unsigned page = 0;
|
|
+ uint32_t oob_len;
|
|
+ uint32_t sectordatasize;
|
|
+ uint32_t sectoroobsize;
|
|
+ int err, pageerr, rawerr;
|
|
+ dma_addr_t data_dma_addr = 0;
|
|
+ dma_addr_t oob_dma_addr = 0;
|
|
+ dma_addr_t data_dma_addr_curr = 0;
|
|
+ dma_addr_t oob_dma_addr_curr = 0;
|
|
+ uint8_t *dat_bounce_buf = NULL;
|
|
+ uint8_t *oob_bounce_buf = NULL;
|
|
+ uint32_t oob_col = 0;
|
|
+ unsigned page_count;
|
|
+ unsigned pages_read = 0;
|
|
+ unsigned start_sector = 0;
|
|
+ uint32_t ecc_errors;
|
|
+ uint32_t total_ecc_errors = 0;
|
|
+ unsigned cwperpage;
|
|
+#if VERBOSE
|
|
+ pr_info("================================================="
|
|
+ "================\n");
|
|
+ pr_info("%s:\nfrom 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
|
|
+ "\noobbuf 0x%p ooblen 0x%x\n",
|
|
+ __func__, from, ops->mode, ops->datbuf, ops->len,
|
|
+ ops->oobbuf, ops->ooblen);
|
|
+#endif
|
|
+
|
|
+ if (mtd->writesize == 2048)
|
|
+ page = from >> 11;
|
|
+
|
|
+ if (mtd->writesize == 4096)
|
|
+ page = from >> 12;
|
|
+
|
|
+ oob_len = ops->ooblen;
|
|
+ cwperpage = (mtd->writesize >> 9);
|
|
+
|
|
+ if (from & (mtd->writesize - 1)) {
|
|
+ pr_err("%s: unsupported from, 0x%llx\n",
|
|
+ __func__, from);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
|
|
+ /* when ops->datbuf is NULL, ops->len can be ooblen */
|
|
+ pr_err("%s: unsupported ops->len, %d\n",
|
|
+ __func__, ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ } else {
|
|
+ if (ops->datbuf != NULL &&
|
|
+ (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
|
|
+ pr_err("%s: unsupported ops->len,"
|
|
+ " %d for MTD_OPS_RAW\n", __func__, ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
|
|
+ pr_err("%s: unsupported ops->ooboffs, %d\n",
|
|
+ __func__, ops->ooboffs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OPS_AUTO_OOB)
|
|
+ start_sector = cwperpage - 1;
|
|
+
|
|
+ if (ops->oobbuf && !ops->datbuf) {
|
|
+ page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
|
|
+ mtd->oobavail : mtd->oobsize);
|
|
+ if ((page_count == 0) && (ops->ooblen))
|
|
+ page_count = 1;
|
|
+ } else if (ops->mode != MTD_OPS_RAW)
|
|
+ page_count = ops->len / mtd->writesize;
|
|
+ else
|
|
+ page_count = ops->len / (mtd->writesize + mtd->oobsize);
|
|
+
|
|
+ if (ops->datbuf) {
|
|
+ data_dma_addr_curr = data_dma_addr =
|
|
+ msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
|
|
+ DMA_FROM_DEVICE, &dat_bounce_buf);
|
|
+ if (dma_mapping_error(chip->dev, data_dma_addr)) {
|
|
+ pr_err("msm_nand_read_oob: failed to get dma addr "
|
|
+ "for %p\n", ops->datbuf);
|
|
+ return -EIO;
|
|
+ }
|
|
+ }
|
|
+ if (ops->oobbuf) {
|
|
+ memset(ops->oobbuf, 0xff, ops->ooblen);
|
|
+ oob_dma_addr_curr = oob_dma_addr =
|
|
+ msm_nand_dma_map(chip->dev, ops->oobbuf,
|
|
+ ops->ooblen, DMA_BIDIRECTIONAL,
|
|
+ &oob_bounce_buf);
|
|
+ if (dma_mapping_error(chip->dev, oob_dma_addr)) {
|
|
+ pr_err("msm_nand_read_oob: failed to get dma addr "
|
|
+ "for %p\n", ops->oobbuf);
|
|
+ err = -EIO;
|
|
+ goto err_dma_map_oobbuf_failed;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ wait_event(chip->wait_queue,
|
|
+ (dma_buffer = msm_nand_get_dma_buffer(
|
|
+ chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ oob_col = start_sector * chip->cw_size;
|
|
+ if (chip->CFG1 & CFG1_WIDE_FLASH)
|
|
+ oob_col >>= 1;
|
|
+
|
|
+ err = 0;
|
|
+ while (page_count-- > 0) {
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
|
|
+ dma_buffer->data.cfg0 =
|
|
+ (chip->CFG0 & ~(7U << 6))
|
|
+ | (((cwperpage-1) - start_sector) << 6);
|
|
+ dma_buffer->data.cfg1 = chip->CFG1;
|
|
+ if (enable_bch_ecc)
|
|
+ dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
|
|
+ } else {
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
|
|
+ dma_buffer->data.cfg0 = (chip->CFG0_RAW
|
|
+ & ~(7U << 6)) | ((cwperpage-1) << 6);
|
|
+ dma_buffer->data.cfg1 = chip->CFG1_RAW |
|
|
+ (chip->CFG1 & CFG1_WIDE_FLASH);
|
|
+ }
|
|
+
|
|
+ dma_buffer->data.addr0 = (page << 16) | oob_col;
|
|
+ dma_buffer->data.addr1 = (page >> 16) & 0xff;
|
|
+ /* chipsel_0 + enable DM interface */
|
|
+ dma_buffer->data.chipsel = 0 | 4;
|
|
+
|
|
+
|
|
+ /* GO bit for the EXEC register */
|
|
+ dma_buffer->data.exec = 1;
|
|
+
|
|
+
|
|
+ BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.result));
|
|
+
|
|
+ for (n = start_sector; n < cwperpage; n++) {
|
|
+ /* flash + buffer status return words */
|
|
+ dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
|
|
+ dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
|
|
+
|
|
+ /* block on cmd ready, then
|
|
+ * write CMD / ADDR0 / ADDR1 / CHIPSEL
|
|
+ * regs in a burst
|
|
+ */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
|
|
+ cmd->dst = MSM_NAND_FLASH_CMD;
|
|
+ if (n == start_sector)
|
|
+ cmd->len = 16;
|
|
+ else
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ if (n == start_sector) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cfg0);
|
|
+ cmd->dst = MSM_NAND_DEV0_CFG0;
|
|
+ if (enable_bch_ecc)
|
|
+ cmd->len = 12;
|
|
+ else
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.ecccfg);
|
|
+ cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ }
|
|
+
|
|
+ /* kick the execute register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src =
|
|
+ msm_virt_to_dma(chip, &dma_buffer->data.exec);
|
|
+ cmd->dst = MSM_NAND_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* block on data ready, then
|
|
+ * read the status register
|
|
+ */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_FLASH_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.result[n]);
|
|
+ /* MSM_NAND_FLASH_STATUS + MSM_NAND_BUFFER_STATUS */
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* read data block
|
|
+ * (only valid if status says success)
|
|
+ */
|
|
+ if (ops->datbuf) {
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ if (!boot_layout)
|
|
+ sectordatasize = (n < (cwperpage - 1))
|
|
+ ? 516 : (512 - ((cwperpage - 1) << 2));
|
|
+ else
|
|
+ sectordatasize = 512;
|
|
+ } else {
|
|
+ sectordatasize = chip->cw_size;
|
|
+ }
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_FLASH_BUFFER;
|
|
+ cmd->dst = data_dma_addr_curr;
|
|
+ data_dma_addr_curr += sectordatasize;
|
|
+ cmd->len = sectordatasize;
|
|
+ cmd++;
|
|
+ }
|
|
+
|
|
+ if (ops->oobbuf && (n == (cwperpage - 1)
|
|
+ || ops->mode != MTD_OPS_AUTO_OOB)) {
|
|
+ cmd->cmd = 0;
|
|
+ if (n == (cwperpage - 1)) {
|
|
+ cmd->src = MSM_NAND_FLASH_BUFFER +
|
|
+ (512 - ((cwperpage - 1) << 2));
|
|
+ sectoroobsize = (cwperpage << 2);
|
|
+ if (ops->mode != MTD_OPS_AUTO_OOB)
|
|
+ sectoroobsize +=
|
|
+ chip->ecc_parity_bytes;
|
|
+ } else {
|
|
+ cmd->src = MSM_NAND_FLASH_BUFFER + 516;
|
|
+ sectoroobsize = chip->ecc_parity_bytes;
|
|
+ }
|
|
+
|
|
+ cmd->dst = oob_dma_addr_curr;
|
|
+ if (sectoroobsize < oob_len)
|
|
+ cmd->len = sectoroobsize;
|
|
+ else
|
|
+ cmd->len = oob_len;
|
|
+ oob_dma_addr_curr += cmd->len;
|
|
+ oob_len -= cmd->len;
|
|
+ if (cmd->len > 0)
|
|
+ cmd++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ BUILD_BUG_ON(8 * 5 + 2 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_buffer->cmdptr =
|
|
+ (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
|
|
+ | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel,
|
|
+ DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
|
|
+ &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ /* if any of the writes failed (0x10), or there
|
|
+ * was a protection violation (0x100), we lose
|
|
+ */
|
|
+ pageerr = rawerr = 0;
|
|
+ for (n = start_sector; n < cwperpage; n++) {
|
|
+ if (dma_buffer->data.result[n].flash_status & 0x110) {
|
|
+ rawerr = -EIO;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (rawerr) {
|
|
+ if (ops->datbuf && ops->mode != MTD_OPS_RAW) {
|
|
+ uint8_t *datbuf = ops->datbuf +
|
|
+ pages_read * mtd->writesize;
|
|
+
|
|
+ dma_sync_single_for_cpu(chip->dev,
|
|
+ data_dma_addr_curr-mtd->writesize,
|
|
+ mtd->writesize, DMA_BIDIRECTIONAL);
|
|
+
|
|
+ for (n = 0; n < mtd->writesize; n++) {
|
|
+ /* empty blocks read 0x54 at
|
|
+ * these offsets
|
|
+ */
|
|
+ if ((n % 516 == 3 || n % 516 == 175)
|
|
+ && datbuf[n] == 0x54)
|
|
+ datbuf[n] = 0xff;
|
|
+ if (datbuf[n] != 0xff) {
|
|
+ pageerr = rawerr;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ dma_sync_single_for_device(chip->dev,
|
|
+ data_dma_addr_curr-mtd->writesize,
|
|
+ mtd->writesize, DMA_BIDIRECTIONAL);
|
|
+
|
|
+ }
|
|
+ if (ops->oobbuf) {
|
|
+ dma_sync_single_for_cpu(chip->dev,
|
|
+ oob_dma_addr_curr - (ops->ooblen - oob_len),
|
|
+ ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
|
|
+
|
|
+ for (n = 0; n < ops->ooblen; n++) {
|
|
+ if (ops->oobbuf[n] != 0xff) {
|
|
+ pageerr = rawerr;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ dma_sync_single_for_device(chip->dev,
|
|
+ oob_dma_addr_curr - (ops->ooblen - oob_len),
|
|
+ ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
|
|
+ }
|
|
+ }
|
|
+ if (pageerr) {
|
|
+ for (n = start_sector; n < cwperpage; n++) {
|
|
+ if (dma_buffer->data.result[n].buffer_status &
|
|
+ chip->uncorrectable_bit_mask) {
|
|
+ /* not thread safe */
|
|
+ mtd->ecc_stats.failed++;
|
|
+ pageerr = -EBADMSG;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (!rawerr) { /* check for correctable errors */
|
|
+ for (n = start_sector; n < cwperpage; n++) {
|
|
+ ecc_errors =
|
|
+ (dma_buffer->data.result[n].buffer_status
|
|
+ & chip->num_err_mask);
|
|
+ if (ecc_errors) {
|
|
+ total_ecc_errors += ecc_errors;
|
|
+ /* not thread safe */
|
|
+ mtd->ecc_stats.corrected += ecc_errors;
|
|
+ if (ecc_errors > 1)
|
|
+ pageerr = -EUCLEAN;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (pageerr && (pageerr != -EUCLEAN || err == 0))
|
|
+ err = pageerr;
|
|
+
|
|
+#if VERBOSE
|
|
+ if (rawerr && !pageerr) {
|
|
+ pr_err("msm_nand_read_oob %llx %x %x empty page\n",
|
|
+ (loff_t)page * mtd->writesize, ops->len,
|
|
+ ops->ooblen);
|
|
+ } else {
|
|
+ for (n = start_sector; n < cwperpage; n++)
|
|
+ pr_info("flash_status[%d] = %x, "
+ "buffer_status[%d] = %x\n",
|
|
+ n, dma_buffer->data.result[n].flash_status,
|
|
+ n, dma_buffer->data.result[n].buffer_status);
|
|
+ }
|
|
+#endif
|
|
+ if (err && err != -EUCLEAN && err != -EBADMSG)
|
|
+ break;
|
|
+ pages_read++;
|
|
+ page++;
|
|
+ }
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+
|
|
+ if (ops->oobbuf) {
|
|
+ msm_nand_dma_unmap(chip->dev, oob_dma_addr,
|
|
+ ops->ooblen, DMA_FROM_DEVICE,
|
|
+ ops->oobbuf, oob_bounce_buf);
|
|
+ }
|
|
+err_dma_map_oobbuf_failed:
|
|
+ if (ops->datbuf) {
|
|
+ msm_nand_dma_unmap(chip->dev, data_dma_addr,
|
|
+ ops->len, DMA_BIDIRECTIONAL,
|
|
+ ops->datbuf, dat_bounce_buf);
|
|
+ }
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW)
|
|
+ ops->retlen = mtd->writesize * pages_read;
|
|
+ else
|
|
+ ops->retlen = (mtd->writesize + mtd->oobsize) *
|
|
+ pages_read;
|
|
+ ops->oobretlen = ops->ooblen - oob_len;
|
|
+ if (err)
|
|
+ pr_err("msm_nand_read_oob %llx %x %x failed %d, corrected %d\n",
|
|
+ from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
|
|
+ total_ecc_errors);
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
|
|
+ __func__, err, ops->retlen, ops->oobretlen);
|
|
+
|
|
+ pr_info("==================================================="
|
|
+ "==============\n");
|
|
+#endif
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int msm_nand_read_oob_dualnandc(struct mtd_info *mtd, loff_t from,
|
|
+ struct mtd_oob_ops *ops)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+
|
|
+ struct {
|
|
+ dmov_s cmd[16 * 6 + 20];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t cmd;
|
|
+ uint32_t nandc01_addr0;
|
|
+ uint32_t nandc10_addr0;
|
|
+ uint32_t nandc11_addr1;
|
|
+ uint32_t chipsel_cs0;
|
|
+ uint32_t chipsel_cs1;
|
|
+ uint32_t cfg0;
|
|
+ uint32_t cfg1;
|
|
+ uint32_t eccbchcfg;
|
|
+ uint32_t exec;
|
|
+ uint32_t ecccfg;
|
|
+ uint32_t ebi2_chip_select_cfg0;
|
|
+ uint32_t adm_mux_data_ack_req_nc01;
|
|
+ uint32_t adm_mux_cmd_ack_req_nc01;
|
|
+ uint32_t adm_mux_data_ack_req_nc10;
|
|
+ uint32_t adm_mux_cmd_ack_req_nc10;
|
|
+ uint32_t adm_default_mux;
|
|
+ uint32_t default_ebi2_chip_select_cfg0;
|
|
+ uint32_t nc10_flash_dev_cmd_vld;
|
|
+ uint32_t nc10_flash_dev_cmd1;
|
|
+ uint32_t nc10_flash_dev_cmd_vld_default;
|
|
+ uint32_t nc10_flash_dev_cmd1_default;
|
|
+ struct {
|
|
+ uint32_t flash_status;
|
|
+ uint32_t buffer_status;
|
|
+ } result[16];
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+ unsigned n;
|
|
+ unsigned page = 0;
|
|
+ uint32_t oob_len;
|
|
+ uint32_t sectordatasize;
|
|
+ uint32_t sectoroobsize;
|
|
+ int err, pageerr, rawerr;
|
|
+ dma_addr_t data_dma_addr = 0;
|
|
+ dma_addr_t oob_dma_addr = 0;
|
|
+ dma_addr_t data_dma_addr_curr = 0;
|
|
+ dma_addr_t oob_dma_addr_curr = 0;
|
|
+ uint32_t oob_col = 0;
|
|
+ unsigned page_count;
|
|
+ unsigned pages_read = 0;
|
|
+ unsigned start_sector = 0;
|
|
+ uint32_t ecc_errors;
|
|
+ uint32_t total_ecc_errors = 0;
|
|
+ unsigned cwperpage;
|
|
+ unsigned cw_offset = chip->cw_size;
|
|
+#if VERBOSE
|
|
+ pr_info("================================================="
|
|
+ "============\n");
|
|
+ pr_info("%s:\nfrom 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
|
|
+ "\noobbuf 0x%p ooblen 0x%x\n\n",
|
|
+ __func__, from, ops->mode, ops->datbuf,
|
|
+ ops->len, ops->oobbuf, ops->ooblen);
|
|
+#endif
|
|
+
|
|
+ if (mtd->writesize == 2048)
|
|
+ page = from >> 11;
|
|
+
|
|
+ if (mtd->writesize == 4096)
|
|
+ page = from >> 12;
|
|
+
|
|
+ if (interleave_enable)
|
|
+ page = (from >> 1) >> 12;
|
|
+
|
|
+ oob_len = ops->ooblen;
|
|
+ cwperpage = (mtd->writesize >> 9);
|
|
+
|
|
+ if (from & (mtd->writesize - 1)) {
|
|
+ pr_err("%s: unsupported from, 0x%llx\n",
|
|
+ __func__, from);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
|
|
+ pr_err("%s: unsupported ops->len, %d\n",
|
|
+ __func__, ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ } else {
|
|
+ if (ops->datbuf != NULL &&
|
|
+ (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
|
|
+ pr_err("%s: unsupported ops->len,"
|
|
+ " %d for MTD_OPS_RAW\n", __func__, ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
|
|
+ pr_err("%s: unsupported ops->ooboffs, %d\n",
|
|
+ __func__, ops->ooboffs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OPS_AUTO_OOB)
|
|
+ start_sector = cwperpage - 1;
|
|
+
|
|
+ if (ops->oobbuf && !ops->datbuf) {
|
|
+ page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
|
|
+ mtd->oobavail : mtd->oobsize);
|
|
+ if ((page_count == 0) && (ops->ooblen))
|
|
+ page_count = 1;
|
|
+ } else if (ops->mode != MTD_OPS_RAW)
|
|
+ page_count = ops->len / mtd->writesize;
|
|
+ else
|
|
+ page_count = ops->len / (mtd->writesize + mtd->oobsize);
|
|
+
|
|
+ if (ops->datbuf) {
|
|
+ data_dma_addr_curr = data_dma_addr =
|
|
+ msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
|
|
+ DMA_FROM_DEVICE, NULL);
|
|
+ if (dma_mapping_error(chip->dev, data_dma_addr)) {
|
|
+ pr_err("msm_nand_read_oob_dualnandc: "
|
|
+ "failed to get dma addr for %p\n",
|
|
+ ops->datbuf);
|
|
+ return -EIO;
|
|
+ }
|
|
+ }
|
|
+ if (ops->oobbuf) {
|
|
+ memset(ops->oobbuf, 0xff, ops->ooblen);
|
|
+ oob_dma_addr_curr = oob_dma_addr =
|
|
+ msm_nand_dma_map(chip->dev, ops->oobbuf,
|
|
+ ops->ooblen, DMA_BIDIRECTIONAL, NULL);
|
|
+ if (dma_mapping_error(chip->dev, oob_dma_addr)) {
|
|
+ pr_err("msm_nand_read_oob_dualnandc: "
|
|
+ "failed to get dma addr for %p\n",
|
|
+ ops->oobbuf);
|
|
+ err = -EIO;
|
|
+ goto err_dma_map_oobbuf_failed;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ wait_event(chip->wait_queue,
|
|
+ (dma_buffer = msm_nand_get_dma_buffer(
|
|
+ chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ oob_col = start_sector * chip->cw_size;
|
|
+ if (chip->CFG1 & CFG1_WIDE_FLASH) {
|
|
+ oob_col >>= 1;
|
|
+ cw_offset >>= 1;
|
|
+ }
|
|
+
|
|
+ err = 0;
|
|
+ while (page_count-- > 0) {
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
|
|
+ if (start_sector == (cwperpage - 1)) {
|
|
+ dma_buffer->data.cfg0 = (chip->CFG0 &
|
|
+ ~(7U << 6));
|
|
+ } else {
|
|
+ dma_buffer->data.cfg0 = (chip->CFG0 &
|
|
+ ~(7U << 6))
|
|
+ | (((cwperpage >> 1)-1) << 6);
|
|
+ }
|
|
+ dma_buffer->data.cfg1 = chip->CFG1;
|
|
+ if (enable_bch_ecc)
|
|
+ dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
|
|
+ } else {
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
|
|
+ dma_buffer->data.cfg0 = ((chip->CFG0_RAW &
|
|
+ ~(7U << 6)) | ((((cwperpage >> 1)-1) << 6)));
|
|
+ dma_buffer->data.cfg1 = chip->CFG1_RAW |
|
|
+ (chip->CFG1 & CFG1_WIDE_FLASH);
|
|
+ }
|
|
+
|
|
+ if (!interleave_enable) {
|
|
+ if (start_sector == (cwperpage - 1)) {
|
|
+ dma_buffer->data.nandc10_addr0 =
|
|
+ (page << 16) | oob_col;
|
|
+ dma_buffer->data.nc10_flash_dev_cmd_vld = 0xD;
|
|
+ dma_buffer->data.nc10_flash_dev_cmd1 =
|
|
+ 0xF00F3000;
|
|
+ } else {
|
|
+ dma_buffer->data.nandc01_addr0 = page << 16;
|
|
+ /* NC10 ADDR0 points to the next code word */
|
|
+ dma_buffer->data.nandc10_addr0 = (page << 16) |
|
|
+ cw_offset;
|
|
+ dma_buffer->data.nc10_flash_dev_cmd_vld = 0x1D;
|
|
+ dma_buffer->data.nc10_flash_dev_cmd1 =
|
|
+ 0xF00FE005;
|
|
+ }
|
|
+ } else {
|
|
+ dma_buffer->data.nandc01_addr0 =
|
|
+ dma_buffer->data.nandc10_addr0 =
|
|
+ (page << 16) | oob_col;
|
|
+ }
|
|
+ /* ADDR1 */
|
|
+ dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
|
|
+
|
|
+ dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
|
|
+ dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
|
|
+ dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
|
|
+ dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
|
|
+ dma_buffer->data.adm_default_mux = 0x00000FC0;
|
|
+ dma_buffer->data.nc10_flash_dev_cmd_vld_default = 0x1D;
|
|
+ dma_buffer->data.nc10_flash_dev_cmd1_default = 0xF00F3000;
|
|
+
|
|
+ dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
|
|
+ dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
|
|
+
|
|
+ /* chipsel_0 + enable DM interface */
|
|
+ dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
|
|
+ /* chipsel_1 + enable DM interface */
|
|
+ dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
|
|
+
|
|
+ /* GO bit for the EXEC register */
|
|
+ dma_buffer->data.exec = 1;
|
|
+
|
|
+ BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.result));
|
|
+
|
|
+ for (n = start_sector; n < cwperpage; n++) {
|
|
+ /* flash + buffer status return words */
|
|
+ dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
|
|
+ dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
|
|
+
|
|
+ if (n == start_sector) {
|
|
+ if (!interleave_enable) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.nc10_flash_dev_cmd_vld);
|
|
+ cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nc10_flash_dev_cmd1);
|
|
+ cmd->dst = NC10(MSM_NAND_DEV_CMD1);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* NC01, NC10 --> ADDR1 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nandc11_addr1);
|
|
+ cmd->dst = NC11(MSM_NAND_ADDR1);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cfg0);
|
|
+ cmd->dst = NC11(MSM_NAND_DEV0_CFG0);
|
|
+ if (enable_bch_ecc)
|
|
+ cmd->len = 12;
|
|
+ else
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+ } else {
|
|
+ /* enable CS0 & CS1 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.ebi2_chip_select_cfg0);
|
|
+ cmd->dst = EBI2_CHIP_SELECT_CFG0;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* NC01, NC10 --> ADDR1 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nandc11_addr1);
|
|
+ cmd->dst = NC11(MSM_NAND_ADDR1);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Enable CS0 for NC01 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.chipsel_cs0);
|
|
+ cmd->dst =
|
|
+ NC01(MSM_NAND_FLASH_CHIP_SELECT);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Enable CS1 for NC10 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.chipsel_cs1);
|
|
+ cmd->dst =
|
|
+ NC10(MSM_NAND_FLASH_CHIP_SELECT);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* config DEV0_CFG0 & CFG1 for CS0 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cfg0);
|
|
+ cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* config DEV1_CFG0 & CFG1 for CS1 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cfg0);
|
|
+ cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+ }
|
|
+
|
|
+ dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.ecccfg);
|
|
+ cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* if 'only' the last code word */
|
|
+ if (n == cwperpage - 1) {
|
|
+ /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.adm_mux_cmd_ack_req_nc01);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* CMD */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cmd);
|
|
+ cmd->dst = NC10(MSM_NAND_FLASH_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* NC10 --> ADDR0 ( 0x0 ) */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nandc10_addr0);
|
|
+ cmd->dst = NC10(MSM_NAND_ADDR0);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* kick the execute reg for NC10 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.exec);
|
|
+ cmd->dst = NC10(MSM_NAND_EXEC_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.adm_mux_data_ack_req_nc01);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* block on data ready from NC10, then
|
|
+ * read the status register
|
|
+ */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = NC10(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.result[n]);
|
|
+ /* MSM_NAND_FLASH_STATUS +
|
|
+ * MSM_NAND_BUFFER_STATUS
|
|
+ */
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+ } else {
|
|
+ /* NC01 --> ADDR0 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nandc01_addr0);
|
|
+ cmd->dst = NC01(MSM_NAND_ADDR0);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* NC10 --> ADDR0 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nandc10_addr0);
|
|
+ cmd->dst = NC10(MSM_NAND_ADDR0);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* MASK CMD ACK/REQ --> NC10 (0xF14)*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.adm_mux_cmd_ack_req_nc10);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* CMD */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cmd);
|
|
+ cmd->dst = NC01(MSM_NAND_FLASH_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* kick the execute register for NC01*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.exec);
|
|
+ cmd->dst = NC01(MSM_NAND_EXEC_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* read data block
|
|
+ * (only valid if status says success)
|
|
+ */
|
|
+ if (ops->datbuf || (ops->oobbuf &&
|
|
+ ops->mode != MTD_OPS_AUTO_OOB)) {
|
|
+ if (ops->mode != MTD_OPS_RAW)
|
|
+ sectordatasize = (n < (cwperpage - 1))
|
|
+ ? 516 : (512 - ((cwperpage - 1) << 2));
|
|
+ else
|
|
+ sectordatasize = chip->cw_size;
|
|
+
|
|
+ if (n % 2 == 0) {
|
|
+ /* MASK DATA ACK/REQ --> NC10 (0xF28)*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.adm_mux_data_ack_req_nc10);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* block on data ready from NC01, then
|
|
+ * read the status register
|
|
+ */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = NC01(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.result[n]);
|
|
+ /* MSM_NAND_FLASH_STATUS +
|
|
+ * MSM_NAND_BUFFER_STATUS
|
|
+ */
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.adm_mux_cmd_ack_req_nc01);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* CMD */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cmd);
|
|
+ cmd->dst = NC10(MSM_NAND_FLASH_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* kick the execute register for NC10 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.exec);
|
|
+ cmd->dst = NC10(MSM_NAND_EXEC_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read only when there is data
|
|
+ * buffer
|
|
+ */
|
|
+ if (ops->datbuf) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src =
|
|
+ NC01(MSM_NAND_FLASH_BUFFER);
|
|
+ cmd->dst = data_dma_addr_curr;
|
|
+ data_dma_addr_curr +=
|
|
+ sectordatasize;
|
|
+ cmd->len = sectordatasize;
|
|
+ cmd++;
|
|
+ }
|
|
+ } else {
|
|
+ /* MASK DATA ACK/REQ -->
|
|
+ * NC01 (0xA3C)
|
|
+ */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.adm_mux_data_ack_req_nc01);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* block on data ready from NC10
|
|
+ * then read the status register
|
|
+ */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src =
|
|
+ NC10(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.result[n]);
|
|
+ /* MSM_NAND_FLASH_STATUS +
|
|
+ * MSM_NAND_BUFFER_STATUS
|
|
+ */
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+ if (n != cwperpage - 1) {
|
|
+ /* MASK CMD ACK/REQ -->
|
|
+ * NC10 (0xF14)
|
|
+ */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src =
|
|
+ msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.adm_mux_cmd_ack_req_nc10);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* CMD */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cmd);
|
|
+ cmd->dst =
|
|
+ NC01(MSM_NAND_FLASH_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* EXEC */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.exec);
|
|
+ cmd->dst =
|
|
+ NC01(MSM_NAND_EXEC_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ }
|
|
+
|
|
+ /* Read only when there is data
|
|
+ * buffer
|
|
+ */
|
|
+ if (ops->datbuf) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src =
|
|
+ NC10(MSM_NAND_FLASH_BUFFER);
|
|
+ cmd->dst = data_dma_addr_curr;
|
|
+ data_dma_addr_curr +=
|
|
+ sectordatasize;
|
|
+ cmd->len = sectordatasize;
|
|
+ cmd++;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ops->oobbuf && (n == (cwperpage - 1)
|
|
+ || ops->mode != MTD_OPS_AUTO_OOB)) {
|
|
+ cmd->cmd = 0;
|
|
+ if (n == (cwperpage - 1)) {
|
|
+ /* Use NC10 for reading the
|
|
+ * last codeword.
|
|
+ */
|
|
+ cmd->src = NC10(MSM_NAND_FLASH_BUFFER) +
|
|
+ (512 - ((cwperpage - 1) << 2));
|
|
+ sectoroobsize = (cwperpage << 2);
|
|
+ if (ops->mode != MTD_OPS_AUTO_OOB)
|
|
+ sectoroobsize +=
|
|
+ chip->ecc_parity_bytes;
|
|
+ } else {
|
|
+ if (n % 2 == 0)
|
|
+ cmd->src =
|
|
+ NC01(MSM_NAND_FLASH_BUFFER)
|
|
+ + 516;
|
|
+ else
|
|
+ cmd->src =
|
|
+ NC10(MSM_NAND_FLASH_BUFFER)
|
|
+ + 516;
|
|
+ sectoroobsize = chip->ecc_parity_bytes;
|
|
+ }
|
|
+ cmd->dst = oob_dma_addr_curr;
|
|
+ if (sectoroobsize < oob_len)
|
|
+ cmd->len = sectoroobsize;
|
|
+ else
|
|
+ cmd->len = oob_len;
|
|
+ oob_dma_addr_curr += cmd->len;
|
|
+ oob_len -= cmd->len;
|
|
+ if (cmd->len > 0)
|
|
+ cmd++;
|
|
+ }
|
|
+ }
|
|
+ /* ADM --> Default mux state (0xFC0) */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_default_mux);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ if (!interleave_enable) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nc10_flash_dev_cmd_vld_default);
|
|
+ cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nc10_flash_dev_cmd1_default);
|
|
+ cmd->dst = NC10(MSM_NAND_DEV_CMD1);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ } else {
|
|
+ /* disable CS1 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.default_ebi2_chip_select_cfg0);
|
|
+ cmd->dst = EBI2_CHIP_SELECT_CFG0;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ }
|
|
+
|
|
+ BUILD_BUG_ON(16 * 6 + 20 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_buffer->cmdptr =
|
|
+ (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
|
|
+ | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel,
|
|
+ DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
|
|
+ &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ /* if any of the writes failed (0x10), or there
|
|
+ * was a protection violation (0x100), we lose
|
|
+ */
|
|
+ pageerr = rawerr = 0;
|
|
+ for (n = start_sector; n < cwperpage; n++) {
|
|
+ if (dma_buffer->data.result[n].flash_status & 0x110) {
|
|
+ rawerr = -EIO;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (rawerr) {
|
|
+ if (ops->datbuf && ops->mode != MTD_OPS_RAW) {
|
|
+ uint8_t *datbuf = ops->datbuf +
|
|
+ pages_read * mtd->writesize;
|
|
+
|
|
+ dma_sync_single_for_cpu(chip->dev,
|
|
+ data_dma_addr_curr-mtd->writesize,
|
|
+ mtd->writesize, DMA_BIDIRECTIONAL);
|
|
+
|
|
+ for (n = 0; n < mtd->writesize; n++) {
|
|
+ /* empty blocks read 0x54 at
|
|
+ * these offsets
|
|
+ */
|
|
+ if ((n % 516 == 3 || n % 516 == 175)
|
|
+ && datbuf[n] == 0x54)
|
|
+ datbuf[n] = 0xff;
|
|
+ if (datbuf[n] != 0xff) {
|
|
+ pageerr = rawerr;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ dma_sync_single_for_device(chip->dev,
|
|
+ data_dma_addr_curr-mtd->writesize,
|
|
+ mtd->writesize, DMA_BIDIRECTIONAL);
|
|
+
|
|
+ }
|
|
+ if (ops->oobbuf) {
|
|
+ dma_sync_single_for_cpu(chip->dev,
|
|
+ oob_dma_addr_curr - (ops->ooblen - oob_len),
|
|
+ ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
|
|
+
|
|
+ for (n = 0; n < ops->ooblen; n++) {
|
|
+ if (ops->oobbuf[n] != 0xff) {
|
|
+ pageerr = rawerr;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ dma_sync_single_for_device(chip->dev,
|
|
+ oob_dma_addr_curr - (ops->ooblen - oob_len),
|
|
+ ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
|
|
+ }
|
|
+ }
|
|
+ if (pageerr) {
|
|
+ for (n = start_sector; n < cwperpage; n++) {
|
|
+ if (dma_buffer->data.result[n].buffer_status
|
|
+ & chip->uncorrectable_bit_mask) {
|
|
+ /* not thread safe */
|
|
+ mtd->ecc_stats.failed++;
|
|
+ pageerr = -EBADMSG;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (!rawerr) { /* check for correctable errors */
|
|
+ for (n = start_sector; n < cwperpage; n++) {
|
|
+ ecc_errors = dma_buffer->data.
|
|
+ result[n].buffer_status
|
|
+ & chip->num_err_mask;
|
|
+ if (ecc_errors) {
|
|
+ total_ecc_errors += ecc_errors;
|
|
+ /* not thread safe */
|
|
+ mtd->ecc_stats.corrected += ecc_errors;
|
|
+ if (ecc_errors > 1)
|
|
+ pageerr = -EUCLEAN;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (pageerr && (pageerr != -EUCLEAN || err == 0))
|
|
+ err = pageerr;
|
|
+
|
|
+#if VERBOSE
|
|
+ if (rawerr && !pageerr) {
|
|
+ pr_err("msm_nand_read_oob_dualnandc "
|
|
+ "%llx %x %x empty page\n",
|
|
+ (loff_t)page * mtd->writesize, ops->len,
|
|
+ ops->ooblen);
|
|
+ } else {
|
|
+ for (n = start_sector; n < cwperpage; n++) {
|
|
+ if (n%2) {
|
|
+ pr_info("NC10: flash_status[%d] = %x, "
+ "buffer_status[%d] = %x\n",
|
|
+ n, dma_buffer->
|
|
+ data.result[n].flash_status,
|
|
+ n, dma_buffer->
|
|
+ data.result[n].buffer_status);
|
|
+ } else {
|
|
+ pr_info("NC01: flash_status[%d] = %x, "
+ "buffer_status[%d] = %x\n",
|
|
+ n, dma_buffer->
|
|
+ data.result[n].flash_status,
|
|
+ n, dma_buffer->
|
|
+ data.result[n].buffer_status);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+ if (err && err != -EUCLEAN && err != -EBADMSG)
|
|
+ break;
|
|
+ pages_read++;
|
|
+ page++;
|
|
+ }
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+
|
|
+ if (ops->oobbuf) {
|
|
+ dma_unmap_page(chip->dev, oob_dma_addr,
|
|
+ ops->ooblen, DMA_FROM_DEVICE);
|
|
+ }
|
|
+err_dma_map_oobbuf_failed:
|
|
+ if (ops->datbuf) {
|
|
+ dma_unmap_page(chip->dev, data_dma_addr,
|
|
+ ops->len, DMA_BIDIRECTIONAL);
|
|
+ }
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW)
|
|
+ ops->retlen = mtd->writesize * pages_read;
|
|
+ else
|
|
+ ops->retlen = (mtd->writesize + mtd->oobsize) *
|
|
+ pages_read;
|
|
+ ops->oobretlen = ops->ooblen - oob_len;
|
|
+ if (err)
|
|
+ pr_err("msm_nand_read_oob_dualnandc "
|
|
+ "%llx %x %x failed %d, corrected %d\n",
|
|
+ from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
|
|
+ total_ecc_errors);
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
|
|
+ __func__, err, ops->retlen, ops->oobretlen);
|
|
+
|
|
+ pr_info("==================================================="
|
|
+ "==========\n");
|
|
+#endif
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int
|
|
+msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
|
|
+ size_t *retlen, u_char *buf)
|
|
+{
|
|
+ int ret;
|
|
+ struct mtd_ecc_stats stats;
|
|
+ struct mtd_oob_ops ops;
|
|
+ int (*read_oob)(struct mtd_info *, loff_t, struct mtd_oob_ops *);
|
|
+
|
|
+ if (!dual_nand_ctlr_present)
|
|
+ read_oob = msm_nand_read_oob;
|
|
+ else
|
|
+ read_oob = msm_nand_read_oob_dualnandc;
|
|
+
|
|
+ ops.mode = MTD_OPS_PLACE_OOB;
|
|
+ ops.retlen = 0;
|
|
+ ops.ooblen = 0;
|
|
+ ops.oobbuf = NULL;
|
|
+ ret = 0;
|
|
+ *retlen = 0;
|
|
+ stats = mtd->ecc_stats;
|
|
+
|
|
+ if ((from & (mtd->writesize - 1)) == 0 && len == mtd->writesize) {
|
|
+ /* reading a page on page boundary */
|
|
+ ops.len = len;
|
|
+ ops.datbuf = buf;
|
|
+ ret = read_oob(mtd, from, &ops);
|
|
+ *retlen = ops.retlen;
|
|
+ } else if (len > 0) {
|
|
+ /* reading any size on any offset. partial page is supported */
|
|
+ u8 *bounce_buf;
|
|
+ loff_t aligned_from;
|
|
+ loff_t offset;
|
|
+ size_t actual_len;
|
|
+
|
|
+ bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
|
|
+ if (!bounce_buf) {
|
|
+ pr_err("%s: could not allocate memory\n", __func__);
|
|
+ ret = -ENOMEM;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ ops.len = mtd->writesize;
|
|
+ offset = from & (mtd->writesize - 1);
|
|
+ aligned_from = from - offset;
|
|
+
|
|
+ for (;;) {
|
|
+ int no_copy;
|
|
+
|
|
+ actual_len = mtd->writesize - offset;
|
|
+ if (actual_len > len)
|
|
+ actual_len = len;
|
|
+
|
|
+ no_copy = (offset == 0 && actual_len == mtd->writesize);
|
|
+ ops.datbuf = (no_copy) ? buf : bounce_buf;
|
|
+
|
|
+ /*
|
|
+ * MTD API requires that all the pages are to
|
|
+ * be read even if uncorrectable or
|
|
+ * correctable ECC errors occur.
|
|
+ */
|
|
+ ret = read_oob(mtd, aligned_from, &ops);
|
|
+ if (ret == -EBADMSG || ret == -EUCLEAN)
|
|
+ ret = 0;
|
|
+
|
|
+ if (ret < 0)
|
|
+ break;
|
|
+
|
|
+ if (!no_copy)
|
|
+ memcpy(buf, bounce_buf + offset, actual_len);
|
|
+
|
|
+ len -= actual_len;
|
|
+ *retlen += actual_len;
|
|
+ if (len == 0)
|
|
+ break;
|
|
+
|
|
+ buf += actual_len;
|
|
+ offset = 0;
|
|
+ aligned_from += mtd->writesize;
|
|
+ }
|
|
+
|
|
+ kfree(bounce_buf);
|
|
+ }
|
|
+
|
|
+out:
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (mtd->ecc_stats.failed - stats.failed)
|
|
+ return -EBADMSG;
|
|
+
|
|
+ if (mtd->ecc_stats.corrected - stats.corrected)
|
|
+ return -EUCLEAN;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int
|
|
+msm_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+ struct {
|
|
+ dmov_s cmd[8 * 7 + 2];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t cmd;
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t chipsel;
|
|
+ uint32_t cfg0;
|
|
+ uint32_t cfg1;
|
|
+ uint32_t eccbchcfg;
|
|
+ uint32_t exec;
|
|
+ uint32_t ecccfg;
|
|
+ uint32_t clrfstatus;
|
|
+ uint32_t clrrstatus;
|
|
+ uint32_t flash_status[8];
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+ unsigned n;
|
|
+ unsigned page = 0;
|
|
+ uint32_t oob_len;
|
|
+ uint32_t sectordatawritesize;
|
|
+ int err = 0;
|
|
+ dma_addr_t data_dma_addr = 0;
|
|
+ dma_addr_t oob_dma_addr = 0;
|
|
+ dma_addr_t data_dma_addr_curr = 0;
|
|
+ dma_addr_t oob_dma_addr_curr = 0;
|
|
+ uint8_t *dat_bounce_buf = NULL;
|
|
+ uint8_t *oob_bounce_buf = NULL;
|
|
+ unsigned page_count;
|
|
+ unsigned pages_written = 0;
|
|
+ unsigned cwperpage;
|
|
+#if VERBOSE
|
|
+ pr_info("================================================="
|
|
+ "================\n");
|
|
+ pr_info("%s:\nto 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
|
|
+ "\noobbuf 0x%p ooblen 0x%x\n",
|
|
+ __func__, to, ops->mode, ops->datbuf, ops->len,
|
|
+ ops->oobbuf, ops->ooblen);
|
|
+#endif
|
|
+
|
|
+ if (mtd->writesize == 2048)
|
|
+ page = to >> 11;
|
|
+
|
|
+ if (mtd->writesize == 4096)
|
|
+ page = to >> 12;
|
|
+
|
|
+ oob_len = ops->ooblen;
|
|
+ cwperpage = (mtd->writesize >> 9);
|
|
+
|
|
+ if (to & (mtd->writesize - 1)) {
|
|
+ pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ if (ops->ooblen != 0 && ops->mode != MTD_OPS_AUTO_OOB) {
|
|
+ pr_err("%s: unsupported ops->mode,%d\n",
|
|
+ __func__, ops->mode);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if ((ops->len % mtd->writesize) != 0) {
|
|
+ pr_err("%s: unsupported ops->len, %d\n",
|
|
+ __func__, ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ } else {
|
|
+ if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
|
|
+ pr_err("%s: unsupported ops->len, "
|
|
+ "%d for MTD_OPS_RAW mode\n",
|
|
+ __func__, ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ops->datbuf == NULL) {
|
|
+ pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
|
|
+ pr_err("%s: unsupported ops->ooboffs, %d\n",
|
|
+ __func__, ops->ooboffs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->datbuf) {
|
|
+ data_dma_addr_curr = data_dma_addr =
|
|
+ msm_nand_dma_map(chip->dev, ops->datbuf,
|
|
+ ops->len, DMA_TO_DEVICE,
|
|
+ &dat_bounce_buf);
|
|
+ if (dma_mapping_error(chip->dev, data_dma_addr)) {
|
|
+ pr_err("msm_nand_write_oob: failed to get dma addr "
|
|
+ "for %p\n", ops->datbuf);
|
|
+ return -EIO;
|
|
+ }
|
|
+ }
|
|
+ if (ops->oobbuf) {
|
|
+ oob_dma_addr_curr = oob_dma_addr =
|
|
+ msm_nand_dma_map(chip->dev, ops->oobbuf,
|
|
+ ops->ooblen, DMA_TO_DEVICE,
|
|
+ &oob_bounce_buf);
|
|
+ if (dma_mapping_error(chip->dev, oob_dma_addr)) {
|
|
+ pr_err("msm_nand_write_oob: failed to get dma addr "
|
|
+ "for %p\n", ops->oobbuf);
|
|
+ err = -EIO;
|
|
+ goto err_dma_map_oobbuf_failed;
|
|
+ }
|
|
+ }
|
|
+ if (ops->mode != MTD_OPS_RAW)
|
|
+ page_count = ops->len / mtd->writesize;
|
|
+ else
|
|
+ page_count = ops->len / (mtd->writesize + mtd->oobsize);
|
|
+
|
|
+ wait_event(chip->wait_queue, (dma_buffer =
|
|
+ msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ while (page_count-- > 0) {
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ dma_buffer->data.cfg0 = chip->CFG0;
|
|
+ dma_buffer->data.cfg1 = chip->CFG1;
|
|
+ if (enable_bch_ecc)
|
|
+ dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
|
|
+ } else {
|
|
+ dma_buffer->data.cfg0 = (chip->CFG0_RAW &
|
|
+ ~(7U << 6)) | ((cwperpage-1) << 6);
|
|
+ dma_buffer->data.cfg1 = chip->CFG1_RAW |
|
|
+ (chip->CFG1 & CFG1_WIDE_FLASH);
|
|
+ }
|
|
+
|
|
+ /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
|
|
+ dma_buffer->data.addr0 = page << 16;
|
|
+ dma_buffer->data.addr1 = (page >> 16) & 0xff;
|
|
+ /* chipsel_0 + enable DM interface */
|
|
+ dma_buffer->data.chipsel = 0 | 4;
|
|
+
|
|
+
|
|
+ /* GO bit for the EXEC register */
|
|
+ dma_buffer->data.exec = 1;
|
|
+ dma_buffer->data.clrfstatus = 0x00000020;
|
|
+ dma_buffer->data.clrrstatus = 0x000000C0;
|
|
+
|
|
+ BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.flash_status));
|
|
+
|
|
+ for (n = 0; n < cwperpage ; n++) {
|
|
+ /* status return words */
|
|
+ dma_buffer->data.flash_status[n] = 0xeeeeeeee;
|
|
+ /* block on cmd ready, then
|
|
+ * write CMD / ADDR0 / ADDR1 / CHIPSEL regs in a burst
|
|
+ */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src =
|
|
+ msm_virt_to_dma(chip, &dma_buffer->data.cmd);
|
|
+ cmd->dst = MSM_NAND_FLASH_CMD;
|
|
+ if (n == 0)
|
|
+ cmd->len = 16;
|
|
+ else
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ if (n == 0) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cfg0);
|
|
+ cmd->dst = MSM_NAND_DEV0_CFG0;
|
|
+ if (enable_bch_ecc)
|
|
+ cmd->len = 12;
|
|
+ else
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.ecccfg);
|
|
+ cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ }
|
|
+
|
|
+ /* write data block */
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ if (!boot_layout)
|
|
+ sectordatawritesize = (n < (cwperpage - 1)) ?
|
|
+ 516 : (512 - ((cwperpage - 1) << 2));
|
|
+ else
|
|
+ sectordatawritesize = 512;
|
|
+ } else {
|
|
+ sectordatawritesize = chip->cw_size;
|
|
+ }
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = data_dma_addr_curr;
|
|
+ data_dma_addr_curr += sectordatawritesize;
|
|
+ cmd->dst = MSM_NAND_FLASH_BUFFER;
|
|
+ cmd->len = sectordatawritesize;
|
|
+ cmd++;
|
|
+
|
|
+ if (ops->oobbuf) {
|
|
+ if (n == (cwperpage - 1)) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = oob_dma_addr_curr;
|
|
+ cmd->dst = MSM_NAND_FLASH_BUFFER +
|
|
+ (512 - ((cwperpage - 1) << 2));
|
|
+ if ((cwperpage << 2) < oob_len)
|
|
+ cmd->len = (cwperpage << 2);
|
|
+ else
|
|
+ cmd->len = oob_len;
|
|
+ oob_dma_addr_curr += cmd->len;
|
|
+ oob_len -= cmd->len;
|
|
+ if (cmd->len > 0)
|
|
+ cmd++;
|
|
+ }
|
|
+ if (ops->mode != MTD_OPS_AUTO_OOB) {
|
|
+ /* skip ecc bytes in oobbuf */
|
|
+ if (oob_len < chip->ecc_parity_bytes) {
|
|
+ oob_dma_addr_curr +=
|
|
+ chip->ecc_parity_bytes;
|
|
+ oob_len -=
|
|
+ chip->ecc_parity_bytes;
|
|
+ } else {
|
|
+ oob_dma_addr_curr += oob_len;
|
|
+ oob_len = 0;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* kick the execute register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src =
|
|
+ msm_virt_to_dma(chip, &dma_buffer->data.exec);
|
|
+ cmd->dst = MSM_NAND_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* block on data ready, then
|
|
+ * read the status register
|
|
+ */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_FLASH_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.flash_status[n]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.clrfstatus);
|
|
+ cmd->dst = MSM_NAND_FLASH_STATUS;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.clrrstatus);
|
|
+ cmd->dst = MSM_NAND_READ_STATUS;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ }
|
|
+
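+ /* mark the chain boundaries: CMD_OCB on the first descriptor, CMD_OCU | CMD_LC (last command) on the final one */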
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+ BUILD_BUG_ON(8 * 7 + 2 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmdptr =
|
|
+ (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) |
|
|
+ CMD_PTR_LP;
|
|
+
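+ /* barriers order the descriptor writes before the ADM transfer and the status reads after it */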
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel,
|
|
+ DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
|
|
+ msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ /* if any of the writes failed (0x10), or there was a
|
|
+ * protection violation (0x100), or the program success
|
|
+ * bit (0x80) is unset, we lose
|
|
+ */
|
|
+ err = 0;
|
|
+ for (n = 0; n < cwperpage; n++) {
|
|
+ if (dma_buffer->data.flash_status[n] & 0x110) {
|
|
+ err = -EIO;
|
|
+ break;
|
|
+ }
|
|
+ if (!(dma_buffer->data.flash_status[n] & 0x80)) {
|
|
+ err = -EIO;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+#if VERBOSE
|
|
+ for (n = 0; n < cwperpage; n++)
|
|
+ pr_info("write pg %d: flash_status[%d] = %x\n", page,
|
|
+ n, dma_buffer->data.flash_status[n]);
|
|
+
|
|
+#endif
|
|
+ if (err)
|
|
+ break;
|
|
+ pages_written++;
|
|
+ page++;
|
|
+ }
|
|
+ if (ops->mode != MTD_OPS_RAW)
|
|
+ ops->retlen = mtd->writesize * pages_written;
|
|
+ else
|
|
+ ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
|
|
+
|
|
+ ops->oobretlen = ops->ooblen - oob_len;
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+
|
|
+ if (ops->oobbuf) {
|
|
+ msm_nand_dma_unmap(chip->dev, oob_dma_addr,
|
|
+ ops->ooblen, DMA_TO_DEVICE,
|
|
+ ops->oobbuf, oob_bounce_buf);
|
|
+ }
|
|
+err_dma_map_oobbuf_failed:
|
|
+ if (ops->datbuf) {
|
|
+ msm_nand_dma_unmap(chip->dev, data_dma_addr, ops->len,
|
|
+ DMA_TO_DEVICE, ops->datbuf,
|
|
+ dat_bounce_buf);
|
|
+ }
|
|
+ if (err)
|
|
+ pr_err("msm_nand_write_oob %llx %x %x failed %d\n",
|
|
+ to, ops->len, ops->ooblen, err);
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
|
|
+ __func__, err, ops->retlen, ops->oobretlen);
|
|
+
|
|
+ pr_info("==================================================="
|
|
+ "==============\n");
|
|
+#endif
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int
|
|
+msm_nand_write_oob_dualnandc(struct mtd_info *mtd, loff_t to,
|
|
+ struct mtd_oob_ops *ops)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+ struct {
|
|
+ dmov_s cmd[16 * 6 + 18];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t cmd;
|
|
+ uint32_t nandc01_addr0;
|
|
+ uint32_t nandc10_addr0;
|
|
+ uint32_t nandc11_addr1;
|
|
+ uint32_t chipsel_cs0;
|
|
+ uint32_t chipsel_cs1;
|
|
+ uint32_t cfg0;
|
|
+ uint32_t cfg1;
|
|
+ uint32_t eccbchcfg;
|
|
+ uint32_t exec;
|
|
+ uint32_t ecccfg;
|
|
+ uint32_t cfg0_nc01;
|
|
+ uint32_t ebi2_chip_select_cfg0;
|
|
+ uint32_t adm_mux_data_ack_req_nc01;
|
|
+ uint32_t adm_mux_cmd_ack_req_nc01;
|
|
+ uint32_t adm_mux_data_ack_req_nc10;
|
|
+ uint32_t adm_mux_cmd_ack_req_nc10;
|
|
+ uint32_t adm_default_mux;
|
|
+ uint32_t default_ebi2_chip_select_cfg0;
|
|
+ uint32_t nc01_flash_dev_cmd_vld;
|
|
+ uint32_t nc10_flash_dev_cmd0;
|
|
+ uint32_t nc01_flash_dev_cmd_vld_default;
|
|
+ uint32_t nc10_flash_dev_cmd0_default;
|
|
+ uint32_t flash_status[16];
|
|
+ uint32_t clrfstatus;
|
|
+ uint32_t clrrstatus;
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+ unsigned n;
|
|
+ unsigned page = 0;
|
|
+ uint32_t oob_len;
|
|
+ uint32_t sectordatawritesize;
|
|
+ int err = 0;
|
|
+ dma_addr_t data_dma_addr = 0;
|
|
+ dma_addr_t oob_dma_addr = 0;
|
|
+ dma_addr_t data_dma_addr_curr = 0;
|
|
+ dma_addr_t oob_dma_addr_curr = 0;
|
|
+ unsigned page_count;
|
|
+ unsigned pages_written = 0;
|
|
+ unsigned cwperpage;
|
|
+ unsigned cw_offset = chip->cw_size;
|
|
+#if VERBOSE
|
|
+ pr_info("================================================="
|
|
+ "============\n");
|
|
+ pr_info("%s:\nto 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
|
|
+ "\noobbuf 0x%p ooblen 0x%x\n\n",
|
|
+ __func__, to, ops->mode, ops->datbuf, ops->len,
|
|
+ ops->oobbuf, ops->ooblen);
|
|
+#endif
|
|
+
|
|
+ if (mtd->writesize == 2048)
|
|
+ page = to >> 11;
|
|
+
|
|
+ if (mtd->writesize == 4096)
|
|
+ page = to >> 12;
|
|
+
|
|
+ if (interleave_enable)
|
|
+ page = (to >> 1) >> 12;
|
|
+
|
|
+ oob_len = ops->ooblen;
|
|
+ cwperpage = (mtd->writesize >> 9);
|
|
+
|
|
+ if (to & (mtd->writesize - 1)) {
|
|
+ pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ if (ops->ooblen != 0 && ops->mode != MTD_OPS_AUTO_OOB) {
|
|
+ pr_err("%s: unsupported ops->mode,%d\n",
|
|
+ __func__, ops->mode);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if ((ops->len % mtd->writesize) != 0) {
|
|
+ pr_err("%s: unsupported ops->len, %d\n",
|
|
+ __func__, ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ } else {
|
|
+ if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
|
|
+ pr_err("%s: unsupported ops->len, "
|
|
+ "%d for MTD_OPS_RAW mode\n",
|
|
+ __func__, ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ops->datbuf == NULL) {
|
|
+ pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
|
|
+ pr_err("%s: unsupported ops->ooboffs, %d\n",
|
|
+ __func__, ops->ooboffs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->datbuf) {
|
|
+ data_dma_addr_curr = data_dma_addr =
|
|
+ msm_nand_dma_map(chip->dev, ops->datbuf,
|
|
+ ops->len, DMA_TO_DEVICE, NULL);
|
|
+ if (dma_mapping_error(chip->dev, data_dma_addr)) {
|
|
+ pr_err("msm_nand_write_oob_dualnandc:"
|
|
+ "failed to get dma addr "
|
|
+ "for %p\n", ops->datbuf);
|
|
+ return -EIO;
|
|
+ }
|
|
+ }
|
|
+ if (ops->oobbuf) {
|
|
+ oob_dma_addr_curr = oob_dma_addr =
|
|
+ msm_nand_dma_map(chip->dev, ops->oobbuf,
|
|
+ ops->ooblen, DMA_TO_DEVICE, NULL);
|
|
+ if (dma_mapping_error(chip->dev, oob_dma_addr)) {
|
|
+ pr_err("msm_nand_write_oob_dualnandc:"
|
|
+ "failed to get dma addr "
|
|
+ "for %p\n", ops->oobbuf);
|
|
+ err = -EIO;
|
|
+ goto err_dma_map_oobbuf_failed;
|
|
+ }
|
|
+ }
|
|
+ if (ops->mode != MTD_OPS_RAW)
|
|
+ page_count = ops->len / mtd->writesize;
|
|
+ else
|
|
+ page_count = ops->len / (mtd->writesize + mtd->oobsize);
|
|
+
|
|
+ wait_event(chip->wait_queue, (dma_buffer =
|
|
+ msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ if (chip->CFG1 & CFG1_WIDE_FLASH)
|
|
+ cw_offset >>= 1;
|
|
+
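+ /*
+ * Fixed EBI2 chip-select and ADM mux ack/req values used while
+ * switching requests between the NC01 and NC10 controllers.
+ */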
|
|
+ dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
|
|
+ dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
|
|
+ dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
|
|
+ dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
|
|
+ dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
|
|
+ dma_buffer->data.adm_default_mux = 0x00000FC0;
|
|
+ dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
|
|
+ dma_buffer->data.nc01_flash_dev_cmd_vld = 0x9;
|
|
+ dma_buffer->data.nc10_flash_dev_cmd0 = 0x1085D060;
|
|
+ dma_buffer->data.nc01_flash_dev_cmd_vld_default = 0x1D;
|
|
+ dma_buffer->data.nc10_flash_dev_cmd0_default = 0x1080D060;
|
|
+ dma_buffer->data.clrfstatus = 0x00000020;
|
|
+ dma_buffer->data.clrrstatus = 0x000000C0;
|
|
+
|
|
+ while (page_count-- > 0) {
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ dma_buffer->data.cfg0 = ((chip->CFG0 & ~(7U << 6))
|
|
+ & ~(1 << 4)) | ((((cwperpage >> 1)-1)) << 6);
|
|
+ dma_buffer->data.cfg1 = chip->CFG1;
|
|
+ if (enable_bch_ecc)
|
|
+ dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
|
|
+ } else {
|
|
+ dma_buffer->data.cfg0 = ((chip->CFG0_RAW &
|
|
+ ~(7U << 6)) & ~(1 << 4)) | (((cwperpage >> 1)-1) << 6);
|
|
+ dma_buffer->data.cfg1 = chip->CFG1_RAW |
|
|
+ (chip->CFG1 & CFG1_WIDE_FLASH);
|
|
+ }
|
|
+
|
|
+ /* Disables the automatic issuing of the read
|
|
+ * status command for the first NAND controller.
|
|
+ */
|
|
+ if (!interleave_enable)
|
|
+ dma_buffer->data.cfg0_nc01 = dma_buffer->data.cfg0
|
|
+ | (1 << 4);
|
|
+ else
|
|
+ dma_buffer->data.cfg0 |= (1 << 4);
|
|
+
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
|
|
+ dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
|
|
+ dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
|
|
+
|
|
+ /* GO bit for the EXEC register */
|
|
+ dma_buffer->data.exec = 1;
|
|
+
|
|
+ if (!interleave_enable) {
|
|
+ dma_buffer->data.nandc01_addr0 = (page << 16) | 0x0;
|
|
+ /* NC10 ADDR0 points to the next code word */
|
|
+ dma_buffer->data.nandc10_addr0 =
|
|
+ (page << 16) | cw_offset;
|
|
+ } else {
|
|
+ dma_buffer->data.nandc01_addr0 =
|
|
+ dma_buffer->data.nandc10_addr0 = (page << 16) | 0x0;
|
|
+ }
|
|
+ /* ADDR1 */
|
|
+ dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
|
|
+
|
|
+ BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.flash_status));
|
|
+
|
|
+ for (n = 0; n < cwperpage; n++) {
|
|
+ /* status return words */
|
|
+ dma_buffer->data.flash_status[n] = 0xeeeeeeee;
|
|
+
|
|
+ if (n == 0) {
|
|
+ if (!interleave_enable) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.nc01_flash_dev_cmd_vld);
|
|
+ cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nc10_flash_dev_cmd0);
|
|
+ cmd->dst = NC10(MSM_NAND_DEV_CMD0);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* common settings for both NC01 & NC10
|
|
+ * NC01, NC10 --> ADDR1 / CHIPSEL
|
|
+ */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nandc11_addr1);
|
|
+ cmd->dst = NC11(MSM_NAND_ADDR1);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* Disables the automatic issue of the
|
|
+ * read status command after the write
|
|
+ * operation.
|
|
+ */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cfg0_nc01);
|
|
+ cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cfg0);
|
|
+ cmd->dst = NC10(MSM_NAND_DEV0_CFG0);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cfg1);
|
|
+ cmd->dst = NC11(MSM_NAND_DEV0_CFG1);
|
|
+ if (enable_bch_ecc)
|
|
+ cmd->len = 8;
|
|
+ else
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ } else {
|
|
+ /* enable CS1 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.ebi2_chip_select_cfg0);
|
|
+ cmd->dst = EBI2_CHIP_SELECT_CFG0;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* NC11 --> ADDR1 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nandc11_addr1);
|
|
+ cmd->dst = NC11(MSM_NAND_ADDR1);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Enable CS0 for NC01 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.chipsel_cs0);
|
|
+ cmd->dst =
|
|
+ NC01(MSM_NAND_FLASH_CHIP_SELECT);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Enable CS1 for NC10 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.chipsel_cs1);
|
|
+ cmd->dst =
|
|
+ NC10(MSM_NAND_FLASH_CHIP_SELECT);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* config DEV0_CFG0 & CFG1 for CS0 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cfg0);
|
|
+ cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* config DEV1_CFG0 & CFG1 for CS1 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cfg0);
|
|
+ cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+ }
|
|
+
|
|
+ dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.ecccfg);
|
|
+ cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* NC01 --> ADDR0 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nandc01_addr0);
|
|
+ cmd->dst = NC01(MSM_NAND_ADDR0);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* NC10 --> ADDR0 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nandc10_addr0);
|
|
+ cmd->dst = NC10(MSM_NAND_ADDR0);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ }
|
|
+
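+ /* ping-pong between controllers: even codewords are issued to NC01, odd codewords to NC10, switching the ADM mux each time */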
|
|
+ if (n % 2 == 0) {
|
|
+ /* MASK CMD ACK/REQ --> NC10 (0xF14)*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* CMD */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cmd);
|
|
+ cmd->dst = NC01(MSM_NAND_FLASH_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ } else {
|
|
+ /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* CMD */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.cmd);
|
|
+ cmd->dst = NC10(MSM_NAND_FLASH_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ }
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW)
|
|
+ sectordatawritesize = (n < (cwperpage - 1)) ?
|
|
+ 516 : (512 - ((cwperpage - 1) << 2));
|
|
+ else
|
|
+ sectordatawritesize = chip->cw_size;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = data_dma_addr_curr;
|
|
+ data_dma_addr_curr += sectordatawritesize;
|
|
+
|
|
+ if (n % 2 == 0)
|
|
+ cmd->dst = NC01(MSM_NAND_FLASH_BUFFER);
|
|
+ else
|
|
+ cmd->dst = NC10(MSM_NAND_FLASH_BUFFER);
|
|
+ cmd->len = sectordatawritesize;
|
|
+ cmd++;
|
|
+
|
|
+ if (ops->oobbuf) {
|
|
+ if (n == (cwperpage - 1)) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = oob_dma_addr_curr;
|
|
+ cmd->dst = NC10(MSM_NAND_FLASH_BUFFER) +
|
|
+ (512 - ((cwperpage - 1) << 2));
|
|
+ if ((cwperpage << 2) < oob_len)
|
|
+ cmd->len = (cwperpage << 2);
|
|
+ else
|
|
+ cmd->len = oob_len;
|
|
+ oob_dma_addr_curr += cmd->len;
|
|
+ oob_len -= cmd->len;
|
|
+ if (cmd->len > 0)
|
|
+ cmd++;
|
|
+ }
|
|
+ if (ops->mode != MTD_OPS_AUTO_OOB) {
|
|
+ /* skip ecc bytes in oobbuf */
|
|
+ if (oob_len < chip->ecc_parity_bytes) {
|
|
+ oob_dma_addr_curr +=
|
|
+ chip->ecc_parity_bytes;
|
|
+ oob_len -=
|
|
+ chip->ecc_parity_bytes;
|
|
+ } else {
|
|
+ oob_dma_addr_curr += oob_len;
|
|
+ oob_len = 0;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (n % 2 == 0) {
|
|
+ if (n != 0) {
|
|
+ /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->
|
|
+ data.adm_mux_data_ack_req_nc01);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* block on data ready from NC10, then
|
|
+ * read the status register
|
|
+ */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = NC10(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.flash_status[n-1]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ }
|
|
+ /* kick the NC01 execute register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.exec);
|
|
+ cmd->dst = NC01(MSM_NAND_EXEC_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ } else {
|
|
+ /* MASK DATA ACK/REQ --> NC10 (0xF28)*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_data_ack_req_nc10);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* block on data ready from NC01, then
|
|
+ * read the status register
|
|
+ */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = NC01(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.flash_status[n-1]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* kick the execute register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src =
|
|
+ msm_virt_to_dma(chip, &dma_buffer->data.exec);
|
|
+ cmd->dst = NC10(MSM_NAND_EXEC_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_data_ack_req_nc01);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* process the outstanding request: collect the final codeword's status */
|
|
+ /* block on data ready, then
|
|
+ * read the status register
|
|
+ */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = NC10(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.flash_status[n-1]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
|
|
+ cmd->dst = NC11(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
|
|
+ cmd->dst = NC11(MSM_NAND_READ_STATUS);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* MASK DATA ACK/REQ --> NC01 (0xFC0)*/
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_default_mux);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ if (!interleave_enable) {
|
|
+ /* restore the default values */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nc01_flash_dev_cmd_vld_default);
|
|
+ cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.nc10_flash_dev_cmd0_default);
|
|
+ cmd->dst = NC10(MSM_NAND_DEV_CMD0);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ } else {
|
|
+ /* disable CS1 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.default_ebi2_chip_select_cfg0);
|
|
+ cmd->dst = EBI2_CHIP_SELECT_CFG0;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+ }
|
|
+
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+ BUILD_BUG_ON(16 * 6 + 18 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmdptr =
|
|
+ ((msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP);
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel,
|
|
+ DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
|
|
+ msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ /* if any of the writes failed (0x10), or there was a
|
|
+ * protection violation (0x100), or the program success
|
|
+ * bit (0x80) is unset, we lose
|
|
+ */
|
|
+ err = 0;
|
|
+ for (n = 0; n < cwperpage; n++) {
|
|
+ if (dma_buffer->data.flash_status[n] & 0x110) {
|
|
+ err = -EIO;
|
|
+ break;
|
|
+ }
|
|
+ if (!(dma_buffer->data.flash_status[n] & 0x80)) {
|
|
+ err = -EIO;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ /* the last codeword must also report the ready bit (0x20) in its flash status */
|
|
+ if (!interleave_enable)
|
|
+ if (!(dma_buffer->data.flash_status[cwperpage - 1]
|
|
+ & 0x20)) {
|
|
+ err = -EIO;
|
|
+ break;
|
|
+ }
|
|
+#if VERBOSE
|
|
+ for (n = 0; n < cwperpage; n++) {
|
|
+ if (n%2) {
|
|
+ pr_info("NC10: write pg %d: flash_status[%d] = %x\n",
|
|
+ page, n, dma_buffer->data.flash_status[n]);
|
|
+ } else {
|
|
+ pr_info("NC01: write pg %d: flash_status[%d] = %x\n",
|
|
+ page, n, dma_buffer->data.flash_status[n]);
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+ if (err)
|
|
+ break;
|
|
+ pages_written++;
|
|
+ page++;
|
|
+ }
|
|
+ if (ops->mode != MTD_OPS_RAW)
|
|
+ ops->retlen = mtd->writesize * pages_written;
|
|
+ else
|
|
+ ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
|
|
+
|
|
+ ops->oobretlen = ops->ooblen - oob_len;
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+
|
|
+ if (ops->oobbuf)
|
|
+ dma_unmap_page(chip->dev, oob_dma_addr,
|
|
+ ops->ooblen, DMA_TO_DEVICE);
|
|
+err_dma_map_oobbuf_failed:
|
|
+ if (ops->datbuf)
|
|
+ dma_unmap_page(chip->dev, data_dma_addr, ops->len,
|
|
+ DMA_TO_DEVICE);
|
|
+ if (err)
|
|
+ pr_err("msm_nand_write_oob_dualnandc %llx %x %x failed %d\n",
|
|
+ to, ops->len, ops->ooblen, err);
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
|
|
+ __func__, err, ops->retlen, ops->oobretlen);
|
|
+
|
|
+ pr_info("==================================================="
|
|
+ "==========\n");
|
|
+#endif
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
|
|
+ size_t *retlen, const u_char *buf)
|
|
+{
|
|
+ int ret;
|
|
+ struct mtd_oob_ops ops;
|
|
+ int (*write_oob)(struct mtd_info *, loff_t, struct mtd_oob_ops *);
|
|
+
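+ /* dispatch to the single- or dual-controller (interleaved) write path */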
|
|
+ if (!dual_nand_ctlr_present)
|
|
+ write_oob = msm_nand_write_oob;
|
|
+ else
|
|
+ write_oob = msm_nand_write_oob_dualnandc;
|
|
+
|
|
+ ops.mode = MTD_OPS_PLACE_OOB;
|
|
+ ops.retlen = 0;
|
|
+ ops.ooblen = 0;
|
|
+ ops.oobbuf = NULL;
|
|
+ ret = 0;
|
|
+ *retlen = 0;
|
|
+
|
|
+ if (!virt_addr_valid(buf) &&
|
|
+ ((to | len) & (mtd->writesize - 1)) == 0 &&
|
|
+ ((unsigned long) buf & ~PAGE_MASK) + len > PAGE_SIZE) {
|
|
+ /*
|
|
+ * Handle writing of large size write buffer in vmalloc
|
|
+ * address space that does not fit in an MMU page.
|
|
+ * The destination address must be on page boundary,
|
|
+ * and the size must be multiple of NAND page size.
|
|
+ * Writing partial page is not supported.
|
|
+ */
|
|
+ ops.len = mtd->writesize;
|
|
+
|
|
+ for (;;) {
|
|
+ ops.datbuf = (uint8_t *) buf;
|
|
+
|
|
+ ret = write_oob(mtd, to, &ops);
|
|
+ if (ret < 0)
|
|
+ break;
|
|
+
|
|
+ len -= mtd->writesize;
|
|
+ *retlen += mtd->writesize;
|
|
+ if (len == 0)
|
|
+ break;
|
|
+
|
|
+ buf += mtd->writesize;
|
|
+ to += mtd->writesize;
|
|
+ }
|
|
+ } else {
|
|
+ ops.len = len;
|
|
+ ops.datbuf = (uint8_t *) buf;
|
|
+ ret = write_oob(mtd, to, &ops);
|
|
+ *retlen = ops.retlen;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int
|
|
+msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
|
|
+{
|
|
+ int err;
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+ struct {
|
|
+ dmov_s cmd[6];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t cmd;
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t chipsel;
|
|
+ uint32_t cfg0;
|
|
+ uint32_t cfg1;
|
|
+ uint32_t exec;
|
|
+ uint32_t flash_status;
|
|
+ uint32_t clrfstatus;
|
|
+ uint32_t clrrstatus;
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+ unsigned page = 0;
|
|
+
|
|
+ if (mtd->writesize == 2048)
|
|
+ page = instr->addr >> 11;
|
|
+
|
|
+ if (mtd->writesize == 4096)
|
|
+ page = instr->addr >> 12;
|
|
+
|
|
+ if (instr->addr & (mtd->erasesize - 1)) {
|
|
+ pr_err("%s: unsupported erase address, 0x%llx\n",
|
|
+ __func__, instr->addr);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (instr->len != mtd->erasesize) {
|
|
+ pr_err("%s: unsupported erase len, %lld\n",
|
|
+ __func__, instr->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ wait_event(chip->wait_queue,
|
|
+ (dma_buffer = msm_nand_get_dma_buffer(
|
|
+ chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_BLOCK_ERASE;
|
|
+ dma_buffer->data.addr0 = page;
|
|
+ dma_buffer->data.addr1 = 0;
|
|
+ dma_buffer->data.chipsel = 0 | 4;
|
|
+ dma_buffer->data.exec = 1;
|
|
+ dma_buffer->data.flash_status = 0xeeeeeeee;
|
|
+ dma_buffer->data.cfg0 = chip->CFG0 & (~(7 << 6)); /* CW_PER_PAGE = 0 */
|
|
+ dma_buffer->data.cfg1 = chip->CFG1;
|
|
+ dma_buffer->data.clrfstatus = 0x00000020;
|
|
+ dma_buffer->data.clrrstatus = 0x000000C0;
|
|
+
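+ /*
+ * Six-descriptor chain: issue BLOCK_ERASE, program CFG0/CFG1, kick
+ * EXEC, read back FLASH_STATUS, then clear the flash and read status.
+ */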
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD | CMD_OCB;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
|
|
+ cmd->dst = MSM_NAND_FLASH_CMD;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
|
|
+ cmd->dst = MSM_NAND_DEV0_CFG0;
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
|
|
+ cmd->dst = MSM_NAND_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_FLASH_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
|
|
+ cmd->dst = MSM_NAND_FLASH_STATUS;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = CMD_OCU | CMD_LC;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
|
|
+ cmd->dst = MSM_NAND_READ_STATUS;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd) - 1);
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmdptr =
|
|
+ (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(
|
|
+ chip->dma_channel, DMOV_CMD_PTR_LIST |
|
|
+ DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ /* we fail if there was an operation error, an MPU error, or the
|
|
+ * erase success bit was not set.
|
|
+ */
|
|
+
|
|
+ if (dma_buffer->data.flash_status & 0x110 ||
|
|
+ !(dma_buffer->data.flash_status & 0x80))
|
|
+ err = -EIO;
|
|
+ else
|
|
+ err = 0;
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+ if (err) {
|
|
+ pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
|
|
+ instr->fail_addr = instr->addr;
|
|
+ instr->state = MTD_ERASE_FAILED;
|
|
+ } else {
|
|
+ instr->state = MTD_ERASE_DONE;
|
|
+ instr->fail_addr = 0xffffffff;
|
|
+ mtd_erase_callback(instr);
|
|
+ }
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int
|
|
+msm_nand_erase_dualnandc(struct mtd_info *mtd, struct erase_info *instr)
|
|
+{
|
|
+ int err;
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+ struct {
|
|
+ dmov_s cmd[18];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t cmd;
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t chipsel_cs0;
|
|
+ uint32_t chipsel_cs1;
|
|
+ uint32_t cfg0;
|
|
+ uint32_t cfg1;
|
|
+ uint32_t exec;
|
|
+ uint32_t ecccfg;
|
|
+ uint32_t ebi2_chip_select_cfg0;
|
|
+ uint32_t adm_mux_data_ack_req_nc01;
|
|
+ uint32_t adm_mux_cmd_ack_req_nc01;
|
|
+ uint32_t adm_mux_data_ack_req_nc10;
|
|
+ uint32_t adm_mux_cmd_ack_req_nc10;
|
|
+ uint32_t adm_default_mux;
|
|
+ uint32_t default_ebi2_chip_select_cfg0;
|
|
+ uint32_t nc01_flash_dev_cmd0;
|
|
+ uint32_t nc01_flash_dev_cmd0_default;
|
|
+ uint32_t flash_status[2];
|
|
+ uint32_t clrfstatus;
|
|
+ uint32_t clrrstatus;
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+ unsigned page = 0;
|
|
+
|
|
+ if (mtd->writesize == 2048)
|
|
+ page = instr->addr >> 11;
|
|
+
|
|
+ if (mtd->writesize == 4096)
|
|
+ page = instr->addr >> 12;
|
|
+
|
|
+ if (mtd->writesize == 8192)
|
|
+ page = (instr->addr >> 1) >> 12;
|
|
+
|
|
+ if (instr->addr & (mtd->erasesize - 1)) {
|
|
+ pr_err("%s: unsupported erase address, 0x%llx\n",
|
|
+ __func__, instr->addr);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (instr->len != mtd->erasesize) {
|
|
+ pr_err("%s: unsupported erase len, %lld\n",
|
|
+ __func__, instr->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ wait_event(chip->wait_queue,
|
|
+ (dma_buffer = msm_nand_get_dma_buffer(
|
|
+ chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_BLOCK_ERASE;
|
|
+ dma_buffer->data.addr0 = page;
|
|
+ dma_buffer->data.addr1 = 0;
|
|
+ dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
|
|
+ dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
|
|
+ dma_buffer->data.exec = 1;
|
|
+ dma_buffer->data.flash_status[0] = 0xeeeeeeee;
|
|
+ dma_buffer->data.flash_status[1] = 0xeeeeeeee;
|
|
+ dma_buffer->data.cfg0 = chip->CFG0 & (~(7 << 6)); /* CW_PER_PAGE = 0 */
|
|
+ dma_buffer->data.cfg1 = chip->CFG1;
|
|
+ dma_buffer->data.clrfstatus = 0x00000020;
|
|
+ dma_buffer->data.clrrstatus = 0x000000C0;
|
|
+
|
|
+ dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
|
|
+ dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
|
|
+ dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
|
|
+ dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
|
|
+ dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
|
|
+ dma_buffer->data.adm_default_mux = 0x00000FC0;
|
|
+ dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
|
|
+
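+ /* erase the block on both chip selects: CS0 is driven through NC01 and CS1 through NC10, each with its own EXEC and status read */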
|
|
+ /* enable CS1 */
|
|
+ cmd->cmd = 0 | CMD_OCB;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.ebi2_chip_select_cfg0);
|
|
+ cmd->dst = EBI2_CHIP_SELECT_CFG0;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* erase CS0 block now !!! */
|
|
+ /* 0xF14 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
|
|
+ cmd->dst = NC01(MSM_NAND_FLASH_CMD);
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
|
|
+ cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
|
|
+ cmd->dst = NC01(MSM_NAND_EXEC_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* 0xF28 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_data_ack_req_nc10);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = NC01(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status[0]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* erase CS1 block now !!! */
|
|
+ /* 0x53C */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
|
|
+ cmd->dst = NC10(MSM_NAND_FLASH_CMD);
|
|
+ cmd->len = 12;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chipsel_cs1);
|
|
+ cmd->dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
|
|
+ cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
|
|
+ cmd->len = 8;
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
|
|
+ cmd->dst = NC10(MSM_NAND_EXEC_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* 0xA3C */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_data_ack_req_nc01);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = NC10(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status[1]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
|
|
+ cmd->dst = NC11(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
|
|
+ cmd->dst = NC11(MSM_NAND_READ_STATUS);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_default_mux);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* disable CS1 */
|
|
+ cmd->cmd = CMD_OCU | CMD_LC;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.default_ebi2_chip_select_cfg0);
|
|
+ cmd->dst = EBI2_CHIP_SELECT_CFG0;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ BUILD_BUG_ON(17 != ARRAY_SIZE(dma_buffer->cmd) - 1);
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+
|
|
+ dma_buffer->cmdptr =
|
|
+ (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(
|
|
+ chip->dma_channel, DMOV_CMD_PTR_LIST |
|
|
+ DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ /* we fail if there was an operation error, an MPU error, or the
|
|
+ * erase success bit was not set.
|
|
+ */
|
|
+
|
|
+ if (dma_buffer->data.flash_status[0] & 0x110 ||
|
|
+ !(dma_buffer->data.flash_status[0] & 0x80) ||
|
|
+ dma_buffer->data.flash_status[1] & 0x110 ||
|
|
+ !(dma_buffer->data.flash_status[1] & 0x80))
|
|
+ err = -EIO;
|
|
+ else
|
|
+ err = 0;
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+ if (err) {
|
|
+ pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
|
|
+ instr->fail_addr = instr->addr;
|
|
+ instr->state = MTD_ERASE_FAILED;
|
|
+ } else {
|
|
+ instr->state = MTD_ERASE_DONE;
|
|
+ instr->fail_addr = 0xffffffff;
|
|
+ mtd_erase_callback(instr);
|
|
+ }
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int
|
|
+msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+ int ret;
|
|
+ struct {
|
|
+ dmov_s cmd[5];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t cmd;
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t chipsel;
|
|
+ uint32_t cfg0;
|
|
+ uint32_t cfg1;
|
|
+ uint32_t eccbchcfg;
|
|
+ uint32_t exec;
|
|
+ uint32_t ecccfg;
|
|
+ struct {
|
|
+ uint32_t flash_status;
|
|
+ uint32_t buffer_status;
|
|
+ } result;
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+ uint8_t *buf;
|
|
+ unsigned page = 0;
|
|
+ unsigned cwperpage;
|
|
+
|
|
+ if (mtd->writesize == 2048)
|
|
+ page = ofs >> 11;
|
|
+
|
|
+ if (mtd->writesize == 4096)
|
|
+ page = ofs >> 12;
|
|
+
|
|
+ cwperpage = (mtd->writesize >> 9);
|
|
+
|
|
+ /* Check for invalid offset */
|
|
+ if (ofs > mtd->size)
|
|
+ return -EINVAL;
|
|
+ if (ofs & (mtd->erasesize - 1)) {
|
|
+ pr_err("%s: unsupported block address, 0x%x\n",
|
|
+ __func__, (uint32_t)ofs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ wait_event(chip->wait_queue,
|
|
+ (dma_buffer = msm_nand_get_dma_buffer(chip ,
|
|
+ sizeof(*dma_buffer) + 4)));
|
|
+ buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
|
|
+
|
|
+ /* Read 4 bytes starting from the bad block marker location
|
|
+ * in the last code word of the page
|
|
+ */
|
|
+
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
|
|
+ dma_buffer->data.cfg0 = chip->CFG0_RAW & ~(7U << 6);
|
|
+ dma_buffer->data.cfg1 = chip->CFG1_RAW |
|
|
+ (chip->CFG1 & CFG1_WIDE_FLASH);
|
|
+ if (enable_bch_ecc)
|
|
+ dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
|
|
+
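+ /* the page index is split across ADDR0[31:16] and ADDR1, with the last codeword's column in ADDR0[15:0] (in words on 16-bit devices) */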
|
|
+ if (chip->CFG1 & CFG1_WIDE_FLASH)
|
|
+ dma_buffer->data.addr0 = (page << 16) |
|
|
+ ((chip->cw_size * (cwperpage-1)) >> 1);
|
|
+ else
|
|
+ dma_buffer->data.addr0 = (page << 16) |
|
|
+ (chip->cw_size * (cwperpage-1));
|
|
+
|
|
+ dma_buffer->data.addr1 = (page >> 16) & 0xff;
|
|
+ dma_buffer->data.chipsel = 0 | 4;
|
|
+
|
|
+ dma_buffer->data.exec = 1;
|
|
+
|
|
+ dma_buffer->data.result.flash_status = 0xeeeeeeee;
|
|
+ dma_buffer->data.result.buffer_status = 0xeeeeeeee;
|
|
+
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
|
|
+ cmd->dst = MSM_NAND_FLASH_CMD;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
|
|
+ cmd->dst = MSM_NAND_DEV0_CFG0;
|
|
+ if (enable_bch_ecc)
|
|
+ cmd->len = 12;
|
|
+ else
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
|
|
+ cmd->dst = MSM_NAND_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_FLASH_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_FLASH_BUFFER +
|
|
+ (mtd->writesize - (chip->cw_size * (cwperpage-1)));
|
|
+ cmd->dst = msm_virt_to_dma(chip, buf);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_buffer->cmdptr = (msm_virt_to_dma(chip,
|
|
+ dma_buffer->cmd) >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
|
|
+ DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ ret = 0;
|
|
+ if (dma_buffer->data.result.flash_status & 0x110)
|
|
+ ret = -EIO;
|
|
+
|
|
+ if (!ret) {
|
|
+ /* Check for bad block marker byte */
|
|
+ if (chip->CFG1 & CFG1_WIDE_FLASH) {
|
|
+ if (buf[0] != 0xFF || buf[1] != 0xFF)
|
|
+ ret = 1;
|
|
+ } else {
|
|
+ if (buf[0] != 0xFF)
|
|
+ ret = 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int
|
|
+msm_nand_block_isbad_dualnandc(struct mtd_info *mtd, loff_t ofs)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+ int ret;
|
|
+ struct {
|
|
+ dmov_s cmd[18];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t cmd;
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t chipsel_cs0;
|
|
+ uint32_t chipsel_cs1;
|
|
+ uint32_t cfg0;
|
|
+ uint32_t cfg1;
|
|
+ uint32_t exec;
|
|
+ uint32_t ecccfg;
|
|
+ uint32_t ebi2_chip_select_cfg0;
|
|
+ uint32_t adm_mux_data_ack_req_nc01;
|
|
+ uint32_t adm_mux_cmd_ack_req_nc01;
|
|
+ uint32_t adm_mux_data_ack_req_nc10;
|
|
+ uint32_t adm_mux_cmd_ack_req_nc10;
|
|
+ uint32_t adm_default_mux;
|
|
+ uint32_t default_ebi2_chip_select_cfg0;
|
|
+ struct {
|
|
+ uint32_t flash_status;
|
|
+ uint32_t buffer_status;
|
|
+ } result[2];
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+ uint8_t *buf01;
|
|
+ uint8_t *buf10;
|
|
+ unsigned page = 0;
|
|
+ unsigned cwperpage;
|
|
+
|
|
+ if (mtd->writesize == 2048)
|
|
+ page = ofs >> 11;
|
|
+
|
|
+ if (mtd->writesize == 4096)
|
|
+ page = ofs >> 12;
|
|
+
|
|
+ if (mtd->writesize == 8192)
|
|
+ page = (ofs >> 1) >> 12;
|
|
+
|
|
+ cwperpage = ((mtd->writesize >> 1) >> 9);
|
|
+
|
|
+ /* Check for invalid offset */
|
|
+ if (ofs > mtd->size)
|
|
+ return -EINVAL;
|
|
+ if (ofs & (mtd->erasesize - 1)) {
|
|
+ pr_err("%s: unsupported block address, 0x%x\n",
|
|
+ __func__, (uint32_t)ofs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ wait_event(chip->wait_queue,
|
|
+ (dma_buffer = msm_nand_get_dma_buffer(chip ,
|
|
+ sizeof(*dma_buffer) + 8)));
|
|
+ buf01 = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
|
|
+ buf10 = buf01 + 4;
|
|
+
|
|
+ /* Read 4 bytes starting from the bad block marker location
|
|
+ * in the last code word of the page
|
|
+ */
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
|
|
+ dma_buffer->data.cfg0 = chip->CFG0_RAW & ~(7U << 6);
|
|
+ dma_buffer->data.cfg1 = chip->CFG1_RAW |
|
|
+ (chip->CFG1 & CFG1_WIDE_FLASH);
|
|
+
|
|
+ if (chip->CFG1 & CFG1_WIDE_FLASH)
|
|
+ dma_buffer->data.addr0 = (page << 16) |
|
|
+ ((528*(cwperpage-1)) >> 1);
|
|
+ else
|
|
+ dma_buffer->data.addr0 = (page << 16) |
|
|
+ (528*(cwperpage-1));
|
|
+
|
|
+ dma_buffer->data.addr1 = (page >> 16) & 0xff;
|
|
+ dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
|
|
+ dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
|
|
+
|
|
+ dma_buffer->data.exec = 1;
|
|
+
|
|
+ dma_buffer->data.result[0].flash_status = 0xeeeeeeee;
|
|
+ dma_buffer->data.result[0].buffer_status = 0xeeeeeeee;
|
|
+ dma_buffer->data.result[1].flash_status = 0xeeeeeeee;
|
|
+ dma_buffer->data.result[1].buffer_status = 0xeeeeeeee;
|
|
+
|
|
+ dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
|
|
+ dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
|
|
+ dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
|
|
+ dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
|
|
+ dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
|
|
+ dma_buffer->data.adm_default_mux = 0x00000FC0;
|
|
+ dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
|
|
+
|
|
+ /* Reading last code word from NC01 */
|
|
+ /* enable CS1 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.ebi2_chip_select_cfg0);
|
|
+ cmd->dst = EBI2_CHIP_SELECT_CFG0;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* 0xF14 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
|
|
+ cmd->dst = NC01(MSM_NAND_FLASH_CMD);
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
|
|
+ cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
|
|
+ cmd->dst = NC01(MSM_NAND_EXEC_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* 0xF28 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_data_ack_req_nc10);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = NC01(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[0]);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = NC01(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
|
|
+ (528*(cwperpage-1)));
|
|
+ cmd->dst = msm_virt_to_dma(chip, buf01);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Reading last code word from NC10 */
|
|
+ /* 0x53C */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
|
|
+ cmd->dst = NC10(MSM_NAND_FLASH_CMD);
|
|
+ cmd->len = 12;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chipsel_cs1);
|
|
+ cmd->dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
|
|
+ cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
|
|
+ cmd->dst = NC10(MSM_NAND_EXEC_CMD);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* A3C */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_mux_data_ack_req_nc01);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = NC10(MSM_NAND_FLASH_STATUS);
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[1]);
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = NC10(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
|
|
+ (528*(cwperpage-1)));
|
|
+ cmd->dst = msm_virt_to_dma(chip, buf10);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* FC0 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.adm_default_mux);
|
|
+ cmd->dst = EBI2_NAND_ADM_MUX;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* disable CS1 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.ebi2_chip_select_cfg0);
|
|
+ cmd->dst = EBI2_CHIP_SELECT_CFG0;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ BUILD_BUG_ON(18 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_buffer->cmdptr = (msm_virt_to_dma(chip,
|
|
+ dma_buffer->cmd) >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
|
|
+ DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ ret = 0;
|
|
+ if ((dma_buffer->data.result[0].flash_status & 0x110) ||
|
|
+ (dma_buffer->data.result[1].flash_status & 0x110))
|
|
+ ret = -EIO;
|
|
+
|
|
+ if (!ret) {
|
|
+ /* Check for bad block marker byte for NC01 & NC10 */
|
|
+ if (chip->CFG1 & CFG1_WIDE_FLASH) {
|
|
+ if ((buf01[0] != 0xFF || buf01[1] != 0xFF) ||
|
|
+ (buf10[0] != 0xFF || buf10[1] != 0xFF))
|
|
+ ret = 1;
|
|
+ } else {
|
|
+ if (buf01[0] != 0xFF || buf10[0] != 0xFF)
|
|
+ ret = 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 8);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int
|
|
+msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
|
|
+{
|
|
+ struct mtd_oob_ops ops;
|
|
+ int ret;
|
|
+ uint8_t *buf;
|
|
+
|
|
+ /* Check for invalid offset */
|
|
+ if (ofs > mtd->size)
|
|
+ return -EINVAL;
|
|
+ if (ofs & (mtd->erasesize - 1)) {
|
|
+ pr_err("%s: unsupported block address, 0x%x\n",
|
|
+ __func__, (uint32_t)ofs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Write all 0s to the first page.
|
|
+ * This will set the BB (bad block) marker to 0.
|
|
+ */
|
|
+ buf = page_address(ZERO_PAGE());
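+ /* the kernel zero page provides an all-zero source buffer */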
|
|
+
|
|
+ ops.mode = MTD_OPS_RAW;
|
|
+ ops.len = mtd->writesize + mtd->oobsize;
|
|
+ ops.retlen = 0;
|
|
+ ops.ooblen = 0;
|
|
+ ops.datbuf = buf;
|
|
+ ops.oobbuf = NULL;
|
|
+ if (!interleave_enable)
|
|
+ ret = msm_nand_write_oob(mtd, ofs, &ops);
|
|
+ else
|
|
+ ret = msm_nand_write_oob_dualnandc(mtd, ofs, &ops);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msm_nand_suspend - [MTD Interface] Suspend the msm_nand flash
|
|
+ * @param mtd MTD device structure
|
|
+ */
|
|
+static int msm_nand_suspend(struct mtd_info *mtd)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msm_nand_resume - [MTD Interface] Resume the msm_nand flash
|
|
+ * @param mtd MTD device structure
|
|
+ */
|
|
+static void msm_nand_resume(struct mtd_info *mtd)
|
|
+{
|
|
+}
|
|
+
|
|
+struct onenand_information {
|
|
+ uint16_t manufacturer_id;
|
|
+ uint16_t device_id;
|
|
+ uint16_t version_id;
|
|
+ uint16_t data_buf_size;
|
|
+ uint16_t boot_buf_size;
|
|
+ uint16_t num_of_buffers;
|
|
+ uint16_t technology;
|
|
+};
|
|
+
|
|
+static struct onenand_information onenand_info;
|
|
+static uint32_t nand_sfcmd_mode;
|
|
+
|
|
+uint32_t flash_onenand_probe(struct msm_nand_chip *chip)
|
|
+{
|
|
+ struct {
|
|
+ dmov_s cmd[7];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t bcfg;
|
|
+ uint32_t cmd;
|
|
+ uint32_t exec;
|
|
+ uint32_t status;
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t addr2;
|
|
+ uint32_t addr3;
|
|
+ uint32_t addr4;
|
|
+ uint32_t addr5;
|
|
+ uint32_t addr6;
|
|
+ uint32_t data0;
|
|
+ uint32_t data1;
|
|
+ uint32_t data2;
|
|
+ uint32_t data3;
|
|
+ uint32_t data4;
|
|
+ uint32_t data5;
|
|
+ uint32_t data6;
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+
|
|
+ int err = 0;
|
|
+ uint32_t initialsflashcmd = 0;
|
|
+
|
|
+ initialsflashcmd = flash_rd_reg(chip, MSM_NAND_SFLASHC_CMD);
|
|
+
|
|
+ if ((initialsflashcmd & 0x10) == 0x10)
|
|
+ nand_sfcmd_mode = MSM_NAND_SFCMD_ASYNC;
|
|
+ else
|
|
+ nand_sfcmd_mode = MSM_NAND_SFCMD_BURST;
|
|
+
|
|
+ printk(KERN_INFO "SFLASHC Async Mode bit: %x \n", nand_sfcmd_mode);
|
|
+
|
|
+ wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
|
|
+ (chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ dma_buffer->data.bcfg = SFLASH_BCFG |
|
|
+ (nand_sfcmd_mode ? 0 : (1 << 24));
|
|
+ dma_buffer->data.cmd = SFLASH_PREPCMD(7, 0, 0,
|
|
+ MSM_NAND_SFCMD_DATXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGRD);
|
|
+ dma_buffer->data.exec = 1;
|
|
+ dma_buffer->data.status = CLEAN_DATA_32;
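+ /* the ADDRn registers carry pairs of OneNAND register addresses; REGRD returns their contents in the GENP data registers read back below */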
|
|
+ dma_buffer->data.addr0 = (ONENAND_DEVICE_ID << 16) |
|
|
+ (ONENAND_MANUFACTURER_ID);
|
|
+ dma_buffer->data.addr1 = (ONENAND_DATA_BUFFER_SIZE << 16) |
|
|
+ (ONENAND_VERSION_ID);
|
|
+ dma_buffer->data.addr2 = (ONENAND_AMOUNT_OF_BUFFERS << 16) |
|
|
+ (ONENAND_BOOT_BUFFER_SIZE);
|
|
+ dma_buffer->data.addr3 = (CLEAN_DATA_16 << 16) |
|
|
+ (ONENAND_TECHNOLOGY << 0);
|
|
+ dma_buffer->data.data0 = CLEAN_DATA_32;
|
|
+ dma_buffer->data.data1 = CLEAN_DATA_32;
|
|
+ dma_buffer->data.data2 = CLEAN_DATA_32;
|
|
+ dma_buffer->data.data3 = CLEAN_DATA_32;
|
|
+
|
|
+ /* Enable and configure the SFlash controller */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.bcfg);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Configure the ADDR0 and ADDR1 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
|
|
+ cmd->dst = MSM_NAND_ADDR0;
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* Configure the ADDR2 and ADDR3 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
|
|
+ cmd->dst = MSM_NAND_ADDR2;
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the two status registers */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.status);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read data registers - valid only if status says success */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_GENP_REG0;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data0);
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ BUILD_BUG_ON(7 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
|
|
+ >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
|
|
+ | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
|
|
+ &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ /* Check for errors, protection violations etc */
|
|
+ if (dma_buffer->data.status & 0x110) {
|
|
+ pr_info("%s: MPU/OP error"
|
|
+ "(0x%x) during Onenand probe\n",
|
|
+ __func__, dma_buffer->data.status);
|
|
+ err = -EIO;
|
|
+ } else {
|
|
+
|
|
+ onenand_info.manufacturer_id =
|
|
+ (dma_buffer->data.data0 >> 0) & 0x0000FFFF;
|
|
+ onenand_info.device_id =
|
|
+ (dma_buffer->data.data0 >> 16) & 0x0000FFFF;
|
|
+ onenand_info.version_id =
|
|
+ (dma_buffer->data.data1 >> 0) & 0x0000FFFF;
|
|
+ onenand_info.data_buf_size =
|
|
+ (dma_buffer->data.data1 >> 16) & 0x0000FFFF;
|
|
+ onenand_info.boot_buf_size =
|
|
+ (dma_buffer->data.data2 >> 0) & 0x0000FFFF;
|
|
+ onenand_info.num_of_buffers =
|
|
+ (dma_buffer->data.data2 >> 16) & 0x0000FFFF;
|
|
+ onenand_info.technology =
|
|
+ (dma_buffer->data.data3 >> 0) & 0x0000FFFF;
|
|
+
|
|
+
|
|
+ pr_info("======================================="
|
|
+ "==========================\n");
|
|
+
|
|
+ pr_info("%s: manufacturer_id = 0x%x\n"
|
|
+ , __func__, onenand_info.manufacturer_id);
|
|
+ pr_info("%s: device_id = 0x%x\n"
|
|
+ , __func__, onenand_info.device_id);
|
|
+ pr_info("%s: version_id = 0x%x\n"
|
|
+ , __func__, onenand_info.version_id);
|
|
+ pr_info("%s: data_buf_size = 0x%x\n"
|
|
+ , __func__, onenand_info.data_buf_size);
|
|
+ pr_info("%s: boot_buf_size = 0x%x\n"
|
|
+ , __func__, onenand_info.boot_buf_size);
|
|
+ pr_info("%s: num_of_buffers = 0x%x\n"
|
|
+ , __func__, onenand_info.num_of_buffers);
|
|
+ pr_info("%s: technology = 0x%x\n"
|
|
+ , __func__, onenand_info.technology);
|
|
+
|
|
+ pr_info("======================================="
|
|
+ "==========================\n");
|
|
+
|
|
+ if ((onenand_info.manufacturer_id != 0x00EC)
|
|
+ || ((onenand_info.device_id & 0x0040) != 0x0040)
|
|
+ || (onenand_info.data_buf_size != 0x0800)
|
|
+ || (onenand_info.boot_buf_size != 0x0200)
|
|
+ || (onenand_info.num_of_buffers != 0x0201)
|
|
+ || (onenand_info.technology != 0)) {
|
|
+
|
|
+ pr_info("%s: Detected an unsupported device\n"
|
|
+ , __func__);
|
|
+ err = -EIO;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+int msm_onenand_read_oob(struct mtd_info *mtd,
|
|
+ loff_t from, struct mtd_oob_ops *ops)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+
|
|
+ struct {
|
|
+ dmov_s cmd[53];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t sfbcfg;
|
|
+ uint32_t sfcmd[9];
|
|
+ uint32_t sfexec;
|
|
+ uint32_t sfstat[9];
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t addr2;
|
|
+ uint32_t addr3;
|
|
+ uint32_t addr4;
|
|
+ uint32_t addr5;
|
|
+ uint32_t addr6;
|
|
+ uint32_t data0;
|
|
+ uint32_t data1;
|
|
+ uint32_t data2;
|
|
+ uint32_t data3;
|
|
+ uint32_t data4;
|
|
+ uint32_t data5;
|
|
+ uint32_t data6;
|
|
+ uint32_t macro[5];
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+
|
|
+ int err = 0;
|
|
+ int i;
|
|
+ dma_addr_t data_dma_addr = 0;
|
|
+ dma_addr_t oob_dma_addr = 0;
|
|
+ dma_addr_t data_dma_addr_curr = 0;
|
|
+ dma_addr_t oob_dma_addr_curr = 0;
|
|
+
|
|
+ loff_t from_curr = 0;
|
|
+ unsigned page_count;
|
|
+ unsigned pages_read = 0;
|
|
+
|
|
+ uint16_t onenand_startaddr1;
|
|
+ uint16_t onenand_startaddr8;
|
|
+ uint16_t onenand_startaddr2;
|
|
+ uint16_t onenand_startbuffer;
|
|
+ uint16_t onenand_sysconfig1;
|
|
+ uint16_t controller_status;
|
|
+ uint16_t interrupt_status;
|
|
+ uint16_t ecc_status;
|
|
+#if VERBOSE
|
|
+ pr_info("================================================="
|
|
+ "================\n");
|
|
+ pr_info("%s: from 0x%llx mode %d \ndatbuf 0x%p datlen 0x%x"
|
|
+ "\noobbuf 0x%p ooblen 0x%x\n",
|
|
+ __func__, from, ops->mode, ops->datbuf, ops->len,
|
|
+ ops->oobbuf, ops->ooblen);
|
|
+#endif
|
|
+ if (!mtd) {
|
|
+ pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
|
|
+ (uint32_t)mtd);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (from & (mtd->writesize - 1)) {
|
|
+ pr_err("%s: unsupported from, 0x%llx\n", __func__,
|
|
+ from);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if ((ops->mode != MTD_OPS_PLACE_OOB) && (ops->mode != MTD_OPS_AUTO_OOB) &&
|
|
+ (ops->mode != MTD_OPS_RAW)) {
|
|
+ pr_err("%s: unsupported ops->mode, %d\n", __func__,
|
|
+ ops->mode);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (((ops->datbuf == NULL) || (ops->len == 0)) &&
|
|
+ ((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
|
|
+ pr_err("%s: incorrect ops fields - nothing to do\n",
|
|
+ __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if ((ops->datbuf != NULL) && (ops->len == 0)) {
|
|
+ pr_err("%s: data buffer passed but length 0\n",
|
|
+ __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
|
|
+ pr_err("%s: oob buffer passed but length 0\n",
|
|
+ __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
|
|
+ /* when ops->datbuf is NULL, ops->len can be ooblen */
|
|
+ pr_err("%s: unsupported ops->len, %d\n", __func__,
|
|
+ ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ } else {
|
|
+ if (ops->datbuf != NULL &&
|
|
+ (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
|
|
+ pr_err("%s: unsupported ops->len,"
|
|
+ " %d for MTD_OPS_RAW\n", __func__, ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if ((ops->mode == MTD_OPS_RAW) && (ops->oobbuf)) {
|
|
+ pr_err("%s: unsupported operation, oobbuf pointer "
|
|
+ "passed in for RAW mode, %x\n", __func__,
|
|
+ (uint32_t)ops->oobbuf);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->oobbuf && !ops->datbuf) {
|
|
+ page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
|
|
+ mtd->oobavail : mtd->oobsize);
|
|
+ if ((page_count == 0) && (ops->ooblen))
|
|
+ page_count = 1;
|
|
+ } else if (ops->mode != MTD_OPS_RAW)
|
|
+ page_count = ops->len / mtd->writesize;
|
|
+ else
|
|
+ page_count = ops->len / (mtd->writesize + mtd->oobsize);
|
|
+
|
|
+ if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->oobbuf != NULL)) {
|
|
+ if (page_count * mtd->oobsize > ops->ooblen) {
|
|
+ pr_err("%s: unsupported ops->ooblen for "
|
|
+ "PLACE, %d\n", __func__, ops->ooblen);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->ooblen != 0) &&
|
|
+ (ops->ooboffs != 0)) {
|
|
+ pr_err("%s: unsupported ops->ooboffs, %d\n", __func__,
|
|
+ ops->ooboffs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->datbuf) {
+ memset(ops->datbuf, 0x55, ops->len);
+ data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
+ ops->datbuf, ops->len, DMA_FROM_DEVICE, NULL);
+ if (dma_mapping_error(chip->dev, data_dma_addr)) {
+ pr_err("%s: failed to get dma addr for %p\n",
+ __func__, ops->datbuf);
+ return -EIO;
+ }
+ }
+ if (ops->oobbuf) {
+ memset(ops->oobbuf, 0x55, ops->ooblen);
+ oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
+ ops->oobbuf, ops->ooblen, DMA_FROM_DEVICE, NULL);
+ if (dma_mapping_error(chip->dev, oob_dma_addr)) {
+ pr_err("%s: failed to get dma addr for %p\n",
+ __func__, ops->oobbuf);
+ err = -EIO;
+ goto err_dma_map_oobbuf_failed;
+ }
+ }
+
+ wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+ (chip, sizeof(*dma_buffer))));
+
+ from_curr = from;
+
|
|
+ while (page_count-- > 0) {
|
|
+
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
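+ /* Dual-die (DDP) parts map the upper half of the chip to flash
+ * core 1 with its own BufferRAM, so the offset is rebased for
+ * that half in the branch below. */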
+ if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
|
|
+ && (from_curr >= (mtd->size>>1))) { /* DDP Device */
|
|
+ onenand_startaddr1 = DEVICE_FLASHCORE_1 |
|
|
+ (((uint32_t)(from_curr-(mtd->size>>1))
|
|
+ / mtd->erasesize));
|
|
+ onenand_startaddr2 = DEVICE_BUFFERRAM_1;
|
|
+ } else {
|
|
+ onenand_startaddr1 = DEVICE_FLASHCORE_0 |
|
|
+ ((uint32_t)from_curr / mtd->erasesize) ;
|
|
+ onenand_startaddr2 = DEVICE_BUFFERRAM_0;
|
|
+ }
|
|
+
|
|
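+ /* START_ADDRESS_8 carries the page index within the erase block;
+ * the low two bits are the sector offset, hence the << 2
+ * (register layout assumed from the OneNAND programming model). */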
+ onenand_startaddr8 = (((uint32_t)from_curr &
|
|
+ (mtd->erasesize - 1)) / mtd->writesize) << 2;
|
|
+ onenand_startbuffer = DATARAM0_0 << 8;
|
|
+ onenand_sysconfig1 = (ops->mode == MTD_OPS_RAW) ?
|
|
+ ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
|
|
+ ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
|
|
+
|
|
+ dma_buffer->data.sfbcfg = SFLASH_BCFG |
|
|
+ (nand_sfcmd_mode ? 0 : (1 << 24));
|
|
+ dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGWR);
|
|
+ dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_INTHI);
|
|
+ dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
|
|
+ MSM_NAND_SFCMD_DATXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGRD);
|
|
+ dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(256, 0, 0,
|
|
+ MSM_NAND_SFCMD_DATXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_DATRD);
|
|
+ dma_buffer->data.sfcmd[4] = SFLASH_PREPCMD(256, 0, 0,
|
|
+ MSM_NAND_SFCMD_DATXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_DATRD);
|
|
+ dma_buffer->data.sfcmd[5] = SFLASH_PREPCMD(256, 0, 0,
|
|
+ MSM_NAND_SFCMD_DATXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_DATRD);
|
|
+ dma_buffer->data.sfcmd[6] = SFLASH_PREPCMD(256, 0, 0,
|
|
+ MSM_NAND_SFCMD_DATXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_DATRD);
|
|
+ dma_buffer->data.sfcmd[7] = SFLASH_PREPCMD(32, 0, 0,
|
|
+ MSM_NAND_SFCMD_DATXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_DATRD);
|
|
+ dma_buffer->data.sfcmd[8] = SFLASH_PREPCMD(4, 10, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGWR);
|
|
+ dma_buffer->data.sfexec = 1;
|
|
+ dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
|
|
+ (ONENAND_SYSTEM_CONFIG_1);
|
|
+ dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
|
|
+ (ONENAND_START_ADDRESS_1);
|
|
+ dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
|
|
+ (ONENAND_START_ADDRESS_2);
|
|
+ dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
|
|
+ (ONENAND_COMMAND);
|
|
+ dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
|
|
+ (ONENAND_INTERRUPT_STATUS);
|
|
+ dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
|
|
+ (ONENAND_SYSTEM_CONFIG_1);
|
|
+ dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
|
|
+ (ONENAND_START_ADDRESS_1);
|
|
+ dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
|
|
+ (onenand_sysconfig1);
|
|
+ dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
|
|
+ (onenand_startaddr1);
|
|
+ dma_buffer->data.data2 = (onenand_startbuffer << 16) |
|
|
+ (onenand_startaddr2);
|
|
+ dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
|
|
+ (ONENAND_CMDLOADSPARE);
|
|
+ dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
|
|
+ (CLEAN_DATA_16);
|
|
+ dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
|
|
+ (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
|
|
+ dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
|
|
+ (ONENAND_STARTADDR1_RES);
|
|
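+ /* MACRO1 values select which 512-byte BufferRAM segment the
+ * flash-buffer copies below target; 0x8010 addresses the spare
+ * area (inferred from how macro[0..3] and macro[4] are used). */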
+ dma_buffer->data.macro[0] = 0x0200;
|
|
+ dma_buffer->data.macro[1] = 0x0300;
|
|
+ dma_buffer->data.macro[2] = 0x0400;
|
|
+ dma_buffer->data.macro[3] = 0x0500;
|
|
+ dma_buffer->data.macro[4] = 0x8010;
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Write necessary address registers in the onenand device */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Enable and configure the SFlash controller */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR0 and ADDR1 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
|
|
+ cmd->dst = MSM_NAND_ADDR0;
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
|
|
+ cmd->dst = MSM_NAND_ADDR2;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR6 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
|
|
+ cmd->dst = MSM_NAND_ADDR6;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the GENP0, GENP1, GENP2, GENP3 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
|
|
+ cmd->dst = MSM_NAND_GENP_REG0;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the FLASH_DEV_CMD4,5,6 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
|
|
+ cmd->dst = MSM_NAND_DEV_CMD4;
|
|
+ cmd->len = 12;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Wait for the interrupt from the Onenand device controller */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Read necessary status registers from the onenand device */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read the GENP3 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_GENP_REG3;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read the DEVCMD4 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_DEV_CMD4;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Read the data ram area from the onenand buffer ram */
|
|
+ /*************************************************************/
|
|
+
|
|
+ if (ops->datbuf) {
|
|
+
|
|
+ dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
|
|
+ (ONENAND_CMDLOAD);
|
|
+
|
|
+ for (i = 0; i < 4; i++) {
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.sfcmd[3+i]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the MACRO1 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.macro[i]);
|
|
+ cmd->dst = MSM_NAND_MACRO1_REG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data rdy, & read status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.sfstat[3+i]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Transfer nand ctlr buf contents to usr buf */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_FLASH_BUFFER;
|
|
+ cmd->dst = data_dma_addr_curr;
|
|
+ cmd->len = 512;
|
|
+ data_dma_addr_curr += 512;
|
|
+ cmd++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if ((ops->oobbuf) || (ops->mode == MTD_OPS_RAW)) {
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.sfcmd[7]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the MACRO1 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.macro[4]);
|
|
+ cmd->dst = MSM_NAND_MACRO1_REG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.sfstat[7]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Transfer nand ctlr buffer contents into usr buf */
|
|
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
|
|
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_FLASH_BUFFER +
|
|
+ mtd->ecclayout->oobfree[i].offset;
|
|
+ cmd->dst = oob_dma_addr_curr;
|
|
+ cmd->len =
|
|
+ mtd->ecclayout->oobfree[i].length;
|
|
+ oob_dma_addr_curr +=
|
|
+ mtd->ecclayout->oobfree[i].length;
|
|
+ cmd++;
|
|
+ }
|
|
+ }
|
|
+ if (ops->mode == MTD_OPS_PLACE_OOB) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_FLASH_BUFFER;
|
|
+ cmd->dst = oob_dma_addr_curr;
|
|
+ cmd->len = mtd->oobsize;
|
|
+ oob_dma_addr_curr += mtd->oobsize;
|
|
+ cmd++;
|
|
+ }
|
|
+ if (ops->mode == MTD_OPS_RAW) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_FLASH_BUFFER;
|
|
+ cmd->dst = data_dma_addr_curr;
|
|
+ cmd->len = mtd->oobsize;
|
|
+ data_dma_addr_curr += mtd->oobsize;
|
|
+ cmd++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Restore the necessary registers to proper values */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[8]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[8]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+
|
|
+ BUILD_BUG_ON(53 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
|
|
+ >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel,
|
|
+ DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
|
|
+ &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ ecc_status = (dma_buffer->data.data3 >> 16) &
|
|
+ 0x0000FFFF;
|
|
+ interrupt_status = (dma_buffer->data.data4 >> 0) &
|
|
+ 0x0000FFFF;
|
|
+ controller_status = (dma_buffer->data.data4 >> 16) &
|
|
+ 0x0000FFFF;
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: sflash status %x %x %x %x %x %x %x"
|
|
+ "%x %x\n", __func__,
|
|
+ dma_buffer->data.sfstat[0],
|
|
+ dma_buffer->data.sfstat[1],
|
|
+ dma_buffer->data.sfstat[2],
|
|
+ dma_buffer->data.sfstat[3],
|
|
+ dma_buffer->data.sfstat[4],
|
|
+ dma_buffer->data.sfstat[5],
|
|
+ dma_buffer->data.sfstat[6],
|
|
+ dma_buffer->data.sfstat[7],
|
|
+ dma_buffer->data.sfstat[8]);
|
|
+
|
|
+ pr_info("%s: controller_status = %x\n", __func__,
|
|
+ controller_status);
|
|
+ pr_info("%s: interrupt_status = %x\n", __func__,
|
|
+ interrupt_status);
|
|
+ pr_info("%s: ecc_status = %x\n", __func__,
|
|
+ ecc_status);
|
|
+#endif
|
|
+ /* Check for errors, protection violations etc */
|
|
+ if ((controller_status != 0)
|
|
+ || (dma_buffer->data.sfstat[0] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[1] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[2] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[8] & 0x110)
|
|
+ || ((dma_buffer->data.sfstat[3] & 0x110) &&
|
|
+ (ops->datbuf))
|
|
+ || ((dma_buffer->data.sfstat[4] & 0x110) &&
|
|
+ (ops->datbuf))
|
|
+ || ((dma_buffer->data.sfstat[5] & 0x110) &&
|
|
+ (ops->datbuf))
|
|
+ || ((dma_buffer->data.sfstat[6] & 0x110) &&
|
|
+ (ops->datbuf))
|
|
+ || ((dma_buffer->data.sfstat[7] & 0x110) &&
|
|
+ ((ops->oobbuf)
|
|
+ || (ops->mode == MTD_OPS_RAW)))) {
|
|
+ pr_info("%s: ECC/MPU/OP error\n", __func__);
|
|
+ err = -EIO;
|
|
+ }
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+ pages_read++;
|
|
+ from_curr += mtd->writesize;
|
|
+ }
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+
|
|
+ if (ops->oobbuf) {
|
|
+ dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
|
|
+ DMA_FROM_DEVICE);
|
|
+ }
|
|
+err_dma_map_oobbuf_failed:
|
|
+ if (ops->datbuf) {
|
|
+ dma_unmap_page(chip->dev, data_dma_addr, ops->len,
|
|
+ DMA_FROM_DEVICE);
|
|
+ }
|
|
+
|
|
+ if (err) {
|
|
+ pr_err("%s: %llx %x %x failed\n", __func__, from_curr,
|
|
+ ops->datbuf ? ops->len : 0, ops->ooblen);
|
|
+ } else {
|
|
+ ops->retlen = ops->oobretlen = 0;
|
|
+ if (ops->datbuf != NULL) {
|
|
+ if (ops->mode != MTD_OPS_RAW)
|
|
+ ops->retlen = mtd->writesize * pages_read;
|
|
+ else
|
|
+ ops->retlen = (mtd->writesize + mtd->oobsize)
|
|
+ * pages_read;
|
|
+ }
|
|
+ if (ops->oobbuf != NULL) {
|
|
+ if (ops->mode == MTD_OPS_AUTO_OOB)
|
|
+ ops->oobretlen = mtd->oobavail * pages_read;
|
|
+ else
|
|
+ ops->oobretlen = mtd->oobsize * pages_read;
|
|
+ }
|
|
+ }
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
|
|
+ __func__, err, ops->retlen, ops->oobretlen);
|
|
+
|
|
+ pr_info("==================================================="
|
|
+ "==============\n");
|
|
+#endif
|
|
+ return err;
|
|
+}
|
|
+
|
|
+int msm_onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
|
|
+ size_t *retlen, u_char *buf)
|
|
+{
|
|
+ int ret;
|
|
+ struct mtd_oob_ops ops;
|
|
+
|
|
+ ops.mode = MTD_OPS_PLACE_OOB;
|
|
+ ops.datbuf = buf;
|
|
+ ops.len = len;
|
|
+ ops.retlen = 0;
|
|
+ ops.oobbuf = NULL;
|
|
+ ops.ooblen = 0;
|
|
+ ops.oobretlen = 0;
|
|
+ ret = msm_onenand_read_oob(mtd, from, &ops);
|
|
+ *retlen = ops.retlen;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int msm_onenand_write_oob(struct mtd_info *mtd, loff_t to,
|
|
+ struct mtd_oob_ops *ops)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+
|
|
+ struct {
|
|
+ dmov_s cmd[53];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t sfbcfg;
|
|
+ uint32_t sfcmd[10];
|
|
+ uint32_t sfexec;
|
|
+ uint32_t sfstat[10];
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t addr2;
|
|
+ uint32_t addr3;
|
|
+ uint32_t addr4;
|
|
+ uint32_t addr5;
|
|
+ uint32_t addr6;
|
|
+ uint32_t data0;
|
|
+ uint32_t data1;
|
|
+ uint32_t data2;
|
|
+ uint32_t data3;
|
|
+ uint32_t data4;
|
|
+ uint32_t data5;
|
|
+ uint32_t data6;
|
|
+ uint32_t macro[5];
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+
|
|
+ int err = 0;
|
|
+ int i, j, k;
|
|
+ dma_addr_t data_dma_addr = 0;
|
|
+ dma_addr_t oob_dma_addr = 0;
|
|
+ dma_addr_t init_dma_addr = 0;
|
|
+ dma_addr_t data_dma_addr_curr = 0;
|
|
+ dma_addr_t oob_dma_addr_curr = 0;
|
|
+ uint8_t *init_spare_bytes;
|
|
+
|
|
+ loff_t to_curr = 0;
|
|
+ unsigned page_count;
|
|
+ unsigned pages_written = 0;
|
|
+
|
|
+ uint16_t onenand_startaddr1;
|
|
+ uint16_t onenand_startaddr8;
|
|
+ uint16_t onenand_startaddr2;
|
|
+ uint16_t onenand_startbuffer;
|
|
+ uint16_t onenand_sysconfig1;
|
|
+
|
|
+ uint16_t controller_status;
|
|
+ uint16_t interrupt_status;
|
|
+ uint16_t ecc_status;
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("================================================="
|
|
+ "================\n");
|
|
+ pr_info("%s: to 0x%llx mode %d \ndatbuf 0x%p datlen 0x%x"
|
|
+ "\noobbuf 0x%p ooblen 0x%x\n",
|
|
+ __func__, to, ops->mode, ops->datbuf, ops->len,
|
|
+ ops->oobbuf, ops->ooblen);
|
|
+#endif
|
|
+ if (!mtd) {
|
|
+ pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
|
|
+ (uint32_t)mtd);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (to & (mtd->writesize - 1)) {
|
|
+ pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if ((ops->mode != MTD_OPS_PLACE_OOB) && (ops->mode != MTD_OPS_AUTO_OOB) &&
|
|
+ (ops->mode != MTD_OPS_RAW)) {
|
|
+ pr_err("%s: unsupported ops->mode, %d\n", __func__,
|
|
+ ops->mode);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (((ops->datbuf == NULL) || (ops->len == 0)) &&
|
|
+ ((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
|
|
+ pr_err("%s: incorrect ops fields - nothing to do\n",
|
|
+ __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if ((ops->datbuf != NULL) && (ops->len == 0)) {
|
|
+ pr_err("%s: data buffer passed but length 0\n",
|
|
+ __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
|
|
+ pr_err("%s: oob buffer passed but length 0\n",
|
|
+ __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->mode != MTD_OPS_RAW) {
|
|
+ if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
|
|
+ /* when ops->datbuf is NULL, ops->len can be ooblen */
|
|
+ pr_err("%s: unsupported ops->len, %d\n", __func__,
|
|
+ ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ } else {
|
|
+ if (ops->datbuf != NULL &&
|
|
+ (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
|
|
+ pr_err("%s: unsupported ops->len,"
|
|
+ " %d for MTD_OPS_RAW\n", __func__, ops->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if ((ops->mode == MTD_OPS_RAW) && (ops->oobbuf)) {
|
|
+ pr_err("%s: unsupported operation, oobbuf pointer "
|
|
+ "passed in for RAW mode, %x\n", __func__,
|
|
+ (uint32_t)ops->oobbuf);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ops->oobbuf && !ops->datbuf) {
|
|
+ page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
|
|
+ mtd->oobavail : mtd->oobsize);
|
|
+ if ((page_count == 0) && (ops->ooblen))
|
|
+ page_count = 1;
|
|
+ } else if (ops->mode != MTD_OPS_RAW)
|
|
+ page_count = ops->len / mtd->writesize;
|
|
+ else
|
|
+ page_count = ops->len / (mtd->writesize + mtd->oobsize);
|
|
+
|
|
+ if ((ops->mode == MTD_OPS_AUTO_OOB) && (ops->oobbuf != NULL)) {
|
|
+ if (page_count > 1) {
|
|
+ pr_err("%s: unsupported ops->ooblen for"
|
|
+ "AUTO, %d\n", __func__, ops->ooblen);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->oobbuf != NULL)) {
|
|
+ if (page_count * mtd->oobsize > ops->ooblen) {
|
|
+ pr_err("%s: unsupported ops->ooblen for"
|
|
+ "PLACE, %d\n", __func__, ops->ooblen);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->ooblen != 0) &&
|
|
+ (ops->ooboffs != 0)) {
|
|
+ pr_err("%s: unsupported ops->ooboffs, %d\n",
|
|
+ __func__, ops->ooboffs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
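+ /* Build a 64-byte spare template filled with 0xFF; for
+ * MTD_OPS_AUTO_OOB the caller's OOB bytes are scattered into the
+ * free positions of this template before it is programmed. */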
+ init_spare_bytes = kmalloc(64, GFP_KERNEL);
|
|
+ if (!init_spare_bytes) {
|
|
+ pr_err("%s: failed to alloc init_spare_bytes buffer\n",
|
|
+ __func__);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ for (i = 0; i < 64; i++)
|
|
+ init_spare_bytes[i] = 0xFF;
|
|
+
|
|
+ if ((ops->oobbuf) && (ops->mode == MTD_OPS_AUTO_OOB)) {
|
|
+ for (i = 0, k = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++)
|
|
+ for (j = 0; j < mtd->ecclayout->oobfree[i].length;
|
|
+ j++) {
|
|
+ init_spare_bytes[j +
|
|
+ mtd->ecclayout->oobfree[i].offset]
|
|
+ = (ops->oobbuf)[k];
|
|
+ k++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ops->datbuf) {
|
|
+ data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
|
|
+ ops->datbuf, ops->len, DMA_TO_DEVICE, NULL);
|
|
+ if (dma_mapping_error(chip->dev, data_dma_addr)) {
|
|
+ pr_err("%s: failed to get dma addr for %p\n",
|
|
+ __func__, ops->datbuf);
|
|
+ kfree(init_spare_bytes); /* avoid leaking the spare template */
+ return -EIO;
|
|
+ }
|
|
+ }
|
|
+ if (ops->oobbuf) {
|
|
+ oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
|
|
+ ops->oobbuf, ops->ooblen, DMA_TO_DEVICE, NULL);
|
|
+ if (dma_mapping_error(chip->dev, oob_dma_addr)) {
|
|
+ pr_err("%s: failed to get dma addr for %p\n",
|
|
+ __func__, ops->oobbuf);
|
|
+ err = -EIO;
|
|
+ goto err_dma_map_oobbuf_failed;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ init_dma_addr = msm_nand_dma_map(chip->dev, init_spare_bytes, 64,
|
|
+ DMA_TO_DEVICE, NULL);
|
|
+ if (dma_mapping_error(chip->dev, init_dma_addr)) {
|
|
+ pr_err("%s: failed to get dma addr for %p\n",
|
|
+ __func__, init_spare_bytes);
|
|
+ err = -EIO;
|
|
+ goto err_dma_map_initbuf_failed;
|
|
+ }
|
|
+
|
|
+
|
|
+ wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
|
|
+ (chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ to_curr = to;
|
|
+
|
|
+ while (page_count-- > 0) {
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
|
|
+ && (to_curr >= (mtd->size>>1))) { /* DDP Device */
|
|
+ onenand_startaddr1 = DEVICE_FLASHCORE_1 |
|
|
+ (((uint32_t)(to_curr-(mtd->size>>1))
|
|
+ / mtd->erasesize));
|
|
+ onenand_startaddr2 = DEVICE_BUFFERRAM_1;
|
|
+ } else {
|
|
+ onenand_startaddr1 = DEVICE_FLASHCORE_0 |
|
|
+ ((uint32_t)to_curr / mtd->erasesize) ;
|
|
+ onenand_startaddr2 = DEVICE_BUFFERRAM_0;
|
|
+ }
|
|
+
|
|
+ onenand_startaddr8 = (((uint32_t)to_curr &
|
|
+ (mtd->erasesize - 1)) / mtd->writesize) << 2;
|
|
+ onenand_startbuffer = DATARAM0_0 << 8;
|
|
+ onenand_sysconfig1 = (ops->mode == MTD_OPS_RAW) ?
|
|
+ ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
|
|
+ ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
|
|
+
|
|
+ dma_buffer->data.sfbcfg = SFLASH_BCFG |
|
|
+ (nand_sfcmd_mode ? 0 : (1 << 24));
|
|
+ dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(6, 0, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGWR);
|
|
+ dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(256, 0, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_DATWR);
|
|
+ dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(256, 0, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_DATWR);
|
|
+ dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(256, 0, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_DATWR);
|
|
+ dma_buffer->data.sfcmd[4] = SFLASH_PREPCMD(256, 0, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_DATWR);
|
|
+ dma_buffer->data.sfcmd[5] = SFLASH_PREPCMD(32, 0, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_DATWR);
|
|
+ dma_buffer->data.sfcmd[6] = SFLASH_PREPCMD(1, 6, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGWR);
|
|
+ dma_buffer->data.sfcmd[7] = SFLASH_PREPCMD(0, 0, 32,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_INTHI);
|
|
+ dma_buffer->data.sfcmd[8] = SFLASH_PREPCMD(3, 7, 0,
|
|
+ MSM_NAND_SFCMD_DATXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGRD);
|
|
+ dma_buffer->data.sfcmd[9] = SFLASH_PREPCMD(4, 10, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGWR);
|
|
+ dma_buffer->data.sfexec = 1;
|
|
+ dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[9] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
|
|
+ (ONENAND_SYSTEM_CONFIG_1);
|
|
+ dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
|
|
+ (ONENAND_START_ADDRESS_1);
|
|
+ dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
|
|
+ (ONENAND_START_ADDRESS_2);
|
|
+ dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
|
|
+ (ONENAND_COMMAND);
|
|
+ dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
|
|
+ (ONENAND_INTERRUPT_STATUS);
|
|
+ dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
|
|
+ (ONENAND_SYSTEM_CONFIG_1);
|
|
+ dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
|
|
+ (ONENAND_START_ADDRESS_1);
|
|
+ dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
|
|
+ (onenand_sysconfig1);
|
|
+ dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
|
|
+ (onenand_startaddr1);
|
|
+ dma_buffer->data.data2 = (onenand_startbuffer << 16) |
|
|
+ (onenand_startaddr2);
|
|
+ dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
|
|
+ (ONENAND_CMDPROGSPARE);
|
|
+ dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
|
|
+ (CLEAN_DATA_16);
|
|
+ dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
|
|
+ (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
|
|
+ dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
|
|
+ (ONENAND_STARTADDR1_RES);
|
|
+ dma_buffer->data.macro[0] = 0x0200;
|
|
+ dma_buffer->data.macro[1] = 0x0300;
|
|
+ dma_buffer->data.macro[2] = 0x0400;
|
|
+ dma_buffer->data.macro[3] = 0x0500;
|
|
+ dma_buffer->data.macro[4] = 0x8010;
|
|
+
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Write necessary address registers in the onenand device */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Enable and configure the SFlash controller */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR0 and ADDR1 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
|
|
+ cmd->dst = MSM_NAND_ADDR0;
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
|
|
+ cmd->dst = MSM_NAND_ADDR2;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR6 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
|
|
+ cmd->dst = MSM_NAND_ADDR6;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the GENP0, GENP1, GENP2, GENP3 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
|
|
+ cmd->dst = MSM_NAND_GENP_REG0;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the FLASH_DEV_CMD4,5,6 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
|
|
+ cmd->dst = MSM_NAND_DEV_CMD4;
|
|
+ cmd->len = 12;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Write the data ram area in the onenand buffer ram */
|
|
+ /*************************************************************/
|
|
+
|
|
+ if (ops->datbuf) {
|
|
+ dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
|
|
+ (ONENAND_CMDPROG);
|
|
+
|
|
+ for (i = 0; i < 4; i++) {
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.sfcmd[1+i]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Trnsfr usr buf contents to nand ctlr buf */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = data_dma_addr_curr;
|
|
+ cmd->dst = MSM_NAND_FLASH_BUFFER;
|
|
+ cmd->len = 512;
|
|
+ data_dma_addr_curr += 512;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the MACRO1 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.macro[i]);
|
|
+ cmd->dst = MSM_NAND_MACRO1_REG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data rdy, & read status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip,
|
|
+ &dma_buffer->data.sfstat[1+i]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[5]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ if ((ops->oobbuf) || (ops->mode == MTD_OPS_RAW)) {
|
|
+
|
|
+ /* Transfer user buf contents into nand ctlr buffer */
|
|
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = init_dma_addr;
|
|
+ cmd->dst = MSM_NAND_FLASH_BUFFER;
|
|
+ cmd->len = mtd->oobsize;
|
|
+ cmd++;
|
|
+ }
|
|
+ if (ops->mode == MTD_OPS_PLACE_OOB) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = oob_dma_addr_curr;
|
|
+ cmd->dst = MSM_NAND_FLASH_BUFFER;
|
|
+ cmd->len = mtd->oobsize;
|
|
+ oob_dma_addr_curr += mtd->oobsize;
|
|
+ cmd++;
|
|
+ }
|
|
+ if (ops->mode == MTD_OPS_RAW) {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = data_dma_addr_curr;
|
|
+ cmd->dst = MSM_NAND_FLASH_BUFFER;
|
|
+ cmd->len = mtd->oobsize;
|
|
+ data_dma_addr_curr += mtd->oobsize;
|
|
+ cmd++;
|
|
+ }
|
|
+ } else {
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = init_dma_addr;
|
|
+ cmd->dst = MSM_NAND_FLASH_BUFFER;
|
|
+ cmd->len = mtd->oobsize;
|
|
+ cmd++;
|
|
+ }
|
|
+
|
|
+ /* Write the MACRO1 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.macro[4]);
|
|
+ cmd->dst = MSM_NAND_MACRO1_REG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[5]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*********************************************************/
|
|
+ /* Issuing write command */
|
|
+ /*********************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[6]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[6]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Wait for the interrupt from the Onenand device controller */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[7]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[7]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Read necessary status registers from the onenand device */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[8]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[8]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read the GENP3 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_GENP_REG3;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read the DEVCMD4 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_DEV_CMD4;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Restore the necessary registers to proper values */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[9]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[9]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+
|
|
+ BUILD_BUG_ON(53 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
|
|
+ >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel,
|
|
+ DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
|
|
+ &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
|
|
+ interrupt_status = (dma_buffer->data.data4 >> 0)&0x0000FFFF;
|
|
+ controller_status = (dma_buffer->data.data4 >> 16)&0x0000FFFF;
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: sflash status %x %x %x %x %x %x %x"
|
|
+ " %x %x %x\n", __func__,
|
|
+ dma_buffer->data.sfstat[0],
|
|
+ dma_buffer->data.sfstat[1],
|
|
+ dma_buffer->data.sfstat[2],
|
|
+ dma_buffer->data.sfstat[3],
|
|
+ dma_buffer->data.sfstat[4],
|
|
+ dma_buffer->data.sfstat[5],
|
|
+ dma_buffer->data.sfstat[6],
|
|
+ dma_buffer->data.sfstat[7],
|
|
+ dma_buffer->data.sfstat[8],
|
|
+ dma_buffer->data.sfstat[9]);
|
|
+
|
|
+ pr_info("%s: controller_status = %x\n", __func__,
|
|
+ controller_status);
|
|
+ pr_info("%s: interrupt_status = %x\n", __func__,
|
|
+ interrupt_status);
|
|
+ pr_info("%s: ecc_status = %x\n", __func__,
|
|
+ ecc_status);
|
|
+#endif
|
|
+ /* Check for errors, protection violations etc */
|
|
+ if ((controller_status != 0)
|
|
+ || (dma_buffer->data.sfstat[0] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[6] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[7] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[8] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[9] & 0x110)
|
|
+ || ((dma_buffer->data.sfstat[1] & 0x110) &&
|
|
+ (ops->datbuf))
|
|
+ || ((dma_buffer->data.sfstat[2] & 0x110) &&
|
|
+ (ops->datbuf))
|
|
+ || ((dma_buffer->data.sfstat[3] & 0x110) &&
|
|
+ (ops->datbuf))
|
|
+ || ((dma_buffer->data.sfstat[4] & 0x110) &&
|
|
+ (ops->datbuf))
|
|
+ || ((dma_buffer->data.sfstat[5] & 0x110) &&
|
|
+ ((ops->oobbuf)
|
|
+ || (ops->mode == MTD_OPS_RAW)))) {
|
|
+ pr_info("%s: ECC/MPU/OP error\n", __func__);
|
|
+ err = -EIO;
|
|
+ }
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+ pages_written++;
|
|
+ to_curr += mtd->writesize;
|
|
+ }
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+
|
|
+ dma_unmap_page(chip->dev, init_dma_addr, 64, DMA_TO_DEVICE);
|
|
+
|
|
+err_dma_map_initbuf_failed:
|
|
+ if (ops->oobbuf) {
|
|
+ dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
|
|
+ DMA_TO_DEVICE);
|
|
+ }
|
|
+err_dma_map_oobbuf_failed:
|
|
+ if (ops->datbuf) {
|
|
+ dma_unmap_page(chip->dev, data_dma_addr, ops->len,
|
|
+ DMA_TO_DEVICE);
|
|
+ }
|
|
+
|
|
+ if (err) {
|
|
+ pr_err("%s: %llx %x %x failed\n", __func__, to_curr,
|
|
+ ops->datbuf ? ops->len : 0, ops->ooblen);
|
|
+ } else {
|
|
+ ops->retlen = ops->oobretlen = 0;
|
|
+ if (ops->datbuf != NULL) {
|
|
+ if (ops->mode != MTD_OPS_RAW)
|
|
+ ops->retlen = mtd->writesize * pages_written;
|
|
+ else
|
|
+ ops->retlen = (mtd->writesize + mtd->oobsize)
|
|
+ * pages_written;
|
|
+ }
|
|
+ if (ops->oobbuf != NULL) {
|
|
+ if (ops->mode == MTD_OPS_AUTO_OOB)
|
|
+ ops->oobretlen = mtd->oobavail * pages_written;
|
|
+ else
|
|
+ ops->oobretlen = mtd->oobsize * pages_written;
|
|
+ }
|
|
+ }
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
|
|
+ __func__, err, ops->retlen, ops->oobretlen);
|
|
+
|
|
+ pr_info("================================================="
|
|
+ "================\n");
|
|
+#endif
|
|
+ kfree(init_spare_bytes);
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int msm_onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
|
|
+ size_t *retlen, const u_char *buf)
|
|
+{
|
|
+ int ret;
|
|
+ struct mtd_oob_ops ops;
|
|
+
|
|
+ ops.mode = MTD_OPS_PLACE_OOB;
|
|
+ ops.datbuf = (uint8_t *)buf;
|
|
+ ops.len = len;
|
|
+ ops.retlen = 0;
|
|
+ ops.oobbuf = NULL;
|
|
+ ops.ooblen = 0;
|
|
+ ops.oobretlen = 0;
|
|
+ ret = msm_onenand_write_oob(mtd, to, &ops);
|
|
+ *retlen = ops.retlen;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int msm_onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+
|
|
+ struct {
|
|
+ dmov_s cmd[20];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t sfbcfg;
|
|
+ uint32_t sfcmd[4];
|
|
+ uint32_t sfexec;
|
|
+ uint32_t sfstat[4];
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t addr2;
|
|
+ uint32_t addr3;
|
|
+ uint32_t addr4;
|
|
+ uint32_t addr5;
|
|
+ uint32_t addr6;
|
|
+ uint32_t data0;
|
|
+ uint32_t data1;
|
|
+ uint32_t data2;
|
|
+ uint32_t data3;
|
|
+ uint32_t data4;
|
|
+ uint32_t data5;
|
|
+ uint32_t data6;
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+
|
|
+ int err = 0;
|
|
+
|
|
+ uint16_t onenand_startaddr1;
|
|
+ uint16_t onenand_startaddr8;
|
|
+ uint16_t onenand_startaddr2;
|
|
+ uint16_t onenand_startbuffer;
|
|
+
|
|
+ uint16_t controller_status;
|
|
+ uint16_t interrupt_status;
|
|
+ uint16_t ecc_status;
|
|
+
|
|
+ uint64_t temp;
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("================================================="
|
|
+ "================\n");
|
|
+ pr_info("%s: addr 0x%llx len 0x%llx\n",
|
|
+ __func__, instr->addr, instr->len);
|
|
+#endif
|
|
+ if (instr->addr & (mtd->erasesize - 1)) {
|
|
+ pr_err("%s: Unsupported erase address, 0x%llx\n",
|
|
+ __func__, instr->addr);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (instr->len != mtd->erasesize) {
|
|
+ pr_err("%s: Unsupported erase len, %lld\n",
|
|
+ __func__, instr->len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
|
|
+ (chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ temp = instr->addr;
|
|
+
|
|
+ if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
|
|
+ && (temp >= (mtd->size>>1))) { /* DDP Device */
|
|
+ onenand_startaddr1 = DEVICE_FLASHCORE_1 |
|
|
+ (((uint32_t)(temp-(mtd->size>>1))
|
|
+ / mtd->erasesize));
|
|
+ onenand_startaddr2 = DEVICE_BUFFERRAM_1;
|
|
+ } else {
|
|
+ onenand_startaddr1 = DEVICE_FLASHCORE_0 |
|
|
+ ((uint32_t)temp / mtd->erasesize) ;
|
|
+ onenand_startaddr2 = DEVICE_BUFFERRAM_0;
|
|
+ }
|
|
+
|
|
+ onenand_startaddr8 = 0x0000;
|
|
+ onenand_startbuffer = DATARAM0_0 << 8;
|
|
+
|
|
+ dma_buffer->data.sfbcfg = SFLASH_BCFG |
|
|
+ (nand_sfcmd_mode ? 0 : (1 << 24));
|
|
+ dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGWR);
|
|
+ dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_INTHI);
|
|
+ dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
|
|
+ MSM_NAND_SFCMD_DATXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGRD);
|
|
+ dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGWR);
|
|
+ dma_buffer->data.sfexec = 1;
|
|
+ dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
|
|
+ (ONENAND_SYSTEM_CONFIG_1);
|
|
+ dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
|
|
+ (ONENAND_START_ADDRESS_1);
|
|
+ dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
|
|
+ (ONENAND_START_ADDRESS_2);
|
|
+ dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
|
|
+ (ONENAND_COMMAND);
|
|
+ dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
|
|
+ (ONENAND_INTERRUPT_STATUS);
|
|
+ dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
|
|
+ (ONENAND_SYSTEM_CONFIG_1);
|
|
+ dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
|
|
+ (ONENAND_START_ADDRESS_1);
|
|
+ dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
|
|
+ (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
|
|
+ dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
|
|
+ (onenand_startaddr1);
|
|
+ dma_buffer->data.data2 = (onenand_startbuffer << 16) |
|
|
+ (onenand_startaddr2);
|
|
+ dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
|
|
+ (ONENAND_CMDERAS);
|
|
+ dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
|
|
+ (CLEAN_DATA_16);
|
|
+ dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
|
|
+ (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
|
|
+ dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
|
|
+ (ONENAND_STARTADDR1_RES);
|
|
+
|
|
+ /***************************************************************/
|
|
+ /* Write the necessary address registers in the onenand device */
|
|
+ /***************************************************************/
|
|
+
|
|
+ /* Enable and configure the SFlash controller */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR0 and ADDR1 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
|
|
+ cmd->dst = MSM_NAND_ADDR0;
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
|
|
+ cmd->dst = MSM_NAND_ADDR2;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR6 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
|
|
+ cmd->dst = MSM_NAND_ADDR6;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
|
|
+ cmd->dst = MSM_NAND_GENP_REG0;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the FLASH_DEV_CMD4,5,6 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
|
|
+ cmd->dst = MSM_NAND_DEV_CMD4;
|
|
+ cmd->len = 12;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /***************************************************************/
|
|
+ /* Wait for the interrupt from the Onenand device controller */
|
|
+ /***************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /***************************************************************/
|
|
+ /* Read the necessary status registers from the onenand device */
|
|
+ /***************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read the GENP3 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_GENP_REG3;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read the DEVCMD4 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_DEV_CMD4;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /***************************************************************/
|
|
+ /* Restore the necessary registers to proper values */
|
|
+ /***************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+
|
|
+ BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
|
|
+ >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
|
|
+ | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
|
|
+ &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
|
|
+ interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
|
|
+ controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
|
|
+ dma_buffer->data.sfstat[0],
|
|
+ dma_buffer->data.sfstat[1],
|
|
+ dma_buffer->data.sfstat[2],
|
|
+ dma_buffer->data.sfstat[3]);
|
|
+
|
|
+ pr_info("%s: controller_status = %x\n", __func__,
|
|
+ controller_status);
|
|
+ pr_info("%s: interrupt_status = %x\n", __func__,
|
|
+ interrupt_status);
|
|
+ pr_info("%s: ecc_status = %x\n", __func__,
|
|
+ ecc_status);
|
|
+#endif
+ /* Check for errors, protection violations etc */
+ if ((controller_status != 0)
+ || (dma_buffer->data.sfstat[0] & 0x110)
+ || (dma_buffer->data.sfstat[1] & 0x110)
+ || (dma_buffer->data.sfstat[2] & 0x110)
+ || (dma_buffer->data.sfstat[3] & 0x110)) {
+ pr_err("%s: ECC/MPU/OP error\n", __func__);
+ err = -EIO;
+ }
+
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+ if (err) {
+ pr_err("%s: Erase failed, 0x%llx\n", __func__,
+ instr->addr);
+ instr->fail_addr = instr->addr;
+ instr->state = MTD_ERASE_FAILED;
+ } else {
+ instr->state = MTD_ERASE_DONE;
+ instr->fail_addr = 0xffffffff;
+ mtd_erase_callback(instr);
+ }
+
+#if VERBOSE
+ pr_info("\n%s: ret %d\n", __func__, err);
+ pr_info("===================================================="
+ "=============\n");
+#endif
+ return err;
+}
+
|
|
+static int msm_onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
|
|
+{
|
|
+ struct mtd_oob_ops ops;
|
|
+ int rval, i;
|
|
+ int ret = 0;
|
|
+ uint8_t *buffer;
|
|
+ uint8_t *oobptr;
|
|
+
|
|
+ if ((ofs > mtd->size) || (ofs & (mtd->erasesize - 1))) {
|
|
+ pr_err("%s: unsupported block address, 0x%x\n",
|
|
+ __func__, (uint32_t)ofs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ buffer = kmalloc(2112, GFP_KERNEL|GFP_DMA);
|
|
+ if (buffer == 0) {
|
|
+ pr_err("%s: Could not kmalloc for buffer\n",
|
|
+ __func__);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ memset(buffer, 0x00, 2112);
|
|
+ oobptr = &(buffer[2048]);
|
|
+
|
|
+ ops.mode = MTD_OPS_RAW;
|
|
+ ops.len = 2112;
|
|
+ ops.retlen = 0;
|
|
+ ops.ooblen = 0;
|
|
+ ops.oobretlen = 0;
|
|
+ ops.ooboffs = 0;
|
|
+ ops.datbuf = buffer;
|
|
+ ops.oobbuf = NULL;
|
|
+
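+ /* The first two pages of the block are read raw; any non-0xFF byte
+ * in the bad-block marker positions (first two bytes of each
+ * 16-byte spare chunk) marks the block bad. */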
+ for (i = 0; i < 2; i++) {
+ ofs = ofs + i*mtd->writesize;
+ rval = msm_onenand_read_oob(mtd, ofs, &ops);
+ if (rval) {
+ pr_err("%s: Error in reading bad blk info\n",
+ __func__);
+ ret = rval;
+ break;
+ }
+ if ((oobptr[0] != 0xFF) || (oobptr[1] != 0xFF) ||
+ (oobptr[16] != 0xFF) || (oobptr[17] != 0xFF) ||
+ (oobptr[32] != 0xFF) || (oobptr[33] != 0xFF) ||
+ (oobptr[48] != 0xFF) || (oobptr[49] != 0xFF)
+ ) {
+ ret = 1;
+ break;
+ }
+ }
+
|
|
+ kfree(buffer);
|
|
+
|
|
+#if VERBOSE
|
|
+ if (ret == 1)
|
|
+ pr_info("%s : Block containing 0x%x is bad\n",
|
|
+ __func__, (unsigned int)ofs);
|
|
+#endif
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int msm_onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
|
|
+{
|
|
+ struct mtd_oob_ops ops;
|
|
+ int rval, i;
|
|
+ int ret = 0;
|
|
+ uint8_t *buffer;
|
|
+
|
|
+ if ((ofs > mtd->size) || (ofs & (mtd->erasesize - 1))) {
|
|
+ pr_err("%s: unsupported block address, 0x%x\n",
|
|
+ __func__, (uint32_t)ofs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ buffer = page_address(ZERO_PAGE());
|
|
+
|
|
+ ops.mode = MTD_OPS_RAW;
|
|
+ ops.len = 2112;
|
|
+ ops.retlen = 0;
|
|
+ ops.ooblen = 0;
|
|
+ ops.oobretlen = 0;
|
|
+ ops.ooboffs = 0;
|
|
+ ops.datbuf = buffer;
|
|
+ ops.oobbuf = NULL;
|
|
+
|
|
+ for (i = 0; i < 2; i++) {
|
|
+ ofs = ofs + i*mtd->writesize;
|
|
+ rval = msm_onenand_write_oob(mtd, ofs, &ops);
|
|
+ if (rval) {
|
|
+ pr_err("%s: Error in writing bad blk info\n",
|
|
+ __func__);
|
|
+ ret = rval;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int msm_onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+
|
|
+ struct {
|
|
+ dmov_s cmd[20];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t sfbcfg;
|
|
+ uint32_t sfcmd[4];
|
|
+ uint32_t sfexec;
|
|
+ uint32_t sfstat[4];
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t addr2;
|
|
+ uint32_t addr3;
|
|
+ uint32_t addr4;
|
|
+ uint32_t addr5;
|
|
+ uint32_t addr6;
|
|
+ uint32_t data0;
|
|
+ uint32_t data1;
|
|
+ uint32_t data2;
|
|
+ uint32_t data3;
|
|
+ uint32_t data4;
|
|
+ uint32_t data5;
|
|
+ uint32_t data6;
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+
|
|
+ int err = 0;
|
|
+
|
|
+ uint16_t onenand_startaddr1;
|
|
+ uint16_t onenand_startaddr8;
|
|
+ uint16_t onenand_startaddr2;
|
|
+ uint16_t onenand_startblock;
|
|
+
|
|
+ uint16_t controller_status;
|
|
+ uint16_t interrupt_status;
|
|
+ uint16_t write_prot_status;
|
|
+
|
|
+ uint64_t start_ofs;
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("===================================================="
|
|
+ "=============\n");
|
|
+ pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
|
|
+#endif
|
|
+ /* 'ofs' & 'len' should align to block size */
|
|
+ if (ofs&(mtd->erasesize - 1)) {
|
|
+ pr_err("%s: Unsupported ofs address, 0x%llx\n",
|
|
+ __func__, ofs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (len&(mtd->erasesize - 1)) {
|
|
+ pr_err("%s: Unsupported len, %lld\n",
|
|
+ __func__, len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ofs+len > mtd->size) {
|
|
+ pr_err("%s: Maximum chip size exceeded\n", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
|
|
+ (chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
|
|
+#if VERBOSE
|
|
+ pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
|
|
+#endif
|
|
+
|
|
+ cmd = dma_buffer->cmd;
|
|
+ if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
|
|
+ && (ofs >= (mtd->size>>1))) { /* DDP Device */
|
|
+ onenand_startaddr1 = DEVICE_FLASHCORE_1 |
|
|
+ (((uint32_t)(ofs - (mtd->size>>1))
|
|
+ / mtd->erasesize));
|
|
+ onenand_startaddr2 = DEVICE_BUFFERRAM_1;
|
|
+ onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
|
|
+ / mtd->erasesize);
|
|
+ } else {
|
|
+ onenand_startaddr1 = DEVICE_FLASHCORE_0 |
|
|
+ ((uint32_t)ofs / mtd->erasesize) ;
|
|
+ onenand_startaddr2 = DEVICE_BUFFERRAM_0;
|
|
+ onenand_startblock = ((uint32_t)ofs
|
|
+ / mtd->erasesize);
|
|
+ }
|
|
+
|
|
+ onenand_startaddr8 = 0x0000;
|
|
+ dma_buffer->data.sfbcfg = SFLASH_BCFG |
|
|
+ (nand_sfcmd_mode ? 0 : (1 << 24));
|
|
+ dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGWR);
|
|
+ dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_INTHI);
|
|
+ dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
|
|
+ MSM_NAND_SFCMD_DATXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGRD);
|
|
+ dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGWR);
|
|
+ dma_buffer->data.sfexec = 1;
|
|
+ dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
|
|
+ (ONENAND_SYSTEM_CONFIG_1);
|
|
+ dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
|
|
+ (ONENAND_START_ADDRESS_1);
|
|
+ dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
|
|
+ (ONENAND_START_ADDRESS_2);
|
|
+ dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
|
|
+ (ONENAND_COMMAND);
|
|
+ dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
|
|
+ (ONENAND_INTERRUPT_STATUS);
|
|
+ dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
|
|
+ (ONENAND_SYSTEM_CONFIG_1);
|
|
+ dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
|
|
+ (ONENAND_START_ADDRESS_1);
|
|
+ dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
|
|
+ (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
|
|
+ dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
|
|
+ (onenand_startaddr1);
|
|
+ dma_buffer->data.data2 = (onenand_startblock << 16) |
|
|
+ (onenand_startaddr2);
|
|
+ dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
|
|
+ (ONENAND_CMD_UNLOCK);
|
|
+ dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
|
|
+ (CLEAN_DATA_16);
|
|
+ dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
|
|
+ (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
|
|
+ dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
|
|
+ (ONENAND_STARTADDR1_RES);
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Write the necessary address reg in the onenand device */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Enable and configure the SFlash controller */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR0 and ADDR1 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
|
|
+ cmd->dst = MSM_NAND_ADDR0;
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
|
|
+ cmd->dst = MSM_NAND_ADDR2;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR6 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
|
|
+ cmd->dst = MSM_NAND_ADDR6;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
|
|
+ cmd->dst = MSM_NAND_GENP_REG0;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the FLASH_DEV_CMD4,5,6 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
|
|
+ cmd->dst = MSM_NAND_DEV_CMD4;
|
|
+ cmd->len = 12;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Wait for the interrupt from the Onenand device controller */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*********************************************************/
|
|
+ /* Read the necessary status reg from the onenand device */
|
|
+ /*********************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read the GENP3 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_GENP_REG3;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read the DEVCMD4 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_DEV_CMD4;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /************************************************************/
|
|
+ /* Restore the necessary registers to proper values */
|
|
+ /************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+
|
|
+ BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
|
|
+ >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel,
|
|
+ DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
|
|
+ &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
|
|
+ interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
|
|
+ controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
|
|
+ dma_buffer->data.sfstat[0],
|
|
+ dma_buffer->data.sfstat[1],
|
|
+ dma_buffer->data.sfstat[2],
|
|
+ dma_buffer->data.sfstat[3]);
|
|
+
|
|
+ pr_info("%s: controller_status = %x\n", __func__,
|
|
+ controller_status);
|
|
+ pr_info("%s: interrupt_status = %x\n", __func__,
|
|
+ interrupt_status);
|
|
+ pr_info("%s: write_prot_status = %x\n", __func__,
|
|
+ write_prot_status);
|
|
+#endif
|
|
+ /* Check for errors, protection violations etc */
|
|
+ if ((controller_status != 0)
|
|
+ || (dma_buffer->data.sfstat[0] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[1] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[2] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[3] & 0x110)) {
|
|
+ pr_err("%s: ECC/MPU/OP error\n", __func__);
|
|
+ err = -EIO;
|
|
+ }
|
|
+
|
|
+ if (!(write_prot_status & ONENAND_WP_US)) {
|
|
+ pr_err("%s: Unexpected status ofs = 0x%llx,"
|
|
+ "wp_status = %x\n",
|
|
+ __func__, ofs, write_prot_status);
|
|
+ err = -EIO;
|
|
+ }
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: ret %d\n", __func__, err);
|
|
+ pr_info("===================================================="
|
|
+ "=============\n");
|
|
+#endif
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int msm_onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+
|
|
+ struct {
|
|
+ dmov_s cmd[20];
|
|
+ unsigned cmdptr;
|
|
+ struct {
|
|
+ uint32_t sfbcfg;
|
|
+ uint32_t sfcmd[4];
|
|
+ uint32_t sfexec;
|
|
+ uint32_t sfstat[4];
|
|
+ uint32_t addr0;
|
|
+ uint32_t addr1;
|
|
+ uint32_t addr2;
|
|
+ uint32_t addr3;
|
|
+ uint32_t addr4;
|
|
+ uint32_t addr5;
|
|
+ uint32_t addr6;
|
|
+ uint32_t data0;
|
|
+ uint32_t data1;
|
|
+ uint32_t data2;
|
|
+ uint32_t data3;
|
|
+ uint32_t data4;
|
|
+ uint32_t data5;
|
|
+ uint32_t data6;
|
|
+ } data;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+
|
|
+ int err = 0;
|
|
+
|
|
+ uint16_t onenand_startaddr1;
|
|
+ uint16_t onenand_startaddr8;
|
|
+ uint16_t onenand_startaddr2;
|
|
+ uint16_t onenand_startblock;
|
|
+
|
|
+ uint16_t controller_status;
|
|
+ uint16_t interrupt_status;
|
|
+ uint16_t write_prot_status;
|
|
+
|
|
+ uint64_t start_ofs;
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("===================================================="
|
|
+ "=============\n");
|
|
+ pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
|
|
+#endif
|
|
+ /* 'ofs' & 'len' should align to block size */
|
|
+ if (ofs&(mtd->erasesize - 1)) {
|
|
+ pr_err("%s: Unsupported ofs address, 0x%llx\n",
|
|
+ __func__, ofs);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (len&(mtd->erasesize - 1)) {
|
|
+ pr_err("%s: Unsupported len, %lld\n",
|
|
+ __func__, len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ofs+len > mtd->size) {
|
|
+ pr_err("%s: Maximum chip size exceeded\n", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
|
|
+ (chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
|
|
+#if VERBOSE
|
|
+ pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
|
|
+#endif
|
|
+
|
|
+ cmd = dma_buffer->cmd;
|
|
+ if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
|
|
+ && (ofs >= (mtd->size>>1))) { /* DDP Device */
|
|
+ onenand_startaddr1 = DEVICE_FLASHCORE_1 |
|
|
+ (((uint32_t)(ofs - (mtd->size>>1))
|
|
+ / mtd->erasesize));
|
|
+ onenand_startaddr2 = DEVICE_BUFFERRAM_1;
|
|
+ onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
|
|
+ / mtd->erasesize);
|
|
+ } else {
|
|
+ onenand_startaddr1 = DEVICE_FLASHCORE_0 |
|
|
+ ((uint32_t)ofs / mtd->erasesize) ;
|
|
+ onenand_startaddr2 = DEVICE_BUFFERRAM_0;
|
|
+ onenand_startblock = ((uint32_t)ofs
|
|
+ / mtd->erasesize);
|
|
+ }
|
|
+
|
|
+ onenand_startaddr8 = 0x0000;
|
|
+ dma_buffer->data.sfbcfg = SFLASH_BCFG |
|
|
+ (nand_sfcmd_mode ? 0 : (1 << 24));
|
|
+ dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGWR);
|
|
+ dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_INTHI);
|
|
+ dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
|
|
+ MSM_NAND_SFCMD_DATXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGRD);
|
|
+ dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
|
|
+ MSM_NAND_SFCMD_CMDXS,
|
|
+ nand_sfcmd_mode,
|
|
+ MSM_NAND_SFCMD_REGWR);
|
|
+ dma_buffer->data.sfexec = 1;
|
|
+ dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
|
|
+ dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
|
|
+ (ONENAND_SYSTEM_CONFIG_1);
|
|
+ dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
|
|
+ (ONENAND_START_ADDRESS_1);
|
|
+ dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
|
|
+ (ONENAND_START_ADDRESS_2);
|
|
+ dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
|
|
+ (ONENAND_COMMAND);
|
|
+ dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
|
|
+ (ONENAND_INTERRUPT_STATUS);
|
|
+ dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
|
|
+ (ONENAND_SYSTEM_CONFIG_1);
|
|
+ dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
|
|
+ (ONENAND_START_ADDRESS_1);
|
|
+ dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
|
|
+ (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
|
|
+ dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
|
|
+ (onenand_startaddr1);
|
|
+ dma_buffer->data.data2 = (onenand_startblock << 16) |
|
|
+ (onenand_startaddr2);
|
|
+ dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
|
|
+ (ONENAND_CMD_LOCK);
|
|
+ dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
|
|
+ (CLEAN_DATA_16);
|
|
+ dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
|
|
+ (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
|
|
+ dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
|
|
+ (ONENAND_STARTADDR1_RES);
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Write the necessary address reg in the onenand device */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Enable and configure the SFlash controller */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR0 and ADDR1 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
|
|
+ cmd->dst = MSM_NAND_ADDR0;
|
|
+ cmd->len = 8;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
|
|
+ cmd->dst = MSM_NAND_ADDR2;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the ADDR6 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
|
|
+ cmd->dst = MSM_NAND_ADDR6;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
|
|
+ cmd->dst = MSM_NAND_GENP_REG0;
|
|
+ cmd->len = 16;
|
|
+ cmd++;
|
|
+
|
|
+ /* Write the FLASH_DEV_CMD4,5,6 registers */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
|
|
+ cmd->dst = MSM_NAND_DEV_CMD4;
|
|
+ cmd->len = 12;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*************************************************************/
|
|
+ /* Wait for the interrupt from the Onenand device controller */
|
|
+ /*************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /*********************************************************/
|
|
+ /* Read the necessary status reg from the onenand device */
|
|
+ /*********************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read the GENP3 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_GENP_REG3;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Read the DEVCMD4 register */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = MSM_NAND_DEV_CMD4;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /************************************************************/
|
|
+ /* Restore the necessary registers to proper values */
|
|
+ /************************************************************/
|
|
+
|
|
+ /* Block on cmd ready and write CMD register */
|
|
+ cmd->cmd = DST_CRCI_NAND_CMD;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Kick the execute command */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
|
|
+ cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+ /* Block on data ready, and read the status register */
|
|
+ cmd->cmd = SRC_CRCI_NAND_DATA;
|
|
+ cmd->src = MSM_NAND_SFLASHC_STATUS;
|
|
+ cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
|
|
+ cmd->len = 4;
|
|
+ cmd++;
|
|
+
|
|
+
|
|
+ BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+
|
|
+ dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
|
|
+ >> 3) | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel,
|
|
+ DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
|
|
+ &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+
|
|
+ write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
|
|
+ interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
|
|
+ controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
|
|
+ dma_buffer->data.sfstat[0],
|
|
+ dma_buffer->data.sfstat[1],
|
|
+ dma_buffer->data.sfstat[2],
|
|
+ dma_buffer->data.sfstat[3]);
|
|
+
|
|
+ pr_info("%s: controller_status = %x\n", __func__,
|
|
+ controller_status);
|
|
+ pr_info("%s: interrupt_status = %x\n", __func__,
|
|
+ interrupt_status);
|
|
+ pr_info("%s: write_prot_status = %x\n", __func__,
|
|
+ write_prot_status);
|
|
+#endif
|
|
+ /* Check for errors, protection violations etc */
|
|
+ if ((controller_status != 0)
|
|
+ || (dma_buffer->data.sfstat[0] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[1] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[2] & 0x110)
|
|
+ || (dma_buffer->data.sfstat[3] & 0x110)) {
|
|
+ pr_err("%s: ECC/MPU/OP error\n", __func__);
|
|
+ err = -EIO;
|
|
+ }
|
|
+
|
|
+ if (!(write_prot_status & ONENAND_WP_LS)) {
|
|
+ pr_err("%s: Unexpected status ofs = 0x%llx,"
|
|
+ "wp_status = %x\n",
|
|
+ __func__, ofs, write_prot_status);
|
|
+ err = -EIO;
|
|
+ }
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+
|
|
+#if VERBOSE
|
|
+ pr_info("\n%s: ret %d\n", __func__, err);
|
|
+ pr_info("===================================================="
|
|
+ "=============\n");
|
|
+#endif
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int msm_onenand_suspend(struct mtd_info *mtd)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void msm_onenand_resume(struct mtd_info *mtd)
|
|
+{
|
|
+}
|
|
+
|
|
+int msm_onenand_scan(struct mtd_info *mtd, int maxchips)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+
|
|
+ /* Probe and check whether onenand device is present */
|
|
+ if (flash_onenand_probe(chip))
|
|
+ return -ENODEV;
|
|
+
|
|
+ mtd->size = 0x1000000 << ((onenand_info.device_id & 0xF0) >> 4);
|
|
+ mtd->writesize = onenand_info.data_buf_size;
|
|
+ mtd->oobsize = mtd->writesize >> 5;
|
|
+ mtd->erasesize = mtd->writesize << 6;
|
|
+ mtd->oobavail = msm_onenand_oob_64.oobavail;
|
|
+ mtd->ecclayout = &msm_onenand_oob_64;
|
|
+
|
|
+ mtd->type = MTD_NANDFLASH;
|
|
+ mtd->flags = MTD_CAP_NANDFLASH;
|
|
+ mtd->_erase = msm_onenand_erase;
|
|
+ mtd->_point = NULL;
|
|
+ mtd->_unpoint = NULL;
|
|
+ mtd->_read = msm_onenand_read;
|
|
+ mtd->_write = msm_onenand_write;
|
|
+ mtd->_read_oob = msm_onenand_read_oob;
|
|
+ mtd->_write_oob = msm_onenand_write_oob;
|
|
+ mtd->_lock = msm_onenand_lock;
|
|
+ mtd->_unlock = msm_onenand_unlock;
|
|
+ mtd->_suspend = msm_onenand_suspend;
|
|
+ mtd->_resume = msm_onenand_resume;
|
|
+ mtd->_block_isbad = msm_onenand_block_isbad;
|
|
+ mtd->_block_markbad = msm_onenand_block_markbad;
|
|
+ mtd->owner = THIS_MODULE;
|
|
+
|
|
+ pr_info("Found a supported onenand device\n");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const unsigned int bch_sup_cntrl[] = {
|
|
+ 0x307, /* MSM7x2xA */
|
|
+ 0x4030, /* MDM 9x15 */
|
|
+};
|
|
+
|
|
+static inline bool msm_nand_has_bch_ecc_engine(unsigned int hw_id)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(bch_sup_cntrl); i++) {
|
|
+ if (hw_id == bch_sup_cntrl[i])
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msm_nand_scan - [msm_nand Interface] Scan for the msm_nand device
|
|
+ * @param mtd MTD device structure
|
|
+ * @param maxchips Number of chips to scan for
|
|
+ *
+ * This fills out all the uninitialized function pointers
+ * with the defaults.
+ * The flash ID is read and the mtd/chip structures are
|
|
+ * filled with the appropriate values.
|
|
+ */
|
|
+int msm_nand_scan(struct mtd_info *mtd, int maxchips)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+ uint32_t flash_id = 0, i, mtd_writesize;
|
|
+ uint8_t dev_found = 0;
|
|
+ uint8_t wide_bus;
|
|
+ uint32_t manid;
|
|
+ uint32_t devid;
|
|
+ uint32_t devcfg;
|
|
+ struct nand_flash_dev *flashdev = NULL;
|
|
+ struct nand_manufacturers *flashman = NULL;
|
|
+ unsigned int hw_id;
|
|
+
|
|
+ /*
+ * Some Spansion parts, like the S34MS04G2, require that the
+ * NAND Flash be reset before issuing an ONFI probe.
+ */
|
|
+ flash_reset(chip);
|
|
+
|
|
+ /* Probe the Flash device for ONFI compliance */
|
|
+ if (!flash_onfi_probe(chip)) {
|
|
+ dev_found = 1;
|
|
+ } else {
|
|
+ /* Read the Flash ID from the Nand Flash Device */
|
|
+ flash_id = flash_read_id(chip);
|
|
+ manid = flash_id & 0xFF;
|
|
+ devid = (flash_id >> 8) & 0xFF;
|
|
+ devcfg = (flash_id >> 24) & 0xFF;
|
|
+
|
|
+ for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
|
|
+ if (nand_manuf_ids[i].id == manid)
|
|
+ flashman = &nand_manuf_ids[i];
|
|
+ for (i = 0; !flashdev && nand_flash_ids[i].id; ++i)
|
|
+ if (nand_flash_ids[i].id == devid)
|
|
+ flashdev = &nand_flash_ids[i];
|
|
+ if (!flashdev || !flashman) {
|
|
+ pr_err("ERROR: unknown nand device manuf=%x devid=%x\n",
|
|
+ manid, devid);
|
|
+ return -ENOENT;
|
|
+ } else
|
|
+ dev_found = 1;
|
|
+
|
|
+ if (!flashdev->pagesize) {
|
|
+ supported_flash.flash_id = flash_id;
|
|
+ supported_flash.density = flashdev->chipsize << 20;
|
|
+ supported_flash.widebus = devcfg & (1 << 6) ? 1 : 0;
|
|
+ supported_flash.pagesize = 1024 << (devcfg & 0x3);
|
|
+ supported_flash.blksize = (64 * 1024) <<
|
|
+ ((devcfg >> 4) & 0x3);
|
|
+ supported_flash.oobsize = (8 << ((devcfg >> 2) & 0x3)) *
|
|
+ (supported_flash.pagesize >> 9);
|
|
+
|
|
+ if ((supported_flash.oobsize > 64) &&
|
|
+ (supported_flash.pagesize == 2048)) {
|
|
+ pr_info("msm_nand: Found a 2K page device with"
|
|
+ " %d oobsize - changing oobsize to 64 "
|
|
+ "bytes.\n", supported_flash.oobsize);
|
|
+ supported_flash.oobsize = 64;
|
|
+ }
|
|
+ } else {
|
|
+ supported_flash.flash_id = flash_id;
|
|
+ supported_flash.density = flashdev->chipsize << 20;
|
|
+ supported_flash.widebus = flashdev->options &
|
|
+ NAND_BUSWIDTH_16 ? 1 : 0;
|
|
+ supported_flash.pagesize = flashdev->pagesize;
|
|
+ supported_flash.blksize = flashdev->erasesize;
|
|
+ supported_flash.oobsize = flashdev->pagesize >> 5;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (dev_found) {
+ i = interleave_enable ? 2 : 1;
+ wide_bus = supported_flash.widebus;
|
|
+ mtd->size = supported_flash.density * i;
|
|
+ mtd->writesize = supported_flash.pagesize * i;
|
|
+ mtd->oobsize = supported_flash.oobsize * i;
|
|
+ mtd->erasesize = supported_flash.blksize * i;
|
|
+ mtd->writebufsize = mtd->writesize;
|
|
+
|
|
+ if (!interleave_enable)
|
|
+ mtd_writesize = mtd->writesize;
|
|
+ else
|
|
+ mtd_writesize = mtd->writesize >> 1;
|
|
+
|
|
+ /* Check whether controller and NAND device support 8bit ECC*/
|
|
+ hw_id = flash_rd_reg(chip, MSM_NAND_HW_INFO);
|
|
+ if (msm_nand_has_bch_ecc_engine(hw_id)
|
|
+ && (supported_flash.ecc_correctability >= 8)) {
|
|
+ pr_info("Found supported NAND device for %dbit ECC\n",
|
|
+ supported_flash.ecc_correctability);
|
|
+ enable_bch_ecc = 1;
|
|
+ } else {
|
|
+ pr_info("Found a supported NAND device\n");
|
|
+ }
|
|
+ pr_info("NAND Controller ID : 0x%x\n", hw_id);
|
|
+ pr_info("NAND Device ID : 0x%x\n", supported_flash.flash_id);
|
|
+ pr_info("Buswidth : %d Bits\n", (wide_bus) ? 16 : 8);
|
|
+ pr_info("Density : %lld MByte\n", (mtd->size>>20));
|
|
+ pr_info("Pagesize : %d Bytes\n", mtd->writesize);
|
|
+ pr_info("Erasesize: %d Bytes\n", mtd->erasesize);
|
|
+ pr_info("Oobsize : %d Bytes\n", mtd->oobsize);
|
|
+ } else {
+ pr_err("Unsupported NAND, Id: 0x%x\n", flash_id);
+ return -ENODEV;
|
|
+ }
|
|
+
+ /* Size of each codeword is 532 bytes in case of 8-bit BCH ECC */
+ chip->cw_size = enable_bch_ecc ? 532 : 528;
|
|
+ chip->CFG0 = (((mtd_writesize >> 9)-1) << 6) /* 4/8 cw/pg for 2/4k */
|
|
+ | (516 << 9) /* 516 user data bytes */
|
|
+ | (10 << 19) /* 10 parity bytes */
|
|
+ | (5 << 27) /* 5 address cycles */
|
|
+ | (0 << 30) /* Do not read status before data */
|
|
+ | (1 << 31) /* Send read cmd */
|
|
+ /* 0 spare bytes for 16 bit nand or 1/2 spare bytes for 8 bit */
|
|
+ | (wide_bus ? 0 << 23 : (enable_bch_ecc ? 2 << 23 : 1 << 23));
|
|
+
|
|
+ chip->CFG1 = (0 << 0) /* Enable ecc */
|
|
+ | (7 << 2) /* 8 recovery cycles */
|
|
+ | (0 << 5) /* Allow CS deassertion */
|
|
+ /* Bad block marker location */
|
|
+ | ((mtd_writesize - (chip->cw_size * (
|
|
+ (mtd_writesize >> 9) - 1)) + 1) << 6)
|
|
+ | (0 << 16) /* Bad block in user data area */
|
|
+ | (2 << 17) /* 6 cycle tWB/tRB */
|
|
+ | ((wide_bus) ? CFG1_WIDE_FLASH : 0); /* Wide flash bit */
|
|
+
|
|
+ chip->ecc_buf_cfg = 0x203;
|
|
+ chip->CFG0_RAW = 0xA80420C0;
|
|
+ chip->CFG1_RAW = 0x5045D;
|
|
+
|
|
+ if (enable_bch_ecc) {
|
|
+ chip->CFG1 |= (1 << 27); /* Enable BCH engine */
|
|
+ chip->ecc_bch_cfg = (0 << 0) /* Enable ECC*/
|
|
+ | (0 << 1) /* Enable/Disable SW reset of ECC engine */
|
|
+ | (1 << 4) /* 8bit ecc*/
|
|
+ | ((wide_bus) ? (14 << 8) : (13 << 8))/*parity bytes*/
|
|
+ | (516 << 16) /* 516 user data bytes */
|
|
+ | (1 << 30); /* Turn on ECC engine clocks always */
|
|
+ chip->CFG0_RAW = 0xA80428C0; /* CW size is increased to 532B */
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * For 4bit RS ECC (default ECC), parity bytes = 10 (for x8 and x16 I/O)
|
|
+ * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
|
|
+ */
|
|
+ chip->ecc_parity_bytes = enable_bch_ecc ? (wide_bus ? 14 : 13) : 10;
|
|
+
|
|
+ pr_info("CFG0 Init : 0x%08x\n", chip->CFG0);
|
|
+ pr_info("CFG1 Init : 0x%08x\n", chip->CFG1);
|
|
+ pr_info("ECCBUFCFG : 0x%08x\n", chip->ecc_buf_cfg);
|
|
+
|
|
+ if (mtd->oobsize == 64) {
|
|
+ mtd->oobavail = msm_nand_oob_64.oobavail;
|
|
+ mtd->ecclayout = &msm_nand_oob_64;
|
|
+ } else if (mtd->oobsize == 128) {
|
|
+ mtd->oobavail = msm_nand_oob_128.oobavail;
|
|
+ mtd->ecclayout = &msm_nand_oob_128;
|
|
+ } else if (mtd->oobsize == 224) {
|
|
+ mtd->oobavail = wide_bus ? msm_nand_oob_224_x16.oobavail :
|
|
+ msm_nand_oob_224_x8.oobavail;
|
|
+ mtd->ecclayout = wide_bus ? &msm_nand_oob_224_x16 :
|
|
+ &msm_nand_oob_224_x8;
|
|
+ } else if (mtd->oobsize == 256) {
|
|
+ mtd->oobavail = msm_nand_oob_256.oobavail;
|
|
+ mtd->ecclayout = &msm_nand_oob_256;
|
|
+ } else {
+ pr_err("Unsupported NAND, oobsize: 0x%x\n",
+ mtd->oobsize);
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ /* Fill in remaining MTD driver data */
|
|
+ mtd->type = MTD_NANDFLASH;
|
|
+ mtd->flags = MTD_CAP_NANDFLASH;
|
|
+ /* mtd->ecctype = MTD_ECC_SW; */
|
|
+ mtd->_erase = msm_nand_erase;
|
|
+ mtd->_block_isbad = msm_nand_block_isbad;
|
|
+ mtd->_block_markbad = msm_nand_block_markbad;
|
|
+ mtd->_point = NULL;
|
|
+ mtd->_unpoint = NULL;
|
|
+ mtd->_read = msm_nand_read;
|
|
+ mtd->_write = msm_nand_write;
|
|
+ mtd->_read_oob = msm_nand_read_oob;
|
|
+ mtd->_write_oob = msm_nand_write_oob;
|
|
+ if (dual_nand_ctlr_present) {
|
|
+ mtd->_read_oob = msm_nand_read_oob_dualnandc;
|
|
+ mtd->_write_oob = msm_nand_write_oob_dualnandc;
|
|
+ if (interleave_enable) {
|
|
+ mtd->_erase = msm_nand_erase_dualnandc;
|
|
+ mtd->_block_isbad = msm_nand_block_isbad_dualnandc;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* mtd->sync = msm_nand_sync; */
|
|
+ mtd->_lock = NULL;
|
|
+ /* mtd->_unlock = msm_nand_unlock; */
|
|
+ mtd->_suspend = msm_nand_suspend;
|
|
+ mtd->_resume = msm_nand_resume;
|
|
+ mtd->owner = THIS_MODULE;
|
|
+
|
|
+ /* Unlock whole block */
|
|
+ /* msm_nand_unlock_all(mtd); */
|
|
+
|
|
+ /* return this->scan_bbt(mtd); */
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(msm_nand_scan);
|
|
+
|
|
+/**
|
|
+ * msm_nand_release - [msm_nand Interface] Free resources held by the msm_nand device
|
|
+ * @param mtd MTD device structure
|
|
+ */
|
|
+void msm_nand_release(struct mtd_info *mtd)
|
|
+{
|
|
+ /* struct msm_nand_chip *this = mtd->priv; */
|
|
+
|
|
+ /* Deregister the device */
|
|
+ mtd_device_unregister(mtd);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(msm_nand_release);
|
|
+
|
|
+struct msm_nand_info {
|
|
+ struct mtd_info mtd;
|
|
+ struct mtd_partition *parts;
|
|
+ struct msm_nand_chip msm_nand;
|
|
+};
|
|
+
|
|
+/* duplicating the NC01 XFR contents to NC10 */
|
|
+static int msm_nand_nc10_xfr_settings(struct mtd_info *mtd)
|
|
+{
|
|
+ struct msm_nand_chip *chip = mtd->priv;
|
|
+
|
|
+ struct {
|
|
+ dmov_s cmd[2];
|
|
+ unsigned cmdptr;
|
|
+ } *dma_buffer;
|
|
+ dmov_s *cmd;
|
|
+
|
|
+ wait_event(chip->wait_queue,
|
|
+ (dma_buffer = msm_nand_get_dma_buffer(
|
|
+ chip, sizeof(*dma_buffer))));
|
|
+
|
|
+ cmd = dma_buffer->cmd;
|
|
+
|
|
+ /* Copying XFR register contents from NC01 --> NC10 */
|
|
+ cmd->cmd = 0;
|
|
+ cmd->src = NC01(MSM_NAND_XFR_STEP1);
|
|
+ cmd->dst = NC10(MSM_NAND_XFR_STEP1);
|
|
+ cmd->len = 28;
|
|
+ cmd++;
|
|
+
|
|
+ BUILD_BUG_ON(2 != ARRAY_SIZE(dma_buffer->cmd));
|
|
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
|
|
+ dma_buffer->cmd[0].cmd |= CMD_OCB;
|
|
+ cmd[-1].cmd |= CMD_OCU | CMD_LC;
|
|
+ dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
|
|
+ | CMD_PTR_LP;
|
|
+
|
|
+ mb();
|
|
+ msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
|
|
+ | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
|
|
+ &dma_buffer->cmdptr)));
|
|
+ mb();
|
|
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static ssize_t boot_layout_show(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ return sprintf(buf, "%d\n", boot_layout);
|
|
+}
|
|
+
|
|
+static ssize_t boot_layout_store(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ const char *buf, size_t n)
|
|
+{
|
|
+ struct msm_nand_info *info = dev_get_drvdata(dev);
|
|
+ struct msm_nand_chip *chip = info->mtd.priv;
|
|
+ unsigned int ud_size;
|
|
+ unsigned int spare_size;
|
|
+ unsigned int ecc_num_data_bytes;
|
|
+
|
|
+ sscanf(buf, "%d", &boot_layout);
|
|
+
+ ud_size = boot_layout ? 512 : 516;
+ spare_size = boot_layout ? (chip->cw_size -
+ (chip->ecc_parity_bytes + 1 + ud_size)) :
+ (enable_bch_ecc ? 2 : 1);
+ ecc_num_data_bytes = boot_layout ? 512 : 516;
+
|
|
+ chip->CFG0 = (chip->CFG0 & ~SPARE_SIZE_BYTES_MASK);
|
|
+ chip->CFG0 |= (spare_size << 23);
|
|
+
|
|
+ chip->CFG0 = (chip->CFG0 & ~UD_SIZE_BYTES_MASK);
|
|
+ chip->CFG0 |= (ud_size << 9);
|
|
+
|
|
+ chip->ecc_buf_cfg = (chip->ecc_buf_cfg & ~ECC_NUM_DATA_BYTES_MASK)
|
|
+ | (ecc_num_data_bytes << 16);
|
|
+
|
|
+ return n;
|
|
+}
|
|
+
|
|
+static const DEVICE_ATTR(boot_layout, 0644, boot_layout_show, boot_layout_store);
|
|
+
|
|
+static int msm_nand_probe(struct platform_device *pdev)
|
|
+
|
|
+{
|
|
+ struct msm_nand_info *info;
|
|
+ struct resource *res;
|
|
+ int err;
|
|
+ struct mtd_part_parser_data ppdata = {};
|
|
+
|
|
+
|
|
+ res = platform_get_resource(pdev,
|
|
+ IORESOURCE_MEM, 0);
|
|
+ if (!res || !res->start) {
|
|
+ pr_err("%s: msm_nand_phys resource invalid/absent\n",
|
|
+ __func__);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ msm_nand_phys = res->start;
|
|
+
|
|
+ info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info), GFP_KERNEL);
|
|
+ if (!info) {
|
|
+ pr_err("%s: No memory for msm_nand_info\n", __func__);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ info->msm_nand.dev = &pdev->dev;
|
|
+
|
|
+ init_waitqueue_head(&info->msm_nand.wait_queue);
|
|
+
|
|
+ info->msm_nand.dma_channel = 3;
|
|
+ pr_info("%s: dmac 0x%x\n", __func__, info->msm_nand.dma_channel);
|
|
+
|
|
+ /* this currently fails if dev is passed in */
|
|
+ info->msm_nand.dma_buffer =
|
|
+ dma_alloc_coherent(/*dev*/ NULL, MSM_NAND_DMA_BUFFER_SIZE,
|
|
+ &info->msm_nand.dma_addr, GFP_KERNEL);
|
|
+ if (info->msm_nand.dma_buffer == NULL) {
|
|
+ pr_err("%s: No memory for msm_nand.dma_buffer\n", __func__);
|
|
+ err = -ENOMEM;
|
|
+ goto out_free_info;
|
|
+ }
|
|
+
|
|
+ pr_info("%s: allocated dma buffer at %p, dma_addr %x\n",
|
|
+ __func__, info->msm_nand.dma_buffer, info->msm_nand.dma_addr);
|
|
+
|
|
+ /* Let default be VERSION_1 for backward compatibility */
|
|
+ info->msm_nand.uncorrectable_bit_mask = BIT(8);
|
|
+ info->msm_nand.num_err_mask = 0x1F;
|
|
+
|
|
+ info->mtd.name = dev_name(&pdev->dev);
|
|
+ info->mtd.priv = &info->msm_nand;
|
|
+ info->mtd.owner = THIS_MODULE;
|
|
+
|
|
+ /* config ebi2_cfg register only for ping pong mode!!! */
|
|
+ if (!interleave_enable && dual_nand_ctlr_present)
|
|
+ flash_wr_reg(&info->msm_nand, EBI2_CFG_REG, 0x4010080);
|
|
+
|
|
+ if (dual_nand_ctlr_present)
|
|
+ msm_nand_nc10_xfr_settings(&info->mtd);
|
|
+
|
|
+ if (msm_nand_scan(&info->mtd, 1))
|
|
+ if (msm_onenand_scan(&info->mtd, 1)) {
|
|
+ pr_err("%s: No nand device found\n", __func__);
|
|
+ err = -ENXIO;
|
|
+ goto out_free_dma_buffer;
|
|
+ }
|
|
+
|
|
+ flash_wr_reg(&info->msm_nand, MSM_NAND_DEV_CMD_VLD,
|
|
+ DEV_CMD_VLD_SEQ_READ_START_VLD |
|
|
+ DEV_CMD_VLD_ERASE_START_VLD |
|
|
+ DEV_CMD_VLD_WRITE_START_VLD |
|
|
+ DEV_CMD_VLD_READ_START_VLD);
|
|
+
|
|
+ ppdata.of_node = pdev->dev.of_node;
|
|
+ err = mtd_device_parse_register(&info->mtd, NULL, &ppdata, NULL, 0);
|
|
+
|
|
+ if (err < 0) {
|
|
+ pr_err("%s: mtd_device_parse_register failed with err=%d\n",
|
|
+ __func__, err);
|
|
+ goto out_free_dma_buffer;
|
|
+ }
|
|
+
|
|
+ err = sysfs_create_file(&pdev->dev.kobj, &dev_attr_boot_layout.attr);
|
|
+ if (err)
|
|
+ goto out_free_dma_buffer;
|
|
+
|
|
+ dev_set_drvdata(&pdev->dev, info);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+out_free_dma_buffer:
|
|
+ dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
|
|
+ info->msm_nand.dma_buffer,
|
|
+ info->msm_nand.dma_addr);
|
|
+out_free_info:
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int msm_nand_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);
|
|
+
|
|
+ dev_set_drvdata(&pdev->dev, NULL);
|
|
+
|
|
+ if (info) {
|
|
+ msm_nand_release(&info->mtd);
|
|
+ dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
|
|
+ info->msm_nand.dma_buffer,
|
|
+ info->msm_nand.dma_addr);
|
|
+ }
|
|
+
|
|
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_boot_layout.attr);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
+#ifdef CONFIG_OF
+static const struct of_device_id msm_nand_of_match[] = {
+ { .compatible = "qcom,qcom_nand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, msm_nand_of_match);
+#endif
+
+
+static struct platform_driver msm_nand_driver = {
+ .probe = msm_nand_probe,
+ .remove = msm_nand_remove,
+ .driver = {
+ .name = "qcom_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_nand_of_match,
+ }
+};
+
+
+module_platform_driver(msm_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("msm_nand flash driver code");
--- /dev/null
+++ b/drivers/mtd/nand/qcom_nand.h
@@ -0,0 +1,196 @@
+/* drivers/mtd/devices/msm_nand.h
|
|
+ *
|
|
+ * Copyright (c) 2008-2011, The Linux Foundation. All rights reserved.
|
|
+ * Copyright (C) 2007 Google, Inc.
|
|
+ *
|
|
+ * This software is licensed under the terms of the GNU General Public
|
|
+ * License version 2, as published by the Free Software Foundation, and
|
|
+ * may be copied, distributed, and modified under those terms.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef __DRIVERS_MTD_DEVICES_MSM_NAND_H
|
|
+#define __DRIVERS_MTD_DEVICES_MSM_NAND_H
|
|
+
|
|
+extern unsigned long msm_nand_phys;
|
|
+extern unsigned long msm_nandc01_phys;
|
|
+extern unsigned long msm_nandc10_phys;
|
|
+extern unsigned long msm_nandc11_phys;
|
|
+extern unsigned long ebi2_register_base;
|
|
+
|
|
+#define NC01(X) ((X) + msm_nandc01_phys - msm_nand_phys)
|
|
+#define NC10(X) ((X) + msm_nandc10_phys - msm_nand_phys)
|
|
+#define NC11(X) ((X) + msm_nandc11_phys - msm_nand_phys)
|
|
+
|
|
+#define MSM_NAND_REG(off) (msm_nand_phys + (off))
|
|
+
|
|
+#define MSM_NAND_FLASH_CMD MSM_NAND_REG(0x0000)
|
|
+#define MSM_NAND_ADDR0 MSM_NAND_REG(0x0004)
|
|
+#define MSM_NAND_ADDR1 MSM_NAND_REG(0x0008)
|
|
+#define MSM_NAND_FLASH_CHIP_SELECT MSM_NAND_REG(0x000C)
|
|
+#define MSM_NAND_EXEC_CMD MSM_NAND_REG(0x0010)
|
|
+#define MSM_NAND_FLASH_STATUS MSM_NAND_REG(0x0014)
|
|
+#define MSM_NAND_BUFFER_STATUS MSM_NAND_REG(0x0018)
|
|
+#define MSM_NAND_SFLASHC_STATUS MSM_NAND_REG(0x001C)
|
|
+#define MSM_NAND_DEV0_CFG0 MSM_NAND_REG(0x0020)
|
|
+#define MSM_NAND_DEV0_CFG1 MSM_NAND_REG(0x0024)
|
|
+#define MSM_NAND_DEV0_ECC_CFG MSM_NAND_REG(0x0028)
|
|
+#define MSM_NAND_DEV1_ECC_CFG MSM_NAND_REG(0x002C)
|
|
+#define MSM_NAND_DEV1_CFG0 MSM_NAND_REG(0x0030)
|
|
+#define MSM_NAND_DEV1_CFG1 MSM_NAND_REG(0x0034)
|
|
+#define MSM_NAND_SFLASHC_CMD MSM_NAND_REG(0x0038)
|
|
+#define MSM_NAND_SFLASHC_EXEC_CMD MSM_NAND_REG(0x003C)
|
|
+#define MSM_NAND_READ_ID MSM_NAND_REG(0x0040)
|
|
+#define MSM_NAND_READ_STATUS MSM_NAND_REG(0x0044)
|
|
+#define MSM_NAND_CONFIG_DATA MSM_NAND_REG(0x0050)
|
|
+#define MSM_NAND_CONFIG MSM_NAND_REG(0x0054)
|
|
+#define MSM_NAND_CONFIG_MODE MSM_NAND_REG(0x0058)
|
|
+#define MSM_NAND_CONFIG_STATUS MSM_NAND_REG(0x0060)
|
|
+#define MSM_NAND_MACRO1_REG MSM_NAND_REG(0x0064)
|
|
+#define MSM_NAND_XFR_STEP1 MSM_NAND_REG(0x0070)
|
|
+#define MSM_NAND_XFR_STEP2 MSM_NAND_REG(0x0074)
|
|
+#define MSM_NAND_XFR_STEP3 MSM_NAND_REG(0x0078)
|
|
+#define MSM_NAND_XFR_STEP4 MSM_NAND_REG(0x007C)
|
|
+#define MSM_NAND_XFR_STEP5 MSM_NAND_REG(0x0080)
|
|
+#define MSM_NAND_XFR_STEP6 MSM_NAND_REG(0x0084)
|
|
+#define MSM_NAND_XFR_STEP7 MSM_NAND_REG(0x0088)
|
|
+#define MSM_NAND_GENP_REG0 MSM_NAND_REG(0x0090)
|
|
+#define MSM_NAND_GENP_REG1 MSM_NAND_REG(0x0094)
|
|
+#define MSM_NAND_GENP_REG2 MSM_NAND_REG(0x0098)
|
|
+#define MSM_NAND_GENP_REG3 MSM_NAND_REG(0x009C)
|
|
+#define MSM_NAND_DEV_CMD0 MSM_NAND_REG(0x00A0)
|
|
+#define MSM_NAND_DEV_CMD1 MSM_NAND_REG(0x00A4)
|
|
+#define MSM_NAND_DEV_CMD2 MSM_NAND_REG(0x00A8)
|
|
+#define MSM_NAND_DEV_CMD_VLD MSM_NAND_REG(0x00AC)
|
|
+#define DEV_CMD_VLD_SEQ_READ_START_VLD 0x10
|
|
+#define DEV_CMD_VLD_ERASE_START_VLD 0x8
|
|
+#define DEV_CMD_VLD_WRITE_START_VLD 0x4
|
|
+#define DEV_CMD_VLD_READ_STOP_VLD 0x2
|
|
+#define DEV_CMD_VLD_READ_START_VLD 0x1
|
|
+
|
|
+#define MSM_NAND_EBI2_MISR_SIG_REG MSM_NAND_REG(0x00B0)
|
|
+#define MSM_NAND_ADDR2 MSM_NAND_REG(0x00C0)
|
|
+#define MSM_NAND_ADDR3 MSM_NAND_REG(0x00C4)
|
|
+#define MSM_NAND_ADDR4 MSM_NAND_REG(0x00C8)
|
|
+#define MSM_NAND_ADDR5 MSM_NAND_REG(0x00CC)
|
|
+#define MSM_NAND_DEV_CMD3 MSM_NAND_REG(0x00D0)
|
|
+#define MSM_NAND_DEV_CMD4 MSM_NAND_REG(0x00D4)
|
|
+#define MSM_NAND_DEV_CMD5 MSM_NAND_REG(0x00D8)
|
|
+#define MSM_NAND_DEV_CMD6 MSM_NAND_REG(0x00DC)
|
|
+#define MSM_NAND_SFLASHC_BURST_CFG MSM_NAND_REG(0x00E0)
|
|
+#define MSM_NAND_ADDR6 MSM_NAND_REG(0x00E4)
|
|
+#define MSM_NAND_EBI2_ECC_BUF_CFG MSM_NAND_REG(0x00F0)
|
|
+#define MSM_NAND_HW_INFO MSM_NAND_REG(0x00FC)
|
|
+#define MSM_NAND_FLASH_BUFFER MSM_NAND_REG(0x0100)
|
|
+
|
|
+/* device commands */
|
|
+
|
|
+#define MSM_NAND_CMD_SOFT_RESET 0x01
|
|
+#define MSM_NAND_CMD_PAGE_READ 0x32
|
|
+#define MSM_NAND_CMD_PAGE_READ_ECC 0x33
|
|
+#define MSM_NAND_CMD_PAGE_READ_ALL 0x34
|
|
+#define MSM_NAND_CMD_SEQ_PAGE_READ 0x15
|
|
+#define MSM_NAND_CMD_PRG_PAGE 0x36
|
|
+#define MSM_NAND_CMD_PRG_PAGE_ECC 0x37
|
|
+#define MSM_NAND_CMD_PRG_PAGE_ALL 0x39
|
|
+#define MSM_NAND_CMD_BLOCK_ERASE 0x3A
|
|
+#define MSM_NAND_CMD_FETCH_ID 0x0B
|
|
+#define MSM_NAND_CMD_STATUS 0x0C
|
|
+#define MSM_NAND_CMD_RESET 0x0D
|
|
+
|
|
+/* Sflash Commands */
|
|
+
|
|
+#define MSM_NAND_SFCMD_DATXS 0x0
|
|
+#define MSM_NAND_SFCMD_CMDXS 0x1
|
|
+#define MSM_NAND_SFCMD_BURST 0x0
|
|
+#define MSM_NAND_SFCMD_ASYNC 0x1
|
|
+#define MSM_NAND_SFCMD_ABORT 0x1
|
|
+#define MSM_NAND_SFCMD_REGRD 0x2
|
|
+#define MSM_NAND_SFCMD_REGWR 0x3
|
|
+#define MSM_NAND_SFCMD_INTLO 0x4
|
|
+#define MSM_NAND_SFCMD_INTHI 0x5
|
|
+#define MSM_NAND_SFCMD_DATRD 0x6
|
|
+#define MSM_NAND_SFCMD_DATWR 0x7
|
|
+
|
|
+#define SFLASH_PREPCMD(numxfr, offval, delval, trnstp, mode, opcode) \
|
|
+ ((numxfr<<20)|(offval<<12)|(delval<<6)|(trnstp<<5)|(mode<<4)|opcode)
|
|
+
|
|
+#define SFLASH_BCFG 0x20100327
|
|
+
|
|
+/* Onenand addresses */
|
|
+
|
|
+#define ONENAND_MANUFACTURER_ID 0xF000
|
|
+#define ONENAND_DEVICE_ID 0xF001
|
|
+#define ONENAND_VERSION_ID 0xF002
|
|
+#define ONENAND_DATA_BUFFER_SIZE 0xF003
|
|
+#define ONENAND_BOOT_BUFFER_SIZE 0xF004
|
|
+#define ONENAND_AMOUNT_OF_BUFFERS 0xF005
|
|
+#define ONENAND_TECHNOLOGY 0xF006
|
|
+#define ONENAND_START_ADDRESS_1 0xF100
|
|
+#define ONENAND_START_ADDRESS_2 0xF101
|
|
+#define ONENAND_START_ADDRESS_3 0xF102
|
|
+#define ONENAND_START_ADDRESS_4 0xF103
|
|
+#define ONENAND_START_ADDRESS_5 0xF104
|
|
+#define ONENAND_START_ADDRESS_6 0xF105
|
|
+#define ONENAND_START_ADDRESS_7 0xF106
|
|
+#define ONENAND_START_ADDRESS_8 0xF107
|
|
+#define ONENAND_START_BUFFER 0xF200
|
|
+#define ONENAND_COMMAND 0xF220
|
|
+#define ONENAND_SYSTEM_CONFIG_1 0xF221
|
|
+#define ONENAND_SYSTEM_CONFIG_2 0xF222
|
|
+#define ONENAND_CONTROLLER_STATUS 0xF240
|
|
+#define ONENAND_INTERRUPT_STATUS 0xF241
|
|
+#define ONENAND_START_BLOCK_ADDRESS 0xF24C
|
|
+#define ONENAND_WRITE_PROT_STATUS 0xF24E
|
|
+#define ONENAND_ECC_STATUS 0xFF00
|
|
+#define ONENAND_ECC_ERRPOS_MAIN0 0xFF01
|
|
+#define ONENAND_ECC_ERRPOS_SPARE0 0xFF02
|
|
+#define ONENAND_ECC_ERRPOS_MAIN1 0xFF03
|
|
+#define ONENAND_ECC_ERRPOS_SPARE1 0xFF04
|
|
+#define ONENAND_ECC_ERRPOS_MAIN2 0xFF05
|
|
+#define ONENAND_ECC_ERRPOS_SPARE2 0xFF06
|
|
+#define ONENAND_ECC_ERRPOS_MAIN3 0xFF07
|
|
+#define ONENAND_ECC_ERRPOS_SPARE3 0xFF08
|
|
+
|
|
+/* Onenand commands */
|
|
+#define ONENAND_WP_US (1 << 2)
|
|
+#define ONENAND_WP_LS (1 << 1)
|
|
+
|
|
+#define ONENAND_CMDLOAD 0x0000
|
|
+#define ONENAND_CMDLOADSPARE 0x0013
|
|
+#define ONENAND_CMDPROG 0x0080
|
|
+#define ONENAND_CMDPROGSPARE 0x001A
|
|
+#define ONENAND_CMDERAS 0x0094
|
|
+#define ONENAND_CMD_UNLOCK 0x0023
|
|
+#define ONENAND_CMD_LOCK 0x002A
|
|
+
|
|
+#define ONENAND_SYSCFG1_ECCENA(mode) (0x40E0 | (mode ? 0 : 0x8002))
|
|
+#define ONENAND_SYSCFG1_ECCDIS(mode) (0x41E0 | (mode ? 0 : 0x8002))
|
|
+
|
|
+#define ONENAND_CLRINTR 0x0000
|
|
+#define ONENAND_STARTADDR1_RES 0x07FF
|
|
+#define ONENAND_STARTADDR3_RES 0x07FF
|
|
+
|
|
+#define DATARAM0_0 0x8
|
|
+#define DEVICE_FLASHCORE_0 (0 << 15)
|
|
+#define DEVICE_FLASHCORE_1 (1 << 15)
|
|
+#define DEVICE_BUFFERRAM_0 (0 << 15)
|
|
+#define DEVICE_BUFFERRAM_1 (1 << 15)
|
|
+#define ONENAND_DEVICE_IS_DDP (1 << 3)
|
|
+
|
|
+#define CLEAN_DATA_16 0xFFFF
|
|
+#define CLEAN_DATA_32 0xFFFFFFFF
|
|
+
|
|
+#define EBI2_REG(off) (ebi2_register_base + (off))
|
|
+#define EBI2_CHIP_SELECT_CFG0 EBI2_REG(0x0000)
|
|
+#define EBI2_CFG_REG EBI2_REG(0x0004)
|
|
+#define EBI2_NAND_ADM_MUX EBI2_REG(0x005C)
|
|
+
|
|
+extern struct flash_platform_data msm_nand_data;
+
+#endif