From 997a8965db8417266bea3fbdcfa3e5655a1b52fa Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 9 Sep 2014 23:12:15 +0200
Subject: [PATCH 18/36] MTD: nand: lots of xrx200 fixes

Signed-off-by: John Crispin <blogic@openwrt.org>
---
drivers/mtd/nand/raw/xway_nand.c | 63 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 63 insertions(+)
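
The probe code below reads an optional "req-mask" property from the NAND
controller node and masks the corresponding PCI REQ lines while the NAND
chip is selected. As a rough sketch only (the node layout and the mask value
are illustrative assumptions, not part of this patch), a board such as the
Home Hub 2B could wire this up in its device tree roughly as follows:

	localbus@0 {
		nand@1 {
			compatible = "lantiq,nand-xway";
			/* illustrative value: mask PCI REQ1 during NAND access */
			req-mask = <0x1>;
			/* other board-specific properties omitted */
		};
	};
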
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -63,6 +63,24 @@
#define NAND_CON_CSMUX (1 << 1)
#define NAND_CON_NANDM 1

+#define DANUBE_PCI_REG32( addr ) (*(volatile u32 *)(addr))
+#define PCI_CR_PR_OFFSET (KSEG1+0x1E105400)
+#define PCI_CR_PC_ARB (PCI_CR_PR_OFFSET + 0x0080)
+
+/*
+ * req_mask provides a mechanism to prevent interference between
+ * nand and pci (probably only relevant for the BT Home Hub 2B).
+ * Setting it causes the corresponding pci req pins to be masked
+ * during nand access, and also moves ebu locking from the read/write
+ * functions to the chip select function to ensure that the whole
+ * operation runs with interrupts disabled.
+ * In addition it switches on some extra waiting in xway_cmd_ctrl().
+ * This seems to be necessary if the ebu_cs1 pin has open-drain disabled,
+ * which in turn seems to be necessary for the nor chip to be recognised
+ * reliably, on a board (Home Hub 2B again) which has both nor and nand.
+ */
+static __be32 req_mask = 0;
+
struct xway_nand_data {
struct nand_chip chip;
unsigned long csflags;
@@ -94,10 +112,22 @@ static void xway_select_chip(struct mtd_
case -1:
ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+
+ if (req_mask) {
+ /* Unmask all external PCI requests */
+ DANUBE_PCI_REG32(PCI_CR_PC_ARB) &= ~(req_mask << 16);
+ }
+
spin_unlock_irqrestore(&ebu_lock, data->csflags);
break;
case 0:
spin_lock_irqsave(&ebu_lock, data->csflags);
+
+ if (req_mask) {
+ /* Mask all external PCI requests */
+ DANUBE_PCI_REG32(PCI_CR_PC_ARB) |= (req_mask << 16);
+ }
+
ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
break;
@@ -108,6 +138,12 @@ static void xway_select_chip(struct mtd_

static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
+
+ if (req_mask) {
+ if (cmd != NAND_CMD_STATUS)
+ ltq_ebu_w32(0, EBU_NAND_WAIT); /* Clear nand ready */
+ }
+
if (cmd == NAND_CMD_NONE)
return;

@@ -118,6 +154,24 @@ static void xway_cmd_ctrl(struct mtd_inf

while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
;
+
+ if (req_mask) {
+ /*
+ * program and erase have their own busy handlers
+ * status and sequential in need no delay
+ */
+ switch (cmd) {
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ return;
+ }
+
+ /* wait until command is processed */
+ while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD) == 0)
+ ;
+ }
}

static int xway_dev_ready(struct mtd_info *mtd)
@@ -157,6 +211,7 @@ static int xway_nand_probe(struct platfo
int err;
u32 cs;
u32 cs_flag = 0;
+ const __be32 *req_mask_ptr;

/* Allocate memory for the device structure (and zero it) */
data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
@@ -192,6 +247,15 @@ static int xway_nand_probe(struct platfo
if (!err && cs == 1)
cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;

+ req_mask_ptr = of_get_property(pdev->dev.of_node,
+ "req-mask", NULL);
+
+ /*
+ * Load the PCI req lines to mask from the device tree. If the
+ * property is not present, req_mask is set to 0, which disables masking.
+ */
+ req_mask = (req_mask_ptr ? *req_mask_ptr : 0);
+
/* setup the EBU to run in NAND mode on our base addr */
ltq_ebu_w32(CPHYSADDR(data->nandaddr)
| ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);