author     David Woodhouse <David.Woodhouse@intel.com>    2012-10-09 15:03:21 +0100
committer  David Woodhouse <David.Woodhouse@intel.com>    2012-10-09 15:04:25 +0100
commit     ffe315012510165ce82e4dd4767f0a5dba9edbf7 (patch)
tree       f601cd980af9d0ced5ca9aedecef4fa0d2ca0e15 /drivers/mtd/nand
parent     e2d3a35ee427aaba99b6c68a56609ce276c51270 (diff)
parent     4a8e43feeac7996b8de2d5b2823e316917493df4 (diff)
Merge tag 'disintegrate-mtd-20121009' of git://git.infradead.org/users/dhowells/linux-headers
UAPI Disintegration 2012-10-09

Conflicts:
	MAINTAINERS
	arch/arm/configs/bcmring_defconfig
	arch/arm/mach-imx/clk-imx51-imx53.c
	drivers/mtd/nand/Kconfig
	drivers/mtd/nand/bcm_umi_nand.c
	drivers/mtd/nand/nand_bcm_umi.h
	drivers/mtd/nand/orion_nand.c
Diffstat (limited to 'drivers/mtd/nand')
-rw-r--r--  drivers/mtd/nand/ams-delta.c      10
-rw-r--r--  drivers/mtd/nand/davinci_nand.c    4
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c   56
-rw-r--r--  drivers/mtd/nand/mxc_nand.c        2
-rw-r--r--  drivers/mtd/nand/nomadik_nand.c    2
-rw-r--r--  drivers/mtd/nand/omap2.c         303
-rw-r--r--  drivers/mtd/nand/orion_nand.c      2
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c    89
-rw-r--r--  drivers/mtd/nand/s3c2410.c         2
9 files changed, 367 insertions, 103 deletions
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 2d73f2393586..9e7723aa7acc 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -23,11 +23,15 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/gpio-omap.h>
+
#include <asm/io.h>
-#include <mach/hardware.h>
#include <asm/sizes.h>
-#include <linux/gpio.h>
-#include <plat/board-ams-delta.h>
+
+#include <mach/board-ams-delta.h>
+
+#include <mach/hardware.h>
/*
* MTD structure for E3 (Delta)
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index df1ab7dc3440..945047ad0952 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -35,8 +35,8 @@
#include <linux/slab.h>
#include <linux/of_device.h>
-#include <mach/nand.h>
-#include <mach/aemif.h>
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mtd-davinci-aemif.h>
/*
* This is a device driver for the NAND flash controller found on the
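
Most of the small per-file changes in this merge follow the pattern shown in the two hunks above: platform-data headers move out of machine-specific <mach/...> and <plat/...> include paths into <linux/platform_data/...>. A minimal sketch of a board file consuming the relocated davinci header is given below; the struct name is taken from the included header, but the device name and the (omitted) field values are assumptions for illustration, not part of this diff.

#include <linux/platform_device.h>
#include <linux/platform_data/mtd-davinci.h>	/* was <mach/nand.h> */

/* Board policy (timings, ECC mode, partition table) deliberately omitted. */
static struct davinci_nand_pdata example_nand_pdata = {
	/* .ecc_mode, .parts, .nr_parts, ... */
};

static struct platform_device example_nand_device = {
	.name			= "davinci_nand",	/* assumed driver name */
	.id			= 0,
	.dev.platform_data	= &example_nand_pdata,
};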
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 1be83dcc730a..3551a99076ba 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -31,6 +31,7 @@
#include <linux/mtd/nand_ecc.h>
#include <asm/fsl_ifc.h>
+#define FSL_IFC_V1_1_0 0x01010000
#define ERR_BYTE 0xFF /* Value returned for read
bytes when read failed */
#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
@@ -735,13 +736,62 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
return 0;
}
+static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
+ uint32_t cs = priv->bank;
+
+ /* Save CSOR and CSOR_ext */
+ csor = in_be32(&ifc->csor_cs[cs].csor);
+ csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext);
+
+ /* change PageSize to 8K and SpareSize to 1K */
+ csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
+ out_be32(&ifc->csor_cs[cs].csor, csor_8k);
+ out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400);
+
+ /* READID */
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
+ out_be32(&ifc->ifc_nand.row3, 0x0);
+
+ out_be32(&ifc->ifc_nand.nand_fbcr, 0x0);
+
+ /* Program ROW0/COL0 */
+ out_be32(&ifc->ifc_nand.row0, 0x0);
+ out_be32(&ifc->ifc_nand.col0, 0x0);
+
+ /* set the chip select for NAND Transaction */
+ out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT);
+
+ /* start read seq */
+ out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ IFC_TIMEOUT_MSECS * HZ/1000);
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
+
+ /* Restore CSOR and CSOR_ext */
+ out_be32(&ifc->csor_cs[cs].csor, csor);
+ out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext);
+}
+
static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
{
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
struct nand_chip *chip = &priv->chip;
struct nand_ecclayout *layout;
- u32 csor;
+ u32 csor, ver;
/* Fill in fsl_ifc_mtd structure */
priv->mtd.priv = chip;
@@ -834,6 +884,10 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.mode = NAND_ECC_SOFT;
}
+ ver = in_be32(&ifc->ifc_rev);
+ if (ver == FSL_IFC_V1_1_0)
+ fsl_ifc_sram_init(priv);
+
return 0;
}
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 8ec7cc007dee..72e31d86030d 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -36,7 +36,7 @@
#include <linux/of_mtd.h>
#include <asm/mach/flash.h>
-#include <mach/mxc_nand.h>
+#include <linux/platform_data/mtd-mxc_nand.h>
#include <mach/hardware.h>
#define DRIVER_NAME "mxc_nand"
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index a86aa812ca13..9ee0c4edfacf 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -31,7 +31,7 @@
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <mach/nand.h>
+#include <linux/platform_data/mtd-nomadik-nand.h>
#include <mach/fsmc.h>
#include <mtd/mtd-abi.h>
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 9142005c3029..5b3138620646 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -29,7 +29,7 @@
#include <plat/dma.h>
#include <plat/gpmc.h>
-#include <plat/nand.h>
+#include <linux/platform_data/mtd-nand-omap2.h>
#define DRIVER_NAME "omap2-nand"
#define OMAP_NAND_TIMEOUT_MS 5000
@@ -101,6 +101,16 @@
#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+#define PREFETCH_CONFIG1_CS_SHIFT 24
+#define ECC_CONFIG_CS_SHIFT 1
+#define CS_MASK 0x7
+#define ENABLE_PREFETCH (0x1 << 7)
+#define DMA_MPU_MODE_SHIFT 2
+#define ECCSIZE1_SHIFT 22
+#define ECC1RESULTSIZE 0x1
+#define ECCCLEAR 0x100
+#define ECC1 0x1
+
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
/* Define some generic bad / good block scan pattern which are used
@@ -124,15 +134,18 @@ struct omap_nand_info {
int gpmc_cs;
unsigned long phys_base;
+ unsigned long mem_size;
struct completion comp;
struct dma_chan *dma;
- int gpmc_irq;
+ int gpmc_irq_fifo;
+ int gpmc_irq_count;
enum {
OMAP_NAND_IO_READ = 0, /* read */
OMAP_NAND_IO_WRITE, /* write */
} iomode;
u_char *buf;
int buf_len;
+ struct gpmc_nand_regs reg;
#ifdef CONFIG_MTD_NAND_OMAP_BCH
struct bch_control *bch;
@@ -141,6 +154,63 @@ struct omap_nand_info {
};
/**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read (0) or post write (1) mode
+ * @info: NAND device structure containing platform data
+ */
+static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
+ unsigned int u32_count, int is_write, struct omap_nand_info *info)
+{
+ u32 val;
+
+ if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+ return -1;
+
+ if (readl(info->reg.gpmc_prefetch_control))
+ return -EBUSY;
+
+ /* Set the amount of bytes to be prefetched */
+ writel(u32_count, info->reg.gpmc_prefetch_config2);
+
+ /* Set dma/mpu mode, the prefetch read / post write and
+ * enable the engine. Set which cs has requested it.
+ */
+ val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
+ PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
+ (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
+ writel(val, info->reg.gpmc_prefetch_config1);
+
+ /* Start the prefetch engine */
+ writel(0x1, info->reg.gpmc_prefetch_control);
+
+ return 0;
+}
+
+/**
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ */
+static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
+{
+ u32 config1;
+
+ /* check if the same module/cs is trying to reset */
+ config1 = readl(info->reg.gpmc_prefetch_config1);
+ if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
+ return -EINVAL;
+
+ /* Stop the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_control);
+
+ /* Reset/disable the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_config1);
+
+ return 0;
+}
+
+/**
* omap_hwcontrol - hardware specific access to control-lines
* @mtd: MTD device structure
* @cmd: command to device
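
As a concrete illustration of the register programming in omap_prefetch_enable() above, consider the call used later in this file for a CPU-driven read on CS 0: omap_prefetch_enable(0, PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info). The sketch below works out the CONFIG1 value, assuming (this is not part of the diff) that the GPMC header defines PREFETCH_FIFOTHRESHOLD_MAX as 0x40 and PREFETCH_FIFOTHRESHOLD(x) as (x) << 8.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = (0u << 24)	/* PREFETCH_CONFIG1_CS_SHIFT: CS 0 */
		     | (0x40u << 8)	/* PREFETCH_FIFOTHRESHOLD(0x40), assumed */
		     | (0x1u << 7)	/* ENABLE_PREFETCH */
		     | (0u << 2)	/* DMA_MPU_MODE_SHIFT: MPU (non-DMA) mode */
		     | 0u;		/* is_write = 0: prefetch read */

	printf("GPMC_PREFETCH_CONFIG1 = 0x%08x\n", val);	/* prints 0x00004080 */
	return 0;
}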
@@ -158,13 +228,13 @@ static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
if (cmd != NAND_CMD_NONE) {
if (ctrl & NAND_CLE)
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
+ writeb(cmd, info->reg.gpmc_nand_command);
else if (ctrl & NAND_ALE)
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
+ writeb(cmd, info->reg.gpmc_nand_address);
else /* NAND_NCE */
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
+ writeb(cmd, info->reg.gpmc_nand_data);
}
}
@@ -198,7 +268,8 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
iowrite8(*p++, info->nand.IO_ADDR_W);
/* wait until buffer is available for write */
do {
- status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ status = readl(info->reg.gpmc_status) &
+ GPMC_STATUS_BUFF_EMPTY;
} while (!status);
}
}
@@ -235,7 +306,8 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
iowrite16(*p++, info->nand.IO_ADDR_W);
/* wait until buffer is available for write */
do {
- status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ status = readl(info->reg.gpmc_status) &
+ GPMC_STATUS_BUFF_EMPTY;
} while (!status);
}
}
@@ -265,8 +337,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -275,14 +347,15 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
omap_read_buf8(mtd, (u_char *)p, len);
} else {
do {
- r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ r_count = readl(info->reg.gpmc_prefetch_status);
+ r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count);
r_count = r_count >> 2;
ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
p += r_count;
len -= r_count << 2;
} while (len);
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
}
}
@@ -301,6 +374,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
int i = 0, ret = 0;
u16 *p = (u16 *)buf;
unsigned long tim, limit;
+ u32 val;
/* take care of subpage writes */
if (len % 2 != 0) {
@@ -310,8 +384,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -320,7 +394,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
omap_write_buf8(mtd, (u_char *)p, len);
} else {
while (len) {
- w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ w_count = readl(info->reg.gpmc_prefetch_status);
+ w_count = GPMC_PREFETCH_STATUS_FIFO_CNT(w_count);
w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
iowrite16(*p++, info->nand.IO_ADDR_W);
@@ -329,11 +404,14 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
tim = 0;
limit = (loops_per_jiffy *
msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ do {
cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = GPMC_PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
}
}
@@ -365,6 +443,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
unsigned long tim, limit;
unsigned n;
int ret;
+ u32 val;
if (addr >= high_memory) {
struct page *p1;
@@ -396,9 +475,9 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
tx->callback_param = &info->comp;
dmaengine_submit(tx);
- /* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy_unmap;
@@ -410,11 +489,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
wait_for_completion(&info->comp);
tim = 0;
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+
+ do {
cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = GPMC_PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
return 0;
@@ -471,13 +554,12 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
struct omap_nand_info *info = (struct omap_nand_info *) dev;
u32 bytes;
- u32 irq_stat;
- irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
- bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ bytes = readl(info->reg.gpmc_prefetch_status);
+ bytes = GPMC_PREFETCH_STATUS_FIFO_CNT(bytes);
bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
- if (irq_stat & 0x2)
+ if (this_irq == info->gpmc_irq_count)
goto done;
if (info->buf_len && (info->buf_len < bytes))
@@ -494,20 +576,17 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
(u32 *)info->buf, bytes >> 2);
info->buf = info->buf + bytes;
- if (irq_stat & 0x2)
+ if (this_irq == info->gpmc_irq_count)
goto done;
}
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
return IRQ_HANDLED;
done:
complete(&info->comp);
- /* disable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
- /* clear status */
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+ disable_irq_nosync(info->gpmc_irq_fifo);
+ disable_irq_nosync(info->gpmc_irq_count);
return IRQ_HANDLED;
}
@@ -534,22 +613,22 @@ static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
init_completion(&info->comp);
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy;
info->buf_len = len;
- /* enable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
- (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
/* waiting for read to complete */
wait_for_completion(&info->comp);
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
return;
out_copy:
@@ -572,6 +651,7 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
struct omap_nand_info, mtd);
int ret = 0;
unsigned long tim, limit;
+ u32 val;
if (len <= mtd->oobsize) {
omap_write_buf_pref(mtd, buf, len);
@@ -583,27 +663,31 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
init_completion(&info->comp);
/* configure and start prefetch transfer : size=24 */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy;
info->buf_len = len;
- /* enable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
- (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
/* waiting for write to complete */
wait_for_completion(&info->comp);
+
/* wait for the data to be flushed out before resetting the prefetch engine */
tim = 0;
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ do {
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = GPMC_PREFETCH_STATUS_COUNT(val);
cpu_relax();
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
return;
out_copy:
@@ -822,7 +906,20 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
{
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
+ u32 val;
+
+ val = readl(info->reg.gpmc_ecc_config);
+ if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
+ return -EINVAL;
+
+ /* read ecc result */
+ val = readl(info->reg.gpmc_ecc1_result);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+
+ return 0;
}
/**
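
A worked example of the byte packing done by omap_calculate_ecc() above, using a made-up register value (0x0ABC1234 is purely illustrative, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0x0ABC1234;	/* hypothetical gpmc_ecc1_result readout */
	uint8_t ecc[3];

	ecc[0] = (uint8_t)val;					/* P128e..P1e -> 0x34 */
	ecc[1] = (uint8_t)(val >> 16);				/* P128o..P1o -> 0xbc */
	ecc[2] = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);	/* mixed byte -> 0xa2 */

	printf("%02x %02x %02x\n", ecc[0], ecc[1], ecc[2]);
	return 0;
}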
@@ -836,8 +933,34 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
mtd);
struct nand_chip *chip = mtd->priv;
unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ u32 val;
+
+ /* clear ecc and enable bits */
+ val = ECCCLEAR | ECC1;
+ writel(val, info->reg.gpmc_ecc_control);
- gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
+ /* program ecc and result sizes */
+ val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
+ ECC1RESULTSIZE);
+ writel(val, info->reg.gpmc_ecc_size_config);
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+ break;
+ case NAND_ECC_READSYN:
+ writel(ECCCLEAR, info->reg.gpmc_ecc_control);
+ break;
+ default:
+ dev_info(&info->pdev->dev,
+ "error: unrecognized Mode[%d]!\n", mode);
+ break;
+ }
+
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ writel(val, info->reg.gpmc_ecc_config);
}
/**
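
For reference, the values that omap_enable_hwecc() above ends up writing for a 16-bit NAND on CS 0, assuming the usual nand.ecc.size of 512 bytes (the ECC size is driver configuration and is not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ecc_size  = 512;	/* assumed info->nand.ecc.size */
	uint32_t dev_width = 1;		/* NAND_BUSWIDTH_16 set */
	uint32_t cs        = 0;

	uint32_t size_config = (((ecc_size >> 1) - 1) << 22) | 0x1;	/* ECCSIZE1_SHIFT | ECC1RESULTSIZE */
	uint32_t ecc_config  = (dev_width << 7) | (cs << 1) | 0x1;	/* width | CS | ECC enable */

	printf("GPMC_ECC_SIZE_CONFIG = 0x%08x\n", size_config);	/* prints 0x3fc00001 */
	printf("GPMC_ECC_CONFIG      = 0x%08x\n", ecc_config);	/* prints 0x00000081 */
	return 0;
}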
@@ -865,10 +988,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
else
timeo += (HZ * 20) / 1000;
- gpmc_nand_write(info->gpmc_cs,
- GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
+ writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
while (time_before(jiffies, timeo)) {
- status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
+ status = readb(info->reg.gpmc_nand_data);
if (status & NAND_STATUS_READY)
break;
cond_resched();
@@ -888,22 +1010,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ val = readl(info->reg.gpmc_status);
+
if ((val & 0x100) == 0x100) {
- /* Clear IRQ Interrupt */
- val |= 0x100;
- val &= ~(0x0);
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
+ return 1;
} else {
- unsigned int cnt = 0;
- while (cnt++ < 0x1FF) {
- if ((val & 0x100) == 0x100)
- return 0;
- val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
- }
+ return 0;
}
-
- return 1;
}
#ifdef CONFIG_MTD_NAND_OMAP_BCH
@@ -1134,6 +1247,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
int i, offset;
dma_cap_mask_t mask;
unsigned sig;
+ struct resource *res;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -1153,7 +1267,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->pdev = pdev;
info->gpmc_cs = pdata->cs;
- info->phys_base = pdata->phys_base;
+ info->reg = pdata->reg;
info->mtd.priv = &info->nand;
info->mtd.name = dev_name(&pdev->dev);
@@ -1162,16 +1276,23 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.options = pdata->devsize;
info->nand.options |= NAND_SKIP_BBTSCAN;
- /* NAND write protect off */
- gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "error getting memory resource\n");
+ goto out_free_info;
+ }
- if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+ info->phys_base = res->start;
+ info->mem_size = resource_size(res);
+
+ if (!request_mem_region(info->phys_base, info->mem_size,
pdev->dev.driver->name)) {
err = -EBUSY;
goto out_free_info;
}
- info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+ info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
if (!info->nand.IO_ADDR_R) {
err = -ENOMEM;
goto out_release_mem_region;
@@ -1244,17 +1365,39 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
break;
case NAND_OMAP_PREFETCH_IRQ:
- err = request_irq(pdata->gpmc_irq,
- omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+ info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
+ if (info->gpmc_irq_fifo <= 0) {
+ dev_err(&pdev->dev, "error getting fifo irq\n");
+ err = -ENODEV;
+ goto out_release_mem_region;
+ }
+ err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
+ IRQF_SHARED, "gpmc-nand-fifo", info);
if (err) {
dev_err(&pdev->dev, "requesting irq(%d) error:%d",
- pdata->gpmc_irq, err);
+ info->gpmc_irq_fifo, err);
+ info->gpmc_irq_fifo = 0;
+ goto out_release_mem_region;
+ }
+
+ info->gpmc_irq_count = platform_get_irq(pdev, 1);
+ if (info->gpmc_irq_count <= 0) {
+ dev_err(&pdev->dev, "error getting count irq\n");
+ err = -ENODEV;
+ goto out_release_mem_region;
+ }
+ err = request_irq(info->gpmc_irq_count, omap_nand_irq,
+ IRQF_SHARED, "gpmc-nand-count", info);
+ if (err) {
+ dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+ info->gpmc_irq_count, err);
+ info->gpmc_irq_count = 0;
goto out_release_mem_region;
- } else {
- info->gpmc_irq = pdata->gpmc_irq;
- info->nand.read_buf = omap_read_buf_irq_pref;
- info->nand.write_buf = omap_write_buf_irq_pref;
}
+
+ info->nand.read_buf = omap_read_buf_irq_pref;
+ info->nand.write_buf = omap_write_buf_irq_pref;
+
break;
default:
@@ -1340,7 +1483,11 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
out_release_mem_region:
if (info->dma)
dma_release_channel(info->dma);
- release_mem_region(info->phys_base, NAND_IO_SIZE);
+ if (info->gpmc_irq_count > 0)
+ free_irq(info->gpmc_irq_count, info);
+ if (info->gpmc_irq_fifo > 0)
+ free_irq(info->gpmc_irq_fifo, info);
+ release_mem_region(info->phys_base, info->mem_size);
out_free_info:
kfree(info);
@@ -1358,8 +1505,10 @@ static int omap_nand_remove(struct platform_device *pdev)
if (info->dma)
dma_release_channel(info->dma);
- if (info->gpmc_irq)
- free_irq(info->gpmc_irq, info);
+ if (info->gpmc_irq_count > 0)
+ free_irq(info->gpmc_irq_count, info);
+ if (info->gpmc_irq_fifo > 0)
+ free_irq(info->gpmc_irq_fifo, info);
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 9ee436d30932..aefaf8cd31ef 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,7 +21,7 @@
#include <linux/err.h>
#include <asm/io.h>
#include <asm/sizes.h>
-#include <plat/orion_nand.h>
+#include <linux/platform_data/mtd-orion_nand.h>
static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5df91d554dac..37ee75c7bacb 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -22,9 +22,11 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <mach/dma.h>
-#include <plat/pxa3xx_nand.h>
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
#define NAND_STOP_DELAY (2 * HZ/50)
@@ -1027,7 +1029,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
struct pxa3xx_nand_platform_data *pdata;
struct pxa3xx_nand_info *info;
struct pxa3xx_nand_host *host;
- struct nand_chip *chip;
+ struct nand_chip *chip = NULL;
struct mtd_info *mtd;
struct resource *r;
int ret, irq, cs;
@@ -1075,21 +1077,31 @@ static int alloc_nand_resource(struct platform_device *pdev)
}
clk_enable(info->clk);
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no resource defined for data DMA\n");
- ret = -ENXIO;
- goto fail_put_clk;
- }
- info->drcmr_dat = r->start;
+ /*
+ * This is a dirty hack to make this driver work from devicetree
+ * bindings. It can be removed once we have a proper DMA controller
+ * framework for DT.
+ */
+ if (pdev->dev.of_node && cpu_is_pxa3xx()) {
+ info->drcmr_dat = 97;
+ info->drcmr_cmd = 99;
+ } else {
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no resource defined for data DMA\n");
+ ret = -ENXIO;
+ goto fail_put_clk;
+ }
+ info->drcmr_dat = r->start;
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (r == NULL) {
- dev_err(&pdev->dev, "no resource defined for command DMA\n");
- ret = -ENXIO;
- goto fail_put_clk;
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no resource defined for command DMA\n");
+ ret = -ENXIO;
+ goto fail_put_clk;
+ }
+ info->drcmr_cmd = r->start;
}
- info->drcmr_cmd = r->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -1194,12 +1206,55 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static struct of_device_id pxa3xx_nand_dt_ids[] = {
+ { .compatible = "marvell,pxa3xx-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+
+ if (!of_id)
+ return 0;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
+ pdata->enable_arbiter = 1;
+ if (of_get_property(np, "marvell,nand-keep-config", NULL))
+ pdata->keep_config = 1;
+ of_property_read_u32(np, "num-cs", &pdata->num_cs);
+
+ pdev->dev.platform_data = pdata;
+
+ return 0;
+}
+#else
+static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_part_parser_data ppdata = {};
struct pxa3xx_nand_info *info;
int ret, cs, probe_success;
+ ret = pxa3xx_nand_probe_dt(pdev);
+ if (ret)
+ return ret;
+
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
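
To make the mapping in pxa3xx_nand_probe_dt() above concrete: for a device-tree node along the lines of the one sketched in the comment below, the resulting platform data is equivalent to the static initialisation shown. The field names come straight from the properties parsed in this hunk; the node label and everything other than the compatible string and the three parsed properties are illustrative placeholders.

#include <linux/platform_data/mtd-nand-pxa3xx.h>

/*
 * Illustrative node:
 *
 *	nand0: nand {
 *		compatible = "marvell,pxa3xx-nand";
 *		marvell,nand-enable-arbiter;
 *		marvell,nand-keep-config;
 *		num-cs = <1>;
 *	};
 */
static struct pxa3xx_nand_platform_data example_dt_pdata = {
	.enable_arbiter	= 1,
	.keep_config	= 1,
	.num_cs		= 1,
};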
@@ -1223,8 +1278,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
continue;
}
+ ppdata.of_node = pdev->dev.of_node;
ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
- NULL, pdata->parts[cs],
+ &ppdata, pdata->parts[cs],
pdata->nr_parts[cs]);
if (!ret)
probe_success = 1;
@@ -1300,6 +1356,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
static struct platform_driver pxa3xx_nand_driver = {
.driver = {
.name = "pxa3xx-nand",
+ .of_match_table = of_match_ptr(pxa3xx_nand_dt_ids),
},
.probe = pxa3xx_nand_probe,
.remove = pxa3xx_nand_remove,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 792cee846221..295e4bedad96 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -47,7 +47,7 @@
#include <linux/mtd/partitions.h>
#include <plat/regs-nand.h>
-#include <plat/nand.h>
+#include <linux/platform_data/mtd-nand-s3c2410.h>
/* new oob placement block for use with hardware ecc generation
*/