Merge tag 'v3.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux

Having missed the merge window, update to 3.6-rc2 to avoid conflicts with
new patches.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index e0aedb7..fe122d6 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -1216,8 +1216,6 @@
 #define	NAND_BBT_LASTBLOCK	0x00000010
 /* The bbt is at the given page, else we must scan for the bbt */
 #define NAND_BBT_ABSPAGE	0x00000020
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_SEARCH		0x00000040
 /* bbt is stored per chip on multichip devices */
 #define NAND_BBT_PERCHIP	0x00000080
 /* bbt has a version counter at offset veroffs */
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index a200695..d555421 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -3,7 +3,9 @@
 Required properties:
 - compatible : "atmel,at91rm9200-nand".
 - reg : should specify localbus address and size used for the chip,
-	and if availlable the ECC.
+	and the hardware ECC controller if available.
+	If the hardware ECC is PMECC, it should contain the address and size of
+	the PMECC block, the PMECC Error Location controller and the lookup-table ROM.
 - atmel,nand-addr-offset : offset for the address latch.
 - atmel,nand-cmd-offset : offset for the command latch.
 - #address-cells, #size-cells : Must be present if the device has sub-nodes
@@ -16,6 +18,15 @@
 - nand-ecc-mode : String, operation mode of the NAND ecc mode, soft by default.
   Supported values are: "none", "soft", "hw", "hw_syndrome", "hw_oob_first",
   "soft_bch".
+- atmel,has-pmecc : boolean to enable Programmable Multibit ECC hardware.
+  Only supported by at91sam9x5 and later sam9 products.
+- atmel,pmecc-cap : error correction capability of the Programmable Multibit
+  ECC Controller. Supported values are: 2, 4, 8, 12, 24.
+- atmel,pmecc-sector-size : sector size for ECC computation. Supported values
+  are: 512, 1024.
+- atmel,pmecc-lookup-table-offset : two offsets of the lookup table in ROM,
+  one per sector size. The first is for a 512-byte sector size, the second
+  for a 1024-byte sector size.
 - nand-bus-width : 8 or 16 bus width if not present 8
 - nand-on-flash-bbt: boolean to enable on flash bbt option if not present false
 
@@ -39,3 +50,30 @@
 		...
 	};
 };
+
+/* for PMECC supported chips */
+nand0: nand@40000000 {
+	compatible = "atmel,at91rm9200-nand";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	reg = < 0x40000000 0x10000000	/* bus addr & size */
+		0xffffe000 0x00000600	/* PMECC addr & size */
+		0xffffe600 0x00000200	/* PMECC ERRLOC addr & size */
+		0x00100000 0x00100000	/* ROM addr & size */
+		>;
+	atmel,nand-addr-offset = <21>;	/* ale */
+	atmel,nand-cmd-offset = <22>;	/* cle */
+	nand-on-flash-bbt;
+	nand-ecc-mode = "hw";
+	atmel,has-pmecc;	/* enable PMECC */
+	atmel,pmecc-cap = <2>;
+	atmel,pmecc-sector-size = <512>;
+	atmel,pmecc-lookup-table-offset = <0x8000 0x10000>;
+	gpios = <&pioD 5 0	/* rdy */
+		 &pioD 4 0	/* nce */
+		 0		/* cd */
+		>;
+	partition@0 {
+		...
+	};
+};
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
index 1a5bbd3..3fb3f901 100644
--- a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
@@ -12,6 +12,10 @@
   - interrupt-names : The interrupt names "gpmi-dma", "bch";
   - fsl,gpmi-dma-channel : Should contain the dma channel it uses.
 
+Optional properties:
+  - nand-on-flash-bbt: boolean to enable the on-flash BBT option; defaults
+                       to false if not present
+
 The device tree may optionally contain sub-nodes describing partitions of the
 address space. See partition.txt for more detail.
 
diff --git a/Documentation/devicetree/bindings/mtd/lpc32xx-mlc.txt b/Documentation/devicetree/bindings/mtd/lpc32xx-mlc.txt
new file mode 100644
index 0000000..d0a3725
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/lpc32xx-mlc.txt
@@ -0,0 +1,50 @@
+NXP LPC32xx SoC NAND MLC controller
+
+Required properties:
+- compatible: "nxp,lpc3220-mlc"
+- reg: Address and size of the controller
+- interrupts: The NAND interrupt specification
+- gpios: GPIO specification for NAND write protect
+
+The following required properties are controller-specific. See the LPC32xx
+User Manual, section 7.5.14 "MLC NAND Timing Register". (The values here are
+specified in Hz, to make them independent of the actual clock speed and to
+provide good accuracy.)
+- nxp,tcea-delay: TCEA_DELAY
+- nxp,busy-delay: BUSY_DELAY
+- nxp,nand-ta: NAND_TA
+- nxp,rd-high: RD_HIGH
+- nxp,rd-low: RD_LOW
+- nxp,wr-high: WR_HIGH
+- nxp,wr-low: WR_LOW
+
+Optional subnodes:
+- Partitions, see Documentation/devicetree/bindings/mtd/partition.txt
+
+Example:
+
+	mlc: flash@200A8000 {
+		compatible = "nxp,lpc3220-mlc";
+		reg = <0x200A8000 0x11000>;
+		interrupts = <11 0>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		nxp,tcea-delay = <333333333>;
+		nxp,busy-delay = <10000000>;
+		nxp,nand-ta = <18181818>;
+		nxp,rd-high = <31250000>;
+		nxp,rd-low = <45454545>;
+		nxp,wr-high = <40000000>;
+		nxp,wr-low = <83333333>;
+		gpios = <&gpio 5 19 1>; /* GPO_P3 19, active low */
+
+		mtd0@00000000 {
+			label = "boot";
+			reg = <0x00000000 0x00064000>;
+			read-only;
+		};
+
+		...
+
+	};
diff --git a/Documentation/devicetree/bindings/mtd/lpc32xx-slc.txt b/Documentation/devicetree/bindings/mtd/lpc32xx-slc.txt
new file mode 100644
index 0000000..d94edc0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/lpc32xx-slc.txt
@@ -0,0 +1,52 @@
+NXP LPC32xx SoC NAND SLC controller
+
+Required properties:
+- compatible: "nxp,lpc3220-slc"
+- reg: Address and size of the controller
+- nand-on-flash-bbt: Use bad block table on flash
+- gpios: GPIO specification for NAND write protect
+
+The following required properties are controller-specific. See the LPC32xx
+User Manual:
+- nxp,wdr-clks: Delay before Ready signal is tested on write (W_RDY)
+- nxp,rdr-clks: Delay before Ready signal is tested on read (R_RDY)
+(The following values are specified in Hz, to make them independent of the
+actual clock speed.)
+- nxp,wwidth: Write pulse width (W_WIDTH)
+- nxp,whold: Write hold time (W_HOLD)
+- nxp,wsetup: Write setup time (W_SETUP)
+- nxp,rwidth: Read pulse width (R_WIDTH)
+- nxp,rhold: Read hold time (R_HOLD)
+- nxp,rsetup: Read setup time (R_SETUP)
+
+Optional subnodes:
+- Partitions, see Documentation/devicetree/bindings/mtd/partition.txt
+
+Example:
+
+	slc: flash@20020000 {
+		compatible = "nxp,lpc3220-slc";
+		reg = <0x20020000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		nxp,wdr-clks = <14>;
+		nxp,wwidth = <40000000>;
+		nxp,whold = <100000000>;
+		nxp,wsetup = <100000000>;
+		nxp,rdr-clks = <14>;
+		nxp,rwidth = <40000000>;
+		nxp,rhold = <66666666>;
+		nxp,rsetup = <100000000>;
+		nand-on-flash-bbt;
+		gpios = <&gpio 5 19 1>; /* GPO_P3 19, active low */
+
+		mtd0@00000000 {
+			label = "phy3250-boot";
+			reg = <0x00000000 0x00064000>;
+			read-only;
+		};
+
+		...
+
+	};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index aba28dc..aaa0c0a 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -263,6 +263,13 @@
 				status = "disabled";
 			};
 
+			nand@83fdb000 {
+				compatible = "fsl,imx51-nand";
+				reg = <0x83fdb000 0x1000 0xcfff0000 0x10000>;
+				interrupts = <8>;
+				status = "disabled";
+			};
+
 			ssi3: ssi@83fe8000 {
 				compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
 				reg = <0x83fe8000 0x4000>;
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index cd37165..dc00c62 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -321,6 +321,13 @@
 				status = "disabled";
 			};
 
+			nand@63fdb000 {
+				compatible = "fsl,imx53-nand";
+				reg = <0x63fdb000 0x1000 0xf7ff0000 0x10000>;
+				interrupts = <8>;
+				status = "disabled";
+			};
+
 			ssi3: ssi@63fe8000 {
 				compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
 				reg = <0x63fe8000 0x4000>;
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index 4bdcaa9..e81f17a 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -367,6 +367,7 @@
 	clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "83fcc000.ssi");
 	clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "70014000.ssi");
 	clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "83fe8000.ssi");
+	clk_register_clkdev(clk[nfc_gate], NULL, "83fdb000.nand");
 
 	/* set the usboh3 parent to pll2_sw */
 	clk_set_parent(clk[usboh3_sel], clk[pll2_sw]);
@@ -455,6 +456,7 @@
 	clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "63fcc000.ssi");
 	clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "50014000.ssi");
 	clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "63fd0000.ssi");
+	clk_register_clkdev(clk[nfc_gate], NULL, "63fdb000.nand");
 
 	/* set SDHC root clock to 200MHZ*/
 	clk_set_rate(clk[esdhc_a_podf], 200000000);
diff --git a/arch/arm/plat-mxc/devices/platform-mxc_nand.c b/arch/arm/plat-mxc/devices/platform-mxc_nand.c
index 1568f39..95b75cc 100644
--- a/arch/arm/plat-mxc/devices/platform-mxc_nand.c
+++ b/arch/arm/plat-mxc/devices/platform-mxc_nand.c
@@ -63,10 +63,6 @@
 	/* AXI has to come first, that's how the mxc_nand driver expect it */
 	struct resource res[] = {
 		{
-			.start = data->axibase,
-			.end = data->axibase + SZ_16K - 1,
-			.flags = IORESOURCE_MEM,
-		}, {
 			.start = data->iobase,
 			.end = data->iobase + data->iosize - 1,
 			.flags = IORESOURCE_MEM,
@@ -74,10 +70,13 @@
 			.start = data->irq,
 			.end = data->irq,
 			.flags = IORESOURCE_IRQ,
+		}, {
+			.start = data->axibase,
+			.end = data->axibase + SZ_16K - 1,
+			.flags = IORESOURCE_MEM,
 		},
 	};
 	return imx_add_platform_device("mxc_nand", data->id,
-			res + !data->axibase,
-			ARRAY_SIZE(res) - !data->axibase,
+			res, ARRAY_SIZE(res) - !data->axibase,
 			pdata, sizeof(*pdata));
 }
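
[ Note on the hunk above -- a sketch of the intent, not part of the patch:
  with the optional AXI window now last in res[], a zero data->axibase is
  handled by dropping only the trailing entry, so the I/O and IRQ resources
  keep fixed positions either way. ]

	/*
	 * Illustrative only; "res" and "data" are the locals of the function
	 * above.
	 *   data->axibase != 0  ->  nres == 3  (io, irq, axi)
	 *   data->axibase == 0  ->  nres == 2  (io, irq)
	 */
	unsigned int nres = ARRAY_SIZE(res) - !data->axibase;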
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index b1e3c26..e469b01 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -43,9 +43,6 @@
 	prompt "Flash cmd/query data swapping"
 	depends on MTD_CFI_ADV_OPTIONS
 	default MTD_CFI_NOSWAP
-
-config MTD_CFI_NOSWAP
-	bool "NO"
 	---help---
 	  This option defines the way in which the CPU attempts to arrange
 	  data bits when writing the 'magic' commands to the chips. Saying
@@ -55,12 +52,8 @@
 	  Specific arrangements are possible with the BIG_ENDIAN_BYTE and
 	  LITTLE_ENDIAN_BYTE, if the bytes are reversed.
 
-	  If you have a LART, on which the data (and address) lines were
-	  connected in a fashion which ensured that the nets were as short
-	  as possible, resulting in a bit-shuffling which seems utterly
-	  random to the untrained eye, you need the LART_ENDIAN_BYTE option.
-
-	  Yes, there really exists something sicker than PDP-endian :)
+config MTD_CFI_NOSWAP
+	bool "NO"
 
 config MTD_CFI_BE_BYTE_SWAP
 	bool "BIG_ENDIAN_BYTE"
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 22d0493..5ff5c4a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -431,6 +431,68 @@
 	}
 }
 
+static int is_m29ew(struct cfi_private *cfi)
+{
+	if (cfi->mfr == CFI_MFR_INTEL &&
+	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
+	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
+		return 1;
+	return 0;
+}
+
+/*
+ * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
+ * Some revisions of the M29EW suffer from erase suspend hang ups. In
+ * particular, it can occur when the sequence
+ * Erase Confirm -> Suspend -> Program -> Resume
+ * causes a lockup due to internal timing issues. The consequence is that the
+ * erase cannot be resumed without inserting a dummy command after programming
+ * and prior to resuming. [...] The work-around is to issue a dummy write cycle
+ * that writes an F0 command code before the RESUME command.
+ */
+static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
+					  unsigned long adr)
+{
+	struct cfi_private *cfi = map->fldrv_priv;
+	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
+	if (is_m29ew(cfi))
+		map_write(map, CMD(0xF0), adr);
+}
+
+/*
+ * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
+ *
+ * Some revisions of the M29EW (for example, A1 and A2 step revisions)
+ * are affected by a problem that could cause a hang up when an ERASE SUSPEND
+ * command is issued after an ERASE RESUME operation without waiting for a
+ * minimum delay.  The result is that once the ERASE seems to be completed
+ * (no bits are toggling), the contents of the Flash memory block on which
+ * the erase was ongoing could be inconsistent with the expected values
+ * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
+ * values), causing a consequent failure of the ERASE operation.
+ * The occurrence of this issue could be high, especially when file system
+ * operations on the Flash are intensive.  As a result, it is recommended
+ * that a patch be applied.  Intensive file system operations can cause many
+ * calls to the garbage routine to free Flash space (also by erasing physical
+ * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
+ * commands can occur.  The problem disappears when a delay is inserted after
+ * the RESUME command by using the udelay() function available in Linux.
+ * The DELAY value must be tuned based on the customer's platform.
+ * The maximum value that fixes the problem in all cases is 500us.
+ * But, in our experience, a delay of 30 µs to 50 µs is sufficient
+ * in most cases.
+ * We have chosen 500µs because this latency is acceptable.
+ */
+static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
+{
+	/*
+	 * Resolving the Delay After Resume Issue, see Micron TN-13-07.
+	 * Worst case delay must be 500µs but 30-50µs should be ok as well
+	 */
+	if (is_m29ew(cfi))
+		cfi_udelay(500);
+}
+
 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -776,7 +838,10 @@
 
 	switch(chip->oldstate) {
 	case FL_ERASING:
+		cfi_fixup_m29ew_erase_suspend(map,
+			chip->in_progress_block_addr);
 		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
+		cfi_fixup_m29ew_delay_after_resume(cfi);
 		chip->oldstate = FL_READY;
 		chip->state = FL_ERASING;
 		break;
@@ -916,6 +981,8 @@
 			/* Disallow XIP again */
 			local_irq_disable();
 
+			/* Correct Erase Suspend Hangups for M29EW */
+			cfi_fixup_m29ew_erase_suspend(map, adr);
 			/* Resume the write or erase operation */
 			map_write(map, cfi->sector_erase_cmd, adr);
 			chip->state = oldstate;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 4cdb2af..6cc5a1a 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -97,7 +97,7 @@
 	  doesn't support the JEDEC ID instruction.
 
 config M25PXX_USE_FAST_READ
-	bool "Use FAST_READ OPCode allowing SPI CLK <= 50MHz"
+	bool "Use FAST_READ OPCode allowing SPI CLK >= 50MHz"
 	depends on MTD_M25P80
 	default y
 	help
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 5d0d68c..5257345 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -633,6 +633,8 @@
 	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
 	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
 
+	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+
 	/* EON -- en25xxx */
 	{ "en25f32", INFO(0x1c3116, 0, 64 * 1024,  64, SECT_4K) },
 	{ "en25p32", INFO(0x1c2016, 0, 64 * 1024,  64, 0) },
@@ -646,6 +648,7 @@
 	{ "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
 	{ "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
 	{ "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },
+	{ "n25q064",  INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
 
 	/* Macronix */
 	{ "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
@@ -699,6 +702,7 @@
 	{ "m25p32",  INFO(0x202016,  0,  64 * 1024,  64, 0) },
 	{ "m25p64",  INFO(0x202017,  0,  64 * 1024, 128, 0) },
 	{ "m25p128", INFO(0x202018,  0, 256 * 1024,  64, 0) },
+	{ "n25q032", INFO(0x20ba16,  0,  64 * 1024,  64, 0) },
 
 	{ "m25p05-nonjedec",  INFO(0, 0,  32 * 1024,   2, 0) },
 	{ "m25p10-nonjedec",  INFO(0, 0,  32 * 1024,   4, 0) },
@@ -714,6 +718,7 @@
 	{ "m45pe80", INFO(0x204014,  0, 64 * 1024,   16, 0) },
 	{ "m45pe16", INFO(0x204015,  0, 64 * 1024,   32, 0) },
 
+	{ "m25pe20", INFO(0x208012,  0, 64 * 1024,  4,       0) },
 	{ "m25pe80", INFO(0x208014,  0, 64 * 1024, 16,       0) },
 	{ "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) },
 
@@ -730,6 +735,7 @@
 	{ "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
 	{ "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
 	{ "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
+	{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64, SECT_4K) },
 	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
 	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
 	{ "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 6796036..b85f183 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/param.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/spear_smi.h>
@@ -240,8 +241,8 @@
 	/* copy dev->status (lower 16 bits) in order to release lock */
 	if (ret > 0)
 		ret = dev->status & 0xffff;
-	else
-		ret = -EIO;
+	else if (ret == 0)
+		ret = -ETIMEDOUT;
 
 	/* restore the ctrl regs state */
 	writel(ctrlreg1, dev->io_base + SMI_CR1);
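
[ The change above -- like the similar ones later in this file -- maps the
  three possible results of wait_event_interruptible_timeout() explicitly.
  A minimal sketch of that return-value convention, not part of the patch;
  "wq", "cond" and "timeout" are placeholders: ]

	#include <linux/types.h>
	#include <linux/wait.h>
	#include <linux/errno.h>

	/* Sketch only: the three outcomes of wait_event_interruptible_timeout(). */
	static int example_wait(wait_queue_head_t *wq, bool *cond, long timeout)
	{
		long ret = wait_event_interruptible_timeout(*wq, *cond, timeout);

		if (ret > 0)		/* condition became true; ret = remaining jiffies */
			return 0;
		if (ret == 0)		/* timed out with the condition still false */
			return -ETIMEDOUT;
		return ret;		/* -ERESTARTSYS: interrupted by a signal */
	}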
@@ -269,16 +270,19 @@
 	finish = jiffies + timeout;
 	do {
 		status = spear_smi_read_sr(dev, bank);
-		if (status < 0)
-			continue; /* try till timeout */
-		else if (!(status & SR_WIP))
+		if (status < 0) {
+			if (status == -ETIMEDOUT)
+				continue; /* try till finish */
+			return status;
+		} else if (!(status & SR_WIP)) {
 			return 0;
+		}
 
 		cond_resched();
 	} while (!time_after_eq(jiffies, finish));
 
 	dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
-	return status;
+	return -EBUSY;
 }
 
 /**
@@ -335,6 +339,9 @@
 	val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
 
 	mutex_lock(&dev->lock);
+	/* clear all interrupt conditions */
+	writel(0, dev->io_base + SMI_SR);
+
 	writel(val, dev->io_base + SMI_CR1);
 	mutex_unlock(&dev->lock);
 }
@@ -391,11 +398,11 @@
 	writel(ctrlreg1, dev->io_base + SMI_CR1);
 	writel(0, dev->io_base + SMI_CR2);
 
-	if (ret <= 0) {
+	if (ret == 0) {
 		ret = -EIO;
 		dev_err(&dev->pdev->dev,
 			"smi controller failed on write enable\n");
-	} else {
+	} else if (ret > 0) {
 		/* check whether write mode status is set for required bank */
 		if (dev->status & (1 << (bank + WM_SHIFT)))
 			ret = 0;
@@ -462,10 +469,10 @@
 	ret = wait_event_interruptible_timeout(dev->cmd_complete,
 			dev->status & TFF, SMI_CMD_TIMEOUT);
 
-	if (ret <= 0) {
+	if (ret == 0) {
 		ret = -EIO;
 		dev_err(&dev->pdev->dev, "sector erase failed\n");
-	} else
+	} else if (ret > 0)
 		ret = 0; /* success */
 
 	/* restore ctrl regs */
@@ -1086,29 +1093,33 @@
 	return 0;
 }
 
-int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM
+static int spear_smi_suspend(struct device *dev)
 {
-	struct spear_smi *dev = platform_get_drvdata(pdev);
+	struct spear_smi *sdev = dev_get_drvdata(dev);
 
-	if (dev && dev->clk)
-		clk_disable_unprepare(dev->clk);
+	if (sdev && sdev->clk)
+		clk_disable_unprepare(sdev->clk);
 
 	return 0;
 }
 
-int spear_smi_resume(struct platform_device *pdev)
+static int spear_smi_resume(struct device *dev)
 {
-	struct spear_smi *dev = platform_get_drvdata(pdev);
+	struct spear_smi *sdev = dev_get_drvdata(dev);
 	int ret = -EPERM;
 
-	if (dev && dev->clk)
-		ret = clk_prepare_enable(dev->clk);
+	if (sdev && sdev->clk)
+		ret = clk_prepare_enable(sdev->clk);
 
 	if (!ret)
-		spear_smi_hw_init(dev);
+		spear_smi_hw_init(sdev);
 	return ret;
 }
 
+static SIMPLE_DEV_PM_OPS(spear_smi_pm_ops, spear_smi_suspend, spear_smi_resume);
+#endif
+
 #ifdef CONFIG_OF
 static const struct of_device_id spear_smi_id_table[] = {
 	{ .compatible = "st,spear600-smi" },
@@ -1123,11 +1134,12 @@
 		.bus = &platform_bus_type,
 		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(spear_smi_id_table),
+#ifdef CONFIG_PM
+		.pm = &spear_smi_pm_ops,
+#endif
 	},
 	.probe = spear_smi_probe,
 	.remove = __devexit_p(spear_smi_remove),
-	.suspend = spear_smi_suspend,
-	.resume = spear_smi_resume,
 };
 
 static int spear_smi_init(void)
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5ba2458..53850f1 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -447,18 +447,6 @@
 	help
 	  Map driver to support image based filesystems for uClinux.
 
-config MTD_WRSBC8260
-	tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
-	depends on (SBC82xx || SBC8560)
-	select MTD_MAP_BANK_WIDTH_4
-	select MTD_MAP_BANK_WIDTH_1
-	select MTD_CFI_I1
-	select MTD_CFI_I4
-	help
-	  Map driver for WindRiver PowerQUICC II MPC82xx board. Drives
-	  all three flash regions on CS0, CS1 and CS6 if they are configured
-	  correctly by the boot loader.
-
 config MTD_DMV182
         tristate "Map driver for Dy-4 SVME/DMV-182 board."
         depends on DMV182
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 68a9a91..deb43e9 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -47,7 +47,6 @@
 obj-$(CONFIG_MTD_H720X)		+= h720x-flash.o
 obj-$(CONFIG_MTD_IXP4XX)	+= ixp4xx.o
 obj-$(CONFIG_MTD_IXP2000)	+= ixp2000.o
-obj-$(CONFIG_MTD_WRSBC8260)	+= wr_sbc82xx_flash.o
 obj-$(CONFIG_MTD_DMV182)	+= dmv182.o
 obj-$(CONFIG_MTD_PLATRAM)	+= plat-ram.o
 obj-$(CONFIG_MTD_INTEL_VR_NOR)	+= intel_vr_nor.o
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
deleted file mode 100644
index e7534c8..0000000
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Map for flash chips on Wind River PowerQUICC II SBC82xx board.
- *
- * Copyright (C) 2004 Red Hat, Inc.
- *
- * Author: David Woodhouse <dwmw2@infradead.org>
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/immap_cpm2.h>
-
-static struct mtd_info *sbcmtd[3];
-
-struct map_info sbc82xx_flash_map[3] = {
-	{.name = "Boot flash"},
-	{.name = "Alternate boot flash"},
-	{.name = "User flash"}
-};
-
-static struct mtd_partition smallflash_parts[] = {
-	{
-		.name =		"space",
-		.size =		0x100000,
-		.offset =	0,
-	}, {
-		.name =		"bootloader",
-		.size =		MTDPART_SIZ_FULL,
-		.offset =	MTDPART_OFS_APPEND,
-	}
-};
-
-static struct mtd_partition bigflash_parts[] = {
-	{
-		.name =		"bootloader",
-		.size =		0x00100000,
-		.offset =	0,
-	}, {
-		.name =		"file system",
-		.size =		0x01f00000,
-		.offset =	MTDPART_OFS_APPEND,
-	}, {
-		.name =		"boot config",
-		.size =		0x00100000,
-		.offset =	MTDPART_OFS_APPEND,
-	}, {
-		.name =		"space",
-		.size =		0x01f00000,
-		.offset =	MTDPART_OFS_APPEND,
-	}
-};
-
-static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL};
-
-#define init_sbc82xx_one_flash(map, br, or)			\
-do {								\
-	(map).phys = (br & 1) ? (br & 0xffff8000) : 0;		\
-	(map).size = (br & 1) ? (~(or & 0xffff8000) + 1) : 0;	\
-	switch (br & 0x00001800) {				\
-	case 0x00000000:					\
-	case 0x00000800:	(map).bankwidth = 1;	break;	\
-	case 0x00001000:	(map).bankwidth = 2;	break;	\
-	case 0x00001800:	(map).bankwidth = 4;	break;	\
-	}							\
-} while (0);
-
-static int __init init_sbc82xx_flash(void)
-{
-	volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
-	int bigflash;
-	int i;
-
-#ifdef CONFIG_SBC8560
-	mc = ioremap(0xff700000 + 0x5000, sizeof(memctl_cpm2_t));
-#else
-	mc = &cpm2_immr->im_memctl;
-#endif
-
-	bigflash = 1;
-	if ((mc->memc_br0 & 0x00001800) == 0x00001800)
-		bigflash = 0;
-
-	init_sbc82xx_one_flash(sbc82xx_flash_map[0], mc->memc_br0, mc->memc_or0);
-	init_sbc82xx_one_flash(sbc82xx_flash_map[1], mc->memc_br6, mc->memc_or6);
-	init_sbc82xx_one_flash(sbc82xx_flash_map[2], mc->memc_br1, mc->memc_or1);
-
-#ifdef CONFIG_SBC8560
-	iounmap((void *) mc);
-#endif
-
-	for (i=0; i<3; i++) {
-		int8_t flashcs[3] = { 0, 6, 1 };
-		int nr_parts;
-		struct mtd_partition *defparts;
-
-		printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
-		       sbc82xx_flash_map[i].name,
-		       (sbc82xx_flash_map[i].size >> 20),
-		       flashcs[i]);
-		if (!sbc82xx_flash_map[i].phys) {
-			/* We know it can't be at zero. */
-			printk("): disabled by bootloader.\n");
-			continue;
-		}
-		printk(" at %08lx)\n",  sbc82xx_flash_map[i].phys);
-
-		sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys,
-						    sbc82xx_flash_map[i].size);
-
-		if (!sbc82xx_flash_map[i].virt) {
-			printk("Failed to ioremap\n");
-			continue;
-		}
-
-		simple_map_init(&sbc82xx_flash_map[i]);
-
-		sbcmtd[i] = do_map_probe("cfi_probe", &sbc82xx_flash_map[i]);
-
-		if (!sbcmtd[i])
-			continue;
-
-		sbcmtd[i]->owner = THIS_MODULE;
-
-		/* No partitioning detected. Use default */
-		if (i == 2) {
-			defparts = NULL;
-			nr_parts = 0;
-		} else if (i == bigflash) {
-			defparts = bigflash_parts;
-			nr_parts = ARRAY_SIZE(bigflash_parts);
-		} else {
-			defparts = smallflash_parts;
-			nr_parts = ARRAY_SIZE(smallflash_parts);
-		}
-
-		mtd_device_parse_register(sbcmtd[i], part_probes, NULL,
-					  defparts, nr_parts);
-	}
-	return 0;
-}
-
-static void __exit cleanup_sbc82xx_flash(void)
-{
-	int i;
-
-	for (i=0; i<3; i++) {
-		if (!sbcmtd[i])
-			continue;
-
-		mtd_device_unregister(sbcmtd[i]);
-
-		map_destroy(sbcmtd[i]);
-
-		iounmap((void *)sbc82xx_flash_map[i].virt);
-		sbc82xx_flash_map[i].virt = 0;
-	}
-}
-
-module_init(init_sbc82xx_flash);
-module_exit(cleanup_sbc82xx_flash);
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Flash map driver for WindRiver PowerQUICC II");
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5757307..fcfce24 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -858,6 +858,15 @@
 }
 EXPORT_SYMBOL_GPL(mtd_panic_write);
 
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+	ops->retlen = ops->oobretlen = 0;
+	if (!mtd->_read_oob)
+		return -EOPNOTSUPP;
+	return mtd->_read_oob(mtd, from, ops);
+}
+EXPORT_SYMBOL_GPL(mtd_read_oob);
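
[ Usage sketch for the helper exported above; the wrapper function, its
  caller and the buffer handling are hypothetical and not part of the patch: ]

	#include <linux/kernel.h>
	#include <linux/mtd/mtd.h>

	/* Sketch only: read the whole OOB area of the page at "from". */
	static int example_read_oob(struct mtd_info *mtd, loff_t from, uint8_t *oobbuf)
	{
		struct mtd_oob_ops ops = {
			.mode   = MTD_OPS_PLACE_OOB,
			.ooblen = mtd->oobsize,
			.oobbuf = oobbuf,
		};
		int ret = mtd_read_oob(mtd, from, &ops);

		if (!ret)
			pr_debug("read %zu of %zu OOB bytes\n", ops.oobretlen, ops.ooblen);
		return ret;
	}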
+
 /*
  * Method to access the protection register area, present in some flash
  * devices. The user data is one time programmable but the factory data is read
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 551e316..788f00b 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -169,14 +169,7 @@
 			cxt->nextpage = 0;
 	}
 
-	while (1) {
-		ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
-		if (!ret)
-			break;
-		if (ret < 0) {
-			printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
-			return;
-		}
+	while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
 badblock:
 		printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
 		       cxt->nextpage * record_size);
@@ -190,6 +183,11 @@
 		}
 	}
 
+	if (ret < 0) {
+		printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
+		return;
+	}
+
 	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
 		ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
 
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8ca4176..588e989 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -406,46 +406,6 @@
 	help
 	  Enables support for NAND Flash / Smart Media Card interface
 	  on Atmel AT91 and AVR32 processors.
-choice
-	prompt "ECC management for NAND Flash / SmartMedia on AT91 / AVR32"
-	depends on MTD_NAND_ATMEL
-
-config MTD_NAND_ATMEL_ECC_HW
-	bool "Hardware ECC"
-	depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9260 || AVR32
-	help
-	  Use hardware ECC instead of software ECC when the chip
-	  supports it.
-
-	  The hardware ECC controller is capable of single bit error
-	  correction and 2-bit random detection per page.
-
-	  NB : hardware and software ECC schemes are incompatible.
-	  If you switch from one to another, you'll have to erase your
-	  mtd partition.
-
-	  If unsure, say Y
-
-config MTD_NAND_ATMEL_ECC_SOFT
-	bool "Software ECC"
-	help
-	  Use software ECC.
-
-	  NB : hardware and software ECC schemes are incompatible.
-	  If you switch from one to another, you'll have to erase your
-	  mtd partition.
-
-config MTD_NAND_ATMEL_ECC_NONE
-	bool "No ECC (testing only, DANGEROUS)"
-	depends on DEBUG_KERNEL
-	help
-	  No ECC will be used.
-	  It's not a good idea and it should be reserved for testing
-	  purpose only.
-
-	  If unsure, say N
-
-endchoice
 
 config MTD_NAND_PXA3xx
 	tristate "Support for NAND flash devices on PXA3xx"
@@ -454,6 +414,28 @@
 	  This enables the driver for the NAND flash device found on
 	  PXA3xx processors
 
+config MTD_NAND_SLC_LPC32XX
+	tristate "NXP LPC32xx SLC Controller"
+	depends on ARCH_LPC32XX
+	help
+	  Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell
+	  chips) NAND controller. This is the default for the PHYTEC 3250
+	  reference board which contains a NAND256R3A2CZA6 chip.
+
+	  Please check that the NAND chip actually connected is supported
+	  by the SLC NAND controller.
+
+config MTD_NAND_MLC_LPC32XX
+	tristate "NXP LPC32xx MLC Controller"
+	depends on ARCH_LPC32XX
+	help
+	  Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND
+	  controller. This is the default for the WORK92105 controller
+	  board.
+
+	  Please check that the NAND chip actually connected is supported
+	  by the MLC NAND controller.
+
 config MTD_NAND_CM_X270
 	tristate "Support for NAND Flash on CM-X270 modules"
 	depends on MACH_ARMCORE
@@ -550,7 +532,7 @@
 
 config MTD_NAND_MXC
 	tristate "MXC NAND support"
-	depends on IMX_HAVE_PLATFORM_MXC_NAND
+	depends on ARCH_MXC
 	help
 	  This enables the driver for the NAND flash controller on the
 	  MXC processors.
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d4b4d87..ddee818 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -40,6 +40,8 @@
 obj-$(CONFIG_MTD_NAND_FSL_ELBC)		+= fsl_elbc_nand.o
 obj-$(CONFIG_MTD_NAND_FSL_IFC)		+= fsl_ifc_nand.o
 obj-$(CONFIG_MTD_NAND_FSL_UPM)		+= fsl_upm.o
+obj-$(CONFIG_MTD_NAND_SLC_LPC32XX)	+= lpc32xx_slc.o
+obj-$(CONFIG_MTD_NAND_MLC_LPC32XX)	+= lpc32xx_mlc.o
 obj-$(CONFIG_MTD_NAND_SH_FLCTL)		+= sh_flctl.o
 obj-$(CONFIG_MTD_NAND_MXC)		+= mxc_nand.o
 obj-$(CONFIG_MTD_NAND_SOCRATES)		+= socrates_nand.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 97ac671..6472755 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1,20 +1,22 @@
 /*
- *  Copyright (C) 2003 Rick Bronson
+ *  Copyright © 2003 Rick Bronson
  *
  *  Derived from drivers/mtd/nand/autcpu12.c
- *	 Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ *	 Copyright © 2001 Thomas Gleixner (gleixner@autronix.de)
  *
  *  Derived from drivers/mtd/spia.c
- *	 Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
+ *	 Copyright © 2000 Steven J. Hill (sjhill@cotw.com)
  *
  *
  *  Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
- *     Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright (C) 2007
+ *     Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007
  *
  *     Derived from Das U-Boot source code
  *     		(u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
- *     (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *     © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
  *
+ *  Add Programmable Multibit ECC support for various AT91 SoC
+ *     © Copyright 2012 ATMEL, Hong Xu
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -93,8 +95,36 @@
 
 	struct completion	comp;
 	struct dma_chan		*dma_chan;
+
+	bool			has_pmecc;
+	u8			pmecc_corr_cap;
+	u16			pmecc_sector_size;
+	u32			pmecc_lookup_table_offset;
+
+	int			pmecc_bytes_per_sector;
+	int			pmecc_sector_number;
+	int			pmecc_degree;	/* Degree of remainders */
+	int			pmecc_cw_len;	/* Length of codeword */
+
+	void __iomem		*pmerrloc_base;
+	void __iomem		*pmecc_rom_base;
+
+	/* lookup table for alpha_to and index_of */
+	void __iomem		*pmecc_alpha_to;
+	void __iomem		*pmecc_index_of;
+
+	/* data for pmecc computation */
+	int16_t			*pmecc_partial_syn;
+	int16_t			*pmecc_si;
+	int16_t			*pmecc_smu;	/* Sigma table */
+	int16_t			*pmecc_lmu;	/* polynomial order */
+	int			*pmecc_mu;
+	int			*pmecc_dmu;
+	int			*pmecc_delta;
 };
 
+static struct nand_ecclayout atmel_pmecc_oobinfo;
+
 static int cpu_has_dma(void)
 {
 	return cpu_is_at91sam9rl() || cpu_is_at91sam9g45();
@@ -288,6 +318,703 @@
 }
 
 /*
+ * Return number of ecc bytes per sector according to sector size and
+ * correction capability
+ *
+ * The following table shows what the at91 PMECC supports:
+ * Correction Capability	Sector_512_bytes	Sector_1024_bytes
+ * =====================	================	=================
+ *                2-bits                 4-bytes                  4-bytes
+ *                4-bits                 7-bytes                  7-bytes
+ *                8-bits                13-bytes                 14-bytes
+ *               12-bits                20-bytes                 21-bytes
+ *               24-bits                39-bytes                 42-bytes
+ */
+static int __devinit pmecc_get_ecc_bytes(int cap, int sector_size)
+{
+	int m = 12 + sector_size / 512;
+	return (m * cap + 7) / 8;
+}
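
[ Quick check of the formula above against the table, not part of the patch:
  for cap = 24 and sector_size = 1024, m = 12 + 1024/512 = 14, so
  (14 * 24 + 7) / 8 = 343 / 8 = 42 bytes with integer division; for cap = 8
  and sector_size = 512, m = 13 and (13 * 8 + 7) / 8 = 111 / 8 = 13 bytes.
  Both values match the table. ]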
+
+static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout,
+	int oobsize, int ecc_len)
+{
+	int i;
+
+	layout->eccbytes = ecc_len;
+
+	/* ECC will occupy the last ecc_len bytes continuously */
+	for (i = 0; i < ecc_len; i++)
+		layout->eccpos[i] = oobsize - ecc_len + i;
+
+	layout->oobfree[0].offset = 2;
+	layout->oobfree[0].length =
+		oobsize - ecc_len - layout->oobfree[0].offset;
+}
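
[ Worked example for the layout above, using the values from the binding
  example added earlier in this merge (2048-byte pages, 512-byte sectors,
  cap = 2) and assuming a typical 64-byte OOB: 4 sectors * 4 ECC bytes =
  16 ECC bytes, so eccpos covers OOB bytes 48..63 and the free area is
  offset 2, length 64 - 16 - 2 = 46 bytes. ]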
+
+static void __devinit __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
+{
+	int table_size;
+
+	table_size = host->pmecc_sector_size == 512 ?
+		PMECC_LOOKUP_TABLE_SIZE_512 : PMECC_LOOKUP_TABLE_SIZE_1024;
+
+	return host->pmecc_rom_base + host->pmecc_lookup_table_offset +
+			table_size * sizeof(int16_t);
+}
+
+static void pmecc_data_free(struct atmel_nand_host *host)
+{
+	kfree(host->pmecc_partial_syn);
+	kfree(host->pmecc_si);
+	kfree(host->pmecc_lmu);
+	kfree(host->pmecc_smu);
+	kfree(host->pmecc_mu);
+	kfree(host->pmecc_dmu);
+	kfree(host->pmecc_delta);
+}
+
+static int __devinit pmecc_data_alloc(struct atmel_nand_host *host)
+{
+	const int cap = host->pmecc_corr_cap;
+
+	host->pmecc_partial_syn = kzalloc((2 * cap + 1) * sizeof(int16_t),
+					GFP_KERNEL);
+	host->pmecc_si = kzalloc((2 * cap + 1) * sizeof(int16_t), GFP_KERNEL);
+	host->pmecc_lmu = kzalloc((cap + 1) * sizeof(int16_t), GFP_KERNEL);
+	host->pmecc_smu = kzalloc((cap + 2) * (2 * cap + 1) * sizeof(int16_t),
+					GFP_KERNEL);
+	host->pmecc_mu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
+	host->pmecc_dmu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
+	host->pmecc_delta = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
+
+	if (host->pmecc_partial_syn &&
+			host->pmecc_si &&
+			host->pmecc_lmu &&
+			host->pmecc_smu &&
+			host->pmecc_mu &&
+			host->pmecc_dmu &&
+			host->pmecc_delta)
+		return 0;
+
+	/* error happened */
+	pmecc_data_free(host);
+	return -ENOMEM;
+}
+
+static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct atmel_nand_host *host = nand_chip->priv;
+	int i;
+	uint32_t value;
+
+	/* Fill odd syndromes */
+	for (i = 0; i < host->pmecc_corr_cap; i++) {
+		value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2);
+		if (i & 1)
+			value >>= 16;
+		value &= 0xffff;
+		host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value;
+	}
+}
+
+static void pmecc_substitute(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct atmel_nand_host *host = nand_chip->priv;
+	int16_t __iomem *alpha_to = host->pmecc_alpha_to;
+	int16_t __iomem *index_of = host->pmecc_index_of;
+	int16_t *partial_syn = host->pmecc_partial_syn;
+	const int cap = host->pmecc_corr_cap;
+	int16_t *si;
+	int i, j;
+
+	/* si[] is a table that holds the current syndrome value,
+	 * an element of that table belongs to the field
+	 */
+	si = host->pmecc_si;
+
+	memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1));
+
+	/* Computation 2t syndromes based on S(x) */
+	/* Odd syndromes */
+	for (i = 1; i < 2 * cap; i += 2) {
+		for (j = 0; j < host->pmecc_degree; j++) {
+			if (partial_syn[i] & ((unsigned short)0x1 << j))
+				si[i] = readw_relaxed(alpha_to + i * j) ^ si[i];
+		}
+	}
+	/* Even syndrome = (Odd syndrome) ** 2 */
+	for (i = 2, j = 1; j <= cap; i = ++j << 1) {
+		if (si[j] == 0) {
+			si[i] = 0;
+		} else {
+			int16_t tmp;
+
+			tmp = readw_relaxed(index_of + si[j]);
+			tmp = (tmp * 2) % host->pmecc_cw_len;
+			si[i] = readw_relaxed(alpha_to + tmp);
+		}
+	}
+
+	return;
+}
+
+static void pmecc_get_sigma(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct atmel_nand_host *host = nand_chip->priv;
+
+	int16_t *lmu = host->pmecc_lmu;
+	int16_t *si = host->pmecc_si;
+	int *mu = host->pmecc_mu;
+	int *dmu = host->pmecc_dmu;	/* Discrepancy */
+	int *delta = host->pmecc_delta; /* Delta order */
+	int cw_len = host->pmecc_cw_len;
+	const int16_t cap = host->pmecc_corr_cap;
+	const int num = 2 * cap + 1;
+	int16_t __iomem	*index_of = host->pmecc_index_of;
+	int16_t __iomem	*alpha_to = host->pmecc_alpha_to;
+	int i, j, k;
+	uint32_t dmu_0_count, tmp;
+	int16_t *smu = host->pmecc_smu;
+
+	/* index of largest delta */
+	int ro;
+	int largest;
+	int diff;
+
+	dmu_0_count = 0;
+
+	/* First Row */
+
+	/* Mu */
+	mu[0] = -1;
+
+	memset(smu, 0, sizeof(int16_t) * num);
+	smu[0] = 1;
+
+	/* discrepancy set to 1 */
+	dmu[0] = 1;
+	/* polynomial order set to 0 */
+	lmu[0] = 0;
+	delta[0] = (mu[0] * 2 - lmu[0]) >> 1;
+
+	/* Second Row */
+
+	/* Mu */
+	mu[1] = 0;
+	/* Sigma(x) set to 1 */
+	memset(&smu[num], 0, sizeof(int16_t) * num);
+	smu[num] = 1;
+
+	/* discrepancy set to S1 */
+	dmu[1] = si[1];
+
+	/* polynomial order set to 0 */
+	lmu[1] = 0;
+
+	delta[1] = (mu[1] * 2 - lmu[1]) >> 1;
+
+	/* Init the Sigma(x) last row */
+	memset(&smu[(cap + 1) * num], 0, sizeof(int16_t) * num);
+
+	for (i = 1; i <= cap; i++) {
+		mu[i + 1] = i << 1;
+		/* Begin Computing Sigma (Mu+1) and L(mu) */
+		/* check if discrepancy is set to 0 */
+		if (dmu[i] == 0) {
+			dmu_0_count++;
+
+			tmp = ((cap - (lmu[i] >> 1) - 1) / 2);
+			if ((cap - (lmu[i] >> 1) - 1) & 0x1)
+				tmp += 2;
+			else
+				tmp += 1;
+
+			if (dmu_0_count == tmp) {
+				for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
+					smu[(cap + 1) * num + j] =
+							smu[i * num + j];
+
+				lmu[cap + 1] = lmu[i];
+				return;
+			}
+
+			/* copy polynom */
+			for (j = 0; j <= lmu[i] >> 1; j++)
+				smu[(i + 1) * num + j] = smu[i * num + j];
+
+			/* copy previous polynom order to the next */
+			lmu[i + 1] = lmu[i];
+		} else {
+			ro = 0;
+			largest = -1;
+			/* find largest delta with dmu != 0 */
+			for (j = 0; j < i; j++) {
+				if ((dmu[j]) && (delta[j] > largest)) {
+					largest = delta[j];
+					ro = j;
+				}
+			}
+
+			/* compute difference */
+			diff = (mu[i] - mu[ro]);
+
+			/* Compute degree of the new smu polynomial */
+			if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
+				lmu[i + 1] = lmu[i];
+			else
+				lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
+
+			/* Init smu[i+1] with 0 */
+			for (k = 0; k < num; k++)
+				smu[(i + 1) * num + k] = 0;
+
+			/* Compute smu[i+1] */
+			for (k = 0; k <= lmu[ro] >> 1; k++) {
+				int16_t a, b, c;
+
+				if (!(smu[ro * num + k] && dmu[i]))
+					continue;
+				a = readw_relaxed(index_of + dmu[i]);
+				b = readw_relaxed(index_of + dmu[ro]);
+				c = readw_relaxed(index_of + smu[ro * num + k]);
+				tmp = a + (cw_len - b) + c;
+				a = readw_relaxed(alpha_to + tmp % cw_len);
+				smu[(i + 1) * num + (k + diff)] = a;
+			}
+
+			for (k = 0; k <= lmu[i] >> 1; k++)
+				smu[(i + 1) * num + k] ^= smu[i * num + k];
+		}
+
+		/* End Computing Sigma (Mu+1) and L(mu) */
+		/* In either case compute delta */
+		delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
+
+		/* Do not compute discrepancy for the last iteration */
+		if (i >= cap)
+			continue;
+
+		for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
+			tmp = 2 * (i - 1);
+			if (k == 0) {
+				dmu[i + 1] = si[tmp + 3];
+			} else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
+				int16_t a, b, c;
+				a = readw_relaxed(index_of +
+						smu[(i + 1) * num + k]);
+				b = si[2 * (i - 1) + 3 - k];
+				c = readw_relaxed(index_of + b);
+				tmp = a + c;
+				tmp %= cw_len;
+				dmu[i + 1] = readw_relaxed(alpha_to + tmp) ^
+					dmu[i + 1];
+			}
+		}
+	}
+
+	return;
+}
+
+static int pmecc_err_location(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct atmel_nand_host *host = nand_chip->priv;
+	unsigned long end_time;
+	const int cap = host->pmecc_corr_cap;
+	const int num = 2 * cap + 1;
+	int sector_size = host->pmecc_sector_size;
+	int err_nbr = 0;	/* number of errors */
+	int roots_nbr;		/* number of roots */
+	int i;
+	uint32_t val;
+	int16_t *smu = host->pmecc_smu;
+
+	pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE);
+
+	for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) {
+		pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i,
+				      smu[(cap + 1) * num + i]);
+		err_nbr++;
+	}
+
+	val = (err_nbr - 1) << 16;
+	if (sector_size == 1024)
+		val |= 1;
+
+	pmerrloc_writel(host->pmerrloc_base, ELCFG, val);
+	pmerrloc_writel(host->pmerrloc_base, ELEN,
+			sector_size * 8 + host->pmecc_degree * cap);
+
+	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+	while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
+		 & PMERRLOC_CALC_DONE)) {
+		if (unlikely(time_after(jiffies, end_time))) {
+			dev_err(host->dev, "PMECC: Timeout to calculate error location.\n");
+			return -1;
+		}
+		cpu_relax();
+	}
+
+	roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
+		& PMERRLOC_ERR_NUM_MASK) >> 8;
+	/* Number of roots == degree of smu hence <= cap */
+	if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1)
+		return err_nbr - 1;
+
+	/* Number of roots does not match the degree of smu;
+	 * unable to correct the errors */
+	return -1;
+}
+
+static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
+		int sector_num, int extra_bytes, int err_nbr)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct atmel_nand_host *host = nand_chip->priv;
+	int i = 0;
+	int byte_pos, bit_pos, sector_size, pos;
+	uint32_t tmp;
+	uint8_t err_byte;
+
+	sector_size = host->pmecc_sector_size;
+
+	while (err_nbr) {
+		tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1;
+		byte_pos = tmp / 8;
+		bit_pos  = tmp % 8;
+
+		if (byte_pos >= (sector_size + extra_bytes))
+			BUG();	/* should never happen */
+
+		if (byte_pos < sector_size) {
+			err_byte = *(buf + byte_pos);
+			*(buf + byte_pos) ^= (1 << bit_pos);
+
+			pos = sector_num * host->pmecc_sector_size + byte_pos;
+			dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+				pos, bit_pos, err_byte, *(buf + byte_pos));
+		} else {
+			/* Bit flip in OOB area */
+			tmp = sector_num * host->pmecc_bytes_per_sector
+					+ (byte_pos - sector_size);
+			err_byte = ecc[tmp];
+			ecc[tmp] ^= (1 << bit_pos);
+
+			pos = tmp + nand_chip->ecc.layout->eccpos[0];
+			dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+				pos, bit_pos, err_byte, ecc[tmp]);
+		}
+
+		i++;
+		err_nbr--;
+	}
+
+	return;
+}
+
+static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
+	u8 *ecc)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct atmel_nand_host *host = nand_chip->priv;
+	int i, err_nbr, eccbytes;
+	uint8_t *buf_pos;
+
+	eccbytes = nand_chip->ecc.bytes;
+	for (i = 0; i < eccbytes; i++)
+		if (ecc[i] != 0xff)
+			goto normal_check;
+	/* Erased page, return OK */
+	return 0;
+
+normal_check:
+	for (i = 0; i < host->pmecc_sector_number; i++) {
+		err_nbr = 0;
+		if (pmecc_stat & 0x1) {
+			buf_pos = buf + i * host->pmecc_sector_size;
+
+			pmecc_gen_syndrome(mtd, i);
+			pmecc_substitute(mtd);
+			pmecc_get_sigma(mtd);
+
+			err_nbr = pmecc_err_location(mtd);
+			if (err_nbr == -1) {
+				dev_err(host->dev, "PMECC: Too many errors\n");
+				mtd->ecc_stats.failed++;
+				return -EIO;
+			} else {
+				pmecc_correct_data(mtd, buf_pos, ecc, i,
+					host->pmecc_bytes_per_sector, err_nbr);
+				mtd->ecc_stats.corrected += err_nbr;
+			}
+		}
+		pmecc_stat >>= 1;
+	}
+
+	return 0;
+}
+
+static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
+	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+	struct atmel_nand_host *host = chip->priv;
+	int eccsize = chip->ecc.size;
+	uint8_t *oob = chip->oob_poi;
+	uint32_t *eccpos = chip->ecc.layout->eccpos;
+	uint32_t stat;
+	unsigned long end_time;
+
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+	pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG)
+		& ~PMECC_CFG_WRITE_OP) | PMECC_CFG_AUTO_ENABLE);
+
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+
+	chip->read_buf(mtd, buf, eccsize);
+	chip->read_buf(mtd, oob, mtd->oobsize);
+
+	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+	while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
+		if (unlikely(time_after(jiffies, end_time))) {
+			dev_err(host->dev, "PMECC: Timeout to get error status.\n");
+			return -EIO;
+		}
+		cpu_relax();
+	}
+
+	stat = pmecc_readl_relaxed(host->ecc, ISR);
+	if (stat != 0)
+		if (pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]) != 0)
+			return -EIO;
+
+	return 0;
+}
+
+static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
+		struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+	struct atmel_nand_host *host = chip->priv;
+	uint32_t *eccpos = chip->ecc.layout->eccpos;
+	int i, j;
+	unsigned long end_time;
+
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+
+	pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG) |
+		PMECC_CFG_WRITE_OP) & ~PMECC_CFG_AUTO_ENABLE);
+
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+
+	chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
+
+	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+	while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
+		if (unlikely(time_after(jiffies, end_time))) {
+			dev_err(host->dev, "PMECC: Timeout to get ECC value.\n");
+			return -EIO;
+		}
+		cpu_relax();
+	}
+
+	for (i = 0; i < host->pmecc_sector_number; i++) {
+		for (j = 0; j < host->pmecc_bytes_per_sector; j++) {
+			int pos;
+
+			pos = i * host->pmecc_bytes_per_sector + j;
+			chip->oob_poi[eccpos[pos]] =
+				pmecc_readb_ecc_relaxed(host->ecc, i, j);
+		}
+	}
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+static void atmel_pmecc_core_init(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct atmel_nand_host *host = nand_chip->priv;
+	uint32_t val = 0;
+	struct nand_ecclayout *ecc_layout;
+
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+
+	switch (host->pmecc_corr_cap) {
+	case 2:
+		val = PMECC_CFG_BCH_ERR2;
+		break;
+	case 4:
+		val = PMECC_CFG_BCH_ERR4;
+		break;
+	case 8:
+		val = PMECC_CFG_BCH_ERR8;
+		break;
+	case 12:
+		val = PMECC_CFG_BCH_ERR12;
+		break;
+	case 24:
+		val = PMECC_CFG_BCH_ERR24;
+		break;
+	}
+
+	if (host->pmecc_sector_size == 512)
+		val |= PMECC_CFG_SECTOR512;
+	else if (host->pmecc_sector_size == 1024)
+		val |= PMECC_CFG_SECTOR1024;
+
+	switch (host->pmecc_sector_number) {
+	case 1:
+		val |= PMECC_CFG_PAGE_1SECTOR;
+		break;
+	case 2:
+		val |= PMECC_CFG_PAGE_2SECTORS;
+		break;
+	case 4:
+		val |= PMECC_CFG_PAGE_4SECTORS;
+		break;
+	case 8:
+		val |= PMECC_CFG_PAGE_8SECTORS;
+		break;
+	}
+
+	val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE
+		| PMECC_CFG_AUTO_DISABLE);
+	pmecc_writel(host->ecc, CFG, val);
+
+	ecc_layout = nand_chip->ecc.layout;
+	pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
+	pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]);
+	pmecc_writel(host->ecc, EADDR,
+			ecc_layout->eccpos[ecc_layout->eccbytes - 1]);
+	/* See datasheet about PMECC Clock Control Register */
+	pmecc_writel(host->ecc, CLK, 2);
+	pmecc_writel(host->ecc, IDR, 0xff);
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+}
+
+static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
+					 struct atmel_nand_host *host)
+{
+	struct mtd_info *mtd = &host->mtd;
+	struct nand_chip *nand_chip = &host->nand_chip;
+	struct resource *regs, *regs_pmerr, *regs_rom;
+	int cap, sector_size, err_no;
+
+	cap = host->pmecc_corr_cap;
+	sector_size = host->pmecc_sector_size;
+	dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
+		 cap, sector_size);
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!regs) {
+		dev_warn(host->dev,
+			"Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n");
+		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		return 0;
+	}
+
+	host->ecc = ioremap(regs->start, resource_size(regs));
+	if (host->ecc == NULL) {
+		dev_err(host->dev, "ioremap failed\n");
+		err_no = -EIO;
+		goto err_pmecc_ioremap;
+	}
+
+	regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+	if (regs_pmerr && regs_rom) {
+		host->pmerrloc_base = ioremap(regs_pmerr->start,
+			resource_size(regs_pmerr));
+		host->pmecc_rom_base = ioremap(regs_rom->start,
+			resource_size(regs_rom));
+	}
+
+	if (!host->pmerrloc_base || !host->pmecc_rom_base) {
+		dev_err(host->dev,
+			"Can not get I/O resource for PMECC ERRLOC controller or ROM!\n");
+		err_no = -EIO;
+		goto err_pmloc_ioremap;
+	}
+
+	/* ECC is calculated for the whole page (1 step) */
+	nand_chip->ecc.size = mtd->writesize;
+
+	/* set ECC page size and oob layout */
+	switch (mtd->writesize) {
+	case 2048:
+		host->pmecc_degree = PMECC_GF_DIMENSION_13;
+		host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
+		host->pmecc_sector_number = mtd->writesize / sector_size;
+		host->pmecc_bytes_per_sector = pmecc_get_ecc_bytes(
+			cap, sector_size);
+		host->pmecc_alpha_to = pmecc_get_alpha_to(host);
+		host->pmecc_index_of = host->pmecc_rom_base +
+			host->pmecc_lookup_table_offset;
+
+		nand_chip->ecc.steps = 1;
+		nand_chip->ecc.strength = cap;
+		nand_chip->ecc.bytes = host->pmecc_bytes_per_sector *
+				       host->pmecc_sector_number;
+		if (nand_chip->ecc.bytes > mtd->oobsize - 2) {
+			dev_err(host->dev, "No room for ECC bytes\n");
+			err_no = -EINVAL;
+			goto err_no_ecc_room;
+		}
+		pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
+					mtd->oobsize,
+					nand_chip->ecc.bytes);
+		nand_chip->ecc.layout = &atmel_pmecc_oobinfo;
+		break;
+	case 512:
+	case 1024:
+	case 4096:
+		/* TODO */
+		dev_warn(host->dev,
+			"Unsupported page size for PMECC, use Software ECC\n");
+	default:
+		/* page size not handled by HW ECC */
+		/* switching back to soft ECC */
+		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		return 0;
+	}
+
+	/* Allocate data for PMECC computation */
+	err_no = pmecc_data_alloc(host);
+	if (err_no) {
+		dev_err(host->dev,
+				"Cannot allocate memory for PMECC computation!\n");
+		goto err_pmecc_data_alloc;
+	}
+
+	nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
+	nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;
+
+	atmel_pmecc_core_init(mtd);
+
+	return 0;
+
+err_pmecc_data_alloc:
+err_no_ecc_room:
+err_pmloc_ioremap:
+	iounmap(host->ecc);
+	if (host->pmerrloc_base)
+		iounmap(host->pmerrloc_base);
+	if (host->pmecc_rom_base)
+		iounmap(host->pmecc_rom_base);
+err_pmecc_ioremap:
+	return err_no;
+}
+
+/*
  * Calculate HW ECC
  *
  * function called after a write
@@ -481,7 +1208,8 @@
 static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
 					 struct device_node *np)
 {
-	u32 val;
+	u32 val, table_offset;
+	u32 offset[2];
 	int ecc_mode;
 	struct atmel_nand_data *board = &host->board;
 	enum of_gpio_flags flags;
@@ -517,6 +1245,50 @@
 	board->enable_pin = of_get_gpio(np, 1);
 	board->det_pin = of_get_gpio(np, 2);
 
+	host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
+
+	if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc)
+		return 0;	/* Not using PMECC */
+
+	/* use PMECC, get correction capability, sector size and lookup
+	 * table offset.
+	 */
+	if (of_property_read_u32(np, "atmel,pmecc-cap", &val) != 0) {
+		dev_err(host->dev, "Cannot decide PMECC Capability\n");
+		return -EINVAL;
+	} else if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
+	    (val != 24)) {
+		dev_err(host->dev,
+			"Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+			val);
+		return -EINVAL;
+	}
+	host->pmecc_corr_cap = (u8)val;
+
+	if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) != 0) {
+		dev_err(host->dev, "Cannot decide PMECC Sector Size\n");
+		return -EINVAL;
+	} else if ((val != 512) && (val != 1024)) {
+		dev_err(host->dev,
+			"Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+			val);
+		return -EINVAL;
+	}
+	host->pmecc_sector_size = (u16)val;
+
+	if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
+			offset, 2) != 0) {
+		dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
+		return -EINVAL;
+	}
+	table_offset = host->pmecc_sector_size == 512 ? offset[0] : offset[1];
+
+	if (!table_offset) {
+		dev_err(host->dev, "Invalid PMECC lookup table offset\n");
+		return -EINVAL;
+	}
+	host->pmecc_lookup_table_offset = table_offset;
+
 	return 0;
 }
 #else
@@ -527,6 +1299,66 @@
 }
 #endif
 
+static int __init atmel_hw_nand_init_params(struct platform_device *pdev,
+					 struct atmel_nand_host *host)
+{
+	struct mtd_info *mtd = &host->mtd;
+	struct nand_chip *nand_chip = &host->nand_chip;
+	struct resource		*regs;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!regs) {
+		dev_err(host->dev,
+			"Can't get I/O resource regs, use software ECC\n");
+		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		return 0;
+	}
+
+	host->ecc = ioremap(regs->start, resource_size(regs));
+	if (host->ecc == NULL) {
+		dev_err(host->dev, "ioremap failed\n");
+		return -EIO;
+	}
+
+	/* ECC is calculated for the whole page (1 step) */
+	nand_chip->ecc.size = mtd->writesize;
+
+	/* set ECC page size and oob layout */
+	switch (mtd->writesize) {
+	case 512:
+		nand_chip->ecc.layout = &atmel_oobinfo_small;
+		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
+		break;
+	case 1024:
+		nand_chip->ecc.layout = &atmel_oobinfo_large;
+		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
+		break;
+	case 2048:
+		nand_chip->ecc.layout = &atmel_oobinfo_large;
+		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
+		break;
+	case 4096:
+		nand_chip->ecc.layout = &atmel_oobinfo_large;
+		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
+		break;
+	default:
+		/* page size not handled by HW ECC */
+		/* switching back to soft ECC */
+		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		return 0;
+	}
+
+	/* set up for HW ECC */
+	nand_chip->ecc.calculate = atmel_nand_calculate;
+	nand_chip->ecc.correct = atmel_nand_correct;
+	nand_chip->ecc.hwctl = atmel_nand_hwctl;
+	nand_chip->ecc.read_page = atmel_nand_read_page;
+	nand_chip->ecc.bytes = 4;
+	nand_chip->ecc.strength = 1;
+
+	return 0;
+}
+
 /*
  * Probe for the NAND device.
  */
@@ -535,7 +1367,6 @@
 	struct atmel_nand_host *host;
 	struct mtd_info *mtd;
 	struct nand_chip *nand_chip;
-	struct resource *regs;
 	struct resource *mem;
 	struct mtd_part_parser_data ppdata = {};
 	int res;
@@ -587,29 +1418,6 @@
 		nand_chip->dev_ready = atmel_nand_device_ready;
 
 	nand_chip->ecc.mode = host->board.ecc_mode;
-
-	regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!regs && nand_chip->ecc.mode == NAND_ECC_HW) {
-		printk(KERN_ERR "atmel_nand: can't get I/O resource "
-				"regs\nFalling back on software ECC\n");
-		nand_chip->ecc.mode = NAND_ECC_SOFT;
-	}
-
-	if (nand_chip->ecc.mode == NAND_ECC_HW) {
-		host->ecc = ioremap(regs->start, resource_size(regs));
-		if (host->ecc == NULL) {
-			printk(KERN_ERR "atmel_nand: ioremap failed\n");
-			res = -EIO;
-			goto err_ecc_ioremap;
-		}
-		nand_chip->ecc.calculate = atmel_nand_calculate;
-		nand_chip->ecc.correct = atmel_nand_correct;
-		nand_chip->ecc.hwctl = atmel_nand_hwctl;
-		nand_chip->ecc.read_page = atmel_nand_read_page;
-		nand_chip->ecc.bytes = 4;
-		nand_chip->ecc.strength = 1;
-	}
-
 	nand_chip->chip_delay = 20;		/* 20us command delay time */
 
 	if (host->board.bus_width_16)	/* 16-bit bus width */
@@ -661,40 +1469,13 @@
 	}
 
 	if (nand_chip->ecc.mode == NAND_ECC_HW) {
-		/* ECC is calculated for the whole page (1 step) */
-		nand_chip->ecc.size = mtd->writesize;
+		if (host->has_pmecc)
+			res = atmel_pmecc_nand_init_params(pdev, host);
+		else
+			res = atmel_hw_nand_init_params(pdev, host);
 
-		/* set ECC page size and oob layout */
-		switch (mtd->writesize) {
-		case 512:
-			nand_chip->ecc.layout = &atmel_oobinfo_small;
-			ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
-			break;
-		case 1024:
-			nand_chip->ecc.layout = &atmel_oobinfo_large;
-			ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
-			break;
-		case 2048:
-			nand_chip->ecc.layout = &atmel_oobinfo_large;
-			ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
-			break;
-		case 4096:
-			nand_chip->ecc.layout = &atmel_oobinfo_large;
-			ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
-			break;
-		default:
-			/* page size not handled by HW ECC */
-			/* switching back to soft ECC */
-			nand_chip->ecc.mode = NAND_ECC_SOFT;
-			nand_chip->ecc.calculate = NULL;
-			nand_chip->ecc.correct = NULL;
-			nand_chip->ecc.hwctl = NULL;
-			nand_chip->ecc.read_page = NULL;
-			nand_chip->ecc.postpad = 0;
-			nand_chip->ecc.prepad = 0;
-			nand_chip->ecc.bytes = 0;
-			break;
-		}
+		if (res != 0)
+			goto err_hw_ecc;
 	}
 
 	/* second phase scan */
@@ -711,15 +1492,23 @@
 		return res;
 
 err_scan_tail:
+	if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
+		pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+		pmecc_data_free(host);
+	}
+	if (host->ecc)
+		iounmap(host->ecc);
+	if (host->pmerrloc_base)
+		iounmap(host->pmerrloc_base);
+	if (host->pmecc_rom_base)
+		iounmap(host->pmecc_rom_base);
+err_hw_ecc:
 err_scan_ident:
 err_no_card:
 	atmel_nand_disable(host);
 	platform_set_drvdata(pdev, NULL);
 	if (host->dma_chan)
 		dma_release_channel(host->dma_chan);
-	if (host->ecc)
-		iounmap(host->ecc);
-err_ecc_ioremap:
 	iounmap(host->io_base);
 err_nand_ioremap:
 	kfree(host);
@@ -738,8 +1527,19 @@
 
 	atmel_nand_disable(host);
 
+	if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
+		pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+		pmerrloc_writel(host->pmerrloc_base, ELDIS,
+				PMERRLOC_DISABLE);
+		pmecc_data_free(host);
+	}
+
 	if (host->ecc)
 		iounmap(host->ecc);
+	if (host->pmecc_rom_base)
+		iounmap(host->pmecc_rom_base);
+	if (host->pmerrloc_base)
+		iounmap(host->pmerrloc_base);
 
 	if (host->dma_chan)
 		dma_release_channel(host->dma_chan);
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index 578c776..8a1e9a6 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -3,7 +3,7 @@
  * Based on AT91SAM9260 datasheet revision B.
  *
  * Copyright (C) 2007 Andrew Victor
- * Copyright (C) 2007 Atmel Corporation.
+ * Copyright (C) 2007 - 2012 Atmel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -36,4 +36,116 @@
 #define ATMEL_ECC_NPR		0x10			/* NParity register */
 #define		ATMEL_ECC_NPARITY	(0xffff << 0)		/* NParity */
 
+/* PMECC Register Definitions */
+#define ATMEL_PMECC_CFG			0x000	/* Configuration Register */
+#define		PMECC_CFG_BCH_ERR2		(0 << 0)
+#define		PMECC_CFG_BCH_ERR4		(1 << 0)
+#define		PMECC_CFG_BCH_ERR8		(2 << 0)
+#define		PMECC_CFG_BCH_ERR12		(3 << 0)
+#define		PMECC_CFG_BCH_ERR24		(4 << 0)
+
+#define		PMECC_CFG_SECTOR512		(0 << 4)
+#define		PMECC_CFG_SECTOR1024		(1 << 4)
+
+#define		PMECC_CFG_PAGE_1SECTOR		(0 << 8)
+#define		PMECC_CFG_PAGE_2SECTORS		(1 << 8)
+#define		PMECC_CFG_PAGE_4SECTORS		(2 << 8)
+#define		PMECC_CFG_PAGE_8SECTORS		(3 << 8)
+
+#define		PMECC_CFG_READ_OP		(0 << 12)
+#define		PMECC_CFG_WRITE_OP		(1 << 12)
+
+#define		PMECC_CFG_SPARE_ENABLE		(1 << 16)
+#define		PMECC_CFG_SPARE_DISABLE		(0 << 16)
+
+#define		PMECC_CFG_AUTO_ENABLE		(1 << 20)
+#define		PMECC_CFG_AUTO_DISABLE		(0 << 20)
+
+#define ATMEL_PMECC_SAREA		0x004	/* Spare area size */
+#define ATMEL_PMECC_SADDR		0x008	/* PMECC starting address */
+#define ATMEL_PMECC_EADDR		0x00c	/* PMECC ending address */
+#define ATMEL_PMECC_CLK			0x010	/* PMECC clock control */
+#define		PMECC_CLK_133MHZ		(2 << 0)
+
+#define ATMEL_PMECC_CTRL		0x014	/* PMECC control register */
+#define		PMECC_CTRL_RST			(1 << 0)
+#define		PMECC_CTRL_DATA			(1 << 1)
+#define		PMECC_CTRL_USER			(1 << 2)
+#define		PMECC_CTRL_ENABLE		(1 << 4)
+#define		PMECC_CTRL_DISABLE		(1 << 5)
+
+#define ATMEL_PMECC_SR			0x018	/* PMECC status register */
+#define		PMECC_SR_BUSY			(1 << 0)
+#define		PMECC_SR_ENABLE			(1 << 4)
+
+#define ATMEL_PMECC_IER			0x01c	/* PMECC interrupt enable */
+#define		PMECC_IER_ENABLE		(1 << 0)
+#define ATMEL_PMECC_IDR			0x020	/* PMECC interrupt disable */
+#define		PMECC_IER_DISABLE		(1 << 0)
+#define ATMEL_PMECC_IMR			0x024	/* PMECC interrupt mask */
+#define		PMECC_IER_MASK			(1 << 0)
+#define ATMEL_PMECC_ISR			0x028	/* PMECC interrupt status */
+#define ATMEL_PMECC_ECCx		0x040	/* PMECC ECC x */
+#define ATMEL_PMECC_REMx		0x240	/* PMECC REM x */
+
+/* PMERRLOC Register Definitions */
+#define ATMEL_PMERRLOC_ELCFG		0x000	/* Error location config */
+#define		PMERRLOC_ELCFG_SECTOR_512	(0 << 0)
+#define		PMERRLOC_ELCFG_SECTOR_1024	(1 << 0)
+#define		PMERRLOC_ELCFG_NUM_ERRORS(n)	((n) << 16)
+
+#define ATMEL_PMERRLOC_ELPRIM		0x004	/* Error location primitive */
+#define ATMEL_PMERRLOC_ELEN		0x008	/* Error location enable */
+#define ATMEL_PMERRLOC_ELDIS		0x00c	/* Error location disable */
+#define		PMERRLOC_DISABLE		(1 << 0)
+
+#define ATMEL_PMERRLOC_ELSR		0x010	/* Error location status */
+#define		PMERRLOC_ELSR_BUSY		(1 << 0)
+#define ATMEL_PMERRLOC_ELIER		0x014	/* Error location int enable */
+#define ATMEL_PMERRLOC_ELIDR		0x018	/* Error location int disable */
+#define ATMEL_PMERRLOC_ELIMR		0x01c	/* Error location int mask */
+#define ATMEL_PMERRLOC_ELISR		0x020	/* Error location int status */
+#define		PMERRLOC_ERR_NUM_MASK		(0x1f << 8)
+#define		PMERRLOC_CALC_DONE		(1 << 0)
+#define ATMEL_PMERRLOC_SIGMAx		0x028	/* Error location SIGMA x */
+#define ATMEL_PMERRLOC_ELx		0x08c	/* Error location x */
+
+/* Register access macros for PMECC */
+#define pmecc_readl_relaxed(addr, reg) \
+	readl_relaxed((addr) + ATMEL_PMECC_##reg)
+
+#define pmecc_writel(addr, reg, value) \
+	writel((value), (addr) + ATMEL_PMECC_##reg)
+
+#define pmecc_readb_ecc_relaxed(addr, sector, n) \
+	readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
+
+#define pmecc_readl_rem_relaxed(addr, sector, n) \
+	readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
+
+#define pmerrloc_readl_relaxed(addr, reg) \
+	readl_relaxed((addr) + ATMEL_PMERRLOC_##reg)
+
+#define pmerrloc_writel(addr, reg, value) \
+	writel((value), (addr) + ATMEL_PMERRLOC_##reg)
+
+#define pmerrloc_writel_sigma_relaxed(addr, n, value) \
+	writel_relaxed((value), (addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
+
+#define pmerrloc_readl_sigma_relaxed(addr, n) \
+	readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
+
+#define pmerrloc_readl_el_relaxed(addr, n) \
+	readl_relaxed((addr) + ATMEL_PMERRLOC_ELx + ((n) * 4))
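+
+/*
+ * Illustrative use of the sector-indexed accessors above (sketch only,
+ * the sector and byte numbers are made up):
+ * pmecc_readb_ecc_relaxed(host->ecc, 2, 5) reads ECC byte 5 of sector 2,
+ * i.e. a readb_relaxed() at host->ecc + ATMEL_PMECC_ECCx + 2 * 0x40 + 5
+ * (one 0x40-byte register bank per sector).
+ */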
+
+/* Galois field dimension */
+#define PMECC_GF_DIMENSION_13			13
+#define PMECC_GF_DIMENSION_14			14
+
+#define PMECC_LOOKUP_TABLE_SIZE_512		0x2000
+#define PMECC_LOOKUP_TABLE_SIZE_1024		0x4000
+
+/* Time out value for reading PMECC status register */
+#define PMECC_MAX_TIMEOUT_MS			100
+
 #endif
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
index 5914bb3..c8799a0 100644
--- a/drivers/mtd/nand/bcm_umi_bch.c
+++ b/drivers/mtd/nand/bcm_umi_bch.c
@@ -23,7 +23,7 @@
 /* ---- Private Function Prototypes -------------------------------------- */
 static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
 	struct nand_chip *chip, uint8_t *buf, int oob_required, int page);
-static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
+static int bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
 	struct nand_chip *chip, const uint8_t *buf, int oob_required);
 
 /* ---- Private Variables ------------------------------------------------ */
@@ -194,7 +194,7 @@
 *  @oob_required:	must write chip->oob_poi to OOB
 *
 ***************************************************************************/
-static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
+static int bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
 	struct nand_chip *chip, const uint8_t *buf, int oob_required)
 {
 	int sectorIdx = 0;
@@ -214,4 +214,6 @@
 	}
 
 	bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
 }
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 3f1c185..ab0caa7 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -566,11 +566,13 @@
 	return 0;
 }
 
-static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-		const uint8_t *buf, int oob_required)
+static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
+		struct nand_chip *chip,	const uint8_t *buf, int oob_required)
 {
 	bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
 	bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
 }
 
 /*
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index f3f6cfe..08248a0 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -377,7 +377,7 @@
  * @buf:	buffer to store read data
  * @oob_required:	caller expects OOB data read to chip->oob_poi
  *
- * The hw generator calculates the error syndrome automatically. Therefor
+ * The hw generator calculates the error syndrome automatically. Therefore
  * we need a special oob layout and handling.
  */
 static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -520,7 +520,7 @@
 };
 
 
-static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
+static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
 					  struct nand_chip *chip,
 					  const uint8_t *buf, int oob_required)
 {
@@ -531,6 +531,8 @@
 
 	/* Set up ECC autogeneration */
 	cafe->ctl2 |= (1<<30);
+
+	return 0;
 }
 
 static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -542,9 +544,12 @@
 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 
 	if (unlikely(raw))
-		chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
+		status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
 	else
-		chip->ecc.write_page(mtd, chip, buf, oob_required);
+		status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+	if (status < 0)
+		return status;
 
 	/*
 	 * Cached progamming disabled for now, Not sure if its worth the
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 0650aaf..e706a23 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1028,7 +1028,7 @@
 
 /* writes a page. user specifies type, and this function handles the
  * configuration details. */
-static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
 			const uint8_t *buf, bool raw_xfer)
 {
 	struct denali_nand_info *denali = mtd_to_denali(mtd);
@@ -1078,6 +1078,8 @@
 
 	denali_enable_dma(denali, false);
 	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
+
+	return 0;
 }
 
 /* NAND core entry points */
@@ -1086,24 +1088,24 @@
  * writing a page with ECC or without is similar, all the work is done
  * by write_page above.
  * */
-static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 				const uint8_t *buf, int oob_required)
 {
 	/* for regular page writes, we let HW handle all the ECC
 	 * data written to the device. */
-	write_page(mtd, chip, buf, false);
+	return write_page(mtd, chip, buf, false);
 }
 
 /* This is the callback that the NAND core calls to write a page without ECC.
  * raw access is similar to ECC page writes, so all the work is done in the
  * write_page() function above.
  */
-static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
 					const uint8_t *buf, int oob_required)
 {
 	/* for raw page writes, we want to disable ECC and simply write
 	   whatever data is in the buffer. */
-	write_page(mtd, chip, buf, true);
+	return write_page(mtd, chip, buf, true);
 }
 
 static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index a225e49..0f2ffd7 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -898,7 +898,7 @@
 	write_nop(docptr);
 }
 
-static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
+static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
 		       const uint8_t *buf, bool use_ecc)
 {
 	struct docg4_priv *doc = nand->priv;
@@ -950,15 +950,17 @@
 	write_nop(docptr);
 	writew(0, docptr + DOC_DATAEND);
 	write_nop(docptr);
+
+	return 0;
 }
 
-static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
 				 const uint8_t *buf, int oob_required)
 {
 	return write_page(mtd, nand, buf, false);
 }
 
-static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
+static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
 			     const uint8_t *buf, int oob_required)
 {
 	return write_page(mtd, nand, buf, true);
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 7842938..8143873 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -766,11 +766,13 @@
 /* ECC will be calculated automatically, and errors will be detected in
  * waitfunc.
  */
-static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 				const uint8_t *buf, int oob_required)
 {
 	fsl_elbc_write_buf(mtd, buf, mtd->writesize);
 	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
 }
 
 static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
@@ -805,7 +807,6 @@
 	chip->bbt_md = &bbt_mirror_descr;
 
 	/* set up nand options */
-	chip->options = NAND_NO_READRDY;
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 
 	chip->controller = &elbc_fcm_ctrl->controller;
@@ -916,7 +917,8 @@
 	elbc_fcm_ctrl->chips[bank] = priv;
 	priv->bank = bank;
 	priv->ctrl = fsl_lbc_ctrl_dev;
-	priv->dev = dev;
+	priv->dev = &pdev->dev;
+	dev_set_drvdata(priv->dev, priv);
 
 	priv->vbase = ioremap(res.start, resource_size(&res));
 	if (!priv->vbase) {
@@ -963,11 +965,10 @@
 
 static int fsl_elbc_nand_remove(struct platform_device *pdev)
 {
-	int i;
 	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
-	for (i = 0; i < MAX_BANKS; i++)
-		if (elbc_fcm_ctrl->chips[i])
-			fsl_elbc_chip_remove(elbc_fcm_ctrl->chips[i]);
+	struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+
+	fsl_elbc_chip_remove(priv);
 
 	mutex_lock(&fsl_elbc_nand_mutex);
 	elbc_fcm_ctrl->counter--;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 9602c1b..1f71b54 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -721,11 +721,13 @@
 /* ECC will be calculated automatically, and errors will be detected in
  * waitfunc.
  */
-static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 			       const uint8_t *buf, int oob_required)
 {
 	fsl_ifc_write_buf(mtd, buf, mtd->writesize);
 	fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
 }
 
 static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
@@ -805,7 +807,6 @@
 	out_be32(&ifc->ifc_nand.ncfgr, 0x0);
 
 	/* set up nand options */
-	chip->options = NAND_NO_READRDY;
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 
 
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index a6cad5c..9da9ee8 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -27,6 +27,7 @@
 #include <linux/pinctrl/consumer.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_mtd.h>
 #include "gpmi-nand.h"
 
 /* add our owner bbt descriptor */
@@ -930,7 +931,7 @@
 	return ret;
 }
 
-static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 				const uint8_t *buf, int oob_required)
 {
 	struct gpmi_nand_data *this = chip->priv;
@@ -972,7 +973,7 @@
 				&payload_virt, &payload_phys);
 		if (ret) {
 			pr_err("Inadequate payload DMA buffer\n");
-			return;
+			return 0;
 		}
 
 		ret = send_page_prepare(this,
@@ -1002,6 +1003,8 @@
 				nfc_geo->payload_size,
 				payload_virt, payload_phys);
 	}
+
+	return 0;
 }
 
 /*
@@ -1064,6 +1067,9 @@
  * ECC-based or raw view of the page is implicit in which function it calls
  * (there is a similar pair of ECC-based/raw functions for writing).
  *
+ * FIXME: The following paragraph is incorrect, now that there exist
+ * ecc.read_oob_raw and ecc.write_oob_raw functions.
+ *
  * Since MTD assumes the OOB is not covered by ECC, there is no pair of
  * ECC-based/raw functions for reading or or writing the OOB. The fact that the
  * caller wants an ECC-based or raw view of the page is not propagated down to
@@ -1436,6 +1442,7 @@
 	/* Adjust the ECC strength according to the chip. */
 	this->nand.ecc.strength = this->bch_geometry.ecc_strength;
 	this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
+	this->mtd.bitflip_threshold = this->bch_geometry.ecc_strength;
 
 	/* NAND boot init, depends on the gpmi_set_geometry(). */
 	return nand_boot_init(this);
@@ -1497,6 +1504,8 @@
 	chip->ecc.size		= 1;
 	chip->ecc.strength	= 8;
 	chip->ecc.layout	= &gpmi_hw_ecclayout;
+	if (of_get_nand_on_flash_bbt(this->dev->of_node))
+		chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
 
 	/* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
 	this->bch_geometry.payload_size = 1024;
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
new file mode 100644
index 0000000..1cf3593
--- /dev/null
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -0,0 +1,925 @@
+/*
+ * Driver for NAND MLC Controller in LPC32xx
+ *
+ * Author: Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 WORK Microwave GmbH
+ * Copyright © 2011, 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * NAND Flash Controller Operation:
+ * - Read: Auto Decode
+ * - Write: Auto Encode
+ * - Tested Page Sizes: 2048, 4096
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+#include <linux/of_gpio.h>
+#include <linux/amba/pl08x.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+
+#define DRV_NAME "lpc32xx_mlc"
+
+/**********************************************************************
+* MLC NAND controller register offsets
+**********************************************************************/
+
+#define MLC_BUFF(x)			(x + 0x00000)
+#define MLC_DATA(x)			(x + 0x08000)
+#define MLC_CMD(x)			(x + 0x10000)
+#define MLC_ADDR(x)			(x + 0x10004)
+#define MLC_ECC_ENC_REG(x)		(x + 0x10008)
+#define MLC_ECC_DEC_REG(x)		(x + 0x1000C)
+#define MLC_ECC_AUTO_ENC_REG(x)		(x + 0x10010)
+#define MLC_ECC_AUTO_DEC_REG(x)		(x + 0x10014)
+#define MLC_RPR(x)			(x + 0x10018)
+#define MLC_WPR(x)			(x + 0x1001C)
+#define MLC_RUBP(x)			(x + 0x10020)
+#define MLC_ROBP(x)			(x + 0x10024)
+#define MLC_SW_WP_ADD_LOW(x)		(x + 0x10028)
+#define MLC_SW_WP_ADD_HIG(x)		(x + 0x1002C)
+#define MLC_ICR(x)			(x + 0x10030)
+#define MLC_TIME_REG(x)			(x + 0x10034)
+#define MLC_IRQ_MR(x)			(x + 0x10038)
+#define MLC_IRQ_SR(x)			(x + 0x1003C)
+#define MLC_LOCK_PR(x)			(x + 0x10044)
+#define MLC_ISR(x)			(x + 0x10048)
+#define MLC_CEH(x)			(x + 0x1004C)
+
+/**********************************************************************
+* MLC_CMD bit definitions
+**********************************************************************/
+#define MLCCMD_RESET			0xFF
+
+/**********************************************************************
+* MLC_ICR bit definitions
+**********************************************************************/
+#define MLCICR_WPROT			(1 << 3)
+#define MLCICR_LARGEBLOCK		(1 << 2)
+#define MLCICR_LONGADDR			(1 << 1)
+#define MLCICR_16BIT			(1 << 0)  /* unsupported by LPC32x0! */
+
+/**********************************************************************
+* MLC_TIME_REG bit definitions
+**********************************************************************/
+#define MLCTIMEREG_TCEA_DELAY(n)	(((n) & 0x03) << 24)
+#define MLCTIMEREG_BUSY_DELAY(n)	(((n) & 0x1F) << 19)
+#define MLCTIMEREG_NAND_TA(n)		(((n) & 0x07) << 16)
+#define MLCTIMEREG_RD_HIGH(n)		(((n) & 0x0F) << 12)
+#define MLCTIMEREG_RD_LOW(n)		(((n) & 0x0F) << 8)
+#define MLCTIMEREG_WR_HIGH(n)		(((n) & 0x0F) << 4)
+#define MLCTIMEREG_WR_LOW(n)		(((n) & 0x0F) << 0)
+
+/**********************************************************************
+* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
+**********************************************************************/
+#define MLCIRQ_NAND_READY		(1 << 5)
+#define MLCIRQ_CONTROLLER_READY		(1 << 4)
+#define MLCIRQ_DECODE_FAILURE		(1 << 3)
+#define MLCIRQ_DECODE_ERROR		(1 << 2)
+#define MLCIRQ_ECC_READY		(1 << 1)
+#define MLCIRQ_WRPROT_FAULT		(1 << 0)
+
+/**********************************************************************
+* MLC_LOCK_PR bit definitions
+**********************************************************************/
+#define MLCLOCKPR_MAGIC			0xA25E
+
+/**********************************************************************
+* MLC_ISR bit definitions
+**********************************************************************/
+#define MLCISR_DECODER_FAILURE		(1 << 6)
+#define MLCISR_ERRORS			((1 << 4) | (1 << 5))
+#define MLCISR_ERRORS_DETECTED		(1 << 3)
+#define MLCISR_ECC_READY		(1 << 2)
+#define MLCISR_CONTROLLER_READY		(1 << 1)
+#define MLCISR_NAND_READY		(1 << 0)
+
+/**********************************************************************
+* MLC_CEH bit definitions
+**********************************************************************/
+#define MLCCEH_NORMAL			(1 << 0)
+
+struct lpc32xx_nand_cfg_mlc {
+	uint32_t tcea_delay;
+	uint32_t busy_delay;
+	uint32_t nand_ta;
+	uint32_t rd_high;
+	uint32_t rd_low;
+	uint32_t wr_high;
+	uint32_t wr_low;
+	int wp_gpio;
+	struct mtd_partition *parts;
+	unsigned num_parts;
+};
+
+static struct nand_ecclayout lpc32xx_nand_oob = {
+	.eccbytes = 40,
+	.eccpos = { 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
+		   22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+		   38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+		   54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
+	.oobfree = {
+		{ .offset = 0,
+		  .length = 6, },
+		{ .offset = 16,
+		  .length = 6, },
+		{ .offset = 32,
+		  .length = 6, },
+		{ .offset = 48,
+		  .length = 6, },
+		},
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt = {
+	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+		   NAND_BBT_WRITE,
+	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
+	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+		   NAND_BBT_WRITE,
+	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+struct lpc32xx_nand_host {
+	struct nand_chip	nand_chip;
+	struct clk		*clk;
+	struct mtd_info		mtd;
+	void __iomem		*io_base;
+	int			irq;
+	struct lpc32xx_nand_cfg_mlc	*ncfg;
+	struct completion       comp_nand;
+	struct completion       comp_controller;
+	uint32_t llptr;
+	/*
+	 * Physical address of the OOB data buffer
+	 */
+	dma_addr_t		oob_buf_phy;
+	/*
+	 * Virtual address of the OOB data buffer
+	 */
+	uint8_t			*oob_buf;
+	/* Physical address of DMA base address */
+	dma_addr_t		io_base_phy;
+
+	struct completion	comp_dma;
+	struct dma_chan		*dma_chan;
+	struct dma_slave_config	dma_slave_config;
+	struct scatterlist	sgl;
+	uint8_t			*dma_buf;
+	uint8_t			*dummy_buf;
+	int			mlcsubpages; /* number of 512-byte subpages */
+};
+
+/*
+ * Activate/Deactivate DMA Operation:
+ *
+ * Using the PL080 DMA Controller for transferring the 512-byte subpages
+ * instead of doing readl() / writel() in a loop slows it down significantly.
+ * Measurements via getnstimeofday() upon 512-byte subpage reads reveal:
+ *
+ * - readl() of 128 x 32 bits in a loop: ~20us
+ * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
+ * - DMA read of 512 bytes (32 bit, no bursts): ~100us
+ *
+ * This applies to the transfer itself. In the DMA case: only the
+ * wait_for_completion() (DMA setup _not_ included).
+ *
+ * Note that the 512-byte subpage transfer is done directly from/to a
+ * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for
+ * a 2048-byte page) is spent waiting for the NAND IRQ anyway (the NAND
+ * controller transferring data between its internal buffer and the NAND
+ * chip).
+ *
+ * Therefore, using the PL080 DMA is disabled by default, for now.
+ *
+ */
+static int use_dma;
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+	uint32_t clkrate, tmp;
+
+	/* Reset MLC controller */
+	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
+	udelay(1000);
+
+	/* Get base clock for MLC block */
+	clkrate = clk_get_rate(host->clk);
+	if (clkrate == 0)
+		clkrate = 104000000;
+
+	/* Unlock MLC_ICR
+	 * (among others, will be locked again automatically) */
+	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+	/* Configure MLC Controller: Large Block, 5 Byte Address */
+	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
+	writel(tmp, MLC_ICR(host->io_base));
+
+	/* Unlock MLC_TIME_REG
+	 * (among others, will be locked again automatically) */
+	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+	/* Compute clock setup values, see LPC and NAND manual */
+	tmp = 0;
+	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
+	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
+	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
+	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
+	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
+	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
+	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
+	writel(tmp, MLC_TIME_REG(host->io_base));
+
+	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
+	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
+			MLC_IRQ_MR(host->io_base));
+
+	/* Normal nCE operation: nCE controlled by controller */
+	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+				  unsigned int ctrl)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct lpc32xx_nand_host *host = nand_chip->priv;
+
+	if (cmd != NAND_CMD_NONE) {
+		if (ctrl & NAND_CLE)
+			writel(cmd, MLC_CMD(host->io_base));
+		else
+			writel(cmd, MLC_ADDR(host->io_base));
+	}
+}
+
+/*
+ * Read Device Ready (NAND device _and_ controller ready)
+ */
+static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct lpc32xx_nand_host *host = nand_chip->priv;
+
+	if ((readb(MLC_ISR(host->io_base)) &
+	     (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
+	    (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
+		return  1;
+
+	return 0;
+}
+
+static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
+{
+	uint8_t sr;
+
+	/* Clear interrupt flag by reading status */
+	sr = readb(MLC_IRQ_SR(host->io_base));
+	if (sr & MLCIRQ_NAND_READY)
+		complete(&host->comp_nand);
+	if (sr & MLCIRQ_CONTROLLER_READY)
+		complete(&host->comp_controller);
+
+	return IRQ_HANDLED;
+}
+
+static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	struct lpc32xx_nand_host *host = chip->priv;
+
+	if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
+		goto exit;
+
+	wait_for_completion(&host->comp_nand);
+
+	while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
+		/* Seems to be delayed sometimes by controller */
+		dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
+		cpu_relax();
+	}
+
+exit:
+	return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
+				       struct nand_chip *chip)
+{
+	struct lpc32xx_nand_host *host = chip->priv;
+
+	if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
+		goto exit;
+
+	wait_for_completion(&host->comp_controller);
+
+	while (!(readb(MLC_ISR(host->io_base)) &
+		 MLCISR_CONTROLLER_READY)) {
+		dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
+		cpu_relax();
+	}
+
+exit:
+	return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	lpc32xx_waitfunc_nand(mtd, chip);
+	lpc32xx_waitfunc_controller(mtd, chip);
+
+	return NAND_STATUS_READY;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+	if (gpio_is_valid(host->ncfg->wp_gpio))
+		gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+	if (gpio_is_valid(host->ncfg->wp_gpio))
+		gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+	complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
+			    enum dma_transfer_direction dir)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct lpc32xx_nand_host *host = chip->priv;
+	struct dma_async_tx_descriptor *desc;
+	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	int res;
+
+	sg_init_one(&host->sgl, mem, len);
+
+	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+			 DMA_BIDIRECTIONAL);
+	if (res != 1) {
+		dev_err(mtd->dev.parent, "Failed to map sg list\n");
+		return -ENXIO;
+	}
+	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+				       flags);
+	if (!desc) {
+		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+		goto out1;
+	}
+
+	init_completion(&host->comp_dma);
+	desc->callback = lpc32xx_dma_complete_func;
+	desc->callback_param = &host->comp_dma;
+
+	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dma_chan);
+
+	wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
+
+	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+		     DMA_BIDIRECTIONAL);
+	return 0;
+out1:
+	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+		     DMA_BIDIRECTIONAL);
+	return -ENXIO;
+}
+
+static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+			     uint8_t *buf, int oob_required, int page)
+{
+	struct lpc32xx_nand_host *host = chip->priv;
+	int i, j;
+	uint8_t *oobbuf = chip->oob_poi;
+	uint32_t mlc_isr;
+	int res;
+	uint8_t *dma_buf;
+	bool dma_mapped;
+
+	if ((void *)buf <= high_memory) {
+		dma_buf = buf;
+		dma_mapped = true;
+	} else {
+		dma_buf = host->dma_buf;
+		dma_mapped = false;
+	}
+
+	/* Writing Command and Address */
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	/* For all sub-pages */
+	for (i = 0; i < host->mlcsubpages; i++) {
+		/* Start Auto Decode Command */
+		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));
+
+		/* Wait for Controller Ready */
+		lpc32xx_waitfunc_controller(mtd, chip);
+
+		/* Check ECC Error status */
+		mlc_isr = readl(MLC_ISR(host->io_base));
+		if (mlc_isr & MLCISR_DECODER_FAILURE) {
+			mtd->ecc_stats.failed++;
+			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
+		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
+			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
+		}
+
+		/* Read 512 + 16 Bytes */
+		if (use_dma) {
+			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+					       DMA_DEV_TO_MEM);
+			if (res)
+				return res;
+		} else {
+			for (j = 0; j < (512 >> 2); j++) {
+				*((uint32_t *)(buf)) =
+					readl(MLC_BUFF(host->io_base));
+				buf += 4;
+			}
+		}
+		for (j = 0; j < (16 >> 2); j++) {
+			*((uint32_t *)(oobbuf)) =
+				readl(MLC_BUFF(host->io_base));
+			oobbuf += 4;
+		}
+	}
+
+	if (use_dma && !dma_mapped)
+		memcpy(buf, dma_buf, mtd->writesize);
+
+	return 0;
+}
+
+static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
+				       struct nand_chip *chip,
+				       const uint8_t *buf, int oob_required)
+{
+	struct lpc32xx_nand_host *host = chip->priv;
+	const uint8_t *oobbuf = chip->oob_poi;
+	uint8_t *dma_buf = (uint8_t *)buf;
+	int res;
+	int i, j;
+
+	if (use_dma && (void *)buf >= high_memory) {
+		dma_buf = host->dma_buf;
+		memcpy(dma_buf, buf, mtd->writesize);
+	}
+
+	for (i = 0; i < host->mlcsubpages; i++) {
+		/* Start Encode */
+		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
+
+		/* Write 512 + 6 Bytes to Buffer */
+		if (use_dma) {
+			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+					       DMA_MEM_TO_DEV);
+			if (res)
+				return res;
+		} else {
+			for (j = 0; j < (512 >> 2); j++) {
+				writel(*((uint32_t *)(buf)),
+				       MLC_BUFF(host->io_base));
+				buf += 4;
+			}
+		}
+		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
+		oobbuf += 4;
+		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
+		oobbuf += 12;
+
+		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
+		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));
+
+		/* Wait for Controller Ready */
+		lpc32xx_waitfunc_controller(mtd, chip);
+	}
+	return 0;
+}
+
+static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+			      const uint8_t *buf, int oob_required, int page,
+			      int cached, int raw)
+{
+	int res;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+	res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required);
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+	lpc32xx_waitfunc(mtd, chip);
+
+	return res;
+}
+
+static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			    int page)
+{
+	struct lpc32xx_nand_host *host = chip->priv;
+
+	/* Read whole page - necessary with MLC controller! */
+	lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);
+
+	return 0;
+}
+
+static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			      int page)
+{
+	/* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
+	return 0;
+}
+
+/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
+static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
+{
+	/* Always enabled! */
+}
+
+static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
+{
+	struct mtd_info *mtd = &host->mtd;
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	host->dma_chan = dma_request_channel(mask, pl08x_filter_id, "nand-mlc");
+	if (!host->dma_chan) {
+		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+		return -EBUSY;
+	}
+
+	/*
+	 * Set direction to a sensible value even if the dmaengine driver
+	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
+	 * driver criticizes it as "alien transfer direction".
+	 */
+	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
+	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	host->dma_slave_config.src_maxburst = 128;
+	host->dma_slave_config.dst_maxburst = 128;
+	/* DMA controller does flow control: */
+	host->dma_slave_config.device_fc = false;
+	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
+	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
+	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+		goto out1;
+	}
+
+	return 0;
+out1:
+	dma_release_channel(host->dma_chan);
+	return -ENXIO;
+}
+
+#ifdef CONFIG_OF
+static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
+{
+	struct lpc32xx_nand_cfg_mlc *pdata;
+	struct device_node *np = dev->of_node;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(dev, "could not allocate memory for platform data\n");
+		return NULL;
+	}
+
+	of_property_read_u32(np, "nxp,tcea-delay", &pdata->tcea_delay);
+	of_property_read_u32(np, "nxp,busy-delay", &pdata->busy_delay);
+	of_property_read_u32(np, "nxp,nand-ta", &pdata->nand_ta);
+	of_property_read_u32(np, "nxp,rd-high", &pdata->rd_high);
+	of_property_read_u32(np, "nxp,rd-low", &pdata->rd_low);
+	of_property_read_u32(np, "nxp,wr-high", &pdata->wr_high);
+	of_property_read_u32(np, "nxp,wr-low", &pdata->wr_low);
+
+	if (!pdata->tcea_delay || !pdata->busy_delay || !pdata->nand_ta ||
+	    !pdata->rd_high || !pdata->rd_low || !pdata->wr_high ||
+	    !pdata->wr_low) {
+		dev_err(dev, "chip parameters not specified correctly\n");
+		return NULL;
+	}
+
+	pdata->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+	return pdata;
+}
+#else
+static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
+{
+	return NULL;
+}
+#endif
+
+/*
+ * Probe for NAND controller
+ */
+static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
+{
+	struct lpc32xx_nand_host *host;
+	struct mtd_info *mtd;
+	struct nand_chip *nand_chip;
+	struct resource *rc;
+	int res;
+	struct mtd_part_parser_data ppdata = {};
+
+	/* Allocate memory for the device structure (and zero it) */
+	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+	if (!host) {
+		dev_err(&pdev->dev, "failed to allocate device structure.\n");
+		return -ENOMEM;
+	}
+
+	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (rc == NULL) {
+		dev_err(&pdev->dev, "No memory resource found for device!\r\n");
+		return -ENXIO;
+	}
+
+	host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
+	if (host->io_base == NULL) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		return -EIO;
+	}
+	host->io_base_phy = rc->start;
+
+	mtd = &host->mtd;
+	nand_chip = &host->nand_chip;
+	if (pdev->dev.of_node)
+		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+	else
+		host->ncfg = pdev->dev.platform_data;
+	if (!host->ncfg) {
+		dev_err(&pdev->dev, "Missing platform data\n");
+		return -ENOENT;
+	}
+	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+	if (gpio_is_valid(host->ncfg->wp_gpio) &&
+			gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
+		dev_err(&pdev->dev, "GPIO not available\n");
+		return -EBUSY;
+	}
+	lpc32xx_wp_disable(host);
+
+	nand_chip->priv = host;		/* link the private data structures */
+	mtd->priv = nand_chip;
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = &pdev->dev;
+
+	/* Get NAND clock */
+	host->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(host->clk)) {
+		dev_err(&pdev->dev, "Clock initialization failure\n");
+		res = -ENOENT;
+		goto err_exit1;
+	}
+	clk_enable(host->clk);
+
+	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+	nand_chip->dev_ready = lpc32xx_nand_device_ready;
+	nand_chip->chip_delay = 25; /* us */
+	nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
+	nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);
+
+	/* Init NAND controller */
+	lpc32xx_nand_setup(host);
+
+	platform_set_drvdata(pdev, host);
+
+	/* Initialize function pointers */
+	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
+	nand_chip->ecc.read_page_raw = lpc32xx_read_page;
+	nand_chip->ecc.read_page = lpc32xx_read_page;
+	nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
+	nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
+	nand_chip->ecc.write_oob = lpc32xx_write_oob;
+	nand_chip->ecc.read_oob = lpc32xx_read_oob;
+	nand_chip->ecc.strength = 4;
+	nand_chip->write_page = lpc32xx_write_page;
+	nand_chip->waitfunc = lpc32xx_waitfunc;
+
+	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+	nand_chip->bbt_td = &lpc32xx_nand_bbt;
+	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
+
+	/* bitflip_threshold's default is ecc_strength anyway.
+	 * Unfortunately, it is only set later, at add_mtd_device(). While it
+	 * is still 0, it causes bad block table scanning errors in
+	 * nand_scan_tail(), so prepare it here. */
+	mtd->bitflip_threshold = nand_chip->ecc.strength;
+
+	if (use_dma) {
+		res = lpc32xx_dma_setup(host);
+		if (res) {
+			res = -EIO;
+			goto err_exit2;
+		}
+	}
+
+	/*
+	 * Scan to find the existence of the device and
+	 * get the type of NAND device: SMALL block or LARGE block
+	 */
+	if (nand_scan_ident(mtd, 1, NULL)) {
+		res = -ENXIO;
+		goto err_exit3;
+	}
+
+	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
+	if (!host->dma_buf) {
+		dev_err(&pdev->dev, "Error allocating dma_buf memory\n");
+		res = -ENOMEM;
+		goto err_exit3;
+	}
+
+	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
+	if (!host->dummy_buf) {
+		dev_err(&pdev->dev, "Error allocating dummy_buf memory\n");
+		res = -ENOMEM;
+		goto err_exit3;
+	}
+
+	nand_chip->ecc.mode = NAND_ECC_HW;
+	nand_chip->ecc.size = mtd->writesize;
+	nand_chip->ecc.layout = &lpc32xx_nand_oob;
+	host->mlcsubpages = mtd->writesize / 512;
+
+	/* initially clear interrupt status */
+	readb(MLC_IRQ_SR(host->io_base));
+
+	init_completion(&host->comp_nand);
+	init_completion(&host->comp_controller);
+
+	host->irq = platform_get_irq(pdev, 0);
+	if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
+		dev_err(&pdev->dev, "failed to get platform irq\n");
+		res = -EINVAL;
+		goto err_exit3;
+	}
+
+	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
+			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
+		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
+		res = -ENXIO;
+		goto err_exit3;
+	}
+
+	/*
+	 * Fill out all the uninitialized function pointers with the defaults
+	 * and scan for a bad block table if appropriate.
+	 */
+	if (nand_scan_tail(mtd)) {
+		res = -ENXIO;
+		goto err_exit4;
+	}
+
+	mtd->name = DRV_NAME;
+
+	ppdata.of_node = pdev->dev.of_node;
+	res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
+					host->ncfg->num_parts);
+	if (!res)
+		return res;
+
+	nand_release(mtd);
+
+err_exit4:
+	free_irq(host->irq, host);
+err_exit3:
+	if (use_dma)
+		dma_release_channel(host->dma_chan);
+err_exit2:
+	clk_disable(host->clk);
+	clk_put(host->clk);
+	platform_set_drvdata(pdev, NULL);
+err_exit1:
+	lpc32xx_wp_enable(host);
+	gpio_free(host->ncfg->wp_gpio);
+
+	return res;
+}
+
+/*
+ * Remove NAND device
+ */
+static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
+{
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+	struct mtd_info *mtd = &host->mtd;
+
+	nand_release(mtd);
+	free_irq(host->irq, host);
+	if (use_dma)
+		dma_release_channel(host->dma_chan);
+
+	clk_disable(host->clk);
+	clk_put(host->clk);
+	platform_set_drvdata(pdev, NULL);
+
+	lpc32xx_wp_enable(host);
+	gpio_free(host->ncfg->wp_gpio);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+	/* Re-enable NAND clock */
+	clk_enable(host->clk);
+
+	/* Fresh init of NAND controller */
+	lpc32xx_nand_setup(host);
+
+	/* Disable write protect */
+	lpc32xx_wp_disable(host);
+
+	return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+	/* Enable write protect for safety */
+	lpc32xx_wp_enable(host);
+
+	/* Disable clock */
+	clk_disable(host->clk);
+	return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+#if defined(CONFIG_OF)
+static const struct of_device_id lpc32xx_nand_match[] = {
+	{ .compatible = "nxp,lpc3220-mlc" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+#endif
+
+static struct platform_driver lpc32xx_nand_driver = {
+	.probe		= lpc32xx_nand_probe,
+	.remove		= __devexit_p(lpc32xx_nand_remove),
+	.resume		= lpc32xx_nand_resume,
+	.suspend	= lpc32xx_nand_suspend,
+	.driver		= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(lpc32xx_nand_match),
+	},
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
new file mode 100644
index 0000000..c8c1d06
--- /dev/null
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -0,0 +1,1059 @@
+/*
+ * NXP LPC32XX NAND SLC driver
+ *
+ * Authors:
+ *    Kevin Wells <kevin.wells@nxp.com>
+ *    Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 NXP Semiconductors
+ * Copyright © 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+#include <linux/of_gpio.h>
+#include <linux/amba/pl08x.h>
+
+#define LPC32XX_MODNAME		"lpc32xx-nand"
+
+/**********************************************************************
+* SLC NAND controller register offsets
+**********************************************************************/
+
+#define SLC_DATA(x)		(x + 0x000)
+#define SLC_ADDR(x)		(x + 0x004)
+#define SLC_CMD(x)		(x + 0x008)
+#define SLC_STOP(x)		(x + 0x00C)
+#define SLC_CTRL(x)		(x + 0x010)
+#define SLC_CFG(x)		(x + 0x014)
+#define SLC_STAT(x)		(x + 0x018)
+#define SLC_INT_STAT(x)		(x + 0x01C)
+#define SLC_IEN(x)		(x + 0x020)
+#define SLC_ISR(x)		(x + 0x024)
+#define SLC_ICR(x)		(x + 0x028)
+#define SLC_TAC(x)		(x + 0x02C)
+#define SLC_TC(x)		(x + 0x030)
+#define SLC_ECC(x)		(x + 0x034)
+#define SLC_DMA_DATA(x)		(x + 0x038)
+
+/**********************************************************************
+* slc_ctrl register definitions
+**********************************************************************/
+#define SLCCTRL_SW_RESET	(1 << 2) /* Reset the NAND controller bit */
+#define SLCCTRL_ECC_CLEAR	(1 << 1) /* Reset ECC bit */
+#define SLCCTRL_DMA_START	(1 << 0) /* Start DMA channel bit */
+
+/**********************************************************************
+* slc_cfg register definitions
+**********************************************************************/
+#define SLCCFG_CE_LOW		(1 << 5) /* Force CE low bit */
+#define SLCCFG_DMA_ECC		(1 << 4) /* Enable DMA ECC bit */
+#define SLCCFG_ECC_EN		(1 << 3) /* ECC enable bit */
+#define SLCCFG_DMA_BURST	(1 << 2) /* DMA burst bit */
+#define SLCCFG_DMA_DIR		(1 << 1) /* DMA write(0)/read(1) bit */
+#define SLCCFG_WIDTH		(1 << 0) /* External device width, 0=8bit */
+
+/**********************************************************************
+* slc_stat register definitions
+**********************************************************************/
+#define SLCSTAT_DMA_FIFO	(1 << 2) /* DMA FIFO has data bit */
+#define SLCSTAT_SLC_FIFO	(1 << 1) /* SLC FIFO has data bit */
+#define SLCSTAT_NAND_READY	(1 << 0) /* NAND device is ready bit */
+
+/**********************************************************************
+* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
+**********************************************************************/
+#define SLCSTAT_INT_TC		(1 << 1) /* Transfer count bit */
+#define SLCSTAT_INT_RDY_EN	(1 << 0) /* Ready interrupt bit */
+
+/**********************************************************************
+* slc_tac register definitions
+**********************************************************************/
+/* Clock setting for RDY write sample wait time in 2*n clocks */
+#define SLCTAC_WDR(n)		(((n) & 0xF) << 28)
+/* Write pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_WWIDTH(n)	(((n) & 0xF) << 24)
+/* Write hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WHOLD(n)		(((n) & 0xF) << 20)
+/* Write setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WSETUP(n)	(((n) & 0xF) << 16)
+/* Clock setting for RDY read sample wait time in 2*n clocks */
+#define SLCTAC_RDR(n)		(((n) & 0xF) << 12)
+/* Read pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_RWIDTH(n)	(((n) & 0xF) << 8)
+/* Read hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RHOLD(n)		(((n) & 0xF) << 4)
+/* Read setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RSETUP(n)	(((n) & 0xF) << 0)
+
+/**********************************************************************
+* slc_ecc register definitions
+**********************************************************************/
+/* ECC line parity fetch macro */
+#define SLCECC_TO_LINEPAR(n)	(((n) >> 6) & 0x7FFF)
+#define SLCECC_TO_COLPAR(n)	((n) & 0x3F)
+
+/*
+ * DMA requires storage space for the DMA local buffer and the hardware ECC
+ * storage area. The DMA local buffer is only used if DMA mapping fails
+ * during runtime.
+ */
+#define LPC32XX_DMA_DATA_SIZE		4096
+#define LPC32XX_ECC_SAVE_SIZE		((4096 / 256) * 4)
+
+/* Number of bytes used for ECC stored in NAND per 256 bytes */
+#define LPC32XX_SLC_DEV_ECC_BYTES	3
+
+/*
+ * If the NAND base clock frequency can't be fetched, this frequency will be
+ * used instead as the base. This rate is used to set up the timing registers
+ * used for NAND accesses.
+ */
+#define LPC32XX_DEF_BUS_RATE		133250000
+
+/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
+#define LPC32XX_DMA_TIMEOUT		100
+
+/*
+ * NAND ECC Layout for small page NAND devices
+ * Note: For large and huge page devices, the default layouts are used
+ */
+static struct nand_ecclayout lpc32xx_nand_oob_16 = {
+	.eccbytes = 6,
+	.eccpos = {10, 11, 12, 13, 14, 15},
+	.oobfree = {
+		{ .offset = 0, .length = 4 },
+		{ .offset = 6, .length = 4 },
+	},
+};
+
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+/*
+ * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
+ * Note: Large page devices use the default layout
+ */
+static struct nand_bbt_descr bbt_smallpage_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs =	0,
+	.len = 4,
+	.veroffs = 6,
+	.maxblocks = 4,
+	.pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs =	0,
+	.len = 4,
+	.veroffs = 6,
+	.maxblocks = 4,
+	.pattern = mirror_pattern
+};
+
+/*
+ * NAND platform configuration structure
+ */
+struct lpc32xx_nand_cfg_slc {
+	uint32_t wdr_clks;
+	uint32_t wwidth;
+	uint32_t whold;
+	uint32_t wsetup;
+	uint32_t rdr_clks;
+	uint32_t rwidth;
+	uint32_t rhold;
+	uint32_t rsetup;
+	bool use_bbt;
+	int wp_gpio;
+	struct mtd_partition *parts;
+	unsigned num_parts;
+};
+
+struct lpc32xx_nand_host {
+	struct nand_chip	nand_chip;
+	struct clk		*clk;
+	struct mtd_info		mtd;
+	void __iomem		*io_base;
+	struct lpc32xx_nand_cfg_slc *ncfg;
+
+	struct completion	comp;
+	struct dma_chan		*dma_chan;
+	uint32_t		dma_buf_len;
+	struct dma_slave_config	dma_slave_config;
+	struct scatterlist	sgl;
+
+	/*
+	 * DMA and CPU addresses of ECC work area and data buffer
+	 */
+	uint32_t		*ecc_buf;
+	uint8_t			*data_buf;
+	dma_addr_t		io_base_dma;
+};
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+	uint32_t clkrate, tmp;
+
+	/* Reset SLC controller */
+	writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
+	udelay(1000);
+
+	/* Basic setup */
+	writel(0, SLC_CFG(host->io_base));
+	writel(0, SLC_IEN(host->io_base));
+	writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
+		SLC_ICR(host->io_base));
+
+	/* Get base clock for SLC block */
+	clkrate = clk_get_rate(host->clk);
+	if (clkrate == 0)
+		clkrate = LPC32XX_DEF_BUS_RATE;
+
+	/* Compute clock setup values */
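+	/*
+	 * Worked example (sketch; the numbers are illustrative and the nxp,*
+	 * timing properties are assumed to be rates in Hz, as implied by the
+	 * divisions below): with clkrate = 104000000 and wwidth = 40000000,
+	 * SLCTAC_WWIDTH(1 + 104000000 / 40000000) places 3 in the write
+	 * pulse width field of SLC_TAC.
+	 */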
+	tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
+		SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) |
+		SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) |
+		SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) |
+		SLCTAC_RDR(host->ncfg->rdr_clks) |
+		SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) |
+		SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) |
+		SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup));
+	writel(tmp, SLC_TAC(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+	unsigned int ctrl)
+{
+	uint32_t tmp;
+	struct nand_chip *chip = mtd->priv;
+	struct lpc32xx_nand_host *host = chip->priv;
+
+	/* Does CE state need to be changed? */
+	tmp = readl(SLC_CFG(host->io_base));
+	if (ctrl & NAND_NCE)
+		tmp |= SLCCFG_CE_LOW;
+	else
+		tmp &= ~SLCCFG_CE_LOW;
+	writel(tmp, SLC_CFG(host->io_base));
+
+	if (cmd != NAND_CMD_NONE) {
+		if (ctrl & NAND_CLE)
+			writel(cmd, SLC_CMD(host->io_base));
+		else
+			writel(cmd, SLC_ADDR(host->io_base));
+	}
+}
+
+/*
+ * Read the Device Ready pin
+ */
+static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct lpc32xx_nand_host *host = chip->priv;
+	int rdy = 0;
+
+	if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
+		rdy = 1;
+
+	return rdy;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+	if (gpio_is_valid(host->ncfg->wp_gpio))
+		gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+	if (gpio_is_valid(host->ncfg->wp_gpio))
+		gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+/*
+ * Prepares SLC for transfers with H/W ECC enabled
+ */
+static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode)
+{
+	/* Hardware ECC is enabled automatically in hardware as needed */
+}
+
+/*
+ * Calculates the ECC for the data
+ */
+static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd,
+				      const unsigned char *buf,
+				      unsigned char *code)
+{
+	/*
+	 * ECC is calculated automatically in hardware during syndrome read
+	 * and write operations, so it doesn't need to be calculated here.
+	 */
+	return 0;
+}
+
+/*
+ * Read a single byte from NAND device
+ */
+static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct lpc32xx_nand_host *host = chip->priv;
+
+	return (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device read without ECC
+ */
+static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct lpc32xx_nand_host *host = chip->priv;
+
+	/* Direct device read with no ECC */
+	while (len-- > 0)
+		*buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device write without ECC
+ */
+static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+				   int len)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct lpc32xx_nand_host *host = chip->priv;
+
+	/* Direct device write with no ECC */
+	while (len-- > 0)
+		writel((uint32_t)*buf++, SLC_DATA(host->io_base));
+}
+
+/*
+ * Verify the data in the buffer against the data on the device
+ */
+static int lpc32xx_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct lpc32xx_nand_host *host = chip->priv;
+	int i;
+
+	/* DATA register must be read as 32 bits or it will fail */
+	for (i = 0; i < len; i++) {
+		if (buf[i] != (uint8_t)readl(SLC_DATA(host->io_base)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the OOB data from the device without ECC, using the FIFO method
+ */
+static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
+					  struct nand_chip *chip, int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/*
+ * Write the OOB data to the device without ECC, using the FIFO method
+ */
+static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
+	struct nand_chip *chip, int page)
+{
+	int status;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	/* Send command to program the OOB data */
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	status = chip->waitfunc(mtd, chip);
+
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
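+ * Each 32-bit ECC word from the controller covers one 256-byte sub-page:
+ * it is shifted left by two, inverted, and its low 24 bits are stored in
+ * the spare area as three bytes, most significant byte first.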
+ */
+static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
+{
+	int i;
+
+	for (i = 0; i < (count * 3); i += 3) {
+		uint32_t ce = ecc[i / 3];
+		ce = ~(ce << 2) & 0xFFFFFF;
+		spare[i + 2] = (uint8_t)(ce & 0xFF);
+		ce >>= 8;
+		spare[i + 1] = (uint8_t)(ce & 0xFF);
+		ce >>= 8;
+		spare[i] = (uint8_t)(ce & 0xFF);
+	}
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+	complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
+			    void *mem, int len, enum dma_transfer_direction dir)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct lpc32xx_nand_host *host = chip->priv;
+	struct dma_async_tx_descriptor *desc;
+	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	int res;
+
+	host->dma_slave_config.direction = dir;
+	host->dma_slave_config.src_addr = dma;
+	host->dma_slave_config.dst_addr = dma;
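+	/*
+	 * Both src and dst are programmed with the FIFO address; the DMA
+	 * engine uses whichever matches the transfer direction, so this
+	 * helper serves reads and writes alike.
+	 */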
+	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	host->dma_slave_config.src_maxburst = 4;
+	host->dma_slave_config.dst_maxburst = 4;
+	/* DMA controller does flow control: */
+	host->dma_slave_config.device_fc = false;
+	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+		return -ENXIO;
+	}
+
+	sg_init_one(&host->sgl, mem, len);
+
+	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+			 DMA_BIDIRECTIONAL);
+	if (res != 1) {
+		dev_err(mtd->dev.parent, "Failed to map sg list\n");
+		return -ENXIO;
+	}
+	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+				       flags);
+	if (!desc) {
+		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+		goto out1;
+	}
+
+	init_completion(&host->comp);
+	desc->callback = lpc32xx_dma_complete_func;
+	desc->callback_param = &host->comp;
+
+	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dma_chan);
+
+	wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
+
+	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+		     DMA_BIDIRECTIONAL);
+
+	return 0;
+out1:
+	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+		     DMA_BIDIRECTIONAL);
+	return -ENXIO;
+}
+
+/*
+ * DMA read/write transfers with ECC support
+ */
+static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
+			int read)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct lpc32xx_nand_host *host = chip->priv;
+	int i, status = 0;
+	unsigned long timeout;
+	int res;
+	enum dma_transfer_direction dir =
+		read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+	uint8_t *dma_buf;
+	bool dma_mapped;
+
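+	/*
+	 * Buffers outside the kernel's direct mapping (above high_memory,
+	 * e.g. vmalloc'ed) cannot be DMA-mapped directly, so bounce them
+	 * through the preallocated data_buf; reads are copied back at the
+	 * end of this function.
+	 */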
+	if ((void *)buf <= high_memory) {
+		dma_buf = buf;
+		dma_mapped = true;
+	} else {
+		dma_buf = host->data_buf;
+		dma_mapped = false;
+		if (!read)
+			memcpy(host->data_buf, buf, mtd->writesize);
+	}
+
+	if (read) {
+		writel(readl(SLC_CFG(host->io_base)) |
+		       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+		       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
+	} else {
+		writel((readl(SLC_CFG(host->io_base)) |
+			SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
+		       ~SLCCFG_DMA_DIR,
+			SLC_CFG(host->io_base));
+	}
+
+	/* Clear initial ECC */
+	writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
+
+	/* Transfer size is data area only */
+	writel(mtd->writesize, SLC_TC(host->io_base));
+
+	/* Start transfer in the NAND controller */
+	writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
+	       SLC_CTRL(host->io_base));
+
+	for (i = 0; i < chip->ecc.steps; i++) {
+		/* Data */
+		res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
+				       dma_buf + i * chip->ecc.size,
+				       mtd->writesize / chip->ecc.steps, dir);
+		if (res)
+			return res;
+
+		/*
+		 * ECC is always transferred with a _read_ (DEV_TO_MEM),
+		 * even when writing data. The ECC of the last step is read
+		 * from the register after the loop, so skip its DMA here.
+		 */
+		if (i == chip->ecc.steps - 1)
+			break;
+		if (!read) /* ECC availability delayed on write */
+			udelay(10);
+		res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
+				       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
+		if (res)
+			return res;
+	}
+
+	/*
+	 * According to NXP, the DMA can be finished here, but the NAND
+	 * controller may still have buffered data. Since porting to the
+	 * dmaengine driver (amba-pl080), tests show the DMA FIFO always
+	 * reads as empty at this point; keep the check for safety for now.
+	 */
+	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
+		dev_warn(mtd->dev.parent, "FIFO not empty!\n");
+		timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
+		while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
+		       time_before(jiffies, timeout))
+			cpu_relax();
+		if (!time_before(jiffies, timeout)) {
+			dev_err(mtd->dev.parent, "FIFO held data too long\n");
+			status = -EIO;
+		}
+	}
+
+	/* Read last calculated ECC value */
+	if (!read)
+		udelay(10);
+	host->ecc_buf[chip->ecc.steps - 1] =
+		readl(SLC_ECC(host->io_base));
+
+	/* Flush DMA */
+	dmaengine_terminate_all(host->dma_chan);
+
+	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
+	    readl(SLC_TC(host->io_base))) {
+		/* Something is left in the FIFO, something is wrong */
+		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
+		status = -EIO;
+	}
+
+	/* Stop DMA & HW ECC */
+	writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
+	       SLC_CTRL(host->io_base));
+	writel(readl(SLC_CFG(host->io_base)) &
+	       ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+		 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
+
+	if (!dma_mapped && read)
+		memcpy(buf, host->data_buf, mtd->writesize);
+
+	return status;
+}
+
+/*
+ * Read the data and OOB data from the device, use ECC correction with the
+ * data, disable ECC for the OOB data
+ */
+static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
+					   struct nand_chip *chip, uint8_t *buf,
+					   int oob_required, int page)
+{
+	struct lpc32xx_nand_host *host = chip->priv;
+	int stat, i, status;
+	uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
+
+	/* Issue read command */
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	/* Read data and oob, calculate ECC */
+	status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
+
+	/* Get OOB data */
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	/* Convert to stored ECC format */
+	lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf,
+			     chip->ecc.steps);
+
+	/* Pointer to ECC data retrieved from NAND spare area */
+	oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0];
+
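+	/*
+	 * Correct each 256-byte chunk against the ECC bytes read from the
+	 * spare area, using the generic nand_correct_data() helper on the
+	 * hardware-computed codes.
+	 */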
+	for (i = 0; i < chip->ecc.steps; i++) {
+		stat = chip->ecc.correct(mtd, buf, oobecc,
+					 &tmpecc[i * chip->ecc.bytes]);
+		if (stat < 0)
+			mtd->ecc_stats.failed++;
+		else
+			mtd->ecc_stats.corrected += stat;
+
+		buf += chip->ecc.size;
+		oobecc += chip->ecc.bytes;
+	}
+
+	return status;
+}
+
+/*
+ * Read the data and OOB data from the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
+					       struct nand_chip *chip,
+					       uint8_t *buf, int oob_required,
+					       int page)
+{
+	/* Issue read command */
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	/* Raw reads can just use the FIFO interface */
+	chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, use ECC with the data,
+ * disable ECC for the OOB data
+ */
+static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
+					    struct nand_chip *chip,
+					    const uint8_t *buf, int oob_required)
+{
+	struct lpc32xx_nand_host *host = chip->priv;
+	uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0];
+	int error;
+
+	/* Write data, calculate ECC on outbound data */
+	error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
+	if (error)
+		return error;
+
+	/*
+	 * The calculated ECC needs some manual work done to it before
+	 * committing it to NAND. Process the calculated ECC and place
+	 * the resultant values directly into the OOB buffer.
+	 */
+	lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
+
+	/* Write ECC data to device */
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
+						struct nand_chip *chip,
+						const uint8_t *buf,
+						int oob_required)
+{
+	/* Raw writes can just use the FIFO interface */
+	chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
+{
+	struct mtd_info *mtd = &host->mtd;
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
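+	/* Ask the pl08x DMA engine for the SLC's dedicated "nand-slc" channel */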
+	host->dma_chan = dma_request_channel(mask, pl08x_filter_id, "nand-slc");
+	if (!host->dma_chan) {
+		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
+{
+	struct lpc32xx_nand_cfg_slc *pdata;
+	struct device_node *np = dev->of_node;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(dev, "could not allocate memory for platform data\n");
+		return NULL;
+	}
+
+	of_property_read_u32(np, "nxp,wdr-clks", &pdata->wdr_clks);
+	of_property_read_u32(np, "nxp,wwidth", &pdata->wwidth);
+	of_property_read_u32(np, "nxp,whold", &pdata->whold);
+	of_property_read_u32(np, "nxp,wsetup", &pdata->wsetup);
+	of_property_read_u32(np, "nxp,rdr-clks", &pdata->rdr_clks);
+	of_property_read_u32(np, "nxp,rwidth", &pdata->rwidth);
+	of_property_read_u32(np, "nxp,rhold", &pdata->rhold);
+	of_property_read_u32(np, "nxp,rsetup", &pdata->rsetup);
+
+	if (!pdata->wdr_clks || !pdata->wwidth || !pdata->whold ||
+	    !pdata->wsetup || !pdata->rdr_clks || !pdata->rwidth ||
+	    !pdata->rhold || !pdata->rsetup) {
+		dev_err(dev, "chip parameters not specified correctly\n");
+		return NULL;
+	}
+
+	pdata->use_bbt = of_get_nand_on_flash_bbt(np);
+	pdata->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+	return pdata;
+}
+#else
+static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
+{
+	return NULL;
+}
+#endif
+
+/*
+ * Probe for NAND controller
+ */
+static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
+{
+	struct lpc32xx_nand_host *host;
+	struct mtd_info *mtd;
+	struct nand_chip *chip;
+	struct resource *rc;
+	struct mtd_part_parser_data ppdata = {};
+	int res;
+
+	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (rc == NULL) {
+		dev_err(&pdev->dev, "No memory resource found for device\n");
+		return -EBUSY;
+	}
+
+	/* Allocate memory for the device structure (and zero it) */
+	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+	if (!host) {
+		dev_err(&pdev->dev, "failed to allocate device structure\n");
+		return -ENOMEM;
+	}
+	host->io_base_dma = rc->start;
+
+	host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
+	if (host->io_base == NULL) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		return -ENOMEM;
+	}
+
+	if (pdev->dev.of_node)
+		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+	else
+		host->ncfg = pdev->dev.platform_data;
+	if (!host->ncfg) {
+		dev_err(&pdev->dev, "Missing platform data\n");
+		return -ENOENT;
+	}
+	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+	if (gpio_is_valid(host->ncfg->wp_gpio) &&
+			gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
+		dev_err(&pdev->dev, "GPIO not available\n");
+		return -EBUSY;
+	}
+	lpc32xx_wp_disable(host);
+
+	mtd = &host->mtd;
+	chip = &host->nand_chip;
+	chip->priv = host;
+	mtd->priv = chip;
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = &pdev->dev;
+
+	/* Get NAND clock */
+	host->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(host->clk)) {
+		dev_err(&pdev->dev, "Clock failure\n");
+		res = -ENOENT;
+		goto err_exit1;
+	}
+	clk_enable(host->clk);
+
+	/* Set NAND IO addresses and command/ready functions */
+	chip->IO_ADDR_R = SLC_DATA(host->io_base);
+	chip->IO_ADDR_W = SLC_DATA(host->io_base);
+	chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+	chip->dev_ready = lpc32xx_nand_device_ready;
+	chip->chip_delay = 20; /* 20us command delay time */
+
+	/* Init NAND controller */
+	lpc32xx_nand_setup(host);
+
+	platform_set_drvdata(pdev, host);
+
+	/* NAND callbacks for LPC32xx SLC hardware */
+	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+	chip->read_byte = lpc32xx_nand_read_byte;
+	chip->read_buf = lpc32xx_nand_read_buf;
+	chip->write_buf = lpc32xx_nand_write_buf;
+	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
+	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
+	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
+	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
+	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
+	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
+	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
+	chip->ecc.correct = nand_correct_data;
+	chip->ecc.strength = 1;
+	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
+	chip->verify_buf = lpc32xx_verify_buf;
+
+	/*
+	 * The default for bitflip_threshold is ecc_strength anyway, but it is
+	 * only set later, in add_mtd_device(). While it is still 0 it causes
+	 * bad block table scanning errors in nand_scan_tail(), so set it here
+	 * already.
+	 */
+	mtd->bitflip_threshold = chip->ecc.strength;
+
+	/*
+	 * Allocate a large enough buffer for a single huge page plus
+	 * extra space for the spare area and ECC storage area
+	 */
+	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
+	host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
+				      GFP_KERNEL);
+	if (host->data_buf == NULL) {
+		dev_err(&pdev->dev, "Error allocating memory\n");
+		res = -ENOMEM;
+		goto err_exit2;
+	}
+
+	res = lpc32xx_nand_dma_setup(host);
+	if (res) {
+		res = -EIO;
+		goto err_exit2;
+	}
+
+	/* Find NAND device */
+	if (nand_scan_ident(mtd, 1, NULL)) {
+		res = -ENXIO;
+		goto err_exit3;
+	}
+
+	/* OOB and ECC CPU and DMA work areas */
+	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
+
+	/*
+	 * Small page FLASH has a unique OOB layout, but large and huge
+	 * page FLASH use the standard layout. Small page FLASH uses a
+	 * custom BBT marker layout.
+	 */
+	if (mtd->writesize <= 512)
+		chip->ecc.layout = &lpc32xx_nand_oob_16;
+
+	/* These sizes remain the same regardless of page size */
+	chip->ecc.size = 256;
+	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
+	chip->ecc.prepad = chip->ecc.postpad = 0;
+
+	/* Avoid extra scan if using BBT, setup BBT support */
+	if (host->ncfg->use_bbt) {
+		chip->options |= NAND_SKIP_BBTSCAN;
+		chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+		/*
+		 * Use a custom BBT marker setup for small page FLASH that
+		 * won't interfere with the ECC layout. Large and huge page
+		 * FLASH use the standard layout.
+		 */
+		if (mtd->writesize <= 512) {
+			chip->bbt_td = &bbt_smallpage_main_descr;
+			chip->bbt_md = &bbt_smallpage_mirror_descr;
+		}
+	}
+
+	/*
+	 * Fills out all the uninitialized function pointers with the defaults
+	 */
+	if (nand_scan_tail(mtd)) {
+		res = -ENXIO;
+		goto err_exit3;
+	}
+
+	/* Standard layout in FLASH for bad block tables */
+	if (host->ncfg->use_bbt) {
+		if (nand_default_bbt(mtd) < 0)
+			dev_err(&pdev->dev,
+			       "Error initializing default bad block tables\n");
+	}
+
+	mtd->name = "nxp_lpc3220_slc";
+	ppdata.of_node = pdev->dev.of_node;
+	res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
+					host->ncfg->num_parts);
+	if (!res)
+		return res;
+
+	nand_release(mtd);
+
+err_exit3:
+	dma_release_channel(host->dma_chan);
+err_exit2:
+	clk_disable(host->clk);
+	clk_put(host->clk);
+	platform_set_drvdata(pdev, NULL);
+err_exit1:
+	lpc32xx_wp_enable(host);
+	gpio_free(host->ncfg->wp_gpio);
+
+	return res;
+}
+
+/*
+ * Remove NAND device.
+ */
+static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
+{
+	uint32_t tmp;
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+	struct mtd_info *mtd = &host->mtd;
+
+	nand_release(mtd);
+	dma_release_channel(host->dma_chan);
+
+	/* Force CE high */
+	tmp = readl(SLC_CTRL(host->io_base));
+	tmp &= ~SLCCFG_CE_LOW;
+	writel(tmp, SLC_CTRL(host->io_base));
+
+	clk_disable(host->clk);
+	clk_put(host->clk);
+	platform_set_drvdata(pdev, NULL);
+	lpc32xx_wp_enable(host);
+	gpio_free(host->ncfg->wp_gpio);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+	/* Re-enable NAND clock */
+	clk_enable(host->clk);
+
+	/* Fresh init of NAND controller */
+	lpc32xx_nand_setup(host);
+
+	/* Disable write protect */
+	lpc32xx_wp_disable(host);
+
+	return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+	uint32_t tmp;
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+	/* Force CE high */
+	tmp = readl(SLC_CTRL(host->io_base));
+	tmp &= ~SLCCFG_CE_LOW;
+	writel(tmp, SLC_CTRL(host->io_base));
+
+	/* Enable write protect for safety */
+	lpc32xx_wp_enable(host);
+
+	/* Disable clock */
+	clk_disable(host->clk);
+
+	return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+#if defined(CONFIG_OF)
+static const struct of_device_id lpc32xx_nand_match[] = {
+	{ .compatible = "nxp,lpc3220-slc" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+#endif
+
+static struct platform_driver lpc32xx_nand_driver = {
+	.probe		= lpc32xx_nand_probe,
+	.remove		= __devexit_p(lpc32xx_nand_remove),
+	.resume		= lpc32xx_nand_resume,
+	.suspend	= lpc32xx_nand_suspend,
+	.driver		= {
+		.name	= LPC32XX_MODNAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(lpc32xx_nand_match),
+	},
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 6acc790..3f94e1f 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -43,8 +43,8 @@
 
 #define nfc_is_v21()		(cpu_is_mx25() || cpu_is_mx35())
 #define nfc_is_v1()		(cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
-#define nfc_is_v3_2()		(cpu_is_mx51() || cpu_is_mx53())
-#define nfc_is_v3()		nfc_is_v3_2()
+#define nfc_is_v3_2a()		cpu_is_mx51()
+#define nfc_is_v3_2b()		cpu_is_mx53()
 
 /* Addresses for NFC registers */
 #define NFC_V1_V2_BUF_SIZE		(host->regs + 0x00)
@@ -122,7 +122,7 @@
 #define NFC_V3_CONFIG2_2CMD_PHASES		(1 << 4)
 #define NFC_V3_CONFIG2_NUM_ADDR_PHASE0		(1 << 5)
 #define NFC_V3_CONFIG2_ECC_MODE_8		(1 << 6)
-#define NFC_V3_CONFIG2_PPB(x)			(((x) & 0x3) << 7)
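+/*
+ * The pages-per-block field sits at bit 7 on NFC v3.2a (i.MX51) but moved
+ * to bit 8 on v3.2b (i.MX53), hence the per-devtype shift parameter.
+ */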
+#define NFC_V3_CONFIG2_PPB(x, shift)		(((x) & 0x3) << shift)
 #define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x)	(((x) & 0x3) << 12)
 #define NFC_V3_CONFIG2_INT_MSK			(1 << 15)
 #define NFC_V3_CONFIG2_ST_CMD(x)		(((x) & 0xff) << 24)
@@ -174,6 +174,7 @@
 	int spare_len;
 	int eccbytes;
 	int eccsize;
+	int ppb_shift;
 };
 
 struct mxc_nand_host {
@@ -784,7 +785,7 @@
 	if (chip == -1) {
 		/* Disable the NFC clock */
 		if (host->clk_act) {
-			clk_disable(host->clk);
+			clk_disable_unprepare(host->clk);
 			host->clk_act = 0;
 		}
 		return;
@@ -792,7 +793,7 @@
 
 	if (!host->clk_act) {
 		/* Enable the NFC clock */
-		clk_enable(host->clk);
+		clk_prepare_enable(host->clk);
 		host->clk_act = 1;
 	}
 
@@ -1021,7 +1022,9 @@
 	}
 
 	if (mtd->writesize) {
-		config2 |= NFC_V3_CONFIG2_PPB(ffs(mtd->erasesize / mtd->writesize) - 6);
+		config2 |= NFC_V3_CONFIG2_PPB(
+				ffs(mtd->erasesize / mtd->writesize) - 6,
+				host->devtype_data->ppb_shift);
 		host->eccsize = get_eccsize(mtd);
 		if (host->eccsize == 8)
 			config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
@@ -1234,7 +1237,7 @@
 	.eccsize = 0,
 };
 
-/* v3: i.MX51, i.MX53 */
+/* v3.2a: i.MX51 */
 static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
 	.preset = preset_v3,
 	.send_cmd = send_cmd_v3,
@@ -1258,6 +1261,34 @@
 	.spare_len = 64,
 	.eccbytes = 0,
 	.eccsize = 0,
+	.ppb_shift = 7,
+};
+
+/* v3.2b: i.MX53 */
+static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
+	.preset = preset_v3,
+	.send_cmd = send_cmd_v3,
+	.send_addr = send_addr_v3,
+	.send_page = send_page_v3,
+	.send_read_id = send_read_id_v3,
+	.get_dev_status = get_dev_status_v3,
+	.check_int = check_int_v3,
+	.irq_control = irq_control_v3,
+	.get_ecc_status = get_ecc_status_v3,
+	.ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+	.ecclayout_2k = &nandv2_hw_eccoob_largepage,
+	.ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+	.select_chip = mxc_nand_select_chip_v1_v3,
+	.correct_data = mxc_nand_correct_data_v2_v3,
+	.irqpending_quirk = 0,
+	.needs_ip = 1,
+	.regs_offset = 0,
+	.spare0_offset = 0x1000,
+	.axi_offset = 0x1e00,
+	.spare_len = 64,
+	.eccbytes = 0,
+	.eccsize = 0,
+	.ppb_shift = 8,
 };
 
 #ifdef CONFIG_OF_MTD
@@ -1274,6 +1305,9 @@
 	}, {
 		.compatible = "fsl,imx51-nand",
 		.data = &imx51_nand_devtype_data,
+	}, {
+		.compatible = "fsl,imx53-nand",
+		.data = &imx53_nand_devtype_data,
 	},
 	{ /* sentinel */ }
 };
@@ -1327,8 +1361,10 @@
 			host->devtype_data = &imx27_nand_devtype_data;
 	} else if (nfc_is_v21()) {
 		host->devtype_data = &imx25_nand_devtype_data;
-	} else if (nfc_is_v3_2()) {
+	} else if (nfc_is_v3_2a()) {
 		host->devtype_data = &imx51_nand_devtype_data;
+	} else if (nfc_is_v3_2b()) {
+		host->devtype_data = &imx53_nand_devtype_data;
 	} else
 		BUG();
 
@@ -1344,8 +1380,8 @@
 	int err = 0;
 
 	/* Allocate memory for MTD device structure and private data */
-	host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
-			NAND_MAX_OOBSIZE, GFP_KERNEL);
+	host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host) +
+			NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE, GFP_KERNEL);
 	if (!host)
 		return -ENOMEM;
 
@@ -1372,34 +1408,37 @@
 	this->read_buf = mxc_nand_read_buf;
 	this->verify_buf = mxc_nand_verify_buf;
 
-	host->clk = clk_get(&pdev->dev, "nfc");
-	if (IS_ERR(host->clk)) {
-		err = PTR_ERR(host->clk);
-		goto eclk;
-	}
-
-	clk_prepare_enable(host->clk);
-	host->clk_act = 1;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		err = -ENODEV;
-		goto eres;
-	}
-
-	host->base = ioremap(res->start, resource_size(res));
-	if (!host->base) {
-		err = -ENOMEM;
-		goto eres;
-	}
-
-	host->main_area0 = host->base;
+	host->clk = devm_clk_get(&pdev->dev, "nfc");
+	if (IS_ERR(host->clk))
+		return PTR_ERR(host->clk);
 
 	err = mxcnd_probe_dt(host);
 	if (err > 0)
 		err = mxcnd_probe_pdata(host);
 	if (err < 0)
-		goto eirq;
+		return err;
+
+	if (host->devtype_data->needs_ip) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res)
+			return -ENODEV;
+		host->regs_ip = devm_request_and_ioremap(&pdev->dev, res);
+		if (!host->regs_ip)
+			return -ENOMEM;
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	} else {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	}
+
+	if (!res)
+		return -ENODEV;
+
+	host->base = devm_request_and_ioremap(&pdev->dev, res);
+	if (!host->base)
+		return -ENOMEM;
+
+	host->main_area0 = host->base;
 
 	if (host->devtype_data->regs_offset)
 		host->regs = host->base + host->devtype_data->regs_offset;
@@ -1414,19 +1453,6 @@
 	this->ecc.size = 512;
 	this->ecc.layout = host->devtype_data->ecclayout_512;
 
-	if (host->devtype_data->needs_ip) {
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-		if (!res) {
-			err = -ENODEV;
-			goto eirq;
-		}
-		host->regs_ip = ioremap(res->start, resource_size(res));
-		if (!host->regs_ip) {
-			err = -ENOMEM;
-			goto eirq;
-		}
-	}
-
 	if (host->pdata.hw_ecc) {
 		this->ecc.calculate = mxc_nand_calculate_ecc;
 		this->ecc.hwctl = mxc_nand_enable_hwecc;
@@ -1458,9 +1484,13 @@
 	 */
 	host->devtype_data->irq_control(host, 0);
 
-	err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
+	err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
+			IRQF_DISABLED, DRIVER_NAME, host);
 	if (err)
-		goto eirq;
+		return err;
+
+	clk_prepare_enable(host->clk);
+	host->clk_act = 1;
 
 	/*
 	 * Now that we "own" the interrupt make sure the interrupt mask bit is
@@ -1512,15 +1542,7 @@
 	return 0;
 
 escan:
-	free_irq(host->irq, host);
-eirq:
-	if (host->regs_ip)
-		iounmap(host->regs_ip);
-	iounmap(host->base);
-eres:
-	clk_put(host->clk);
-eclk:
-	kfree(host);
+	clk_disable_unprepare(host->clk);
 
 	return err;
 }
@@ -1529,16 +1551,9 @@
 {
 	struct mxc_nand_host *host = platform_get_drvdata(pdev);
 
-	clk_put(host->clk);
-
 	platform_set_drvdata(pdev, NULL);
 
 	nand_release(&host->mtd);
-	free_irq(host->irq, host);
-	if (host->regs_ip)
-		iounmap(host->regs_ip);
-	iounmap(host->base);
-	kfree(host);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a11253a0..ead301a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1565,14 +1565,6 @@
 					oobreadlen -= toread;
 				}
 			}
-
-			if (!(chip->options & NAND_NO_READRDY)) {
-				/* Apply delay or wait for ready/busy pin */
-				if (!chip->dev_ready)
-					udelay(chip->chip_delay);
-				else
-					nand_wait_ready(mtd);
-			}
 		} else {
 			memcpy(buf, chip->buffers->databuf + col, bytes);
 			buf += bytes;
@@ -1633,7 +1625,7 @@
 	ops.len = len;
 	ops.datbuf = buf;
 	ops.oobbuf = NULL;
-	ops.mode = 0;
+	ops.mode = MTD_OPS_PLACE_OOB;
 	ret = nand_do_read_ops(mtd, from, &ops);
 	*retlen = ops.retlen;
 	nand_release_device(mtd);
@@ -1837,14 +1829,6 @@
 		len = min(len, readlen);
 		buf = nand_transfer_oob(chip, buf, ops, len);
 
-		if (!(chip->options & NAND_NO_READRDY)) {
-			/* Apply delay or wait for ready/busy pin */
-			if (!chip->dev_ready)
-				udelay(chip->chip_delay);
-			else
-				nand_wait_ready(mtd);
-		}
-
 		readlen -= len;
 		if (!readlen)
 			break;
@@ -1927,12 +1911,14 @@
  *
  * Not for syndrome calculating ECC controllers, which use a special oob layout.
  */
-static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
 				const uint8_t *buf, int oob_required)
 {
 	chip->write_buf(mtd, buf, mtd->writesize);
 	if (oob_required)
 		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
 }
 
 /**
@@ -1944,7 +1930,7 @@
  *
  * We need a special oob layout and handling even when ECC isn't checked.
  */
-static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
+static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
 					struct nand_chip *chip,
 					const uint8_t *buf, int oob_required)
 {
@@ -1974,6 +1960,8 @@
 	size = mtd->oobsize - (oob - chip->oob_poi);
 	if (size)
 		chip->write_buf(mtd, oob, size);
+
+	return 0;
 }
 /**
  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
@@ -1982,7 +1970,7 @@
  * @buf: data buffer
  * @oob_required: must write chip->oob_poi to OOB
  */
-static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
 				  const uint8_t *buf, int oob_required)
 {
 	int i, eccsize = chip->ecc.size;
@@ -1999,7 +1987,7 @@
 	for (i = 0; i < chip->ecc.total; i++)
 		chip->oob_poi[eccpos[i]] = ecc_calc[i];
 
-	chip->ecc.write_page_raw(mtd, chip, buf, 1);
+	return chip->ecc.write_page_raw(mtd, chip, buf, 1);
 }
 
 /**
@@ -2009,7 +1997,7 @@
  * @buf: data buffer
  * @oob_required: must write chip->oob_poi to OOB
  */
-static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
 				  const uint8_t *buf, int oob_required)
 {
 	int i, eccsize = chip->ecc.size;
@@ -2029,6 +2017,8 @@
 		chip->oob_poi[eccpos[i]] = ecc_calc[i];
 
 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
 }
 
 /**
@@ -2041,7 +2031,7 @@
  * The hw generator calculates the error syndrome automatically. Therefore we
  * need a special oob layout and handling.
  */
-static void nand_write_page_syndrome(struct mtd_info *mtd,
+static int nand_write_page_syndrome(struct mtd_info *mtd,
 				    struct nand_chip *chip,
 				    const uint8_t *buf, int oob_required)
 {
@@ -2075,6 +2065,8 @@
 	i = mtd->oobsize - (oob - chip->oob_poi);
 	if (i)
 		chip->write_buf(mtd, oob, i);
+
+	return 0;
 }
 
 /**
@@ -2096,9 +2088,12 @@
 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 
 	if (unlikely(raw))
-		chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
+		status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
 	else
-		chip->ecc.write_page(mtd, chip, buf, oob_required);
+		status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+	if (status < 0)
+		return status;
 
 	/*
 	 * Cached progamming disabled for now. Not sure if it's worth the
@@ -2336,7 +2331,7 @@
 	ops.len = len;
 	ops.datbuf = (uint8_t *)buf;
 	ops.oobbuf = NULL;
-	ops.mode = 0;
+	ops.mode = MTD_OPS_PLACE_OOB;
 
 	ret = nand_do_write_ops(mtd, to, &ops);
 
@@ -2365,7 +2360,7 @@
 	ops.len = len;
 	ops.datbuf = (uint8_t *)buf;
 	ops.oobbuf = NULL;
-	ops.mode = 0;
+	ops.mode = MTD_OPS_PLACE_OOB;
 	ret = nand_do_write_ops(mtd, to, &ops);
 	*retlen = ops.retlen;
 	nand_release_device(mtd);
@@ -2915,7 +2910,6 @@
 		*busw = NAND_BUSWIDTH_16;
 
 	chip->options &= ~NAND_CHIPOPTIONS_MSK;
-	chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
 
 	pr_info("ONFI flash detected\n");
 	return 1;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 30d1319..2d1d2fa 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -22,7 +22,7 @@
  * BBT on flash. If a BBT is found then the contents are read and the memory
  * based BBT is created. If a mirrored BBT is selected then the mirror is
  * searched too and the versions are compared. If the mirror has a greater
- * version number than the mirror BBT is used to build the memory based BBT.
+ * version number, then the mirror BBT is used to build the memory based BBT.
  * If the tables are not versioned, then we "or" the bad block information.
  * If one of the BBTs is out of date or does not exist it is (re)created.
  * If no BBT exists at all then the device is scanned for factory marked
@@ -71,12 +71,9 @@
 
 static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
 {
-	int ret;
-
-	ret = memcmp(buf, td->pattern, td->len);
-	if (!ret)
-		return ret;
-	return -1;
+	if (memcmp(buf, td->pattern, td->len))
+		return -1;
+	return 0;
 }
 
 /**
@@ -390,7 +387,7 @@
 	/* Read the mirror version, if available */
 	if (md && (md->options & NAND_BBT_VERSION)) {
 		scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
-			      mtd->writesize, td);
+			      mtd->writesize, md);
 		md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
 		pr_info("Bad block table at page %d, version 0x%02X\n",
 			 md->pages[0], md->version[0]);
@@ -1274,7 +1271,7 @@
 	.pattern = mirror_pattern
 };
 
-static struct nand_bbt_descr bbt_main_no_bbt_descr = {
+static struct nand_bbt_descr bbt_main_no_oob_descr = {
 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
 		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
 		| NAND_BBT_NO_OOB,
@@ -1284,7 +1281,7 @@
 	.pattern = bbt_pattern
 };
 
-static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
+static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
 		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
 		| NAND_BBT_NO_OOB,
@@ -1355,8 +1352,8 @@
 		/* Use the default pattern descriptors */
 		if (!this->bbt_td) {
 			if (this->bbt_options & NAND_BBT_NO_OOB) {
-				this->bbt_td = &bbt_main_no_bbt_descr;
-				this->bbt_md = &bbt_mirror_no_bbt_descr;
+				this->bbt_td = &bbt_main_no_oob_descr;
+				this->bbt_md = &bbt_mirror_no_oob_descr;
 			} else {
 				this->bbt_td = &bbt_main_descr;
 				this->bbt_md = &bbt_mirror_descr;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 621b70b..e3aa274 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -70,7 +70,7 @@
 	 * These are the new chips with large page size. The pagesize and the
 	 * erasesize is determined from the extended id bytes
 	 */
-#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY)
+#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
 #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
 
 	/* 512 Megabit */
@@ -157,7 +157,7 @@
 	 * writes possible, but not implemented now
 	 */
 	{"AND 128MiB 3,3V 8-bit",	0x01, 2048, 128, 0x4000,
-	 NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
+	 NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
 
 	{NULL,}
 };
@@ -174,8 +174,9 @@
 	{NAND_MFR_STMICRO, "ST Micro"},
 	{NAND_MFR_HYNIX, "Hynix"},
 	{NAND_MFR_MICRON, "Micron"},
-	{NAND_MFR_AMD, "AMD"},
+	{NAND_MFR_AMD, "AMD/Spansion"},
 	{NAND_MFR_MACRONIX, "Macronix"},
+	{NAND_MFR_EON, "Eon"},
 	{0x0, "Unknown"}
 };
 
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 252aaef..e8a1ae9 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -681,11 +681,13 @@
 	info->state = STATE_IDLE;
 }
 
-static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
+static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
 		struct nand_chip *chip, const uint8_t *buf, int oob_required)
 {
 	chip->write_buf(mtd, buf, mtd->writesize);
 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
 }
 
 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
@@ -1005,7 +1007,6 @@
 	chip->ecc.size = host->page_size;
 	chip->ecc.strength = 1;
 
-	chip->options |= NAND_NO_READRDY;
 	if (host->reg_ndcr & NDCR_DWIDTH_M)
 		chip->options |= NAND_BUSWIDTH_16;
 
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index aa9b8a5..ed03ed2 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -43,11 +44,17 @@
 };
 
 static struct nand_ecclayout flctl_4secc_oob_64 = {
-	.eccbytes = 10,
-	.eccpos = {48, 49, 50, 51, 52, 53, 54, 55, 56, 57},
+	.eccbytes = 4 * 10,
+	.eccpos = {
+		 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
+		22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+		38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+		54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
 	.oobfree = {
-		{.offset = 60,
-		. length = 4} },
+		{.offset =  2, .length = 4},
+		{.offset = 16, .length = 6},
+		{.offset = 32, .length = 6},
+		{.offset = 48, .length = 6} },
 };
 
 static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
@@ -61,15 +68,15 @@
 
 static struct nand_bbt_descr flctl_4secc_largepage = {
 	.options = NAND_BBT_SCAN2NDPAGE,
-	.offs = 58,
+	.offs = 0,
 	.len = 2,
 	.pattern = scan_ff_pattern,
 };
 
 static void empty_fifo(struct sh_flctl *flctl)
 {
-	writel(0x000c0000, FLINTDMACR(flctl));	/* FIFO Clear */
-	writel(0x00000000, FLINTDMACR(flctl));	/* Clear Error flags */
+	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
+	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
 }
 
 static void start_translation(struct sh_flctl *flctl)
@@ -158,27 +165,56 @@
 	timeout_error(flctl, __func__);
 }
 
-static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number)
+static enum flctl_ecc_res_t wait_recfifo_ready
+		(struct sh_flctl *flctl, int sector_number)
 {
 	uint32_t timeout = LOOP_TIMEOUT_MAX;
-	int checked[4];
 	void __iomem *ecc_reg[4];
 	int i;
+	int state = FL_SUCCESS;
 	uint32_t data, size;
 
-	memset(checked, 0, sizeof(checked));
-
+	/*
+	 * First, this loop checks FLDTCNTR to see whether the OOB data is
+	 * ready to be read out. That is the case either if everything went
+	 * fine without errors, or once the bottom part of the loop has
+	 * corrected the errors (or marked them as uncorrectable) and the
+	 * controller has had time to push the data into the FIFO.
+	 */
 	while (timeout--) {
+		/* check if all is ok and we can read out the OOB */
 		size = readl(FLDTCNTR(flctl)) >> 24;
-		if (size & 0xFF)
-			return 0;	/* success */
+		if ((size & 0xFF) == 4)
+			return state;
 
-		if (readl(FL4ECCCR(flctl)) & _4ECCFA)
-			return 1;	/* can't correct */
-
-		udelay(1);
-		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND))
+		/* check if a correction code has been calculated */
+		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
+			/*
+			 * either we wait for the fifo to be filled or a
+			 * correction pattern is being generated
+			 */
+			udelay(1);
 			continue;
+		}
+
+		/* check for an uncorrectable error */
+		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
+			/* check if we face a non-empty page */
+			for (i = 0; i < 512; i++) {
+				if (flctl->done_buff[i] != 0xff) {
+					state = FL_ERROR; /* can't correct */
+					break;
+				}
+			}
+
+			if (state == FL_SUCCESS)
+				dev_dbg(&flctl->pdev->dev,
+					"reading empty sector %d, ecc error ignored\n",
+					sector_number);
+
+			writel(0, FL4ECCCR(flctl));
+			continue;
+		}
 
 		/* start error correction */
 		ecc_reg[0] = FL4ECCRESULT0(flctl);
@@ -187,28 +223,26 @@
 		ecc_reg[3] = FL4ECCRESULT3(flctl);
 
 		for (i = 0; i < 3; i++) {
+			uint8_t org;
+			int index;
+
 			data = readl(ecc_reg[i]);
-			if (data != INIT_FL4ECCRESULT_VAL && !checked[i]) {
-				uint8_t org;
-				int index;
 
-				if (flctl->page_size)
-					index = (512 * sector_number) +
-						(data >> 16);
-				else
-					index = data >> 16;
+			if (flctl->page_size)
+				index = (512 * sector_number) +
+					(data >> 16);
+			else
+				index = data >> 16;
 
-				org = flctl->done_buff[index];
-				flctl->done_buff[index] = org ^ (data & 0xFF);
-				checked[i] = 1;
-			}
+			org = flctl->done_buff[index];
+			flctl->done_buff[index] = org ^ (data & 0xFF);
 		}
-
+		state = FL_REPAIRABLE;
 		writel(0, FL4ECCCR(flctl));
 	}
 
 	timeout_error(flctl, __func__);
-	return 1;	/* timeout */
+	return FL_TIMEOUT;	/* timeout */
 }
 
 static void wait_wecfifo_ready(struct sh_flctl *flctl)
@@ -241,31 +275,33 @@
 {
 	int i, len_4align;
 	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
-	void *fifo_addr = (void *)FLDTFIFO(flctl);
 
 	len_4align = (rlen + 3) / 4;
 
 	for (i = 0; i < len_4align; i++) {
 		wait_rfifo_ready(flctl);
-		buf[i] = readl(fifo_addr);
+		buf[i] = readl(FLDTFIFO(flctl));
 		buf[i] = be32_to_cpu(buf[i]);
 	}
 }
 
-static int read_ecfiforeg(struct sh_flctl *flctl, uint8_t *buff, int sector)
+static enum flctl_ecc_res_t read_ecfiforeg
+		(struct sh_flctl *flctl, uint8_t *buff, int sector)
 {
 	int i;
+	enum flctl_ecc_res_t res;
 	unsigned long *ecc_buf = (unsigned long *)buff;
-	void *fifo_addr = (void *)FLECFIFO(flctl);
 
-	for (i = 0; i < 4; i++) {
-		if (wait_recfifo_ready(flctl , sector))
-			return 1;
-		ecc_buf[i] = readl(fifo_addr);
-		ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+	res = wait_recfifo_ready(flctl, sector);
+
+	if (res != FL_ERROR) {
+		for (i = 0; i < 4; i++) {
+			ecc_buf[i] = readl(FLECFIFO(flctl));
+			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+		}
 	}
 
-	return 0;
+	return res;
 }
 
 static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
@@ -281,6 +317,18 @@
 	}
 }
 
+static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+{
+	int i, len_4align;
+	unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
+
+	len_4align = (rlen + 3) / 4;
+	for (i = 0; i < len_4align; i++) {
+		wait_wecfifo_ready(flctl);
+		writel(cpu_to_be32(data[i]), FLECFIFO(flctl));
+	}
+}
+
 static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -346,73 +394,64 @@
 static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
 				uint8_t *buf, int oob_required, int page)
 {
-	int i, eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	uint8_t *p = buf;
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-
-	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
-		chip->read_buf(mtd, p, eccsize);
-
-	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		if (flctl->hwecc_cant_correct[i])
-			mtd->ecc_stats.failed++;
-		else
-			mtd->ecc_stats.corrected += 0; /* FIXME */
-	}
-
+	chip->read_buf(mtd, buf, mtd->writesize);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
 	return 0;
 }
 
-static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
 				   const uint8_t *buf, int oob_required)
 {
-	int i, eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	const uint8_t *p = buf;
-
-	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
-		chip->write_buf(mtd, p, eccsize);
+	chip->write_buf(mtd, buf, mtd->writesize);
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
 }
 
 static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
 	int sector, page_sectors;
+	enum flctl_ecc_res_t ecc_result;
 
-	if (flctl->page_size)
-		page_sectors = 4;
-	else
-		page_sectors = 1;
-
-	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
-		 FLCMNCR(flctl));
+	page_sectors = flctl->page_size ? 4 : 1;
 
 	set_cmd_regs(mtd, NAND_CMD_READ0,
 		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
 
+	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
+		 FLCMNCR(flctl));
+	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+	writel(page_addr << 2, FLADR(flctl));
+
+	empty_fifo(flctl);
+	start_translation(flctl);
+
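+	/*
+	 * A single translation now covers the whole page; the loop below
+	 * only drains the data FIFO and the ECC FIFO sector by sector.
+	 */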
 	for (sector = 0; sector < page_sectors; sector++) {
-		int ret;
-
-		empty_fifo(flctl);
-		writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
-		writel(page_addr << 2 | sector, FLADR(flctl));
-
-		start_translation(flctl);
 		read_fiforeg(flctl, 512, 512 * sector);
 
-		ret = read_ecfiforeg(flctl,
+		ecc_result = read_ecfiforeg(flctl,
 			&flctl->done_buff[mtd->writesize + 16 * sector],
 			sector);
 
-		if (ret)
-			flctl->hwecc_cant_correct[sector] = 1;
-
-		writel(0x0, FL4ECCCR(flctl));
-		wait_completion(flctl);
+		switch (ecc_result) {
+		case FL_REPAIRABLE:
+			dev_info(&flctl->pdev->dev,
+				"applied ecc on page 0x%x\n", page_addr);
+			flctl->mtd.ecc_stats.corrected++;
+			break;
+		case FL_ERROR:
+			dev_warn(&flctl->pdev->dev,
+				"page 0x%x contains corrupted data\n",
+				page_addr);
+			flctl->mtd.ecc_stats.failed++;
+			break;
+		default:
+			break;
+		}
 	}
+
+	wait_completion(flctl);
+
 	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
 			FLCMNCR(flctl));
 }
@@ -420,30 +459,20 @@
 static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int page_sectors = flctl->page_size ? 4 : 1;
+	int i;
 
 	set_cmd_regs(mtd, NAND_CMD_READ0,
 		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
 
 	empty_fifo(flctl);
-	if (flctl->page_size) {
-		int i;
-		/* In case that the page size is 2k */
-		for (i = 0; i < 16 * 3; i++)
-			flctl->done_buff[i] = 0xFF;
 
-		set_addr(mtd, 3 * 528 + 512, page_addr);
+	for (i = 0; i < page_sectors; i++) {
+		set_addr(mtd, (512 + 16) * i + 512, page_addr);
 		writel(16, FLDTCNTR(flctl));
 
 		start_translation(flctl);
-		read_fiforeg(flctl, 16, 16 * 3);
-		wait_completion(flctl);
-	} else {
-		/* In case that the page size is 512b */
-		set_addr(mtd, 512, page_addr);
-		writel(16, FLDTCNTR(flctl));
-
-		start_translation(flctl);
-		read_fiforeg(flctl, 16, 0);
+		read_fiforeg(flctl, 16, 16 * i);
 		wait_completion(flctl);
 	}
 }
@@ -451,34 +480,26 @@
 static void execmd_write_page_sector(struct mtd_info *mtd)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	int i, page_addr = flctl->seqin_page_addr;
+	int page_addr = flctl->seqin_page_addr;
 	int sector, page_sectors;
 
-	if (flctl->page_size)
-		page_sectors = 4;
-	else
-		page_sectors = 1;
-
-	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+	page_sectors = flctl->page_size ? 4 : 1;
 
 	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
 			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
 
+	empty_fifo(flctl);
+	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+	writel(page_addr << 2, FLADR(flctl));
+	start_translation(flctl);
+
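+	/*
+	 * As on the read path, one translation covers the whole page; the
+	 * loop below only feeds the data and ECC FIFOs sector by sector.
+	 */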
 	for (sector = 0; sector < page_sectors; sector++) {
-		empty_fifo(flctl);
-		writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
-		writel(page_addr << 2 | sector, FLADR(flctl));
-
-		start_translation(flctl);
 		write_fiforeg(flctl, 512, 512 * sector);
-
-		for (i = 0; i < 4; i++) {
-			wait_wecfifo_ready(flctl); /* wait for write ready */
-			writel(0xFFFFFFFF, FLECFIFO(flctl));
-		}
-		wait_completion(flctl);
+		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
 	}
 
+	wait_completion(flctl);
 	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
 }
 
@@ -488,18 +509,12 @@
 	int page_addr = flctl->seqin_page_addr;
 	int sector, page_sectors;
 
-	if (flctl->page_size) {
-		sector = 3;
-		page_sectors = 4;
-	} else {
-		sector = 0;
-		page_sectors = 1;
-	}
+	page_sectors = flctl->page_size ? 4 : 1;
 
 	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
 			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
 
-	for (; sector < page_sectors; sector++) {
+	for (sector = 0; sector < page_sectors; sector++) {
 		empty_fifo(flctl);
 		set_addr(mtd, sector * 528 + 512, page_addr);
 		writel(16, FLDTCNTR(flctl));	/* set read size */
@@ -831,7 +846,7 @@
 		chip->ecc.mode = NAND_ECC_HW;
 
 		/* 4 symbols ECC enabled */
-		flctl->flcmncr_base |= _4ECCEN | ECCPOS2 | ECCPOS_02;
+		flctl->flcmncr_base |= _4ECCEN;
 	} else {
 		chip->ecc.mode = NAND_ECC_SOFT;
 	}
@@ -839,6 +854,16 @@
 	return 0;
 }
 
+static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
+{
+	struct sh_flctl *flctl = dev_id;
+
+	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
+	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
+
+	return IRQ_HANDLED;
+}
+
 static int __devinit flctl_probe(struct platform_device *pdev)
 {
 	struct resource *res;
@@ -847,6 +872,7 @@
 	struct nand_chip *nand;
 	struct sh_flctl_platform_data *pdata;
 	int ret = -ENXIO;
+	int irq;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
@@ -872,14 +898,27 @@
 		goto err_iomap;
 	}
 
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get flste irq data\n");
+		goto err_flste;
+	}
+
+	ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl);
+	if (ret) {
+		dev_err(&pdev->dev, "request interrupt failed.\n");
+		goto err_flste;
+	}
+
 	platform_set_drvdata(pdev, flctl);
 	flctl_mtd = &flctl->mtd;
 	nand = &flctl->chip;
 	flctl_mtd->priv = nand;
 	flctl->pdev = pdev;
-	flctl->flcmncr_base = pdata->flcmncr_val;
 	flctl->hwecc = pdata->has_hwecc;
 	flctl->holden = pdata->use_holden;
+	flctl->flcmncr_base = pdata->flcmncr_val;
+	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
 
 	/* Set address of hardware control function */
 	/* 20 us command delay time */
@@ -918,6 +957,9 @@
 
 err_chip:
 	pm_runtime_disable(&pdev->dev);
+	free_irq(irq, flctl);
+err_flste:
+	iounmap(flctl->reg);
 err_iomap:
 	kfree(flctl);
 	return ret;
@@ -929,6 +971,8 @@
 
 	nand_release(&flctl->mtd);
 	pm_runtime_disable(&pdev->dev);
+	free_irq(platform_get_irq(pdev, 0), flctl);
+	iounmap(flctl->reg);
 	kfree(flctl);
 
 	return 0;
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 2aec4f3..42b0f74 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -26,6 +26,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 
 #define PRINT_PREF KERN_INFO "mtd_speedtest: "
 
@@ -47,25 +48,13 @@
 static int pgcnt;
 static int goodebcnt;
 static struct timeval start, finish;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
-	next = next * 1103515245 + 12345;
-	return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
-	next = seed;
-}
 
 static void set_random_data(unsigned char *buf, size_t len)
 {
 	size_t i;
 
 	for (i = 0; i < len; ++i)
-		buf[i] = simple_rand();
+		buf[i] = random32();
 }
 
 static int erase_eraseblock(int ebnum)
@@ -407,7 +396,6 @@
 		goto out;
 	}
 
-	simple_srand(1);
 	set_random_data(iobuf, mtd->erasesize);
 
 	err = scan_for_bad_eraseblocks();
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 7b33f22..cb268ce 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
+#include <linux/random.h>
 
 #define PRINT_PREF KERN_INFO "mtd_stresstest: "
 
@@ -48,28 +49,13 @@
 static int bufsize;
 static int ebcnt;
 static int pgcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
-	next = next * 1103515245 + 12345;
-	return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
-	next = seed;
-}
 
 static int rand_eb(void)
 {
-	int eb;
+	unsigned int eb;
 
 again:
-	if (ebcnt < 32768)
-		eb = simple_rand();
-	else
-		eb = (simple_rand() << 15) | simple_rand();
+	eb = random32();
 	/* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
 	eb %= (ebcnt - 1);
 	if (bbt[eb])
@@ -79,24 +65,18 @@
 
 static int rand_offs(void)
 {
-	int offs;
+	unsigned int offs;
 
-	if (bufsize < 32768)
-		offs = simple_rand();
-	else
-		offs = (simple_rand() << 15) | simple_rand();
+	offs = random32();
 	offs %= bufsize;
 	return offs;
 }
 
 static int rand_len(int offs)
 {
-	int len;
+	unsigned int len;
 
-	if (bufsize < 32768)
-		len = simple_rand();
-	else
-		len = (simple_rand() << 15) | simple_rand();
+	len = random32();
 	len %= (bufsize - offs);
 	return len;
 }
@@ -211,7 +191,7 @@
 
 static int do_operation(void)
 {
-	if (simple_rand() & 1)
+	if (random32() & 1)
 		return do_read();
 	else
 		return do_write();
@@ -302,9 +282,8 @@
 	}
 	for (i = 0; i < ebcnt; i++)
 		offsets[i] = mtd->erasesize;
-	simple_srand(current->pid);
 	for (i = 0; i < bufsize; i++)
-		writebuf[i] = simple_rand();
+		writebuf[i] = random32();
 
 	err = scan_for_bad_eraseblocks();
 	if (err)
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 650ef35..211ff67 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -78,8 +78,6 @@
 #define NAND_BBT_LASTBLOCK	0x00000010
 /* The bbt is at the given page, else we must scan for the bbt */
 #define NAND_BBT_ABSPAGE	0x00000020
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_SEARCH		0x00000040
 /* bbt is stored per chip on multichip devices */
 #define NAND_BBT_PERCHIP	0x00000080
 /* bbt has a version counter at offset veroffs */
@@ -110,7 +108,10 @@
  * OOB area. This option is passed to the default bad block table function.
  */
 #define NAND_BBT_USE_FLASH	0x00020000
-/* Do not store flash based bad block table in OOB area; store it in-band */
+/*
+ * Do not store flash based bad block table marker in the OOB area; store it
+ * in-band.
+ */
 #define NAND_BBT_NO_OOB		0x00040000
 /*
  * Do not write new bad block markers to OOB; useful, e.g., when ECC covers
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 63dadc0..81d61e7 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -265,14 +265,7 @@
 int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
 		    const u_char *buf);
 
-static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
-			       struct mtd_oob_ops *ops)
-{
-	ops->retlen = ops->oobretlen = 0;
-	if (!mtd->_read_oob)
-		return -EOPNOTSUPP;
-	return mtd->_read_oob(mtd, from, ops);
-}
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
 
 static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
 				struct mtd_oob_ops *ops)
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 57977c6..6dce5a7 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -185,12 +185,6 @@
  * This happens with the Renesas AG-AND chips, possibly others.
  */
 #define BBT_AUTO_REFRESH	0x00000080
-/*
- * Chip does not require ready check on read. True
- * for all large page devices, as they do not support
- * autoincrement.
- */
-#define NAND_NO_READRDY		0x00000100
 /* Chip does not allow subpage writes */
 #define NAND_NO_SUBPAGE_WRITE	0x00000200
 
@@ -361,13 +355,13 @@
 			uint8_t *calc_ecc);
 	int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
 			uint8_t *buf, int oob_required, int page);
-	void (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
+	int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
 			const uint8_t *buf, int oob_required);
 	int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
 			uint8_t *buf, int oob_required, int page);
 	int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
 			uint32_t offs, uint32_t len, uint8_t *buf);
-	void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
+	int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
 			const uint8_t *buf, int oob_required);
 	int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
 			int page);
@@ -559,6 +553,7 @@
 #define NAND_MFR_MICRON		0x2c
 #define NAND_MFR_AMD		0x01
 #define NAND_MFR_MACRONIX	0xc2
+#define NAND_MFR_EON		0x92
 
 /**
  * struct nand_flash_dev - NAND Flash Device ID Structure
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index a38e1fa..01e4b15 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -49,7 +49,6 @@
 #define	FLERRADR(f)		(f->reg + 0x98)
 
 /* FLCMNCR control bits */
-#define ECCPOS2		(0x1 << 25)
 #define _4ECCCNTEN	(0x1 << 24)
 #define _4ECCEN		(0x1 << 23)
 #define _4ECCCORRECT	(0x1 << 22)
@@ -59,9 +58,6 @@
 #define QTSEL_E		(0x1 << 17)
 #define ENDIAN		(0x1 << 16)	/* 1 = little endian */
 #define FCKSEL_E	(0x1 << 15)
-#define ECCPOS_00	(0x00 << 12)
-#define ECCPOS_01	(0x01 << 12)
-#define ECCPOS_02	(0x02 << 12)
 #define ACM_SACCES_MODE	(0x01 << 10)
 #define NANWF_E		(0x1 << 9)
 #define SE_D		(0x1 << 8)	/* Spare area disable */
@@ -107,6 +103,14 @@
 #define DOCMD2_E	(0x1 << 17)	/* 2nd cmd stage execute */
 #define DOCMD1_E	(0x1 << 16)	/* 1st cmd stage execute */
 
+/* FLINTDMACR control bits */
+#define ESTERINTE	(0x1 << 24)	/* ECC error interrupt enable */
+#define AC1CLR		(0x1 << 19)	/* ECC FIFO clear */
+#define AC0CLR		(0x1 << 18)	/* Data FIFO clear */
+#define ECERB		(0x1 << 9)	/* ECC error */
+#define STERB		(0x1 << 8)	/* Status error */
+#define STERINTE	(0x1 << 4)	/* Status error enable */
+
 /* FLTRCR control bits */
 #define TRSTRT		(0x1 << 0)	/* translation start */
 #define TREND		(0x1 << 1)	/* translation end */
@@ -125,9 +129,15 @@
 #define	_4ECCEND	(0x1 << 1)	/* 4 symbols end */
 #define	_4ECCEXST	(0x1 << 0)	/* 4 symbols exist */
 
-#define INIT_FL4ECCRESULT_VAL	0x03FF03FF
 #define LOOP_TIMEOUT_MAX	0x00010000
 
+enum flctl_ecc_res_t {
+	FL_SUCCESS,
+	FL_REPAIRABLE,
+	FL_ERROR,
+	FL_TIMEOUT
+};
+
 struct sh_flctl {
 	struct mtd_info		mtd;
 	struct nand_chip	chip;
@@ -145,8 +155,7 @@
 	uint32_t erase_ADRCNT;		/* bits of FLCMDCR in ERASE1 cmd */
 	uint32_t rw_ADRCNT;	/* bits of FLCMDCR in READ WRITE cmd */
 	uint32_t flcmncr_base;	/* base value of FLCMNCR */
-
-	int	hwecc_cant_correct[4];
+	uint32_t flintdmacr_base;	/* irq enable bits */
 
 	unsigned page_size:1;	/* NAND page size (0 = 512, 1 = 2048) */
 	unsigned hwecc:1;	/* Hardware ECC (0 = disabled, 1 = enabled) */