Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / Freezer: Revert 27920651fe "PM / Freezer: Make fake_signal_wake_up() wake TASK_KILLABLE tasks too"
  PM / Freezer: Reimplement wait_event_freezekillable using freezer_do_not_count/freezer_count
  USB: Update last_busy time after autosuspend fails
  PM / Runtime: Automatically retry failed autosuspends
  PM / QoS: Remove redundant check
  PM / OPP: Fix build when CONFIG_PM_OPP is not set
  PM / Runtime: Fix runtime accounting calculation error
  PM / Sleep: Update freezer documentation
  PM / Sleep: Remove unused symbol 'suspend_cpu_hotplug'
  PM / Sleep: Fix race between CPU hotplug and freezer
  ACPI / PM: Add Sony VPCEB17FX to nonvs blacklist
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
index f5bb0a3..53d99ed 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
@@ -71,3 +71,10 @@
 		a dump device, as kdump requires resetting the device in order
 		to work reliably.
 
+Where:		/sys/bus/pci/devices/<dev>/ccissX/transport_mode
+Date:		July 2011
+Kernel Version:	3.0
+Contact:	iss_storagedev@hp.com
+Description:	A value of "simple" indicates that the controller has been
+		placed in "simple mode"; a value of "performant" indicates that
+		the controller has been placed in "performant mode".
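+
+		For example (illustrative; <dev> stands for the PCI device and
+		X for the controller number):
+
+		# cat /sys/bus/pci/devices/<dev>/ccissX/transport_mode
+		performant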
diff --git a/Documentation/blockdev/cciss.txt b/Documentation/blockdev/cciss.txt
index c00c6a5..71464e0 100644
--- a/Documentation/blockdev/cciss.txt
+++ b/Documentation/blockdev/cciss.txt
@@ -78,6 +78,16 @@
 /dev/cciss/c1d1p2		Controller 1, disk 1, partition 2
 /dev/cciss/c1d1p3		Controller 1, disk 1, partition 3
 
+CCISS simple mode support
+-------------------------
+
+The "cciss_simple_mode=1" boot parameter may be used to prevent the driver
+from putting the controller into "performant" mode. The difference is that
+with simple mode, each command completion requires an interrupt, while with
+"performant mode" (the default, and ordinarily better performing) it is
+possible to have multiple command completions indicated by a single
+interrupt.
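+
+For example, an illustrative boot loader entry (kernel image path and root
+device are placeholders):
+
+	kernel /vmlinuz ro root=/dev/sda1 cciss_simple_mode=1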
+
 SCSI tape drive and medium changer support
 ------------------------------------------
 
diff --git a/Documentation/watchdog/convert_drivers_to_kernel_api.txt b/Documentation/watchdog/convert_drivers_to_kernel_api.txt
new file mode 100644
index 0000000..ae1e900
--- /dev/null
+++ b/Documentation/watchdog/convert_drivers_to_kernel_api.txt
@@ -0,0 +1,195 @@
+Converting old watchdog drivers to the watchdog framework
+by Wolfram Sang <w.sang@pengutronix.de>
+=========================================================
+
+Before the watchdog framework came into the kernel, every driver had to
+implement the API on its own. Now that the framework has factored out the
+common components, those drivers can be slimmed down by converting them into
+users of the framework. This document guides you through that task; it
+describes the necessary steps as well as things to look out for.
+
+
+Remove the file_operations struct
+---------------------------------
+
+Old drivers define their own file_operations for actions like open(), write(),
+etc. These are now handled by the framework, which calls into the driver only
+when needed. So, in general, the 'file_operations' struct and assorted
+functions can go. Only very few driver-specific details have to be moved to
+other functions. Here is an overview of the functions and the actions they
+will probably need:
+
+- open: Everything dealing with resource management (file-open checks, magic
+  close preparations) can simply go. Device-specific stuff needs to go to the
+  driver-specific start-function. Note that for some drivers, the
+  start-function also serves as the ping-function. If that is the case and you
+  need start/stop to be balanced (clocks!), you are better off refactoring out
+  a separate start-function.
+
+- close: Same hints as for open apply.
+
+- write: Can simply go; all defined behaviour is taken care of by the
+  framework, i.e. ping on write and magic char ('V') handling.
+
+- ioctl: While the driver is allowed to have extensions to the IOCTL interface,
+  the most common ones are handled by the framework, with some assistance from
+  the driver:
+
+	WDIOC_GETSUPPORT:
+		Returns the mandatory watchdog_info struct from the driver
+
+	WDIOC_GETSTATUS:
+		Needs the status-callback defined, otherwise returns 0
+
+	WDIOC_GETBOOTSTATUS:
+		Needs the bootstatus member properly set. Make sure it is 0 if you
+		don't have further support!
+
+	WDIOC_SETOPTIONS:
+		No preparations needed
+
+	WDIOC_KEEPALIVE:
+		If wanted, options in watchdog_info need to have WDIOF_KEEPALIVEPING
+		set
+
+	WDIOC_SETTIMEOUT:
+		Options in watchdog_info need to have WDIOF_SETTIMEOUT set
+		and a set_timeout-callback has to be defined. The core will also
+		do limit-checking if min_timeout and max_timeout in the watchdog
+		device are set. All of this is optional.
+
+	WDIOC_GETTIMEOUT:
+		No preparations needed
+
+  Other IOCTLs can be served using the ioctl-callback. Note that this is mainly
+  intended for porting old drivers; new drivers should not invent private
+  IOCTLs. Private IOCTLs are processed first. When the callback returns with
+  -ENOIOCTLCMD, the IOCTLs of the framework will be tried, too. Any other error
+  is passed directly to the user.
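+
+  A minimal sketch of such a callback, assuming the prototype described in
+  'watchdog-kernel-api.txt'; WDIOC_MYLEGACY and mywdt_do_legacy() are made-up
+  names standing in for a driver's private extension:
+
+	static long mywdt_ioctl(struct watchdog_device *wdd, unsigned int cmd,
+				unsigned long arg)
+	{
+		switch (cmd) {
+		case WDIOC_MYLEGACY:	/* hypothetical private IOCTL */
+			return mywdt_do_legacy(wdd, arg);
+		default:
+			/* let the framework try its standard IOCTLs */
+			return -ENOIOCTLCMD;
+		}
+	}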
+
+Example conversion:
+
+-static const struct file_operations s3c2410wdt_fops = {
+-       .owner          = THIS_MODULE,
+-       .llseek         = no_llseek,
+-       .write          = s3c2410wdt_write,
+-       .unlocked_ioctl = s3c2410wdt_ioctl,
+-       .open           = s3c2410wdt_open,
+-       .release        = s3c2410wdt_release,
+-};
+
+Check the functions for device-specific stuff and keep it for later
+refactoring. The rest can go.
+
+
+Remove the miscdevice
+---------------------
+
+Since the file_operations are gone now, you can also remove the 'struct
+miscdevice'. The framework will create it in watchdog_dev_register(), which is
+called by watchdog_register_device().
+
+-static struct miscdevice s3c2410wdt_miscdev = {
+-       .minor          = WATCHDOG_MINOR,
+-       .name           = "watchdog",
+-       .fops           = &s3c2410wdt_fops,
+-};
+
+
+Remove obsolete includes and defines
+------------------------------------
+
+Because of the simplifications, a few defines are probably unused now. Remove
+them. Includes can be removed, too. For example:
+
+- #include <linux/fs.h>
+- #include <linux/miscdevice.h> (if MODULE_ALIAS_MISCDEV is not used)
+- #include <linux/uaccess.h> (if no custom IOCTLs are used)
+
+
+Add the watchdog operations
+---------------------------
+
+All possible callbacks are defined in 'struct watchdog_ops'. You can find it
+explained in 'watchdog-kernel-api.txt' in this directory. start(), stop() and
+owner must be set; the rest are optional. You will easily find corresponding
+functions in the old driver. Note that you will now get a pointer to the
+watchdog_device as a parameter to these functions, so you probably have to
+change the function header. Other changes are most likely not needed, because
+these functions simply do the direct hardware access. If you have
+device-specific code left over from the above steps, it should be refactored
+into these callbacks.
+
+Here is a simple example:
+
++static struct watchdog_ops s3c2410wdt_ops = {
++       .owner = THIS_MODULE,
++       .start = s3c2410wdt_start,
++       .stop = s3c2410wdt_stop,
++       .ping = s3c2410wdt_keepalive,
++       .set_timeout = s3c2410wdt_set_heartbeat,
++};
+
+A typical function-header change looks like:
+
+-static void s3c2410wdt_keepalive(void)
++static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
+ {
+...
++
++       return 0;
+ }
+
+...
+
+-       s3c2410wdt_keepalive();
++       s3c2410wdt_keepalive(&s3c2410_wdd);
+
+
+Add the watchdog device
+-----------------------
+
+Now we need to create a 'struct watchdog_device' and populate it with the
+necessary information for the framework. The struct is also explained in detail
+in 'watchdog-kernel-api.txt' in this directory. We pass it the mandatory
+watchdog_info struct and the newly created watchdog_ops. Often, old drivers
+keep their own records for things like bootstatus and timeout in static
+variables. Those have to be converted to use the members in watchdog_device.
+Note that the timeout values are unsigned int; some drivers use signed int, so
+these have to be converted, too.
+
+Here is a simple example of a watchdog device:
+
++static struct watchdog_device s3c2410_wdd = {
++       .info = &s3c2410_wdt_ident,
++       .ops = &s3c2410wdt_ops,
++};
+
+
+Register the watchdog device
+----------------------------
+
+Replace misc_register(&miscdev) with watchdog_register_device(&watchdog_dev).
+Make sure the return value gets checked and the error message, if present,
+still fits. Also convert the unregister case.
+
+-       ret = misc_register(&s3c2410wdt_miscdev);
++       ret = watchdog_register_device(&s3c2410_wdd);
+
+...
+
+-       misc_deregister(&s3c2410wdt_miscdev);
++       watchdog_unregister_device(&s3c2410_wdd);
+
+
+Update the Kconfig-entry
+------------------------
+
+The entry for the driver now needs to select WATCHDOG_CORE:
+
++       select WATCHDOG_CORE
+
+
+Create a patch and send it to upstream
+--------------------------------------
+
+Make sure you have understood Documentation/SubmittingPatches and send your
+patch to linux-watchdog@vger.kernel.org. We are looking forward to it :)
+
diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h
index e4a04e4..33c78d7 100644
--- a/arch/arm/include/asm/hardware/pl080.h
+++ b/arch/arm/include/asm/hardware/pl080.h
@@ -21,6 +21,9 @@
  * OneNAND features.
 */
 
+#ifndef ASM_PL080_H
+#define ASM_PL080_H
+
 #define PL080_INT_STATUS			(0x00)
 #define PL080_TC_STATUS				(0x04)
 #define PL080_TC_CLEAR				(0x08)
@@ -138,3 +141,4 @@
 	u32	control1;
 };
 
+#endif /* ASM_PL080_H */
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index a652735..44013e0 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -11,7 +11,7 @@
 
 config CPU_EXYNOS4210
 	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
 	select ARM_CPU_SUSPEND if PM
 	help
 	  Enable EXYNOS4210 CPU support
diff --git a/arch/arm/mach-exynos4/clock.c b/arch/arm/mach-exynos4/clock.c
index 0d59be3..a52024e 100644
--- a/arch/arm/mach-exynos4/clock.c
+++ b/arch/arm/mach-exynos4/clock.c
@@ -111,6 +111,11 @@
 	.name		= "sclk_usbphy1",
 };
 
+static struct clk dummy_apb_pclk = {
+	.name		= "apb_pclk",
+	.id		= -1,
+};
+
 static int exynos4_clksrc_mask_top_ctrl(struct clk *clk, int enable)
 {
 	return s5p_gatectrl(S5P_CLKSRC_MASK_TOP, clk, enable);
@@ -503,12 +508,12 @@
 		.enable		= exynos4_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 9),
 	}, {
-		.name		= "pdma",
+		.name		= "dma",
 		.devname	= "s3c-pl330.0",
 		.enable		= exynos4_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 0),
 	}, {
-		.name		= "pdma",
+		.name		= "dma",
 		.devname	= "s3c-pl330.1",
 		.enable		= exynos4_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 1),
@@ -1282,5 +1287,7 @@
 	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	register_syscore_ops(&exynos4_clock_syscore_ops);
+	s3c24xx_register_clock(&dummy_apb_pclk);
+
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-exynos4/dma.c b/arch/arm/mach-exynos4/dma.c
index 564bb53..d57d662 100644
--- a/arch/arm/mach-exynos4/dma.c
+++ b/arch/arm/mach-exynos4/dma.c
@@ -21,151 +21,228 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>
 
+#include <asm/irq.h>
 #include <plat/devs.h>
 #include <plat/irqs.h>
 
 #include <mach/map.h>
 #include <mach/irqs.h>
-
-#include <plat/s3c-pl330-pdata.h>
+#include <mach/dma.h>
 
 static u64 dma_dmamask = DMA_BIT_MASK(32);
 
-static struct resource exynos4_pdma0_resource[] = {
-	[0] = {
-		.start	= EXYNOS4_PA_PDMA0,
-		.end	= EXYNOS4_PA_PDMA0 + SZ_4K,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_PDMA0,
-		.end	= IRQ_PDMA0,
-		.flags	= IORESOURCE_IRQ,
+struct dma_pl330_peri pdma0_peri[28] = {
+	{
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ0,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ2,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART4_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART4_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS4_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS4_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_AC97_MICIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMOUT,
+		.rqtype = MEMTODEV,
 	},
 };
 
-static struct s3c_pl330_platdata exynos4_pdma0_pdata = {
-	.peri = {
-		[0] = DMACH_PCM0_RX,
-		[1] = DMACH_PCM0_TX,
-		[2] = DMACH_PCM2_RX,
-		[3] = DMACH_PCM2_TX,
-		[4] = DMACH_MSM_REQ0,
-		[5] = DMACH_MSM_REQ2,
-		[6] = DMACH_SPI0_RX,
-		[7] = DMACH_SPI0_TX,
-		[8] = DMACH_SPI2_RX,
-		[9] = DMACH_SPI2_TX,
-		[10] = DMACH_I2S0S_TX,
-		[11] = DMACH_I2S0_RX,
-		[12] = DMACH_I2S0_TX,
-		[13] = DMACH_I2S2_RX,
-		[14] = DMACH_I2S2_TX,
-		[15] = DMACH_UART0_RX,
-		[16] = DMACH_UART0_TX,
-		[17] = DMACH_UART2_RX,
-		[18] = DMACH_UART2_TX,
-		[19] = DMACH_UART4_RX,
-		[20] = DMACH_UART4_TX,
-		[21] = DMACH_SLIMBUS0_RX,
-		[22] = DMACH_SLIMBUS0_TX,
-		[23] = DMACH_SLIMBUS2_RX,
-		[24] = DMACH_SLIMBUS2_TX,
-		[25] = DMACH_SLIMBUS4_RX,
-		[26] = DMACH_SLIMBUS4_TX,
-		[27] = DMACH_AC97_MICIN,
-		[28] = DMACH_AC97_PCMIN,
-		[29] = DMACH_AC97_PCMOUT,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
-	},
+struct dma_pl330_platdata exynos4_pdma0_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma0_peri),
+	.peri = pdma0_peri,
 };
 
-static struct platform_device exynos4_device_pdma0 = {
-	.name		= "s3c-pl330",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(exynos4_pdma0_resource),
-	.resource	= exynos4_pdma0_resource,
-	.dev		= {
+struct amba_device exynos4_device_pdma0 = {
+	.dev = {
+		.init_name = "dma-pl330.0",
 		.dma_mask = &dma_dmamask,
 		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = &exynos4_pdma0_pdata,
 	},
+	.res = {
+		.start = EXYNOS4_PA_PDMA0,
+		.end = EXYNOS4_PA_PDMA0 + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_PDMA0, NO_IRQ},
+	.periphid = 0x00041330,
 };
 
-static struct resource exynos4_pdma1_resource[] = {
-	[0] = {
-		.start	= EXYNOS4_PA_PDMA1,
-		.end	= EXYNOS4_PA_PDMA1 + SZ_4K,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_PDMA1,
-		.end	= IRQ_PDMA1,
-		.flags	= IORESOURCE_IRQ,
+struct dma_pl330_peri pdma1_peri[25] = {
+	{
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ1,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ3,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS5_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS5_TX,
+		.rqtype = MEMTODEV,
 	},
 };
 
-static struct s3c_pl330_platdata exynos4_pdma1_pdata = {
-	.peri = {
-		[0] = DMACH_PCM0_RX,
-		[1] = DMACH_PCM0_TX,
-		[2] = DMACH_PCM1_RX,
-		[3] = DMACH_PCM1_TX,
-		[4] = DMACH_MSM_REQ1,
-		[5] = DMACH_MSM_REQ3,
-		[6] = DMACH_SPI1_RX,
-		[7] = DMACH_SPI1_TX,
-		[8] = DMACH_I2S0S_TX,
-		[9] = DMACH_I2S0_RX,
-		[10] = DMACH_I2S0_TX,
-		[11] = DMACH_I2S1_RX,
-		[12] = DMACH_I2S1_TX,
-		[13] = DMACH_UART0_RX,
-		[14] = DMACH_UART0_TX,
-		[15] = DMACH_UART1_RX,
-		[16] = DMACH_UART1_TX,
-		[17] = DMACH_UART3_RX,
-		[18] = DMACH_UART3_TX,
-		[19] = DMACH_SLIMBUS1_RX,
-		[20] = DMACH_SLIMBUS1_TX,
-		[21] = DMACH_SLIMBUS3_RX,
-		[22] = DMACH_SLIMBUS3_TX,
-		[23] = DMACH_SLIMBUS5_RX,
-		[24] = DMACH_SLIMBUS5_TX,
-		[25] = DMACH_SLIMBUS0AUX_RX,
-		[26] = DMACH_SLIMBUS0AUX_TX,
-		[27] = DMACH_SPDIF,
-		[28] = DMACH_MAX,
-		[29] = DMACH_MAX,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
-	},
+struct dma_pl330_platdata exynos4_pdma1_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma1_peri),
+	.peri = pdma1_peri,
 };
 
-static struct platform_device exynos4_device_pdma1 = {
-	.name		= "s3c-pl330",
-	.id		= 1,
-	.num_resources	= ARRAY_SIZE(exynos4_pdma1_resource),
-	.resource	= exynos4_pdma1_resource,
-	.dev		= {
+struct amba_device exynos4_device_pdma1 = {
+	.dev = {
+		.init_name = "dma-pl330.1",
 		.dma_mask = &dma_dmamask,
 		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = &exynos4_pdma1_pdata,
 	},
-};
-
-static struct platform_device *exynos4_dmacs[] __initdata = {
-	&exynos4_device_pdma0,
-	&exynos4_device_pdma1,
+	.res = {
+		.start = EXYNOS4_PA_PDMA1,
+		.end = EXYNOS4_PA_PDMA1 + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_PDMA1, NO_IRQ},
+	.periphid = 0x00041330,
 };
 
 static int __init exynos4_dma_init(void)
 {
-	platform_add_devices(exynos4_dmacs, ARRAY_SIZE(exynos4_dmacs));
+	amba_device_register(&exynos4_device_pdma0, &iomem_resource);
 
 	return 0;
 }
diff --git a/arch/arm/mach-exynos4/include/mach/dma.h b/arch/arm/mach-exynos4/include/mach/dma.h
index 81209eb..201842a 100644
--- a/arch/arm/mach-exynos4/include/mach/dma.h
+++ b/arch/arm/mach-exynos4/include/mach/dma.h
@@ -20,7 +20,7 @@
 #ifndef __MACH_DMA_H
 #define __MACH_DMA_H
 
-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>
 
 #endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/dma.h b/arch/arm/mach-s3c2410/include/mach/dma.h
index b2b2a5b..ae8e482 100644
--- a/arch/arm/mach-s3c2410/include/mach/dma.h
+++ b/arch/arm/mach-s3c2410/include/mach/dma.h
@@ -13,7 +13,6 @@
 #ifndef __ASM_ARCH_DMA_H
 #define __ASM_ARCH_DMA_H __FILE__
 
-#include <plat/dma.h>
 #include <linux/sysdev.h>
 
 #define MAX_DMA_TRANSFER_SIZE   0x100000 /* Data Unit is half word  */
@@ -51,6 +50,18 @@
 	DMACH_MAX,		/* the end entry */
 };
 
+static inline bool samsung_dma_has_circular(void)
+{
+	return false;
+}
+
+static inline bool samsung_dma_is_dmadev(void)
+{
+	return false;
+}
+
+#include <plat/dma.h>
+
 #define DMACH_LOW_LEVEL	(1<<28)	/* use this to specifiy hardware ch no */
 
 /* we have 4 dma channels */
@@ -163,7 +174,7 @@
 	struct s3c2410_dma_client *client;
 
 	/* channel configuration */
-	enum s3c2410_dmasrc	 source;
+	enum dma_data_direction	 source;
 	enum dma_ch		 req_ch;
 	unsigned long		 dev_addr;
 	unsigned long		 load_timeout;
@@ -196,9 +207,4 @@
 
 typedef unsigned long dma_device_t;
 
-static inline bool s3c_dma_has_circular(void)
-{
-	return false;
-}
-
 #endif /* __ASM_ARCH_DMA_H */
diff --git a/arch/arm/mach-s3c2412/dma.c b/arch/arm/mach-s3c2412/dma.c
index c61e326..d2a7d5ef 100644
--- a/arch/arm/mach-s3c2412/dma.c
+++ b/arch/arm/mach-s3c2412/dma.c
@@ -130,11 +130,11 @@
 
 static void s3c2412_dma_direction(struct s3c2410_dma_chan *chan,
 				  struct s3c24xx_dma_map *map,
-				  enum s3c2410_dmasrc dir)
+				  enum dma_data_direction dir)
 {
 	unsigned long chsel;
 
-	if (dir == S3C2410_DMASRC_HW)
+	if (dir == DMA_FROM_DEVICE)
 		chsel = map->channels_rx[0];
 	else
 		chsel = map->channels[0];
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index 204bfaf..67c97fa 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -147,14 +147,14 @@
 	u32 control0, control1;
 
 	switch (chan->source) {
-	case S3C2410_DMASRC_HW:
+	case DMA_FROM_DEVICE:
 		src = chan->dev_addr;
 		dst = data;
 		control0 = PL080_CONTROL_SRC_AHB2;
 		control0 |= PL080_CONTROL_DST_INCR;
 		break;
 
-	case S3C2410_DMASRC_MEM:
+	case DMA_TO_DEVICE:
 		src = data;
 		dst = chan->dev_addr;
 		control0 = PL080_CONTROL_DST_AHB2;
@@ -416,7 +416,7 @@
 
 
 int s3c2410_dma_devconfig(enum dma_ch channel,
-			  enum s3c2410_dmasrc source,
+			  enum dma_data_direction source,
 			  unsigned long devaddr)
 {
 	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -437,11 +437,11 @@
 	pr_debug("%s: peripheral %d\n", __func__, peripheral);
 
 	switch (source) {
-	case S3C2410_DMASRC_HW:
+	case DMA_FROM_DEVICE:
 		config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
 		break;
-	case S3C2410_DMASRC_MEM:
+	case DMA_TO_DEVICE:
 		config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
 		break;
diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h
index 0a5d926..fe1a98c 100644
--- a/arch/arm/mach-s3c64xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c64xx/include/mach/dma.h
@@ -58,11 +58,15 @@
 	DMACH_MAX		/* the end */
 };
 
-static __inline__ bool s3c_dma_has_circular(void)
+static inline bool samsung_dma_has_circular(void)
 {
 	return true;
 }
 
+static inline bool samsung_dma_is_dmadev(void)
+{
+	return false;
+}
 #define S3C2410_DMAF_CIRCULAR		(1 << 0)
 
 #include <plat/dma.h>
@@ -95,7 +99,7 @@
 	unsigned char		 peripheral;
 
 	unsigned int		 flags;
-	enum s3c2410_dmasrc	 source;
+	enum dma_data_direction	 source;
 
 
 	dma_addr_t		dev_addr;
diff --git a/arch/arm/mach-s5p64x0/Kconfig b/arch/arm/mach-s5p64x0/Kconfig
index 65c7518..9527ed2 100644
--- a/arch/arm/mach-s5p64x0/Kconfig
+++ b/arch/arm/mach-s5p64x0/Kconfig
@@ -9,14 +9,14 @@
 
 config CPU_S5P6440
 	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
 	select S5P_HRT
 	help
 	  Enable S5P6440 CPU support
 
 config CPU_S5P6450
 	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
 	select S5P_HRT
 	help
 	  Enable S5P6450 CPU support
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index 0e9cd30..c1f548f 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -146,7 +146,7 @@
 		.enable		= s5p64x0_hclk0_ctrl,
 		.ctrlbit	= (1 << 8),
 	}, {
-		.name		= "pdma",
+		.name		= "dma",
 		.parent		= &clk_hclk_low.clk,
 		.enable		= s5p64x0_hclk0_ctrl,
 		.ctrlbit	= (1 << 12),
@@ -499,6 +499,11 @@
 	&clk_pclk_low,
 };
 
+static struct clk dummy_apb_pclk = {
+	.name		= "apb_pclk",
+	.id		= -1,
+};
+
 void __init_or_cpufreq s5p6440_setup_clocks(void)
 {
 	struct clk *xtal_clk;
@@ -581,5 +586,7 @@
 	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
+	s3c24xx_register_clock(&dummy_apb_pclk);
+
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index d9dc16c..3d9b609 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -179,7 +179,7 @@
 		.enable		= s5p64x0_hclk0_ctrl,
 		.ctrlbit	= (1 << 3),
 	}, {
-		.name		= "pdma",
+		.name		= "dma",
 		.parent		= &clk_hclk_low.clk,
 		.enable		= s5p64x0_hclk0_ctrl,
 		.ctrlbit	= (1 << 12),
@@ -553,6 +553,11 @@
 	&clk_sclk_audio0,
 };
 
+static struct clk dummy_apb_pclk = {
+	.name		= "apb_pclk",
+	.id		= -1,
+};
+
 void __init_or_cpufreq s5p6450_setup_clocks(void)
 {
 	struct clk *xtal_clk;
@@ -632,5 +637,7 @@
 	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
+	s3c24xx_register_clock(&dummy_apb_pclk);
+
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5p64x0/dma.c b/arch/arm/mach-s5p64x0/dma.c
index 0e5b3e6..442dd4a 100644
--- a/arch/arm/mach-s5p64x0/dma.c
+++ b/arch/arm/mach-s5p64x0/dma.c
@@ -21,115 +21,208 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>
+
+#include <asm/irq.h>
 
 #include <mach/map.h>
 #include <mach/irqs.h>
 #include <mach/regs-clock.h>
+#include <mach/dma.h>
 
 #include <plat/cpu.h>
 #include <plat/devs.h>
-#include <plat/s3c-pl330-pdata.h>
+#include <plat/irqs.h>
 
 static u64 dma_dmamask = DMA_BIT_MASK(32);
 
-static struct resource s5p64x0_pdma_resource[] = {
-	[0] = {
-		.start	= S5P64X0_PA_PDMA,
-		.end	= S5P64X0_PA_PDMA + SZ_4K,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_DMA0,
-		.end	= IRQ_DMA0,
-		.flags	= IORESOURCE_IRQ,
+struct dma_pl330_peri s5p6440_pdma_peri[22] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = DMACH_MAX,
+	}, {
+		.peri_id = DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
 	},
 };
 
-static struct s3c_pl330_platdata s5p6440_pdma_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_MAX,
-		[9] = DMACH_MAX,
-		[10] = DMACH_PCM0_TX,
-		[11] = DMACH_PCM0_RX,
-		[12] = DMACH_I2S0_TX,
-		[13] = DMACH_I2S0_RX,
-		[14] = DMACH_SPI0_TX,
-		[15] = DMACH_SPI0_RX,
-		[16] = DMACH_MAX,
-		[17] = DMACH_MAX,
-		[18] = DMACH_MAX,
-		[19] = DMACH_MAX,
-		[20] = DMACH_SPI1_TX,
-		[21] = DMACH_SPI1_RX,
-		[22] = DMACH_MAX,
-		[23] = DMACH_MAX,
-		[24] = DMACH_MAX,
-		[25] = DMACH_MAX,
-		[26] = DMACH_MAX,
-		[27] = DMACH_MAX,
-		[28] = DMACH_MAX,
-		[29] = DMACH_PWM,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
+struct dma_pl330_platdata s5p6440_pdma_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(s5p6440_pdma_peri),
+	.peri = s5p6440_pdma_peri,
+};
+
+struct dma_pl330_peri s5p6450_pdma_peri[32] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART4_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART4_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_USI_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_USI_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PWM,
+	}, {
+		.peri_id = (u8)DMACH_UART5_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART5_TX,
+		.rqtype = MEMTODEV,
 	},
 };
 
-static struct s3c_pl330_platdata s5p6450_pdma_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_UART4_RX,
-		[9] = DMACH_UART4_TX,
-		[10] = DMACH_PCM0_TX,
-		[11] = DMACH_PCM0_RX,
-		[12] = DMACH_I2S0_TX,
-		[13] = DMACH_I2S0_RX,
-		[14] = DMACH_SPI0_TX,
-		[15] = DMACH_SPI0_RX,
-		[16] = DMACH_PCM1_TX,
-		[17] = DMACH_PCM1_RX,
-		[18] = DMACH_PCM2_TX,
-		[19] = DMACH_PCM2_RX,
-		[20] = DMACH_SPI1_TX,
-		[21] = DMACH_SPI1_RX,
-		[22] = DMACH_USI_TX,
-		[23] = DMACH_USI_RX,
-		[24] = DMACH_MAX,
-		[25] = DMACH_I2S1_TX,
-		[26] = DMACH_I2S1_RX,
-		[27] = DMACH_I2S2_TX,
-		[28] = DMACH_I2S2_RX,
-		[29] = DMACH_PWM,
-		[30] = DMACH_UART5_RX,
-		[31] = DMACH_UART5_TX,
-	},
+struct dma_pl330_platdata s5p6450_pdma_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(s5p6450_pdma_peri),
+	.peri = s5p6450_pdma_peri,
 };
 
-static struct platform_device s5p64x0_device_pdma = {
-	.name		= "s3c-pl330",
-	.id		= -1,
-	.num_resources	= ARRAY_SIZE(s5p64x0_pdma_resource),
-	.resource	= s5p64x0_pdma_resource,
-	.dev		= {
+struct amba_device s5p64x0_device_pdma = {
+	.dev = {
+		.init_name = "dma-pl330",
 		.dma_mask = &dma_dmamask,
 		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
+	.res = {
+		.start = S5P64X0_PA_PDMA,
+		.end = S5P64X0_PA_PDMA + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_DMA0, NO_IRQ},
+	.periphid = 0x00041330,
 };
 
 static int __init s5p64x0_dma_init(void)
@@ -139,7 +232,7 @@
 	else
 		s5p64x0_device_pdma.dev.platform_data = &s5p6440_pdma_pdata;
 
-	platform_device_register(&s5p64x0_device_pdma);
+	amba_device_register(&s5p64x0_device_pdma, &iomem_resource);
 
 	return 0;
 }
diff --git a/arch/arm/mach-s5p64x0/include/mach/dma.h b/arch/arm/mach-s5p64x0/include/mach/dma.h
index 81209eb..5a622af 100644
--- a/arch/arm/mach-s5p64x0/include/mach/dma.h
+++ b/arch/arm/mach-s5p64x0/include/mach/dma.h
@@ -20,7 +20,7 @@
 #ifndef __MACH_DMA_H
 #define __MACH_DMA_H
 
-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>
 
 #endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5pc100/Kconfig b/arch/arm/mach-s5pc100/Kconfig
index e8a33c4..e538a4c 100644
--- a/arch/arm/mach-s5pc100/Kconfig
+++ b/arch/arm/mach-s5pc100/Kconfig
@@ -10,7 +10,7 @@
 config CPU_S5PC100
 	bool
 	select S5P_EXT_INT
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
 	help
 	  Enable S5PC100 CPU support
 
diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c
index ff5cbb3..6527c05 100644
--- a/arch/arm/mach-s5pc100/clock.c
+++ b/arch/arm/mach-s5pc100/clock.c
@@ -33,6 +33,11 @@
 	.name		= "otg_phy",
 };
 
+static struct clk dummy_apb_pclk = {
+	.name		= "apb_pclk",
+	.id		= -1,
+};
+
 static struct clk *clk_src_mout_href_list[] = {
 	[0] = &s5p_clk_27m,
 	[1] = &clk_fin_hpll,
@@ -454,13 +459,13 @@
 		.enable		= s5pc100_d1_0_ctrl,
 		.ctrlbit	= (1 << 2),
 	}, {
-		.name		= "pdma",
+		.name		= "dma",
 		.devname	= "s3c-pl330.1",
 		.parent		= &clk_div_d1_bus.clk,
 		.enable		= s5pc100_d1_0_ctrl,
 		.ctrlbit	= (1 << 1),
 	}, {
-		.name		= "pdma",
+		.name		= "dma",
 		.devname	= "s3c-pl330.0",
 		.parent		= &clk_div_d1_bus.clk,
 		.enable		= s5pc100_d1_0_ctrl,
@@ -1276,5 +1281,7 @@
 	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
+	s3c24xx_register_clock(&dummy_apb_pclk);
+
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5pc100/dma.c b/arch/arm/mach-s5pc100/dma.c
index bf4cd0f..ef803e9 100644
--- a/arch/arm/mach-s5pc100/dma.c
+++ b/arch/arm/mach-s5pc100/dma.c
@@ -1,4 +1,8 @@
-/*
+/* linux/arch/arm/mach-s5pc100/dma.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
  * Copyright (C) 2010 Samsung Electronics Co. Ltd.
  *	Jaswinder Singh <jassi.brar@samsung.com>
  *
@@ -17,150 +21,245 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>
 
+#include <asm/irq.h>
 #include <plat/devs.h>
+#include <plat/irqs.h>
 
 #include <mach/map.h>
 #include <mach/irqs.h>
-
-#include <plat/s3c-pl330-pdata.h>
+#include <mach/dma.h>
 
 static u64 dma_dmamask = DMA_BIT_MASK(32);
 
-static struct resource s5pc100_pdma0_resource[] = {
-	[0] = {
-		.start  = S5PC100_PA_PDMA0,
-		.end    = S5PC100_PA_PDMA0 + SZ_4K,
-		.flags = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_PDMA0,
-		.end	= IRQ_PDMA0,
-		.flags	= IORESOURCE_IRQ,
+struct dma_pl330_peri pdma0_peri[30] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = DMACH_IRDA,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_AC97_MICIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMOUT,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_EXTERNAL,
+	}, {
+		.peri_id = (u8)DMACH_PWM,
+	}, {
+		.peri_id = (u8)DMACH_SPDIF,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_HSI_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_HSI_TX,
+		.rqtype = MEMTODEV,
 	},
 };
 
-static struct s3c_pl330_platdata s5pc100_pdma0_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_IRDA,
-		[9] = DMACH_I2S0_RX,
-		[10] = DMACH_I2S0_TX,
-		[11] = DMACH_I2S0S_TX,
-		[12] = DMACH_I2S1_RX,
-		[13] = DMACH_I2S1_TX,
-		[14] = DMACH_I2S2_RX,
-		[15] = DMACH_I2S2_TX,
-		[16] = DMACH_SPI0_RX,
-		[17] = DMACH_SPI0_TX,
-		[18] = DMACH_SPI1_RX,
-		[19] = DMACH_SPI1_TX,
-		[20] = DMACH_SPI2_RX,
-		[21] = DMACH_SPI2_TX,
-		[22] = DMACH_AC97_MICIN,
-		[23] = DMACH_AC97_PCMIN,
-		[24] = DMACH_AC97_PCMOUT,
-		[25] = DMACH_EXTERNAL,
-		[26] = DMACH_PWM,
-		[27] = DMACH_SPDIF,
-		[28] = DMACH_HSI_RX,
-		[29] = DMACH_HSI_TX,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
-	},
+struct dma_pl330_platdata s5pc100_pdma0_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma0_peri),
+	.peri = pdma0_peri,
 };
 
-static struct platform_device s5pc100_device_pdma0 = {
-	.name		= "s3c-pl330",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(s5pc100_pdma0_resource),
-	.resource	= s5pc100_pdma0_resource,
-	.dev		= {
+struct amba_device s5pc100_device_pdma0 = {
+	.dev = {
+		.init_name = "dma-pl330.0",
 		.dma_mask = &dma_dmamask,
 		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = &s5pc100_pdma0_pdata,
 	},
-};
-
-static struct resource s5pc100_pdma1_resource[] = {
-	[0] = {
-		.start  = S5PC100_PA_PDMA1,
-		.end    = S5PC100_PA_PDMA1 + SZ_4K,
+	.res = {
+		.start = S5PC100_PA_PDMA0,
+		.end = S5PC100_PA_PDMA0 + SZ_4K,
 		.flags = IORESOURCE_MEM,
 	},
-	[1] = {
-		.start	= IRQ_PDMA1,
-		.end	= IRQ_PDMA1,
-		.flags	= IORESOURCE_IRQ,
+	.irq = {IRQ_PDMA0, NO_IRQ},
+	.periphid = 0x00041330,
+};
+
+struct dma_pl330_peri pdma1_peri[30] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = DMACH_IRDA,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ0,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ1,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ2,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ3,
 	},
 };
 
-static struct s3c_pl330_platdata s5pc100_pdma1_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_IRDA,
-		[9] = DMACH_I2S0_RX,
-		[10] = DMACH_I2S0_TX,
-		[11] = DMACH_I2S0S_TX,
-		[12] = DMACH_I2S1_RX,
-		[13] = DMACH_I2S1_TX,
-		[14] = DMACH_I2S2_RX,
-		[15] = DMACH_I2S2_TX,
-		[16] = DMACH_SPI0_RX,
-		[17] = DMACH_SPI0_TX,
-		[18] = DMACH_SPI1_RX,
-		[19] = DMACH_SPI1_TX,
-		[20] = DMACH_SPI2_RX,
-		[21] = DMACH_SPI2_TX,
-		[22] = DMACH_PCM0_RX,
-		[23] = DMACH_PCM0_TX,
-		[24] = DMACH_PCM1_RX,
-		[25] = DMACH_PCM1_TX,
-		[26] = DMACH_MSM_REQ0,
-		[27] = DMACH_MSM_REQ1,
-		[28] = DMACH_MSM_REQ2,
-		[29] = DMACH_MSM_REQ3,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
-	},
+struct dma_pl330_platdata s5pc100_pdma1_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma1_peri),
+	.peri = pdma1_peri,
 };
 
-static struct platform_device s5pc100_device_pdma1 = {
-	.name		= "s3c-pl330",
-	.id		= 1,
-	.num_resources	= ARRAY_SIZE(s5pc100_pdma1_resource),
-	.resource	= s5pc100_pdma1_resource,
-	.dev		= {
+struct amba_device s5pc100_device_pdma1 = {
+	.dev = {
+		.init_name = "dma-pl330.1",
 		.dma_mask = &dma_dmamask,
 		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = &s5pc100_pdma1_pdata,
 	},
-};
-
-static struct platform_device *s5pc100_dmacs[] __initdata = {
-	&s5pc100_device_pdma0,
-	&s5pc100_device_pdma1,
+	.res = {
+		.start = S5PC100_PA_PDMA1,
+		.end = S5PC100_PA_PDMA1 + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_PDMA1, NO_IRQ},
+	.periphid = 0x00041330,
 };
 
 static int __init s5pc100_dma_init(void)
 {
-	platform_add_devices(s5pc100_dmacs, ARRAY_SIZE(s5pc100_dmacs));
+	amba_device_register(&s5pc100_device_pdma0, &iomem_resource);
 
 	return 0;
 }
diff --git a/arch/arm/mach-s5pc100/include/mach/dma.h b/arch/arm/mach-s5pc100/include/mach/dma.h
index 81209eb..201842a 100644
--- a/arch/arm/mach-s5pc100/include/mach/dma.h
+++ b/arch/arm/mach-s5pc100/include/mach/dma.h
@@ -20,7 +20,7 @@
 #ifndef __MACH_DMA_H
 #define __MACH_DMA_H
 
-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>
 
 #endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index aaeb44a..e3ebe969 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -11,7 +11,7 @@
 
 config CPU_S5PV210
 	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
 	select S5P_EXT_INT
 	select S5P_HRT
 	help
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index f5f8fa8..1ab3400 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -203,6 +203,11 @@
 	.name		= "pcmcdclk",
 };
 
+static struct clk dummy_apb_pclk = {
+	.name		= "apb_pclk",
+	.id		= -1,
+};
+
 static struct clk *clkset_vpllsrc_list[] = {
 	[0] = &clk_fin_vpll,
 	[1] = &clk_sclk_hdmi27m,
@@ -289,13 +294,13 @@
 
 static struct clk init_clocks_off[] = {
 	{
-		.name		= "pdma",
+		.name		= "dma",
 		.devname	= "s3c-pl330.0",
 		.parent		= &clk_hclk_psys.clk,
 		.enable		= s5pv210_clk_ip0_ctrl,
 		.ctrlbit	= (1 << 3),
 	}, {
-		.name		= "pdma",
+		.name		= "dma",
 		.devname	= "s3c-pl330.1",
 		.parent		= &clk_hclk_psys.clk,
 		.enable		= s5pv210_clk_ip0_ctrl,
@@ -1159,5 +1164,6 @@
 	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
+	s3c24xx_register_clock(&dummy_apb_pclk);
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5pv210/dma.c b/arch/arm/mach-s5pv210/dma.c
index 497d343..f79d0b0 100644
--- a/arch/arm/mach-s5pv210/dma.c
+++ b/arch/arm/mach-s5pv210/dma.c
@@ -1,4 +1,8 @@
-/*
+/* linux/arch/arm/mach-s5pv210/dma.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
  * Copyright (C) 2010 Samsung Electronics Co. Ltd.
  *	Jaswinder Singh <jassi.brar@samsung.com>
  *
@@ -17,151 +21,239 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>
 
+#include <asm/irq.h>
 #include <plat/devs.h>
 #include <plat/irqs.h>
 
 #include <mach/map.h>
 #include <mach/irqs.h>
-
-#include <plat/s3c-pl330-pdata.h>
+#include <mach/dma.h>
 
 static u64 dma_dmamask = DMA_BIT_MASK(32);
 
-static struct resource s5pv210_pdma0_resource[] = {
-	[0] = {
-		.start  = S5PV210_PA_PDMA0,
-		.end    = S5PV210_PA_PDMA0 + SZ_4K,
-		.flags = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_PDMA0,
-		.end	= IRQ_PDMA0,
-		.flags	= IORESOURCE_IRQ,
+struct dma_pl330_peri pdma0_peri[28] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_AC97_MICIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMOUT,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_PWM,
+	}, {
+		.peri_id = (u8)DMACH_SPDIF,
+		.rqtype = MEMTODEV,
 	},
 };
 
-static struct s3c_pl330_platdata s5pv210_pdma0_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_MAX,
-		[9] = DMACH_I2S0_RX,
-		[10] = DMACH_I2S0_TX,
-		[11] = DMACH_I2S0S_TX,
-		[12] = DMACH_I2S1_RX,
-		[13] = DMACH_I2S1_TX,
-		[14] = DMACH_MAX,
-		[15] = DMACH_MAX,
-		[16] = DMACH_SPI0_RX,
-		[17] = DMACH_SPI0_TX,
-		[18] = DMACH_SPI1_RX,
-		[19] = DMACH_SPI1_TX,
-		[20] = DMACH_MAX,
-		[21] = DMACH_MAX,
-		[22] = DMACH_AC97_MICIN,
-		[23] = DMACH_AC97_PCMIN,
-		[24] = DMACH_AC97_PCMOUT,
-		[25] = DMACH_MAX,
-		[26] = DMACH_PWM,
-		[27] = DMACH_SPDIF,
-		[28] = DMACH_MAX,
-		[29] = DMACH_MAX,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
-	},
+struct dma_pl330_platdata s5pv210_pdma0_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma0_peri),
+	.peri = pdma0_peri,
 };
 
-static struct platform_device s5pv210_device_pdma0 = {
-	.name		= "s3c-pl330",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(s5pv210_pdma0_resource),
-	.resource	= s5pv210_pdma0_resource,
-	.dev		= {
+struct amba_device s5pv210_device_pdma0 = {
+	.dev = {
+		.init_name = "dma-pl330.0",
 		.dma_mask = &dma_dmamask,
 		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = &s5pv210_pdma0_pdata,
 	},
-};
-
-static struct resource s5pv210_pdma1_resource[] = {
-	[0] = {
-		.start  = S5PV210_PA_PDMA1,
-		.end    = S5PV210_PA_PDMA1 + SZ_4K,
+	.res = {
+		.start = S5PV210_PA_PDMA0,
+		.end = S5PV210_PA_PDMA0 + SZ_4K,
 		.flags = IORESOURCE_MEM,
 	},
-	[1] = {
-		.start	= IRQ_PDMA1,
-		.end	= IRQ_PDMA1,
-		.flags	= IORESOURCE_IRQ,
+	.irq = {IRQ_PDMA0, NO_IRQ},
+	.periphid = 0x00041330,
+};
+
+struct dma_pl330_peri pdma1_peri[32] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ0,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ1,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ2,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ3,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_TX,
+		.rqtype = MEMTODEV,
 	},
 };
 
-static struct s3c_pl330_platdata s5pv210_pdma1_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_MAX,
-		[9] = DMACH_I2S0_RX,
-		[10] = DMACH_I2S0_TX,
-		[11] = DMACH_I2S0S_TX,
-		[12] = DMACH_I2S1_RX,
-		[13] = DMACH_I2S1_TX,
-		[14] = DMACH_I2S2_RX,
-		[15] = DMACH_I2S2_TX,
-		[16] = DMACH_SPI0_RX,
-		[17] = DMACH_SPI0_TX,
-		[18] = DMACH_SPI1_RX,
-		[19] = DMACH_SPI1_TX,
-		[20] = DMACH_MAX,
-		[21] = DMACH_MAX,
-		[22] = DMACH_PCM0_RX,
-		[23] = DMACH_PCM0_TX,
-		[24] = DMACH_PCM1_RX,
-		[25] = DMACH_PCM1_TX,
-		[26] = DMACH_MSM_REQ0,
-		[27] = DMACH_MSM_REQ1,
-		[28] = DMACH_MSM_REQ2,
-		[29] = DMACH_MSM_REQ3,
-		[30] = DMACH_PCM2_RX,
-		[31] = DMACH_PCM2_TX,
-	},
+struct dma_pl330_platdata s5pv210_pdma1_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma1_peri),
+	.peri = pdma1_peri,
 };
 
-static struct platform_device s5pv210_device_pdma1 = {
-	.name		= "s3c-pl330",
-	.id		= 1,
-	.num_resources	= ARRAY_SIZE(s5pv210_pdma1_resource),
-	.resource	= s5pv210_pdma1_resource,
-	.dev		= {
+struct amba_device s5pv210_device_pdma1 = {
+	.dev = {
+		.init_name = "dma-pl330.1",
 		.dma_mask = &dma_dmamask,
 		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = &s5pv210_pdma1_pdata,
 	},
-};
-
-static struct platform_device *s5pv210_dmacs[] __initdata = {
-	&s5pv210_device_pdma0,
-	&s5pv210_device_pdma1,
+	.res = {
+		.start = S5PV210_PA_PDMA1,
+		.end = S5PV210_PA_PDMA1 + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_PDMA1, NO_IRQ},
+	.periphid = 0x00041330,
 };
 
 static int __init s5pv210_dma_init(void)
 {
-	platform_add_devices(s5pv210_dmacs, ARRAY_SIZE(s5pv210_dmacs));
+	amba_device_register(&s5pv210_device_pdma0, &iomem_resource);
 
 	return 0;
 }
diff --git a/arch/arm/mach-s5pv210/include/mach/dma.h b/arch/arm/mach-s5pv210/include/mach/dma.h
index 81209eb..201842a 100644
--- a/arch/arm/mach-s5pv210/include/mach/dma.h
+++ b/arch/arm/mach-s5pv210/include/mach/dma.h
@@ -20,7 +20,7 @@
 #ifndef __MACH_DMA_H
 #define __MACH_DMA_H
 
-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>
 
 #endif /* __MACH_DMA_H */
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 539bd0e..53754bcf 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -1094,14 +1094,14 @@
  *
  * configure the dma source/destination hardware type and address
  *
- * source:    S3C2410_DMASRC_HW: source is hardware
- *            S3C2410_DMASRC_MEM: source is memory
+ * source:    DMA_FROM_DEVICE: source is hardware
+ *            DMA_TO_DEVICE: source is memory
  *
  * devaddr:   physical address of the source
 */
 
 int s3c2410_dma_devconfig(enum dma_ch channel,
-			  enum s3c2410_dmasrc source,
+			  enum dma_data_direction source,
 			  unsigned long devaddr)
 {
 	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -1131,7 +1131,7 @@
 	 hwcfg |= S3C2410_DISRCC_INC;
 
 	switch (source) {
-	case S3C2410_DMASRC_HW:
+	case DMA_FROM_DEVICE:
 		/* source is hardware */
 		pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n",
 			 __func__, devaddr, hwcfg);
@@ -1142,7 +1142,7 @@
 		chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST);
 		break;
 
-	case S3C2410_DMASRC_MEM:
+	case DMA_TO_DEVICE:
 		/* source is memory */
 		pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n",
 			 __func__, devaddr, hwcfg);
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 3895f9a..7a96198 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -295,11 +295,14 @@
 	help
 	  Internal configuration for S3C DMA core
 
-config S3C_PL330_DMA
+config SAMSUNG_DMADEV
 	bool
-	select PL330
+	select DMADEVICES
+	select PL330_DMA if (CPU_EXYNOS4210 || CPU_S5PV210 || CPU_S5PC100 || \
+					CPU_S5P6450 || CPU_S5P6440)
+	select ARM_AMBA
 	help
-	  S3C DMA API Driver for PL330 DMAC.
+	  Use the DMA device engine for the PL330 DMAC.
 
 comment "Power management"
 
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 09adb84..3dd5dbad 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -62,9 +62,9 @@
 
 # DMA support
 
-obj-$(CONFIG_S3C_DMA)		+= dma.o
+obj-$(CONFIG_S3C_DMA)		+= dma.o s3c-dma-ops.o
 
-obj-$(CONFIG_S3C_PL330_DMA)	+= s3c-pl330.o
+obj-$(CONFIG_SAMSUNG_DMADEV)	+= dma-ops.o
 
 # PM support
 
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
new file mode 100644
index 0000000..6e3d9ab
--- /dev/null
+++ b/arch/arm/plat-samsung/dma-ops.c
@@ -0,0 +1,131 @@
+/* linux/arch/arm/plat-samsung/dma-ops.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Samsung DMA Operations
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/amba/pl330.h>
+#include <linux/scatterlist.h>
+
+#include <mach/dma.h>
+
+static inline bool pl330_filter(struct dma_chan *chan, void *param)
+{
+	struct dma_pl330_peri *peri = chan->private;
+	return peri->peri_id == (unsigned)param;
+}
+
+static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
+				struct samsung_dma_info *info)
+{
+	struct dma_chan *chan;
+	dma_cap_mask_t mask;
+	struct dma_slave_config slave_config;
+
+	dma_cap_zero(mask);
+	dma_cap_set(info->cap, mask);
+
+	chan = dma_request_channel(mask, pl330_filter, (void *)dma_ch);
+
+	if (info->direction == DMA_FROM_DEVICE) {
+		memset(&slave_config, 0, sizeof(struct dma_slave_config));
+		slave_config.direction = info->direction;
+		slave_config.src_addr = info->fifo;
+		slave_config.src_addr_width = info->width;
+		slave_config.src_maxburst = 1;
+		dmaengine_slave_config(chan, &slave_config);
+	} else if (info->direction == DMA_TO_DEVICE) {
+		memset(&slave_config, 0, sizeof(struct dma_slave_config));
+		slave_config.direction = info->direction;
+		slave_config.dst_addr = info->fifo;
+		slave_config.dst_addr_width = info->width;
+		slave_config.dst_maxburst = 1;
+		dmaengine_slave_config(chan, &slave_config);
+	}
+
+	return (unsigned)chan;
+}
+
+static int samsung_dmadev_release(unsigned ch,
+			struct s3c2410_dma_client *client)
+{
+	dma_release_channel((struct dma_chan *)ch);
+
+	return 0;
+}
+
+static int samsung_dmadev_prepare(unsigned ch,
+			struct samsung_dma_prep_info *info)
+{
+	struct scatterlist sg;
+	struct dma_chan *chan = (struct dma_chan *)ch;
+	struct dma_async_tx_descriptor *desc;
+
+	switch (info->cap) {
+	case DMA_SLAVE:
+		sg_init_table(&sg, 1);
+		sg_dma_len(&sg) = info->len;
+		sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)),
+			    info->len, offset_in_page(info->buf));
+		sg_dma_address(&sg) = info->buf;
+
+		desc = chan->device->device_prep_slave_sg(chan,
+			&sg, 1, info->direction, DMA_PREP_INTERRUPT);
+		break;
+	case DMA_CYCLIC:
+		desc = chan->device->device_prep_dma_cyclic(chan,
+			info->buf, info->len, info->period, info->direction);
+		break;
+	default:
+		dev_err(&chan->dev->device, "unsupported format\n");
+		return -EFAULT;
+	}
+
+	if (!desc) {
+		dev_err(&chan->dev->device, "cannot prepare dma\n");
+		return -EFAULT;
+	}
+
+	desc->callback = info->fp;
+	desc->callback_param = info->fp_param;
+
+	dmaengine_submit((struct dma_async_tx_descriptor *)desc);
+
+	return 0;
+}
+
+static inline int samsung_dmadev_trigger(unsigned ch)
+{
+	dma_async_issue_pending((struct dma_chan *)ch);
+
+	return 0;
+}
+
+static inline int samsung_dmadev_flush(unsigned ch)
+{
+	return dmaengine_terminate_all((struct dma_chan *)ch);
+}
+
+struct samsung_dma_ops dmadev_ops = {
+	.request	= samsung_dmadev_request,
+	.release	= samsung_dmadev_release,
+	.prepare	= samsung_dmadev_prepare,
+	.trigger	= samsung_dmadev_trigger,
+	.started	= NULL,
+	.flush		= samsung_dmadev_flush,
+	.stop		= samsung_dmadev_flush,
+};
+
+void *samsung_dmadev_get_ops(void)
+{
+	return &dmadev_ops;
+}
+EXPORT_SYMBOL(samsung_dmadev_get_ops);
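A minimal client sketch of the new ops interface follows; the channel id, FIFO
address, buffer, client, and callback names are illustrative and not part of
this file:

	struct samsung_dma_ops *ops = samsung_dma_get_ops();
	struct samsung_dma_info info = {
		.cap		= DMA_SLAVE,
		.direction	= DMA_TO_DEVICE,
		.width		= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.fifo		= tx_fifo_phys,		/* illustrative */
		.client		= &my_dma_client,	/* illustrative */
	};
	struct samsung_dma_prep_info prep = {
		.cap		= DMA_SLAVE,
		.direction	= DMA_TO_DEVICE,
		.buf		= buf_dma_addr,		/* illustrative */
		.len		= buf_len,
		.fp		= my_done_cb,		/* invoked on completion */
		.fp_param	= my_dev,
	};
	unsigned ch = ops->request(DMACH_UART0_TX, &info); /* illustrative channel */

	ops->prepare(ch, &prep);
	ops->trigger(ch);
	/* ... transfer runs, my_done_cb(my_dev) fires on completion ... */
	ops->flush(ch);
	ops->release(ch, &my_dma_client);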
diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
new file mode 100644
index 0000000..4c1a363
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
@@ -0,0 +1,63 @@
+/* arch/arm/plat-samsung/include/plat/dma-ops.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Samsung DMA support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SAMSUNG_DMA_OPS_H_
+#define __SAMSUNG_DMA_OPS_H_ __FILE__
+
+#include <linux/dmaengine.h>
+
+struct samsung_dma_prep_info {
+	enum dma_transaction_type cap;
+	enum dma_data_direction direction;
+	dma_addr_t buf;
+	unsigned long period;
+	unsigned long len;
+	void (*fp)(void *data);
+	void *fp_param;
+};
+
+struct samsung_dma_info {
+	enum dma_transaction_type cap;
+	enum dma_data_direction direction;
+	enum dma_slave_buswidth width;
+	dma_addr_t fifo;
+	struct s3c2410_dma_client *client;
+};
+
+struct samsung_dma_ops {
+	unsigned (*request)(enum dma_ch ch, struct samsung_dma_info *info);
+	int (*release)(unsigned ch, struct s3c2410_dma_client *client);
+	int (*prepare)(unsigned ch, struct samsung_dma_prep_info *info);
+	int (*trigger)(unsigned ch);
+	int (*started)(unsigned ch);
+	int (*flush)(unsigned ch);
+	int (*stop)(unsigned ch);
+};
+
+extern void *samsung_dmadev_get_ops(void);
+extern void *s3c_dma_get_ops(void);
+
+static inline void *__samsung_dma_get_ops(void)
+{
+	if (samsung_dma_is_dmadev())
+		return samsung_dmadev_get_ops();
+	else
+		return s3c_dma_get_ops();
+}
+
+/*
+ * samsung_dma_get_ops
+ * get the set of samsung dma operations
+ */
+#define samsung_dma_get_ops() __samsung_dma_get_ops()
+
+#endif /* __SAMSUNG_DMA_OPS_H_ */
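The backend choice in __samsung_dma_get_ops() is resolved per platform: each
platform header supplies a static inline samsung_dma_is_dmadev(), as the
dma-pl330.h header does below for the dmaengine case, so the branch folds away
at compile time. The legacy counterpart would presumably look like this (an
assumption; that header is not shown in this series):

	static inline bool samsung_dma_is_dmadev(void)
	{
		return false;	/* platform stays on the legacy S3C DMA core */
	}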
diff --git a/arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h
similarity index 84%
rename from arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h
rename to arch/arm/plat-samsung/include/plat/dma-pl330.h
index 8107442..2e55e59 100644
--- a/arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h
+++ b/arch/arm/plat-samsung/include/plat/dma-pl330.h
@@ -8,11 +8,8 @@
  * (at your option) any later version.
  */
 
-#ifndef	__S3C_DMA_PL330_H_
-#define	__S3C_DMA_PL330_H_
-
-#define S3C2410_DMAF_AUTOSTART		(1 << 0)
-#define S3C2410_DMAF_CIRCULAR		(1 << 1)
+#ifndef __DMA_PL330_H_
+#define __DMA_PL330_H_ __FILE__
 
 /*
  * PL330 can assign any channel to communicate with
@@ -20,7 +17,7 @@
  * For the sake of consistency across client drivers,
  * we keep the channel names unchanged and only
  * add the missing peripherals.
- * Order is not important since S3C PL330 API driver
+ * Order is not important since the DMA PL330 API driver
  * uses these just as IDs.
  */
 enum dma_ch {
@@ -88,11 +85,20 @@
 	DMACH_MAX,
 };
 
-static inline bool s3c_dma_has_circular(void)
+struct s3c2410_dma_client {
+	char	*name;
+};
+
+static inline bool samsung_dma_has_circular(void)
 {
 	return true;
 }
 
-#include <plat/dma.h>
+static inline bool samsung_dma_is_dmadev(void)
+{
+	return true;
+}
 
-#endif	/* __S3C_DMA_PL330_H_ */
+#include <plat/dma-ops.h>
+
+#endif	/* __DMA_PL330_H_ */
diff --git a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h b/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
index ab9bce63..1c1ed54 100644
--- a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
+++ b/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
@@ -41,7 +41,7 @@
 
 	void	(*direction)(struct s3c2410_dma_chan *chan,
 			     struct s3c24xx_dma_map *map,
-			     enum s3c2410_dmasrc dir);
+			     enum dma_data_direction dir);
 };
 
 extern int s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel);
diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h
index 8c273b7..b906112 100644
--- a/arch/arm/plat-samsung/include/plat/dma.h
+++ b/arch/arm/plat-samsung/include/plat/dma.h
@@ -10,17 +10,14 @@
  * published by the Free Software Foundation.
 */
 
+#include <linux/dma-mapping.h>
+
 enum s3c2410_dma_buffresult {
 	S3C2410_RES_OK,
 	S3C2410_RES_ERR,
 	S3C2410_RES_ABORT
 };
 
-enum s3c2410_dmasrc {
-	S3C2410_DMASRC_HW,		/* source is hardware */
-	S3C2410_DMASRC_MEM		/* source is memory */
-};
-
 /* enum s3c2410_chan_op
  *
  * operation codes passed to the DMA code by the user, and also used
@@ -112,7 +109,7 @@
 */
 
 extern int s3c2410_dma_devconfig(enum dma_ch channel,
-		enum s3c2410_dmasrc source, unsigned long devaddr);
+		enum dma_data_direction source, unsigned long devaddr);
 
 /* s3c2410_dma_getposition
  *
@@ -126,3 +123,4 @@
 extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
 
 
+#include <plat/dma-ops.h>
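Callers of s3c2410_dma_devconfig() now pass the generic enum
dma_data_direction; per the devconfig comment in plat-s3c24xx/dma.c above,
DMA_FROM_DEVICE means the peripheral is the source and DMA_TO_DEVICE means
memory is the source. A hedged caller sketch (channel and FIFO address are
illustrative):

	/* RX: the peripheral FIFO is the source */
	s3c2410_dma_devconfig(DMACH_SDI, DMA_FROM_DEVICE, fifo_phys);

	/* TX: memory is the source, the FIFO is the destination */
	s3c2410_dma_devconfig(DMACH_SDI, DMA_TO_DEVICE, fifo_phys);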
diff --git a/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h b/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
deleted file mode 100644
index bf5e2a9..0000000
--- a/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- *	Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __S3C_PL330_PDATA_H
-#define __S3C_PL330_PDATA_H
-
-#include <plat/s3c-dma-pl330.h>
-
-/*
- * Every PL330 DMAC has max 32 peripheral interfaces,
- * of which some may not actually be used in your
- * DMAC's configuration.
- * Populate this array of 32 peri i/fs with relevant
- * channel IDs for used peri i/f and DMACH_MAX for
- * those unused.
- *
- * The platforms just need to provide this info
- * to the S3C DMA API driver for PL330.
- */
-struct s3c_pl330_platdata {
-	enum dma_ch peri[32];
-};
-
-#endif /* __S3C_PL330_PDATA_H */
diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c
new file mode 100644
index 0000000..582333c
--- /dev/null
+++ b/arch/arm/plat-samsung/s3c-dma-ops.c
@@ -0,0 +1,130 @@
+/* linux/arch/arm/plat-samsung/s3c-dma-ops.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Samsung S3C-DMA Operations
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <mach/dma.h>
+
+struct cb_data {
+	void (*fp) (void *);
+	void *fp_param;
+	unsigned ch;
+	struct list_head node;
+};
+
+static LIST_HEAD(dma_list);
+
+static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param,
+		       int size, enum s3c2410_dma_buffresult res)
+{
+	struct cb_data *data = param;
+
+	data->fp(data->fp_param);
+}
+
+static unsigned s3c_dma_request(enum dma_ch dma_ch,
+				 struct samsung_dma_info *info)
+{
+	struct cb_data *data;
+
+	if (s3c2410_dma_request(dma_ch, info->client, NULL) < 0) {
+		s3c2410_dma_free(dma_ch, info->client);
+		return 0;
+	}
+
+	data = kzalloc(sizeof(struct cb_data), GFP_KERNEL);
+	data->ch = dma_ch;
+	list_add_tail(&data->node, &dma_list);
+
+	s3c2410_dma_devconfig(dma_ch, info->direction, info->fifo);
+
+	if (info->cap == DMA_CYCLIC)
+		s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR);
+
+	s3c2410_dma_config(dma_ch, info->width);
+
+	return (unsigned)dma_ch;
+}
+
+static int s3c_dma_release(unsigned ch, struct s3c2410_dma_client *client)
+{
+	struct cb_data *data;
+
+	list_for_each_entry(data, &dma_list, node)
+		if (data->ch == ch)
+			break;
+	list_del(&data->node);
+
+	s3c2410_dma_free(ch, client);
+	kfree(data);
+
+	return 0;
+}
+
+static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep_info *info)
+{
+	struct cb_data *data;
+	int len = (info->cap == DMA_CYCLIC) ? info->period : info->len;
+
+	list_for_each_entry(data, &dma_list, node)
+		if (data->ch == ch)
+			break;
+
+	if (!data->fp) {
+		s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb);
+		data->fp = info->fp;
+		data->fp_param = info->fp_param;
+	}
+
+	s3c2410_dma_enqueue(ch, (void *)data, info->buf, len);
+
+	return 0;
+}
+
+static inline int s3c_dma_trigger(unsigned ch)
+{
+	return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START);
+}
+
+static inline int s3c_dma_started(unsigned ch)
+{
+	return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STARTED);
+}
+
+static inline int s3c_dma_flush(unsigned ch)
+{
+	return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH);
+}
+
+static inline int s3c_dma_stop(unsigned ch)
+{
+	return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP);
+}
+
+static struct samsung_dma_ops s3c_dma_ops = {
+	.request	= s3c_dma_request,
+	.release	= s3c_dma_release,
+	.prepare	= s3c_dma_prepare,
+	.trigger	= s3c_dma_trigger,
+	.started	= s3c_dma_started,
+	.flush		= s3c_dma_flush,
+	.stop		= s3c_dma_stop,
+};
+
+void *s3c_dma_get_ops(void)
+{
+	return &s3c_dma_ops;
+}
+EXPORT_SYMBOL(s3c_dma_get_ops);
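Both backends populate the same samsung_dma_ops table, so client code written
against samsung_dma_get_ops() compiles and runs unchanged on either. Note that
the handle returned by ->request() is opaque: this backend hands back the
dma_ch id itself, while the dmadev backend returns a struct dma_chan pointer
cast to unsigned, so the value must only ever be passed back through the same
ops table, never interpreted by the caller.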
diff --git a/arch/arm/plat-samsung/s3c-pl330.c b/arch/arm/plat-samsung/s3c-pl330.c
deleted file mode 100644
index f85638c..0000000
--- a/arch/arm/plat-samsung/s3c-pl330.c
+++ /dev/null
@@ -1,1244 +0,0 @@
-/* linux/arch/arm/plat-samsung/s3c-pl330.c
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- *	Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-
-#include <asm/hardware/pl330.h>
-
-#include <plat/s3c-pl330-pdata.h>
-
-/**
- * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
- * @busy_chan: Number of channels currently busy.
- * @peri: List of IDs of peripherals this DMAC can work with.
- * @node: To attach to the global list of DMACs.
- * @pi: PL330 configuration info for the DMAC.
- * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
- * @clk: Pointer of DMAC operation clock.
- */
-struct s3c_pl330_dmac {
-	unsigned		busy_chan;
-	enum dma_ch		*peri;
-	struct list_head	node;
-	struct pl330_info	*pi;
-	struct kmem_cache	*kmcache;
-	struct clk		*clk;
-};
-
-/**
- * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
- * @token: Xfer ID provided by the client.
- * @node: To attach to the list of xfers on a channel.
- * @px: Xfer for PL330 core.
- * @chan: Owner channel of this xfer.
- */
-struct s3c_pl330_xfer {
-	void			*token;
-	struct list_head	node;
-	struct pl330_xfer	px;
-	struct s3c_pl330_chan	*chan;
-};
-
-/**
- * struct s3c_pl330_chan - Logical channel to communicate with
- * 	a Physical peripheral.
- * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
- * 	NULL if the channel is available to be acquired.
- * @id: ID of the peripheral that this channel can communicate with.
- * @options: Options specified by the client.
- * @sdaddr: Address provided via s3c2410_dma_devconfig.
- * @node: To attach to the global list of channels.
- * @lrq: Pointer to the last submitted pl330_req to PL330 core.
- * @xfer_list: To manage list of xfers enqueued.
- * @req: Two requests to communicate with the PL330 engine.
- * @callback_fn: Callback function to the client.
- * @rqcfg: Channel configuration for the xfers.
- * @xfer_head: Pointer to the xfer to be next executed.
- * @dmac: Pointer to the DMAC that manages this channel, NULL if the
- * 	channel is available to be acquired.
- * @client: Client of this channel. NULL if the
- * 	channel is available to be acquired.
- */
-struct s3c_pl330_chan {
-	void				*pl330_chan_id;
-	enum dma_ch			id;
-	unsigned int			options;
-	unsigned long			sdaddr;
-	struct list_head		node;
-	struct pl330_req		*lrq;
-	struct list_head		xfer_list;
-	struct pl330_req		req[2];
-	s3c2410_dma_cbfn_t		callback_fn;
-	struct pl330_reqcfg		rqcfg;
-	struct s3c_pl330_xfer		*xfer_head;
-	struct s3c_pl330_dmac		*dmac;
-	struct s3c2410_dma_client	*client;
-};
-
-/* All DMACs in the platform */
-static LIST_HEAD(dmac_list);
-
-/* All channels to peripherals in the platform */
-static LIST_HEAD(chan_list);
-
-/*
- * Since we add resources(DMACs and Channels) to the global pool,
- * we need to guard access to the resources using a global lock
- */
-static DEFINE_SPINLOCK(res_lock);
-
-/* Returns the channel with ID 'id' in the chan_list */
-static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
-{
-	struct s3c_pl330_chan *ch;
-
-	list_for_each_entry(ch, &chan_list, node)
-		if (ch->id == id)
-			return ch;
-
-	return NULL;
-}
-
-/* Allocate a new channel with ID 'id' and add to chan_list */
-static void chan_add(const enum dma_ch id)
-{
-	struct s3c_pl330_chan *ch = id_to_chan(id);
-
-	/* Return if the channel already exists */
-	if (ch)
-		return;
-
-	ch = kmalloc(sizeof(*ch), GFP_KERNEL);
-	/* Return silently to work with other channels */
-	if (!ch)
-		return;
-
-	ch->id = id;
-	ch->dmac = NULL;
-
-	list_add_tail(&ch->node, &chan_list);
-}
-
-/* If the channel is not yet acquired by any client */
-static bool chan_free(struct s3c_pl330_chan *ch)
-{
-	if (!ch)
-		return false;
-
-	/* Channel points to some DMAC only when it's acquired */
-	return ch->dmac ? false : true;
-}
-
-/*
- * Returns 0 if the peripheral i/f is invalid or not present on the dmac.
- * Index + 1, otherwise.
- */
-static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
-{
-	enum dma_ch *id = dmac->peri;
-	int i;
-
-	/* Discount invalid markers */
-	if (ch_id == DMACH_MAX)
-		return 0;
-
-	for (i = 0; i < PL330_MAX_PERI; i++)
-		if (id[i] == ch_id)
-			return i + 1;
-
-	return 0;
-}
-
-/* If all channel threads of the DMAC are busy */
-static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
-{
-	struct pl330_info *pi = dmac->pi;
-
-	return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
-}
-
-/*
- * Returns the number of free channels that
- * can be handled by this dmac only.
- */
-static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
-{
-	enum dma_ch *id = dmac->peri;
-	struct s3c_pl330_dmac *d;
-	struct s3c_pl330_chan *ch;
-	unsigned found, count = 0;
-	enum dma_ch p;
-	int i;
-
-	for (i = 0; i < PL330_MAX_PERI; i++) {
-		p = id[i];
-		ch = id_to_chan(p);
-
-		if (p == DMACH_MAX || !chan_free(ch))
-			continue;
-
-		found = 0;
-		list_for_each_entry(d, &dmac_list, node) {
-			if (d != dmac && iface_of_dmac(d, ch->id)) {
-				found = 1;
-				break;
-			}
-		}
-		if (!found)
-			count++;
-	}
-
-	return count;
-}
-
-/*
- * Measure of suitability of 'dmac' handling 'ch'
- *
- * 0 indicates 'dmac' cannot handle 'ch', either
- * because it is not supported by the hardware or
- * because all dmac channels are currently busy.
- *
- * A >0 value indicates 'dmac' has the capability.
- * The bigger the value the more suitable the dmac.
- */
-#define MAX_SUIT	UINT_MAX
-#define MIN_SUIT	0
-
-static unsigned suitablility(struct s3c_pl330_dmac *dmac,
-		struct s3c_pl330_chan *ch)
-{
-	struct pl330_info *pi = dmac->pi;
-	enum dma_ch *id = dmac->peri;
-	struct s3c_pl330_dmac *d;
-	unsigned s;
-	int i;
-
-	s = MIN_SUIT;
-	/* If all the DMAC channel threads are busy */
-	if (dmac_busy(dmac))
-		return s;
-
-	for (i = 0; i < PL330_MAX_PERI; i++)
-		if (id[i] == ch->id)
-			break;
-
-	/* If the 'dmac' can't talk to 'ch' */
-	if (i == PL330_MAX_PERI)
-		return s;
-
-	s = MAX_SUIT;
-	list_for_each_entry(d, &dmac_list, node) {
-		/*
-		 * If some other dmac can talk to this
-		 * peri and has some channel free.
-		 */
-		if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
-			s = 0;
-			break;
-		}
-	}
-	if (s)
-		return s;
-
-	s = 100;
-
-	/* Good if free chans are more, bad otherwise */
-	s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);
-
-	return s;
-}
-
-/* More than one DMAC may have capability to transfer data with the
- * peripheral. This function assigns most suitable DMAC to manage the
- * channel and hence communicate with the peripheral.
- */
-static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
-{
-	struct s3c_pl330_dmac *d, *dmac = NULL;
-	unsigned sn, sl = MIN_SUIT;
-
-	list_for_each_entry(d, &dmac_list, node) {
-		sn = suitablility(d, ch);
-
-		if (sn == MAX_SUIT)
-			return d;
-
-		if (sn > sl)
-			dmac = d;
-	}
-
-	return dmac;
-}
-
-/* Acquire the channel for peripheral 'id' */
-static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
-{
-	struct s3c_pl330_chan *ch = id_to_chan(id);
-	struct s3c_pl330_dmac *dmac;
-
-	/* If the channel doesn't exist or is already acquired */
-	if (!ch || !chan_free(ch)) {
-		ch = NULL;
-		goto acq_exit;
-	}
-
-	dmac = map_chan_to_dmac(ch);
-	/* If couldn't map */
-	if (!dmac) {
-		ch = NULL;
-		goto acq_exit;
-	}
-
-	dmac->busy_chan++;
-	ch->dmac = dmac;
-
-acq_exit:
-	return ch;
-}
-
-/* Delete xfer from the queue */
-static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
-{
-	struct s3c_pl330_xfer *t;
-	struct s3c_pl330_chan *ch;
-	int found;
-
-	if (!xfer)
-		return;
-
-	ch = xfer->chan;
-
-	/* Make sure xfer is in the queue */
-	found = 0;
-	list_for_each_entry(t, &ch->xfer_list, node)
-		if (t == xfer) {
-			found = 1;
-			break;
-		}
-
-	if (!found)
-		return;
-
-	/* If xfer is last entry in the queue */
-	if (xfer->node.next == &ch->xfer_list)
-		t = list_entry(ch->xfer_list.next,
-				struct s3c_pl330_xfer, node);
-	else
-		t = list_entry(xfer->node.next,
-				struct s3c_pl330_xfer, node);
-
-	/* If there was only one node left */
-	if (t == xfer)
-		ch->xfer_head = NULL;
-	else if (ch->xfer_head == xfer)
-		ch->xfer_head = t;
-
-	list_del(&xfer->node);
-}
-
-/* Provides pointer to the next xfer in the queue.
- * If CIRCULAR option is set, the list is left intact,
- * otherwise the xfer is removed from the list.
- * Forced delete 'pluck' can be set to override the CIRCULAR option.
- */
-static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
-		int pluck)
-{
-	struct s3c_pl330_xfer *xfer = ch->xfer_head;
-
-	if (!xfer)
-		return NULL;
-
-	/* If xfer is last entry in the queue */
-	if (xfer->node.next == &ch->xfer_list)
-		ch->xfer_head = list_entry(ch->xfer_list.next,
-					struct s3c_pl330_xfer, node);
-	else
-		ch->xfer_head = list_entry(xfer->node.next,
-					struct s3c_pl330_xfer, node);
-
-	if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
-		del_from_queue(xfer);
-
-	return xfer;
-}
-
-static inline void add_to_queue(struct s3c_pl330_chan *ch,
-		struct s3c_pl330_xfer *xfer, int front)
-{
-	struct pl330_xfer *xt;
-
-	/* If queue empty */
-	if (ch->xfer_head == NULL)
-		ch->xfer_head = xfer;
-
-	xt = &ch->xfer_head->px;
-	/* If the head already submitted (CIRCULAR head) */
-	if (ch->options & S3C2410_DMAF_CIRCULAR &&
-		(xt == ch->req[0].x || xt == ch->req[1].x))
-		ch->xfer_head = xfer;
-
-	/* If this is a resubmission, it should go at the head */
-	if (front) {
-		ch->xfer_head = xfer;
-		list_add(&xfer->node, &ch->xfer_list);
-	} else {
-		list_add_tail(&xfer->node, &ch->xfer_list);
-	}
-}
-
-static inline void _finish_off(struct s3c_pl330_xfer *xfer,
-		enum s3c2410_dma_buffresult res, int ffree)
-{
-	struct s3c_pl330_chan *ch;
-
-	if (!xfer)
-		return;
-
-	ch = xfer->chan;
-
-	/* Do callback */
-	if (ch->callback_fn)
-		ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
-
-	/* Force Free or if buffer is not needed anymore */
-	if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
-		kmem_cache_free(ch->dmac->kmcache, xfer);
-}
-
-static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
-		struct pl330_req *r)
-{
-	struct s3c_pl330_xfer *xfer;
-	int ret = 0;
-
-	/* If already submitted */
-	if (r->x)
-		return 0;
-
-	xfer = get_from_queue(ch, 0);
-	if (xfer) {
-		r->x = &xfer->px;
-
-		/* Use max bandwidth for M<->M xfers */
-		if (r->rqtype == MEMTOMEM) {
-			struct pl330_info *pi = xfer->chan->dmac->pi;
-			int burst = 1 << ch->rqcfg.brst_size;
-			u32 bytes = r->x->bytes;
-			int bl;
-
-			bl = pi->pcfg.data_bus_width / 8;
-			bl *= pi->pcfg.data_buf_dep;
-			bl /= burst;
-
-			/* src/dst_burst_len can't be more than 16 */
-			if (bl > 16)
-				bl = 16;
-
-			while (bl > 1) {
-				if (!(bytes % (bl * burst)))
-					break;
-				bl--;
-			}
-
-			ch->rqcfg.brst_len = bl;
-		} else {
-			ch->rqcfg.brst_len = 1;
-		}
-
-		ret = pl330_submit_req(ch->pl330_chan_id, r);
-
-		/* If submission was successful */
-		if (!ret) {
-			ch->lrq = r; /* latest submitted req */
-			return 0;
-		}
-
-		r->x = NULL;
-
-		/* If both of the PL330 ping-pong buffers filled */
-		if (ret == -EAGAIN) {
-			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
-				__func__, __LINE__);
-			/* Queue back again */
-			add_to_queue(ch, xfer, 1);
-			ret = 0;
-		} else {
-			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
-				__func__, __LINE__);
-			_finish_off(xfer, S3C2410_RES_ERR, 0);
-		}
-	}
-
-	return ret;
-}
-
-static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
-	struct pl330_req *r, enum pl330_op_err err)
-{
-	unsigned long flags;
-	struct s3c_pl330_xfer *xfer;
-	struct pl330_xfer *xl = r->x;
-	enum s3c2410_dma_buffresult res;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	r->x = NULL;
-
-	s3c_pl330_submit(ch, r);
-
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	/* Map result to S3C DMA API */
-	if (err == PL330_ERR_NONE)
-		res = S3C2410_RES_OK;
-	else if (err == PL330_ERR_ABORT)
-		res = S3C2410_RES_ABORT;
-	else
-		res = S3C2410_RES_ERR;
-
-	/* If last request had some xfer */
-	if (xl) {
-		xfer = container_of(xl, struct s3c_pl330_xfer, px);
-		_finish_off(xfer, res, 0);
-	} else {
-		dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
-			__func__, __LINE__);
-	}
-}
-
-static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
-{
-	struct pl330_req *r = token;
-	struct s3c_pl330_chan *ch = container_of(r,
-					struct s3c_pl330_chan, req[0]);
-	s3c_pl330_rq(ch, r, err);
-}
-
-static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
-{
-	struct pl330_req *r = token;
-	struct s3c_pl330_chan *ch = container_of(r,
-					struct s3c_pl330_chan, req[1]);
-	s3c_pl330_rq(ch, r, err);
-}
-
-/* Release an acquired channel */
-static void chan_release(struct s3c_pl330_chan *ch)
-{
-	struct s3c_pl330_dmac *dmac;
-
-	if (chan_free(ch))
-		return;
-
-	dmac = ch->dmac;
-	ch->dmac = NULL;
-	dmac->busy_chan--;
-}
-
-int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
-{
-	struct s3c_pl330_xfer *xfer;
-	enum pl330_chan_op pl330op;
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int idx, ret;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch)) {
-		ret = -EINVAL;
-		goto ctrl_exit;
-	}
-
-	switch (op) {
-	case S3C2410_DMAOP_START:
-		/* Make sure both reqs are enqueued */
-		idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
-		s3c_pl330_submit(ch, &ch->req[idx]);
-		s3c_pl330_submit(ch, &ch->req[1 - idx]);
-		pl330op = PL330_OP_START;
-		break;
-
-	case S3C2410_DMAOP_STOP:
-		pl330op = PL330_OP_ABORT;
-		break;
-
-	case S3C2410_DMAOP_FLUSH:
-		pl330op = PL330_OP_FLUSH;
-		break;
-
-	case S3C2410_DMAOP_PAUSE:
-	case S3C2410_DMAOP_RESUME:
-	case S3C2410_DMAOP_TIMEOUT:
-	case S3C2410_DMAOP_STARTED:
-		spin_unlock_irqrestore(&res_lock, flags);
-		return 0;
-
-	default:
-		spin_unlock_irqrestore(&res_lock, flags);
-		return -EINVAL;
-	}
-
-	ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);
-
-	if (pl330op == PL330_OP_START) {
-		spin_unlock_irqrestore(&res_lock, flags);
-		return ret;
-	}
-
-	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
-
-	/* Abort the current xfer */
-	if (ch->req[idx].x) {
-		xfer = container_of(ch->req[idx].x,
-				struct s3c_pl330_xfer, px);
-
-		/* Drop xfer during FLUSH */
-		if (pl330op == PL330_OP_FLUSH)
-			del_from_queue(xfer);
-
-		ch->req[idx].x = NULL;
-
-		spin_unlock_irqrestore(&res_lock, flags);
-		_finish_off(xfer, S3C2410_RES_ABORT,
-				pl330op == PL330_OP_FLUSH ? 1 : 0);
-		spin_lock_irqsave(&res_lock, flags);
-	}
-
-	/* Flush the whole queue */
-	if (pl330op == PL330_OP_FLUSH) {
-
-		if (ch->req[1 - idx].x) {
-			xfer = container_of(ch->req[1 - idx].x,
-					struct s3c_pl330_xfer, px);
-
-			del_from_queue(xfer);
-
-			ch->req[1 - idx].x = NULL;
-
-			spin_unlock_irqrestore(&res_lock, flags);
-			_finish_off(xfer, S3C2410_RES_ABORT, 1);
-			spin_lock_irqsave(&res_lock, flags);
-		}
-
-		/* Finish off the remaining in the queue */
-		xfer = ch->xfer_head;
-		while (xfer) {
-
-			del_from_queue(xfer);
-
-			spin_unlock_irqrestore(&res_lock, flags);
-			_finish_off(xfer, S3C2410_RES_ABORT, 1);
-			spin_lock_irqsave(&res_lock, flags);
-
-			xfer = ch->xfer_head;
-		}
-	}
-
-ctrl_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_ctrl);
-
-int s3c2410_dma_enqueue(enum dma_ch id, void *token,
-			dma_addr_t addr, int size)
-{
-	struct s3c_pl330_chan *ch;
-	struct s3c_pl330_xfer *xfer;
-	unsigned long flags;
-	int idx, ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	/* Error if invalid or free channel */
-	if (!ch || chan_free(ch)) {
-		ret = -EINVAL;
-		goto enq_exit;
-	}
-
-	/* Error if size is unaligned */
-	if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
-		ret = -EINVAL;
-		goto enq_exit;
-	}
-
-	xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
-	if (!xfer) {
-		ret = -ENOMEM;
-		goto enq_exit;
-	}
-
-	xfer->token = token;
-	xfer->chan = ch;
-	xfer->px.bytes = size;
-	xfer->px.next = NULL; /* Single request */
-
-	/* For S3C DMA API, direction is always fixed for all xfers */
-	if (ch->req[0].rqtype == MEMTODEV) {
-		xfer->px.src_addr = addr;
-		xfer->px.dst_addr = ch->sdaddr;
-	} else {
-		xfer->px.src_addr = ch->sdaddr;
-		xfer->px.dst_addr = addr;
-	}
-
-	add_to_queue(ch, xfer, 0);
-
-	/* Try submitting on either request */
-	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
-
-	if (!ch->req[idx].x)
-		s3c_pl330_submit(ch, &ch->req[idx]);
-	else
-		s3c_pl330_submit(ch, &ch->req[1 - idx]);
-
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	if (ch->options & S3C2410_DMAF_AUTOSTART)
-		s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);
-
-	return 0;
-
-enq_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_enqueue);
-
-int s3c2410_dma_request(enum dma_ch id,
-			struct s3c2410_dma_client *client,
-			void *dev)
-{
-	struct s3c_pl330_dmac *dmac;
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = chan_acquire(id);
-	if (!ch) {
-		ret = -EBUSY;
-		goto req_exit;
-	}
-
-	dmac = ch->dmac;
-
-	ch->pl330_chan_id = pl330_request_channel(dmac->pi);
-	if (!ch->pl330_chan_id) {
-		chan_release(ch);
-		ret = -EBUSY;
-		goto req_exit;
-	}
-
-	ch->client = client;
-	ch->options = 0; /* Clear any option */
-	ch->callback_fn = NULL; /* Clear any callback */
-	ch->lrq = NULL;
-
-	ch->rqcfg.brst_size = 2; /* Default word size */
-	ch->rqcfg.swap = SWAP_NO;
-	ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
-	ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
-	ch->rqcfg.privileged = 0;
-	ch->rqcfg.insnaccess = 0;
-
-	/* Set invalid direction */
-	ch->req[0].rqtype = DEVTODEV;
-	ch->req[1].rqtype = ch->req[0].rqtype;
-
-	ch->req[0].cfg = &ch->rqcfg;
-	ch->req[1].cfg = ch->req[0].cfg;
-
-	ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
-	ch->req[1].peri = ch->req[0].peri;
-
-	ch->req[0].token = &ch->req[0];
-	ch->req[0].xfer_cb = s3c_pl330_rq0;
-	ch->req[1].token = &ch->req[1];
-	ch->req[1].xfer_cb = s3c_pl330_rq1;
-
-	ch->req[0].x = NULL;
-	ch->req[1].x = NULL;
-
-	/* Reset xfer list */
-	INIT_LIST_HEAD(&ch->xfer_list);
-	ch->xfer_head = NULL;
-
-req_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_request);
-
-int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
-{
-	struct s3c_pl330_chan *ch;
-	struct s3c_pl330_xfer *xfer;
-	unsigned long flags;
-	int ret = 0;
-	unsigned idx;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch))
-		goto free_exit;
-
-	/* Refuse if someone else wanted to free the channel */
-	if (ch->client != client) {
-		ret = -EBUSY;
-		goto free_exit;
-	}
-
-	/* Stop any active xfer, flush the queue and do callbacks */
-	pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);
-
-	/* Abort the submitted requests */
-	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
-
-	if (ch->req[idx].x) {
-		xfer = container_of(ch->req[idx].x,
-				struct s3c_pl330_xfer, px);
-
-		ch->req[idx].x = NULL;
-		del_from_queue(xfer);
-
-		spin_unlock_irqrestore(&res_lock, flags);
-		_finish_off(xfer, S3C2410_RES_ABORT, 1);
-		spin_lock_irqsave(&res_lock, flags);
-	}
-
-	if (ch->req[1 - idx].x) {
-		xfer = container_of(ch->req[1 - idx].x,
-				struct s3c_pl330_xfer, px);
-
-		ch->req[1 - idx].x = NULL;
-		del_from_queue(xfer);
-
-		spin_unlock_irqrestore(&res_lock, flags);
-		_finish_off(xfer, S3C2410_RES_ABORT, 1);
-		spin_lock_irqsave(&res_lock, flags);
-	}
-
-	/* Pluck and Abort the queued requests in order */
-	do {
-		xfer = get_from_queue(ch, 1);
-
-		spin_unlock_irqrestore(&res_lock, flags);
-		_finish_off(xfer, S3C2410_RES_ABORT, 1);
-		spin_lock_irqsave(&res_lock, flags);
-	} while (xfer);
-
-	ch->client = NULL;
-
-	pl330_release_channel(ch->pl330_chan_id);
-
-	ch->pl330_chan_id = NULL;
-
-	chan_release(ch);
-
-free_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_free);
-
-int s3c2410_dma_config(enum dma_ch id, int xferunit)
-{
-	struct s3c_pl330_chan *ch;
-	struct pl330_info *pi;
-	unsigned long flags;
-	int i, dbwidth, ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch)) {
-		ret = -EINVAL;
-		goto cfg_exit;
-	}
-
-	pi = ch->dmac->pi;
-	dbwidth = pi->pcfg.data_bus_width / 8;
-
-	/* Max size of xfer can be pcfg.data_bus_width */
-	if (xferunit > dbwidth) {
-		ret = -EINVAL;
-		goto cfg_exit;
-	}
-
-	i = 0;
-	while (xferunit != (1 << i))
-		i++;
-
-	/* If valid value */
-	if (xferunit == (1 << i))
-		ch->rqcfg.brst_size = i;
-	else
-		ret = -EINVAL;
-
-cfg_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_config);
-
-/* Options that are supported by this driver */
-#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
-
-int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
-{
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
-		ret = -EINVAL;
-	else
-		ch->options = options;
-
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return 0;
-}
-EXPORT_SYMBOL(s3c2410_dma_setflags);
-
-int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
-{
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch))
-		ret = -EINVAL;
-	else
-		ch->callback_fn = rtn;
-
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
-
-int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
-			  unsigned long address)
-{
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch)) {
-		ret = -EINVAL;
-		goto devcfg_exit;
-	}
-
-	switch (source) {
-	case S3C2410_DMASRC_HW: /* P->M */
-		ch->req[0].rqtype = DEVTOMEM;
-		ch->req[1].rqtype = DEVTOMEM;
-		ch->rqcfg.src_inc = 0;
-		ch->rqcfg.dst_inc = 1;
-		break;
-	case S3C2410_DMASRC_MEM: /* M->P */
-		ch->req[0].rqtype = MEMTODEV;
-		ch->req[1].rqtype = MEMTODEV;
-		ch->rqcfg.src_inc = 1;
-		ch->rqcfg.dst_inc = 0;
-		break;
-	default:
-		ret = -EINVAL;
-		goto devcfg_exit;
-	}
-
-	ch->sdaddr = address;
-
-devcfg_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_devconfig);
-
-int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
-{
-	struct s3c_pl330_chan *ch = id_to_chan(id);
-	struct pl330_chanstatus status;
-	int ret;
-
-	if (!ch || chan_free(ch))
-		return -EINVAL;
-
-	ret = pl330_chan_status(ch->pl330_chan_id, &status);
-	if (ret < 0)
-		return ret;
-
-	*src = status.src_addr;
-	*dst = status.dst_addr;
-
-	return 0;
-}
-EXPORT_SYMBOL(s3c2410_dma_getposition);
-
-static irqreturn_t pl330_irq_handler(int irq, void *data)
-{
-	if (pl330_update(data))
-		return IRQ_HANDLED;
-	else
-		return IRQ_NONE;
-}
-
-static int pl330_probe(struct platform_device *pdev)
-{
-	struct s3c_pl330_dmac *s3c_pl330_dmac;
-	struct s3c_pl330_platdata *pl330pd;
-	struct pl330_info *pl330_info;
-	struct resource *res;
-	int i, ret, irq;
-
-	pl330pd = pdev->dev.platform_data;
-
-	/* Can't do without the list of _32_ peripherals */
-	if (!pl330pd || !pl330pd->peri) {
-		dev_err(&pdev->dev, "platform data missing!\n");
-		return -ENODEV;
-	}
-
-	pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
-	if (!pl330_info)
-		return -ENOMEM;
-
-	pl330_info->pl330_data = NULL;
-	pl330_info->dev = &pdev->dev;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		ret = -ENODEV;
-		goto probe_err1;
-	}
-
-	request_mem_region(res->start, resource_size(res), pdev->name);
-
-	pl330_info->base = ioremap(res->start, resource_size(res));
-	if (!pl330_info->base) {
-		ret = -ENXIO;
-		goto probe_err2;
-	}
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		ret = irq;
-		goto probe_err3;
-	}
-
-	ret = request_irq(irq, pl330_irq_handler, 0,
-			dev_name(&pdev->dev), pl330_info);
-	if (ret)
-		goto probe_err4;
-
-	/* Allocate a new DMAC */
-	s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
-	if (!s3c_pl330_dmac) {
-		ret = -ENOMEM;
-		goto probe_err5;
-	}
-
-	/* Get operation clock and enable it */
-	s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma");
-	if (IS_ERR(s3c_pl330_dmac->clk)) {
-		dev_err(&pdev->dev, "Cannot get operation clock.\n");
-		ret = -EINVAL;
-		goto probe_err6;
-	}
-	clk_enable(s3c_pl330_dmac->clk);
-
-	ret = pl330_add(pl330_info);
-	if (ret)
-		goto probe_err7;
-
-	/* Hook the info */
-	s3c_pl330_dmac->pi = pl330_info;
-
-	/* No busy channels */
-	s3c_pl330_dmac->busy_chan = 0;
-
-	s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
-				sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
-
-	if (!s3c_pl330_dmac->kmcache) {
-		ret = -ENOMEM;
-		goto probe_err8;
-	}
-
-	/* Get the list of peripherals */
-	s3c_pl330_dmac->peri = pl330pd->peri;
-
-	/* Attach to the list of DMACs */
-	list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
-
-	/* Create a channel for each peripheral in the DMAC,
-	 * if it doesn't already exist
-	 */
-	for (i = 0; i < PL330_MAX_PERI; i++)
-		if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
-			chan_add(s3c_pl330_dmac->peri[i]);
-
-	printk(KERN_INFO
-		"Loaded driver for PL330 DMAC-%d %s\n",	pdev->id, pdev->name);
-	printk(KERN_INFO
-		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
-		pl330_info->pcfg.data_buf_dep,
-		pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
-		pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
-
-	return 0;
-
-probe_err8:
-	pl330_del(pl330_info);
-probe_err7:
-	clk_disable(s3c_pl330_dmac->clk);
-	clk_put(s3c_pl330_dmac->clk);
-probe_err6:
-	kfree(s3c_pl330_dmac);
-probe_err5:
-	free_irq(irq, pl330_info);
-probe_err4:
-probe_err3:
-	iounmap(pl330_info->base);
-probe_err2:
-	release_mem_region(res->start, resource_size(res));
-probe_err1:
-	kfree(pl330_info);
-
-	return ret;
-}
-
-static int pl330_remove(struct platform_device *pdev)
-{
-	struct s3c_pl330_dmac *dmac, *d;
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int del, found;
-
-	if (!pdev->dev.platform_data)
-		return -EINVAL;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	found = 0;
-	list_for_each_entry(d, &dmac_list, node)
-		if (d->pi->dev == &pdev->dev) {
-			found = 1;
-			break;
-		}
-
-	if (!found) {
-		spin_unlock_irqrestore(&res_lock, flags);
-		return 0;
-	}
-
-	dmac = d;
-
-	/* Remove all Channels that are managed only by this DMAC */
-	list_for_each_entry(ch, &chan_list, node) {
-
-		/* Only channels that are handled by this DMAC */
-		if (iface_of_dmac(dmac, ch->id))
-			del = 1;
-		else
-			continue;
-
-		/* Don't remove if some other DMAC has it too */
-		list_for_each_entry(d, &dmac_list, node)
-			if (d != dmac && iface_of_dmac(d, ch->id)) {
-				del = 0;
-				break;
-			}
-
-		if (del) {
-			spin_unlock_irqrestore(&res_lock, flags);
-			s3c2410_dma_free(ch->id, ch->client);
-			spin_lock_irqsave(&res_lock, flags);
-			list_del(&ch->node);
-			kfree(ch);
-		}
-	}
-
-	/* Disable operation clock */
-	clk_disable(dmac->clk);
-	clk_put(dmac->clk);
-
-	/* Remove the DMAC */
-	list_del(&dmac->node);
-	kfree(dmac);
-
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return 0;
-}
-
-static struct platform_driver pl330_driver = {
-	.driver		= {
-		.owner	= THIS_MODULE,
-		.name	= "s3c-pl330",
-	},
-	.probe		= pl330_probe,
-	.remove		= pl330_remove,
-};
-
-static int __init pl330_init(void)
-{
-	return platform_driver_register(&pl330_driver);
-}
-module_init(pl330_init);
-
-static void __exit pl330_exit(void)
-{
-	platform_driver_unregister(&pl330_driver);
-	return;
-}
-module_exit(pl330_exit);
-
-MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
-MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
-MODULE_LICENSE("GPL");
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 48e50f8..e301133 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -59,7 +59,7 @@
 	struct gendisk *disk;
 };
 
-static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
+static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct nfhd_device *dev = queue->queuedata;
 	struct bio_vec *bvec;
@@ -76,7 +76,6 @@
 		sec += len;
 	}
 	bio_endio(bio, 0);
-	return 0;
 }
 
 static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 265f0f0..ba42719 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -104,7 +104,7 @@
  * axon_ram_make_request - make_request() method for block device
  * @queue, @bio: see blk_queue_make_request()
  */
-static int
+static void
 axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
@@ -113,7 +113,6 @@
 	struct bio_vec *vec;
 	unsigned int transfered;
 	unsigned short idx;
-	int rc = 0;
 
 	phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
 	phys_end = bank->io_addr + bank->size;
@@ -121,8 +120,7 @@
 	bio_for_each_segment(vec, bio, idx) {
 		if (unlikely(phys_mem + vec->bv_len > phys_end)) {
 			bio_io_error(bio);
-			rc = -ERANGE;
-			break;
+			return;
 		}
 
 		user_mem = page_address(vec->bv_page) + vec->bv_offset;
@@ -135,8 +133,6 @@
 		transfered += vec->bv_len;
 	}
 	bio_endio(bio, 0);
-
-	return rc;
 }
 
 /**
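Both conversions follow the same new contract for make_request functions: the
return value is gone, so the function must complete the bio itself on every
path. A minimal sketch, assuming a hypothetical driver (my_make_request and
my_transfer are illustrative names):

	static void my_make_request(struct request_queue *q, struct bio *bio)
	{
		if (my_transfer(q->queuedata, bio) < 0) {
			bio_io_error(bio);	/* completes the bio with -EIO */
			return;
		}
		bio_endio(bio, 0);		/* completes the bio successfully */
	}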
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b596e54..8f630ce 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -768,25 +768,14 @@
 	return disk_total;
 }
 
-static int blkio_check_dev_num(dev_t dev)
-{
-	int part = 0;
-	struct gendisk *disk;
-
-	disk = get_gendisk(dev, &part);
-	if (!disk || part)
-		return -ENODEV;
-
-	return 0;
-}
-
 static int blkio_policy_parse_and_set(char *buf,
 	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
 {
+	struct gendisk *disk = NULL;
 	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
-	int ret;
 	unsigned long major, minor;
-	int i = 0;
+	int i = 0, ret = -EINVAL;
+	int part;
 	dev_t dev;
 	u64 temp;
 
@@ -804,37 +793,36 @@
 	}
 
 	if (i != 2)
-		return -EINVAL;
+		goto out;
 
 	p = strsep(&s[0], ":");
 	if (p != NULL)
 		major_s = p;
 	else
-		return -EINVAL;
+		goto out;
 
 	minor_s = s[0];
 	if (!minor_s)
-		return -EINVAL;
+		goto out;
 
-	ret = strict_strtoul(major_s, 10, &major);
-	if (ret)
-		return -EINVAL;
+	if (strict_strtoul(major_s, 10, &major))
+		goto out;
 
-	ret = strict_strtoul(minor_s, 10, &minor);
-	if (ret)
-		return -EINVAL;
+	if (strict_strtoul(minor_s, 10, &minor))
+		goto out;
 
 	dev = MKDEV(major, minor);
 
-	ret = strict_strtoull(s[1], 10, &temp);
-	if (ret)
-		return -EINVAL;
+	if (strict_strtoull(s[1], 10, &temp))
+		goto out;
 
 	/* For rule removal, do not check for device presence. */
 	if (temp) {
-		ret = blkio_check_dev_num(dev);
-		if (ret)
-			return ret;
+		disk = get_gendisk(dev, &part);
+		if (!disk || part) {
+			ret = -ENODEV;
+			goto out;
+		}
 	}
 
 	newpn->dev = dev;
@@ -843,7 +831,7 @@
 	case BLKIO_POLICY_PROP:
 		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
 		     temp > BLKIO_WEIGHT_MAX)
-			return -EINVAL;
+			goto out;
 
 		newpn->plid = plid;
 		newpn->fileid = fileid;
@@ -860,7 +848,7 @@
 		case BLKIO_THROTL_read_iops_device:
 		case BLKIO_THROTL_write_iops_device:
 			if (temp > THROTL_IOPS_MAX)
-				return -EINVAL;
+				goto out;
 
 			newpn->plid = plid;
 			newpn->fileid = fileid;
@@ -871,68 +859,96 @@
 	default:
 		BUG();
 	}
-
-	return 0;
+	ret = 0;
+out:
+	put_disk(disk);
+	return ret;
 }
 
 unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
 			      dev_t dev)
 {
 	struct blkio_policy_node *pn;
+	unsigned long flags;
+	unsigned int weight;
+
+	spin_lock_irqsave(&blkcg->lock, flags);
 
 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
 				BLKIO_PROP_weight_device);
 	if (pn)
-		return pn->val.weight;
+		weight = pn->val.weight;
 	else
-		return blkcg->weight;
+		weight = blkcg->weight;
+
+	spin_unlock_irqrestore(&blkcg->lock, flags);
+
+	return weight;
 }
 EXPORT_SYMBOL_GPL(blkcg_get_weight);
 
 uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
 {
 	struct blkio_policy_node *pn;
+	unsigned long flags;
+	uint64_t bps = -1;
 
+	spin_lock_irqsave(&blkcg->lock, flags);
 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
 				BLKIO_THROTL_read_bps_device);
 	if (pn)
-		return pn->val.bps;
-	else
-		return -1;
+		bps = pn->val.bps;
+	spin_unlock_irqrestore(&blkcg->lock, flags);
+
+	return bps;
 }
 
 uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
 {
 	struct blkio_policy_node *pn;
+	unsigned long flags;
+	uint64_t bps = -1;
+
+	spin_lock_irqsave(&blkcg->lock, flags);
 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
 				BLKIO_THROTL_write_bps_device);
 	if (pn)
-		return pn->val.bps;
-	else
-		return -1;
+		bps = pn->val.bps;
+	spin_unlock_irqrestore(&blkcg->lock, flags);
+
+	return bps;
 }
 
 unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
 {
 	struct blkio_policy_node *pn;
+	unsigned long flags;
+	unsigned int iops = -1;
 
+	spin_lock_irqsave(&blkcg->lock, flags);
 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
 				BLKIO_THROTL_read_iops_device);
 	if (pn)
-		return pn->val.iops;
-	else
-		return -1;
+		iops = pn->val.iops;
+	spin_unlock_irqrestore(&blkcg->lock, flags);
+
+	return iops;
 }
 
 unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
 {
 	struct blkio_policy_node *pn;
+	unsigned long flags;
+	unsigned int iops = -1;
+
+	spin_lock_irqsave(&blkcg->lock, flags);
 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
 				BLKIO_THROTL_write_iops_device);
 	if (pn)
-		return pn->val.iops;
-	else
-		return -1;
+		iops = pn->val.iops;
+	spin_unlock_irqrestore(&blkcg->lock, flags);
+
+	return iops;
 }
 
 /* Checks whether user asked for deleting a policy rule */
@@ -1085,6 +1101,7 @@
 
 	if (blkio_delete_rule_command(newpn)) {
 		blkio_policy_delete_node(pn);
+		kfree(pn);
 		spin_unlock_irq(&blkcg->lock);
 		goto update_io_group;
 	}
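The getters above now copy the value out under blkcg->lock because a policy
node can be deleted (blkio_policy_delete_node() followed by the newly added
kfree()) while a reader dereferences it. The pattern, reduced to a sketch
(plid, fileid, and fallback stand in for the per-getter specifics):

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, plid, fileid);
	val = pn ? pn->val.weight : fallback;	/* copy while the node is pinned */
	spin_unlock_irqrestore(&blkcg->lock, flags);
	return val;	/* never dereference pn after dropping the lock */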
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index a71d290..6f3ace7 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -188,7 +188,7 @@
 	union {
 		unsigned int weight;
 		/*
-		 * Rate read/write in terms of byptes per second
+		 * Rate read/write in terms of bytes per second
 		 * Whether this rate represents read or write is determined
 		 * by file type "fileid".
 		 */
diff --git a/block/blk-core.c b/block/blk-core.c
index d34433a..f43c8a5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -28,6 +28,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/fault-inject.h>
 #include <linux/list_sort.h>
+#include <linux/delay.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -38,8 +39,6 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
-static int __make_request(struct request_queue *q, struct bio *bio);
-
 /*
  * For the allocated request tables
  */
@@ -347,30 +346,80 @@
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-/*
- * Note: If a driver supplied the queue lock, it is disconnected
- * by this function. The actual state of the lock doesn't matter
- * here as the request_queue isn't accessible after this point
- * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
+/**
+ * blk_drain_queue - drain requests from request_queue
+ * @q: queue to drain
+ * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
+ *
+ * Drain requests from @q.  If @drain_all is set, all requests are drained.
+ * If not, only ELVPRIV requests are drained.  The caller is responsible
+ * for ensuring that no new requests which need to be drained are queued.
+ */
+void blk_drain_queue(struct request_queue *q, bool drain_all)
+{
+	while (true) {
+		int nr_rqs;
+
+		spin_lock_irq(q->queue_lock);
+
+		elv_drain_elevator(q);
+		if (drain_all)
+			blk_throtl_drain(q);
+
+		__blk_run_queue(q);
+
+		if (drain_all)
+			nr_rqs = q->rq.count[0] + q->rq.count[1];
+		else
+			nr_rqs = q->rq.elvpriv;
+
+		spin_unlock_irq(q->queue_lock);
+
+		if (!nr_rqs)
+			break;
+		msleep(10);
+	}
+}
+
+/**
+ * blk_cleanup_queue - shutdown a request queue
+ * @q: request queue to shutdown
+ *
+ * Mark @q DEAD, drain all pending requests, destroy and put it.  All
+ * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
-	/*
-	 * We know we have process context here, so we can be a little
-	 * cautious and ensure that pending block actions on this device
-	 * are done before moving on. Going into this function, we should
-	 * not have processes doing IO to this device.
-	 */
-	blk_sync_queue(q);
+	spinlock_t *lock = q->queue_lock;
 
-	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	/* mark @q DEAD, no new requests or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-	mutex_unlock(&q->sysfs_lock);
+
+	spin_lock_irq(lock);
+	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+	queue_flag_set(QUEUE_FLAG_DEAD, q);
 
 	if (q->queue_lock != &q->__queue_lock)
 		q->queue_lock = &q->__queue_lock;
 
+	spin_unlock_irq(lock);
+	mutex_unlock(&q->sysfs_lock);
+
+	/*
+	 * Drain all requests queued before DEAD marking.  The caller might
+	 * be trying to tear down @q before its elevator is initialized, in
+	 * which case we don't want to call into draining.
+	 */
+	if (q->elevator)
+		blk_drain_queue(q, true);
+
+	/* @q won't process any more requests, flush async actions */
+	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	blk_sync_queue(q);
+
+	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
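A driver tearing a device down would typically call this as before; a brief
sketch (the dev structure and its fields are illustrative):

	del_gendisk(dev->disk);		/* stop new I/O from reaching the disk */
	blk_cleanup_queue(dev->queue);	/* mark DEAD, drain, flush, then put */
	put_disk(dev->disk);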
@@ -541,7 +590,7 @@
 	/*
 	 * This also sets hw/phys segments, boundary and size
 	 */
-	blk_queue_make_request(q, __make_request);
+	blk_queue_make_request(q, blk_queue_bio);
 
 	q->sg_reserved_size = INT_MAX;
 
@@ -576,7 +625,7 @@
 }
 
 static struct request *
-blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -587,12 +636,10 @@
 
 	rq->cmd_flags = flags | REQ_ALLOCED;
 
-	if (priv) {
-		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
-			mempool_free(rq, q->rq.rq_pool);
-			return NULL;
-		}
-		rq->cmd_flags |= REQ_ELVPRIV;
+	if ((flags & REQ_ELVPRIV) &&
+	    unlikely(elv_set_request(q, rq, gfp_mask))) {
+		mempool_free(rq, q->rq.rq_pool);
+		return NULL;
 	}
 
 	return rq;
@@ -651,12 +698,13 @@
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.   Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, int sync, int priv)
+static void freed_request(struct request_queue *q, unsigned int flags)
 {
 	struct request_list *rl = &q->rq;
+	int sync = rw_is_sync(flags);
 
 	rl->count[sync]--;
-	if (priv)
+	if (flags & REQ_ELVPRIV)
 		rl->elvpriv--;
 
 	__freed_request(q, sync);
@@ -684,10 +732,19 @@
 	return true;
 }
 
-/*
- * Get a free request, queue_lock must be held.
- * Returns NULL on failure, with queue_lock held.
- * Returns !NULL on success, with queue_lock *not held*.
+/**
+ * get_request - get a free request
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
+ *
+ * Get a free request from @q.  This function may fail under memory
+ * pressure or if @q is dead.
+ *
+ * Must be called with @q->queue_lock held.
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, int rw_flags,
 				   struct bio *bio, gfp_t gfp_mask)
@@ -696,7 +753,10 @@
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
-	int may_queue, priv = 0;
+	int may_queue;
+
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		return NULL;
 
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
@@ -740,17 +800,17 @@
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
 
-	if (blk_rq_should_init_elevator(bio)) {
-		priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-		if (priv)
-			rl->elvpriv++;
+	if (blk_rq_should_init_elevator(bio) &&
+	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+		rw_flags |= REQ_ELVPRIV;
+		rl->elvpriv++;
 	}
 
 	if (blk_queue_io_stat(q))
 		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -760,7 +820,7 @@
 		 * wait queue, but this is pretty rare.
 		 */
 		spin_lock_irq(q->queue_lock);
-		freed_request(q, is_sync, priv);
+		freed_request(q, rw_flags);
 
 		/*
 		 * in the very unlikely event that allocation failed and no
@@ -790,11 +850,18 @@
 	return rq;
 }
 
-/*
- * No available requests for this queue, wait for some requests to become
- * available.
+/**
+ * get_request_wait - get a free request with retry
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
  *
- * Called with q->queue_lock held, and returns with it unlocked.
+ * Get a free request from @q.  This function keeps retrying under memory
+ * pressure and fails iff @q is dead.
+ *
+ * Must be called with @q->queue_lock held.
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 					struct bio *bio)
@@ -808,6 +875,9 @@
 		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
+		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+			return NULL;
+
 		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
 				TASK_UNINTERRUPTIBLE);
 
@@ -838,19 +908,15 @@
 {
 	struct request *rq;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-		return NULL;
-
 	BUG_ON(rw != READ && rw != WRITE);
 
 	spin_lock_irq(q->queue_lock);
-	if (gfp_mask & __GFP_WAIT) {
+	if (gfp_mask & __GFP_WAIT)
 		rq = get_request_wait(q, rw, NULL);
-	} else {
+	else
 		rq = get_request(q, rw, NULL, gfp_mask);
-		if (!rq)
-			spin_unlock_irq(q->queue_lock);
-	}
+	if (!rq)
+		spin_unlock_irq(q->queue_lock);
 	/* q->queue_lock is unlocked at this point */
 
 	return rq;
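With the DEAD check moved into get_request()/get_request_wait(),
blk_get_request() can now return %NULL even to __GFP_WAIT callers, so users
must check for it; a hedged caller sketch:

	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENODEV;	/* the queue is being torn down */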
@@ -1052,14 +1118,13 @@
 	 * it didn't come out of our reserved rq pools
 	 */
 	if (req->cmd_flags & REQ_ALLOCED) {
-		int is_sync = rq_is_sync(req) != 0;
-		int priv = req->cmd_flags & REQ_ELVPRIV;
+		unsigned int flags = req->cmd_flags;
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(!hlist_unhashed(&req->hash));
 
 		blk_free_request(q, req);
-		freed_request(q, is_sync, priv);
+		freed_request(q, flags);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1161,18 +1226,32 @@
 	return true;
 }
 
-/*
- * Attempts to merge with the plugged list in the current process. Returns
- * true if merge was successful, otherwise false.
+/**
+ * attempt_plug_merge - try to merge with %current's plugged list
+ * @q: request_queue new bio is being queued at
+ * @bio: new bio being queued
+ * @request_count: out parameter for number of traversed plugged requests
+ *
+ * Determine whether @bio being queued on @q can be merged with a request
+ * on %current's plugged list.  Returns %true if merge was successful,
+ * otherwise %false.
+ *
+ * This function is called without @q->queue_lock; however, the elevator
+ * is accessed iff there already are requests on the plugged list, which
+ * in turn guarantees validity of the elevator.
+ *
+ * Note that, on successful merge, elevator operation
+ * elevator_bio_merged_fn() will be called without queue lock.  Elevator
+ * must be ready for this.
  */
-static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
-			       struct bio *bio, unsigned int *request_count)
+static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
+			       unsigned int *request_count)
 {
 	struct blk_plug *plug;
 	struct request *rq;
 	bool ret = false;
 
-	plug = tsk->plug;
+	plug = current->plug;
 	if (!plug)
 		goto out;
 	*request_count = 0;
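
As the kerneldoc above notes, the plug list is private to %current, which is what makes the lockless walk safe. A hedged sketch of that traversal (mirroring the list walk in the remainder of the function, which this hunk does not show):

	struct blk_plug *plug = current->plug;
	struct request *rq;

	if (plug) {
		/* only the owning task touches its plug list,
		 * so no queue_lock is needed here */
		list_for_each_entry_reverse(rq, &plug->list, queuelist) {
			/* each rq is a merge candidate for the new bio */
		}
	}
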
@@ -1202,7 +1281,6 @@
 
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
-	req->cpu = bio->bi_comp_cpu;
 	req->cmd_type = REQ_TYPE_FS;
 
 	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
@@ -1215,7 +1293,7 @@
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-static int __make_request(struct request_queue *q, struct bio *bio)
+void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
@@ -1240,8 +1318,8 @@
 	 * Check if we can merge with the plugged list before grabbing
 	 * any locks.
 	 */
-	if (attempt_plug_merge(current, q, bio, &request_count))
-		goto out;
+	if (attempt_plug_merge(q, bio, &request_count))
+		return;
 
 	spin_lock_irq(q->queue_lock);
 
@@ -1275,6 +1353,10 @@
 	 * Returns with the queue unlocked.
 	 */
 	req = get_request_wait(q, rw_flags, bio);
+	if (unlikely(!req)) {
+		bio_endio(bio, -ENODEV);	/* @q is dead */
+		goto out_unlock;
+	}
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
@@ -1284,8 +1366,7 @@
 	 */
 	init_request_from_bio(req, bio);
 
-	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
-	    bio_flagged(bio, BIO_CPU_AFFINE))
+	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
 		req->cpu = raw_smp_processor_id();
 
 	plug = current->plug;
@@ -1316,9 +1397,8 @@
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
-out:
-	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */
 
 /*
  * If bio->bi_dev is a partition, remap the location
@@ -1417,6 +1497,89 @@
 	return 0;
 }
 
+static noinline_for_stack bool
+generic_make_request_checks(struct bio *bio)
+{
+	struct request_queue *q;
+	int nr_sectors = bio_sectors(bio);
+	int err = -EIO;
+	char b[BDEVNAME_SIZE];
+	struct hd_struct *part;
+
+	might_sleep();
+
+	if (bio_check_eod(bio, nr_sectors))
+		goto end_io;
+
+	q = bdev_get_queue(bio->bi_bdev);
+	if (unlikely(!q)) {
+		printk(KERN_ERR
+		       "generic_make_request: Trying to access "
+			"nonexistent block-device %s (%Lu)\n",
+			bdevname(bio->bi_bdev, b),
+			(long long) bio->bi_sector);
+		goto end_io;
+	}
+
+	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+		     nr_sectors > queue_max_hw_sectors(q))) {
+		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
+		       bdevname(bio->bi_bdev, b),
+		       bio_sectors(bio),
+		       queue_max_hw_sectors(q));
+		goto end_io;
+	}
+
+	part = bio->bi_bdev->bd_part;
+	if (should_fail_request(part, bio->bi_size) ||
+	    should_fail_request(&part_to_disk(part)->part0,
+				bio->bi_size))
+		goto end_io;
+
+	/*
+	 * If this device has partitions, remap block n
+	 * of partition p to block n+start(p) of the disk.
+	 */
+	blk_partition_remap(bio);
+
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+		goto end_io;
+
+	if (bio_check_eod(bio, nr_sectors))
+		goto end_io;
+
+	/*
+	 * Filter flush bio's early so that make_request based
+	 * drivers without flush support don't have to worry
+	 * about them.
+	 */
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+		if (!nr_sectors) {
+			err = 0;
+			goto end_io;
+		}
+	}
+
+	if ((bio->bi_rw & REQ_DISCARD) &&
+	    (!blk_queue_discard(q) ||
+	     ((bio->bi_rw & REQ_SECURE) &&
+	      !blk_queue_secdiscard(q)))) {
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
+
+	if (blk_throtl_bio(q, bio))
+		return false;	/* throttled, will be resubmitted later */
+
+	trace_block_bio_queue(q, bio);
+	return true;
+
+end_io:
+	bio_endio(bio, err);
+	return false;
+}
+
 /**
  * generic_make_request - hand a buffer to its device driver for I/O
  * @bio:  The bio describing the location in memory and on the device.
@@ -1437,145 +1600,32 @@
  * completion notification should be signaled.
  *
  * generic_make_request and the drivers it calls may use bi_next if this
- * bio happens to be merged with someone else, and may change bi_dev and
- * bi_sector for remaps as it sees fit.  So the values of these fields
- * should NOT be depended on after the call to generic_make_request.
- */
-static inline void __generic_make_request(struct bio *bio)
-{
-	struct request_queue *q;
-	sector_t old_sector;
-	int ret, nr_sectors = bio_sectors(bio);
-	dev_t old_dev;
-	int err = -EIO;
-
-	might_sleep();
-
-	if (bio_check_eod(bio, nr_sectors))
-		goto end_io;
-
-	/*
-	 * Resolve the mapping until finished. (drivers are
-	 * still free to implement/resolve their own stacking
-	 * by explicitly returning 0)
-	 *
-	 * NOTE: we don't repeat the blk_size check for each new device.
-	 * Stacking drivers are expected to know what they are doing.
-	 */
-	old_sector = -1;
-	old_dev = 0;
-	do {
-		char b[BDEVNAME_SIZE];
-		struct hd_struct *part;
-
-		q = bdev_get_queue(bio->bi_bdev);
-		if (unlikely(!q)) {
-			printk(KERN_ERR
-			       "generic_make_request: Trying to access "
-				"nonexistent block-device %s (%Lu)\n",
-				bdevname(bio->bi_bdev, b),
-				(long long) bio->bi_sector);
-			goto end_io;
-		}
-
-		if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
-			     nr_sectors > queue_max_hw_sectors(q))) {
-			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-			       bdevname(bio->bi_bdev, b),
-			       bio_sectors(bio),
-			       queue_max_hw_sectors(q));
-			goto end_io;
-		}
-
-		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-			goto end_io;
-
-		part = bio->bi_bdev->bd_part;
-		if (should_fail_request(part, bio->bi_size) ||
-		    should_fail_request(&part_to_disk(part)->part0,
-					bio->bi_size))
-			goto end_io;
-
-		/*
-		 * If this device has partitions, remap block n
-		 * of partition p to block n+start(p) of the disk.
-		 */
-		blk_partition_remap(bio);
-
-		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
-			goto end_io;
-
-		if (old_sector != -1)
-			trace_block_bio_remap(q, bio, old_dev, old_sector);
-
-		old_sector = bio->bi_sector;
-		old_dev = bio->bi_bdev->bd_dev;
-
-		if (bio_check_eod(bio, nr_sectors))
-			goto end_io;
-
-		/*
-		 * Filter flush bio's early so that make_request based
-		 * drivers without flush support don't have to worry
-		 * about them.
-		 */
-		if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
-			bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
-			if (!nr_sectors) {
-				err = 0;
-				goto end_io;
-			}
-		}
-
-		if ((bio->bi_rw & REQ_DISCARD) &&
-		    (!blk_queue_discard(q) ||
-		     ((bio->bi_rw & REQ_SECURE) &&
-		      !blk_queue_secdiscard(q)))) {
-			err = -EOPNOTSUPP;
-			goto end_io;
-		}
-
-		if (blk_throtl_bio(q, &bio))
-			goto end_io;
-
-		/*
-		 * If bio = NULL, bio has been throttled and will be submitted
-		 * later.
-		 */
-		if (!bio)
-			break;
-
-		trace_block_bio_queue(q, bio);
-
-		ret = q->make_request_fn(q, bio);
-	} while (ret);
-
-	return;
-
-end_io:
-	bio_endio(bio, err);
-}
-
-/*
- * We only want one ->make_request_fn to be active at a time,
- * else stack usage with stacked devices could be a problem.
- * So use current->bio_list to keep a list of requests
- * submited by a make_request_fn function.
- * current->bio_list is also used as a flag to say if
- * generic_make_request is currently active in this task or not.
- * If it is NULL, then no make_request is active.  If it is non-NULL,
- * then a make_request is active, and new requests should be added
- * at the tail
+ * bio happens to be merged with someone else, and may resubmit the bio to
+ * a lower device by calling into generic_make_request recursively, which
+ * means the bio should NOT be touched after the call to ->make_request_fn.
  */
 void generic_make_request(struct bio *bio)
 {
 	struct bio_list bio_list_on_stack;
 
+	if (!generic_make_request_checks(bio))
+		return;
+
+	/*
+	 * We only want one ->make_request_fn to be active at a time, else
+	 * stack usage with stacked devices could be a problem.  So use
+	 * current->bio_list to keep a list of requests submitted by a
+	 * make_request_fn function.  current->bio_list is also used as a
+	 * flag to say if generic_make_request is currently active in this
+	 * task or not.  If it is NULL, then no make_request is active.  If
+	 * it is non-NULL, then a make_request is active, and new requests
+	 * should be added at the tail.
+	 */
 	if (current->bio_list) {
-		/* make_request is active */
 		bio_list_add(current->bio_list, bio);
 		return;
 	}
+
 	/* following loop may be a bit non-obvious, and so deserves some
 	 * explanation.
 	 * Before entering the loop, bio->bi_next is NULL (as all callers
@@ -1583,22 +1633,21 @@
 	 * We pretend that we have just taken it off a longer list, so
 	 * we assign bio_list to a pointer to the bio_list_on_stack,
 	 * thus initialising the bio_list of new bios to be
-	 * added.  __generic_make_request may indeed add some more bios
+	 * added.  ->make_request() may indeed add some more bios
 	 * through a recursive call to generic_make_request.  If it
 	 * did, we find a non-NULL value in bio_list and re-enter the loop
 	 * from the top.  In this case we really did just take the bio
 	 * of the top of the list (no pretending) and so remove it from
-	 * bio_list, and call into __generic_make_request again.
-	 *
-	 * The loop was structured like this to make only one call to
-	 * __generic_make_request (which is important as it is large and
-	 * inlined) and to keep the structure simple.
+	 * bio_list, and call into ->make_request() again.
 	 */
 	BUG_ON(bio->bi_next);
 	bio_list_init(&bio_list_on_stack);
 	current->bio_list = &bio_list_on_stack;
 	do {
-		__generic_make_request(bio);
+		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+
+		q->make_request_fn(q, bio);
+
 		bio = bio_list_pop(current->bio_list);
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
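
For stacking drivers, the net effect is that recursion becomes iteration. A hedged sketch (lower_bdev is hypothetical):

static void stacked_make_request(struct request_queue *q, struct bio *bio)
{
	bio->bi_bdev = lower_bdev;	/* remap to the lower device */
	/*
	 * This nested call does not recurse: the bio is appended to
	 * current->bio_list and dispatched by the outer loop above.
	 */
	generic_make_request(bio);
}
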
@@ -1725,6 +1774,8 @@
 		where = ELEVATOR_INSERT_FLUSH;
 
 	add_acct_request(q, rq, where);
+	if (where == ELEVATOR_INSERT_FLUSH)
+		__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	return 0;
@@ -2628,6 +2679,20 @@
 
 #define PLUG_MAGIC	0x91827364
 
+/**
+ * blk_start_plug - initialize blk_plug and track it inside the task_struct
+ * @plug:	The &struct blk_plug that needs to be initialized
+ *
+ * Description:
+ *   Tracking blk_plug inside the task_struct will help with auto-flushing the
+ *   pending I/O should the task end up blocking between blk_start_plug() and
+ *   blk_finish_plug(). This is important from a performance perspective, but
+ *   also ensures that we don't deadlock. For instance, if the task is blocking
+ *   on a memory allocation, memory reclaim could end up wanting to free a
+ *   page belonging to a request currently residing in our private plug. By
+ *   flushing the pending I/O when the process goes to sleep, we avoid this
+ *   kind of deadlock.
+ */
 void blk_start_plug(struct blk_plug *plug)
 {
 	struct task_struct *tsk = current;
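
A typical submission pattern for the interface documented above (nr_bios and bios[] are illustrative):

	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr_bios; i++)
		submit_bio(READ, bios[i]);	/* queued on the plug */
	blk_finish_plug(&plug);			/* flushes the batch */
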
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 491eb30..720ad60 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -320,7 +320,7 @@
 		return;
 	}
 
-	BUG_ON(!rq->bio || rq->bio != rq->biotail);
+	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */
 
 	/*
 	 * If there's data but flush is not necessary, the request can be
@@ -330,7 +330,6 @@
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
 		list_add_tail(&rq->queuelist, &q->queue_head);
-		blk_run_queue_async(q);
 		return;
 	}
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 60fda88..e7f9f65 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -457,11 +457,11 @@
 }
 
 /**
- * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
- * @kobj:    the kobj belonging of the request queue to be released
+ * blk_release_queue: - release a &struct request_queue when it is no longer needed
+ * @kobj:    the kobj belonging to the request queue to be released
  *
  * Description:
- *     blk_cleanup_queue is the pair to blk_init_queue() or
+ *     blk_release_queue is the pair to blk_init_queue() or
  *     blk_queue_make_request().  It should be called when a request queue is
  *     being released; typically when a block device is being de-registered.
  *     Currently, its primary task it to free all the &struct request
@@ -490,6 +490,7 @@
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
+	blk_throtl_release(q);
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index ece65fc..e74d6d1 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -286,12 +286,14 @@
 
 	BUG_ON(tag == -1);
 
-	if (unlikely(tag >= bqt->real_max_depth))
+	if (unlikely(tag >= bqt->max_depth)) {
 		/*
 		 * This can happen after tag depth has been reduced.
-		 * FIXME: how about a warning or info message here?
+		 * But tag shouldn't be larger than real_max_depth.
 		 */
+		WARN_ON(tag >= bqt->real_max_depth);
 		return;
+	}
 
 	list_del_init(&rq->queuelist);
 	rq->cmd_flags &= ~REQ_QUEUED;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a19f58c..4553245 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -10,6 +10,7 @@
 #include <linux/bio.h>
 #include <linux/blktrace_api.h>
 #include "blk-cgroup.h"
+#include "blk.h"
 
 /* Max dispatch from a group in 1 round */
 static int throtl_grp_quantum = 8;
@@ -302,16 +303,16 @@
 	return tg;
 }
 
-/*
- * This function returns with queue lock unlocked in case of error, like
- * request queue is no more
- */
 static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 {
 	struct throtl_grp *tg = NULL, *__tg = NULL;
 	struct blkio_cgroup *blkcg;
 	struct request_queue *q = td->queue;
 
+	/* no throttling for dead queue */
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		return NULL;
+
 	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
@@ -323,32 +324,22 @@
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in turn takes a mutex() and can block. Hence
-	 * we need to drop rcu lock and queue_lock before we call alloc
-	 *
-	 * Take the request queue reference to make sure queue does not
-	 * go away once we return from allocation.
+	 * we need to drop rcu lock and queue_lock before we call alloc.
 	 */
-	blk_get_queue(q);
 	rcu_read_unlock();
 	spin_unlock_irq(q->queue_lock);
 
 	tg = throtl_alloc_tg(td);
-	/*
-	 * We might have slept in group allocation. Make sure queue is not
-	 * dead
-	 */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-		blk_put_queue(q);
-		if (tg)
-			kfree(tg);
-
-		return ERR_PTR(-ENODEV);
-	}
-	blk_put_queue(q);
 
 	/* Group allocated and queue is still alive. take the lock */
 	spin_lock_irq(q->queue_lock);
 
+	/* Make sure @q is still alive */
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+		kfree(tg);
+		return NULL;
+	}
+
 	/*
 	 * Initialize the new group. After sleeping, read the blkcg again.
 	 */
@@ -1014,11 +1005,6 @@
 	}
 }
 
-static void throtl_td_free(struct throtl_data *td)
-{
-	kfree(td);
-}
-
 /*
  * Blk cgroup controller notification saying that blkio_group object is being
  * delinked as associated cgroup object is going away. That also means that
@@ -1123,17 +1109,17 @@
 	.plid = BLKIO_POLICY_THROTL,
 };
 
-int blk_throtl_bio(struct request_queue *q, struct bio **biop)
+bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg;
-	struct bio *bio = *biop;
 	bool rw = bio_data_dir(bio), update_disptime = true;
 	struct blkio_cgroup *blkcg;
+	bool throttled = false;
 
 	if (bio->bi_rw & REQ_THROTTLED) {
 		bio->bi_rw &= ~REQ_THROTTLED;
-		return 0;
+		goto out;
 	}
 
 	/*
@@ -1152,7 +1138,7 @@
 			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
 					rw, rw_is_sync(bio->bi_rw));
 			rcu_read_unlock();
-			return 0;
+			goto out;
 		}
 	}
 	rcu_read_unlock();
@@ -1161,18 +1147,10 @@
 	 * Either group has not been allocated yet or it is not an unlimited
 	 * IO group
 	 */
-
 	spin_lock_irq(q->queue_lock);
 	tg = throtl_get_tg(td);
-
-	if (IS_ERR(tg)) {
-		if (PTR_ERR(tg)	== -ENODEV) {
-			/*
-			 * Queue is gone. No queue lock held here.
-			 */
-			return -ENODEV;
-		}
-	}
+	if (unlikely(!tg))
+		goto out_unlock;
 
 	if (tg->nr_queued[rw]) {
 		/*
@@ -1200,7 +1178,7 @@
 		 * So keep on trimming slice even if bio is not queued.
 		 */
 		throtl_trim_slice(td, tg, rw);
-		goto out;
+		goto out_unlock;
 	}
 
 queue_bio:
@@ -1212,16 +1190,52 @@
 			tg->nr_queued[READ], tg->nr_queued[WRITE]);
 
 	throtl_add_bio_tg(q->td, tg, bio);
-	*biop = NULL;
+	throttled = true;
 
 	if (update_disptime) {
 		tg_update_disptime(td, tg);
 		throtl_schedule_next_dispatch(td);
 	}
 
-out:
+out_unlock:
 	spin_unlock_irq(q->queue_lock);
-	return 0;
+out:
+	return throttled;
+}
+
+/**
+ * blk_throtl_drain - drain throttled bios
+ * @q: request_queue to drain throttled bios for
+ *
+ * Dispatch all currently throttled bios on @q through ->make_request_fn().
+ */
+void blk_throtl_drain(struct request_queue *q)
+	__releases(q->queue_lock) __acquires(q->queue_lock)
+{
+	struct throtl_data *td = q->td;
+	struct throtl_rb_root *st = &td->tg_service_tree;
+	struct throtl_grp *tg;
+	struct bio_list bl;
+	struct bio *bio;
+
+	WARN_ON_ONCE(!queue_is_locked(q));
+
+	bio_list_init(&bl);
+
+	while ((tg = throtl_rb_first(st))) {
+		throtl_dequeue_tg(td, tg);
+
+		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
+			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+	}
+	spin_unlock_irq(q->queue_lock);
+
+	while ((bio = bio_list_pop(&bl)))
+		generic_make_request(bio);
+
+	spin_lock_irq(q->queue_lock);
 }
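
The __releases()/__acquires() markers on blk_throtl_drain() are sparse annotations documenting that the lock is dropped and re-taken inside the function. The general shape, as a sketch (example_fn is hypothetical):

void example_fn(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	spin_unlock_irq(q->queue_lock);
	/* ... work that must not run under the lock ... */
	spin_lock_irq(q->queue_lock);
}
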
 
 int blk_throtl_init(struct request_queue *q)
@@ -1296,7 +1310,11 @@
 	 * it.
 	 */
 	throtl_shutdown_wq(q);
-	throtl_td_free(td);
+}
+
+void blk_throtl_release(struct request_queue *q)
+{
+	kfree(q->td);
 }
 
 static int __init throtl_init(void)
diff --git a/block/blk.h b/block/blk.h
index 20b900a..3f6551b 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -15,6 +15,7 @@
 			struct bio *bio);
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio);
+void blk_drain_queue(struct request_queue *q, bool drain_all);
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
@@ -188,4 +189,21 @@
 	        (rq->cmd_flags & REQ_DISCARD));
 }
 
-#endif
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
+extern void blk_throtl_drain(struct request_queue *q);
+extern int blk_throtl_init(struct request_queue *q);
+extern void blk_throtl_exit(struct request_queue *q);
+extern void blk_throtl_release(struct request_queue *q);
+#else /* CONFIG_BLK_DEV_THROTTLING */
+static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
+{
+	return false;
+}
+static inline void blk_throtl_drain(struct request_queue *q) { }
+static inline int blk_throtl_init(struct request_queue *q) { return 0; }
+static inline void blk_throtl_exit(struct request_queue *q) { }
+static inline void blk_throtl_release(struct request_queue *q) { }
+#endif /* CONFIG_BLK_DEV_THROTTLING */
+
+#endif /* BLK_INTERNAL_H */
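
The #else branch above is the usual pattern for compiling out an optional subsystem while keeping call sites unconditional; a generic (hypothetical) instance:

#ifdef CONFIG_MY_FEATURE
extern void my_feature_hook(struct request_queue *q);
#else
static inline void my_feature_hook(struct request_queue *q) { }
#endif
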
diff --git a/block/elevator.c b/block/elevator.c
index a3b64bc..66343d6 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -31,7 +31,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
-#include <linux/delay.h>
 #include <linux/blktrace_api.h>
 #include <linux/hash.h>
 #include <linux/uaccess.h>
@@ -182,7 +181,7 @@
 	eq->elevator_data = data;
 }
 
-static char chosen_elevator[16];
+static char chosen_elevator[ELV_NAME_MAX];
 
 static int __init elevator_setup(char *str)
 {
@@ -606,43 +605,35 @@
 void elv_drain_elevator(struct request_queue *q)
 {
 	static int printed;
+
+	lockdep_assert_held(q->queue_lock);
+
 	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
 		;
-	if (q->nr_sorted == 0)
-		return;
-	if (printed++ < 10) {
+	if (q->nr_sorted && printed++ < 10) {
 		printk(KERN_ERR "%s: forced dispatching is broken "
 		       "(nr_sorted=%u), please report this\n",
 		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
 	}
 }
 
-/*
- * Call with queue lock held, interrupts disabled
- */
 void elv_quiesce_start(struct request_queue *q)
 {
 	if (!q->elevator)
 		return;
 
+	spin_lock_irq(q->queue_lock);
 	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
 
-	/*
-	 * make sure we don't have any requests in flight
-	 */
-	elv_drain_elevator(q);
-	while (q->rq.elvpriv) {
-		__blk_run_queue(q);
-		spin_unlock_irq(q->queue_lock);
-		msleep(10);
-		spin_lock_irq(q->queue_lock);
-		elv_drain_elevator(q);
-	}
+	blk_drain_queue(q, false);
 }
 
 void elv_quiesce_end(struct request_queue *q)
 {
+	spin_lock_irq(q->queue_lock);
 	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
 }
 
 void __elv_add_request(struct request_queue *q, struct request *rq, int where)
@@ -972,7 +963,6 @@
 	/*
 	 * Turn on BYPASS and drain all requests w/ elevator private data
 	 */
-	spin_lock_irq(q->queue_lock);
 	elv_quiesce_start(q);
 
 	/*
@@ -983,8 +973,8 @@
 	/*
 	 * attach and start new elevator
 	 */
+	spin_lock_irq(q->queue_lock);
 	elevator_attach(q, e, data);
-
 	spin_unlock_irq(q->queue_lock);
 
 	if (old_elevator->registered) {
@@ -999,9 +989,7 @@
 	 * finally exit old elevator and turn off BYPASS.
 	 */
 	elevator_exit(old_elevator);
-	spin_lock_irq(q->queue_lock);
 	elv_quiesce_end(q);
-	spin_unlock_irq(q->queue_lock);
 
 	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
 
@@ -1015,10 +1003,7 @@
 	elevator_exit(e);
 	q->elevator = old_elevator;
 	elv_register_queue(q);
-
-	spin_lock_irq(q->queue_lock);
-	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
-	spin_unlock_irq(q->queue_lock);
+	elv_quiesce_end(q);
 
 	return err;
 }
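
With the quiesce helpers now taking queue_lock themselves, the elevator-switch sequence sketched from the patched code reduces to:

	elv_quiesce_start(q);	/* sets ELVSWITCH and drains the queue */

	spin_lock_irq(q->queue_lock);
	elevator_attach(q, e, data);
	spin_unlock_irq(q->queue_lock);

	elv_quiesce_end(q);	/* clears ELVSWITCH */
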
diff --git a/block/genhd.c b/block/genhd.c
index 94855a9..9253839 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -537,7 +537,7 @@
 	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
 
 	/* No minors to use for partitions */
-	if (!disk_partitionable(disk))
+	if (!disk_part_scan_enabled(disk))
 		goto exit;
 
 	/* No such device (e.g., media were just removed) */
@@ -612,6 +612,12 @@
 	register_disk(disk);
 	blk_register_queue(disk);
 
+	/*
+	 * Take an extra ref on queue which will be put on disk_release()
+	 * so that it sticks around as long as @disk is there.
+	 */
+	WARN_ON_ONCE(blk_get_queue(disk->queue));
+
 	retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
 				   "bdi");
 	WARN_ON(retval);
@@ -842,7 +848,7 @@
 	char buf[BDEVNAME_SIZE];
 
 	/* Don't show non-partitionable removable devices or empty devices */
-	if (!get_capacity(sgp) || (!disk_partitionable(sgp) &&
+	if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
 				   (sgp->flags & GENHD_FL_REMOVABLE)))
 		return 0;
 	if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
@@ -1166,6 +1172,8 @@
 	disk_replace_part_tbl(disk, NULL);
 	free_part_stats(&disk->part0);
 	free_part_info(&disk->part0);
+	if (disk->queue)
+		blk_put_queue(disk->queue);
 	kfree(disk);
 }
 struct class block_class = {
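
Paired with the WARN_ON_ONCE(blk_get_queue(...)) added to add_disk() above, this put guarantees disk->queue stays valid for the gendisk's lifetime. A hedged sketch of the pairing:

	add_disk(disk);		/* takes an extra reference on disk->queue */
	/* ... device in use ... */
	put_disk(disk);		/* final disk_release() drops the queue ref */
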
diff --git a/block/ioctl.c b/block/ioctl.c
index 1124cd2..5c74efc 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -101,7 +101,7 @@
 	struct gendisk *disk = bdev->bd_disk;
 	int res;
 
-	if (!disk_partitionable(disk) || bdev != bdev->bd_contains)
+	if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains)
 		return -EINVAL;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 4f4230b..fbdf0d8 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -565,7 +565,7 @@
 {
 	int err;
 
-	if (!q || blk_get_queue(q))
+	if (!q)
 		return -ENXIO;
 
 	switch (cmd) {
@@ -686,7 +686,6 @@
 			err = -ENOTTY;
 	}
 
-	blk_put_queue(q);
 	return err;
 }
 EXPORT_SYMBOL(scsi_cmd_ioctl);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 528f631..167ba0a 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -159,7 +159,7 @@
 	return 0;
 }
 
-static int
+static void
 aoeblk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct sk_buff_head queue;
@@ -172,25 +172,25 @@
 	if (bio == NULL) {
 		printk(KERN_ERR "aoe: bio is NULL\n");
 		BUG();
-		return 0;
+		return;
 	}
 	d = bio->bi_bdev->bd_disk->private_data;
 	if (d == NULL) {
 		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
 		BUG();
 		bio_endio(bio, -ENXIO);
-		return 0;
+		return;
 	} else if (bio->bi_io_vec == NULL) {
 		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
 		BUG();
 		bio_endio(bio, -ENXIO);
-		return 0;
+		return;
 	}
 	buf = mempool_alloc(d->bufpool, GFP_NOIO);
 	if (buf == NULL) {
 		printk(KERN_INFO "aoe: buf allocation failure\n");
 		bio_endio(bio, -ENOMEM);
-		return 0;
+		return;
 	}
 	memset(buf, 0, sizeof(*buf));
 	INIT_LIST_HEAD(&buf->bufs);
@@ -211,7 +211,7 @@
 		spin_unlock_irqrestore(&d->lock, flags);
 		mempool_free(buf, d->bufpool);
 		bio_endio(bio, -ENXIO);
-		return 0;
+		return;
 	}
 
 	list_add_tail(&buf->bufs, &d->bufq);
@@ -222,8 +222,6 @@
 
 	spin_unlock_irqrestore(&d->lock, flags);
 	aoenet_xmit(&queue);
-
-	return 0;
 }
 
 static int
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index dba1c32..d22119d 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -323,7 +323,7 @@
 	return err;
 }
 
-static int brd_make_request(struct request_queue *q, struct bio *bio)
+static void brd_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct brd_device *brd = bdev->bd_disk->private_data;
@@ -359,8 +359,6 @@
 
 out:
 	bio_endio(bio, err);
-
-	return 0;
 }
 
 #ifdef CONFIG_BLK_DEV_XIP
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8f4ef65..486f94e 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -68,6 +68,10 @@
 module_param(cciss_tape_cmds, int, 0644);
 MODULE_PARM_DESC(cciss_tape_cmds,
 	"number of commands to allocate for tape devices (default: 6)");
+static int cciss_simple_mode;
+module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cciss_simple_mode,
+	"Use 'simple mode' rather than 'performant mode'");
 
 static DEFINE_MUTEX(cciss_mutex);
 static struct proc_dir_entry *proc_cciss;
@@ -176,6 +180,7 @@
 			unsigned int block_size, InquiryData_struct *inq_buff,
 				   drive_info_struct *drv);
 static void __devinit cciss_interrupt_mode(ctlr_info_t *);
+static int __devinit cciss_enter_simple_mode(struct ctlr_info *h);
 static void start_io(ctlr_info_t *h);
 static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
 			__u8 page_code, unsigned char scsi3addr[],
@@ -388,7 +393,7 @@
 		h->product_name,
 		(unsigned long)h->board_id,
 		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
-		h->firm_ver[3], (unsigned int)h->intr[PERF_MODE_INT],
+		h->firm_ver[3], (unsigned int)h->intr[h->intr_mode],
 		h->num_luns,
 		h->Qdepth, h->commands_outstanding,
 		h->maxQsinceinit, h->max_outstanding, h->maxSG);
@@ -636,6 +641,18 @@
 }
 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
 
+static ssize_t host_show_transport_mode(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct ctlr_info *h = to_hba(dev);
+
+	return snprintf(buf, 20, "%s\n",
+		h->transMethod & CFGTBL_Trans_Performant ?
+			"performant" : "simple");
+}
+static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL);
+
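
A userspace sketch of consuming the new attribute (the PCI address in the path is illustrative):

#include <stdio.h>

int main(void)
{
	char mode[20];
	FILE *f = fopen("/sys/bus/pci/devices/0000:06:00.0/cciss0/"
			"transport_mode", "r");

	if (f && fgets(mode, sizeof(mode), f))
		printf("transport mode: %s", mode);
	if (f)
		fclose(f);
	return 0;
}
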
 static ssize_t dev_show_unique_id(struct device *dev,
 				 struct device_attribute *attr,
 				 char *buf)
@@ -808,6 +825,7 @@
 static struct attribute *cciss_host_attrs[] = {
 	&dev_attr_rescan.attr,
 	&dev_attr_resettable.attr,
+	&dev_attr_transport_mode.attr,
 	NULL
 };
 
@@ -3984,6 +4002,9 @@
 {
 	__u32 trans_support;
 
+	if (cciss_simple_mode)
+		return;
+
 	dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
 	/* Attempt to put controller into performant mode if supported */
 	/* Does board support performant mode? */
@@ -4081,7 +4102,7 @@
 default_int_mode:
 #endif				/* CONFIG_PCI_MSI */
 	/* if we get here we're going to use the default interrupt mode */
-	h->intr[PERF_MODE_INT] = h->pdev->irq;
+	h->intr[h->intr_mode] = h->pdev->irq;
 	return;
 }
 
@@ -4341,6 +4362,9 @@
 	}
 	cciss_enable_scsi_prefetch(h);
 	cciss_p600_dma_prefetch_quirk(h);
+	err = cciss_enter_simple_mode(h);
+	if (err)
+		goto err_out_free_res;
 	cciss_put_controller_into_performant_mode(h);
 	return 0;
 
@@ -4533,6 +4557,13 @@
 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 		pmcsr |= PCI_D0;
 		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+		/*
+		 * The P600 requires a small delay when changing states.
+		 * Otherwise we may think the board did not reset and we bail.
+		 * This is for kdump only and is particular to the P600.
+		 */
+		msleep(500);
 	}
 	return 0;
 }
@@ -4843,20 +4874,20 @@
 	irqreturn_t (*intxhandler)(int, void *))
 {
 	if (h->msix_vector || h->msi_vector) {
-		if (!request_irq(h->intr[PERF_MODE_INT], msixhandler,
+		if (!request_irq(h->intr[h->intr_mode], msixhandler,
 				IRQF_DISABLED, h->devname, h))
 			return 0;
 		dev_err(&h->pdev->dev, "Unable to get msi irq %d"
-			" for %s\n", h->intr[PERF_MODE_INT],
+			" for %s\n", h->intr[h->intr_mode],
 			h->devname);
 		return -1;
 	}
 
-	if (!request_irq(h->intr[PERF_MODE_INT], intxhandler,
+	if (!request_irq(h->intr[h->intr_mode], intxhandler,
 			IRQF_DISABLED, h->devname, h))
 		return 0;
 	dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
-		h->intr[PERF_MODE_INT], h->devname);
+		h->intr[h->intr_mode], h->devname);
 	return -1;
 }
 
@@ -4887,7 +4918,7 @@
 {
 	int ctlr = h->ctlr;
 
-	free_irq(h->intr[PERF_MODE_INT], h);
+	free_irq(h->intr[h->intr_mode], h);
 #ifdef CONFIG_PCI_MSI
 	if (h->msix_vector)
 		pci_disable_msix(h->pdev);
@@ -4953,6 +4984,7 @@
 	h = hba[i];
 	h->pdev = pdev;
 	h->busy_initializing = 1;
+	h->intr_mode = cciss_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
 	INIT_LIST_HEAD(&h->cmpQ);
 	INIT_LIST_HEAD(&h->reqQ);
 	mutex_init(&h->busy_shutting_down);
@@ -5009,7 +5041,7 @@
 
 	dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
 	       h->devname, pdev->device, pci_name(pdev),
-	       h->intr[PERF_MODE_INT], dac ? "" : " not");
+	       h->intr[h->intr_mode], dac ? "" : " not");
 
 	if (cciss_allocate_cmd_pool(h))
 		goto clean4;
@@ -5056,7 +5088,7 @@
 		spin_lock_irqsave(&h->lock, flags);
 		h->access.set_intr_mask(h, CCISS_INTR_OFF);
 		spin_unlock_irqrestore(&h->lock, flags);
-		free_irq(h->intr[PERF_MODE_INT], h);
+		free_irq(h->intr[h->intr_mode], h);
 		rc = cciss_request_irq(h, cciss_msix_discard_completions,
 					cciss_intx_discard_completions);
 		if (rc) {
@@ -5133,7 +5165,7 @@
 	cciss_free_cmd_pool(h);
 	cciss_free_scatterlists(h);
 	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
-	free_irq(h->intr[PERF_MODE_INT], h);
+	free_irq(h->intr[h->intr_mode], h);
 clean2:
 	unregister_blkdev(h->major, h->devname);
 clean1:
@@ -5172,9 +5204,31 @@
 	if (return_code != IO_OK)
 		dev_warn(&h->pdev->dev, "Error flushing cache\n");
 	h->access.set_intr_mask(h, CCISS_INTR_OFF);
-	free_irq(h->intr[PERF_MODE_INT], h);
+	free_irq(h->intr[h->intr_mode], h);
 }
 
+static int __devinit cciss_enter_simple_mode(struct ctlr_info *h)
+{
+	u32 trans_support;
+
+	trans_support = readl(&(h->cfgtable->TransportSupport));
+	if (!(trans_support & SIMPLE_MODE))
+		return -ENOTSUPP;
+
+	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+	cciss_wait_for_mode_change_ack(h);
+	print_cfg_table(h);
+	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+		dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
+		return -ENODEV;
+	}
+	h->transMethod = CFGTBL_Trans_Simple;
+	return 0;
+}
+
+
 static void __devexit cciss_remove_one(struct pci_dev *pdev)
 {
 	ctlr_info_t *h;
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index c049548..7fda30e 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -92,6 +92,7 @@
 	unsigned int intr[4];
 	unsigned int msix_vector;
 	unsigned int msi_vector;
+	int	intr_mode;
 	int 	cciss_max_sectors;
 	BYTE	cciss_read;
 	BYTE	cciss_write;
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index b2fceb5..9125bbe 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -620,6 +620,7 @@
 	}
 	vendor_id = pdev->vendor;
 	device_id = pdev->device;
+	revision  = pdev->revision;
 	irq = pdev->irq;
 
 	for(i=0; i<6; i++)
@@ -632,7 +633,6 @@
 	}
 
 	pci_read_config_word(pdev, PCI_COMMAND, &command);
-	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
 	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
 
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 1706d60..9cf2035 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1506,7 +1506,7 @@
 extern int proc_details;
 
 /* drbd_req */
-extern int drbd_make_request(struct request_queue *q, struct bio *bio);
+extern void drbd_make_request(struct request_queue *q, struct bio *bio);
 extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
 extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3424d67..4a0f314 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1073,7 +1073,7 @@
 	return 0;
 }
 
-int drbd_make_request(struct request_queue *q, struct bio *bio)
+void drbd_make_request(struct request_queue *q, struct bio *bio)
 {
 	unsigned int s_enr, e_enr;
 	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
@@ -1081,7 +1081,7 @@
 
 	if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
 		bio_endio(bio, -EPERM);
-		return 0;
+		return;
 	}
 
 	start_time = jiffies;
@@ -1100,7 +1100,8 @@
 
 	if (likely(s_enr == e_enr)) {
 		inc_ap_bio(mdev, 1);
-		return drbd_make_request_common(mdev, bio, start_time);
+		drbd_make_request_common(mdev, bio, start_time);
+		return;
 	}
 
 	/* can this bio be split generically?
@@ -1148,7 +1149,6 @@
 
 		bio_pair_release(bp);
 	}
-	return 0;
 }
 
 /* This is called by bio_add_page().  With this function we reduce
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4720c7a..3d80682 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -76,6 +76,8 @@
 #include <linux/splice.h>
 #include <linux/sysfs.h>
 #include <linux/miscdevice.h>
+#include <linux/falloc.h>
+
 #include <asm/uaccess.h>
 
 static DEFINE_IDR(loop_index_idr);
@@ -203,74 +205,6 @@
 }
 
 /**
- * do_lo_send_aops - helper for writing data to a loop device
- *
- * This is the fast version for backing filesystems which implement the address
- * space operations write_begin and write_end.
- */
-static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
-		loff_t pos, struct page *unused)
-{
-	struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
-	struct address_space *mapping = file->f_mapping;
-	pgoff_t index;
-	unsigned offset, bv_offs;
-	int len, ret;
-
-	mutex_lock(&mapping->host->i_mutex);
-	index = pos >> PAGE_CACHE_SHIFT;
-	offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
-	bv_offs = bvec->bv_offset;
-	len = bvec->bv_len;
-	while (len > 0) {
-		sector_t IV;
-		unsigned size, copied;
-		int transfer_result;
-		struct page *page;
-		void *fsdata;
-
-		IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
-		size = PAGE_CACHE_SIZE - offset;
-		if (size > len)
-			size = len;
-
-		ret = pagecache_write_begin(file, mapping, pos, size, 0,
-							&page, &fsdata);
-		if (ret)
-			goto fail;
-
-		file_update_time(file);
-
-		transfer_result = lo_do_transfer(lo, WRITE, page, offset,
-				bvec->bv_page, bv_offs, size, IV);
-		copied = size;
-		if (unlikely(transfer_result))
-			copied = 0;
-
-		ret = pagecache_write_end(file, mapping, pos, size, copied,
-							page, fsdata);
-		if (ret < 0 || ret != copied)
-			goto fail;
-
-		if (unlikely(transfer_result))
-			goto fail;
-
-		bv_offs += copied;
-		len -= copied;
-		offset = 0;
-		index++;
-		pos += copied;
-	}
-	ret = 0;
-out:
-	mutex_unlock(&mapping->host->i_mutex);
-	return ret;
-fail:
-	ret = -1;
-	goto out;
-}
-
-/**
  * __do_lo_send_write - helper for writing data to a loop device
  *
  * This helper just factors out common code between do_lo_send_direct_write()
@@ -297,10 +231,8 @@
 /**
  * do_lo_send_direct_write - helper for writing data to a loop device
  *
- * This is the fast, non-transforming version for backing filesystems which do
- * not implement the address space operations write_begin and write_end.
- * It uses the write file operation which should be present on all writeable
- * filesystems.
+ * This is the fast, non-transforming version that does not need double
+ * buffering.
  */
 static int do_lo_send_direct_write(struct loop_device *lo,
 		struct bio_vec *bvec, loff_t pos, struct page *page)
@@ -316,15 +248,9 @@
 /**
  * do_lo_send_write - helper for writing data to a loop device
  *
- * This is the slow, transforming version for filesystems which do not
- * implement the address space operations write_begin and write_end.  It
- * uses the write file operation which should be present on all writeable
- * filesystems.
- *
- * Using fops->write is slower than using aops->{prepare,commit}_write in the
- * transforming case because we need to double buffer the data as we cannot do
- * the transformations in place as we do not have direct access to the
- * destination pages of the backing file.
+ * This is the slow, transforming version that needs to double buffer the
+ * data as it cannot do the transformations in place without having direct
+ * access to the destination pages of the backing file.
  */
 static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
 		loff_t pos, struct page *page)
@@ -350,17 +276,16 @@
 	struct page *page = NULL;
 	int i, ret = 0;
 
-	do_lo_send = do_lo_send_aops;
-	if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) {
+	if (lo->transfer != transfer_none) {
+		page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
+		if (unlikely(!page))
+			goto fail;
+		kmap(page);
+		do_lo_send = do_lo_send_write;
+	} else {
 		do_lo_send = do_lo_send_direct_write;
-		if (lo->transfer != transfer_none) {
-			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
-			if (unlikely(!page))
-				goto fail;
-			kmap(page);
-			do_lo_send = do_lo_send_write;
-		}
 	}
+
 	bio_for_each_segment(bvec, bio, i) {
 		ret = do_lo_send(lo, bvec, pos, page);
 		if (ret < 0)
@@ -484,6 +409,29 @@
 			}
 		}
 
+		/*
+		 * We use punch hole to reclaim the free space used by the
+		 * image a.k.a. discard. However we do not support discard if
+		 * encryption is enabled, because it may give an attacker
+		 * useful information.
+		 */
+		if (bio->bi_rw & REQ_DISCARD) {
+			struct file *file = lo->lo_backing_file;
+			int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+
+			if ((!file->f_op->fallocate) ||
+			    lo->lo_encrypt_key_size) {
+				ret = -EOPNOTSUPP;
+				goto out;
+			}
+			ret = file->f_op->fallocate(file, mode, pos,
+						    bio->bi_size);
+			if (unlikely(ret && ret != -EINVAL &&
+				     ret != -EOPNOTSUPP))
+				ret = -EIO;
+			goto out;
+		}
+
 		ret = lo_send(lo, bio, pos);
 
 		if ((bio->bi_rw & REQ_FUA) && !ret) {
@@ -514,7 +462,7 @@
 	return bio_list_pop(&lo->lo_bio_list);
 }
 
-static int loop_make_request(struct request_queue *q, struct bio *old_bio)
+static void loop_make_request(struct request_queue *q, struct bio *old_bio)
 {
 	struct loop_device *lo = q->queuedata;
 	int rw = bio_rw(old_bio);
@@ -532,12 +480,11 @@
 	loop_add_bio(lo, old_bio);
 	wake_up(&lo->lo_event);
 	spin_unlock_irq(&lo->lo_lock);
-	return 0;
+	return;
 
 out:
 	spin_unlock_irq(&lo->lo_lock);
 	bio_io_error(old_bio);
-	return 0;
 }
 
 struct switch_request {
@@ -700,7 +647,7 @@
 		goto out_putf;
 
 	fput(old_file);
-	if (max_part > 0)
+	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
 		ioctl_by_bdev(bdev, BLKRRPART, 0);
 	return 0;
 
@@ -777,16 +724,25 @@
 	return sprintf(buf, "%s\n", autoclear ? "1" : "0");
 }
 
+static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
+{
+	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
+
+	return sprintf(buf, "%s\n", partscan ? "1" : "0");
+}
+
 LOOP_ATTR_RO(backing_file);
 LOOP_ATTR_RO(offset);
 LOOP_ATTR_RO(sizelimit);
 LOOP_ATTR_RO(autoclear);
+LOOP_ATTR_RO(partscan);
 
 static struct attribute *loop_attrs[] = {
 	&loop_attr_backing_file.attr,
 	&loop_attr_offset.attr,
 	&loop_attr_sizelimit.attr,
 	&loop_attr_autoclear.attr,
+	&loop_attr_partscan.attr,
 	NULL,
 };
 
@@ -807,6 +763,35 @@
 			   &loop_attribute_group);
 }
 
+static void loop_config_discard(struct loop_device *lo)
+{
+	struct file *file = lo->lo_backing_file;
+	struct inode *inode = file->f_mapping->host;
+	struct request_queue *q = lo->lo_queue;
+
+	/*
+	 * We use punch hole to reclaim the free space used by the
+	 * image a.k.a. discard. However we do not support discard if
+	 * encryption is enabled, because it may give an attacker
+	 * useful information.
+	 */
+	if ((!file->f_op->fallocate) ||
+	    lo->lo_encrypt_key_size) {
+		q->limits.discard_granularity = 0;
+		q->limits.discard_alignment = 0;
+		q->limits.max_discard_sectors = 0;
+		q->limits.discard_zeroes_data = 0;
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		return;
+	}
+
+	q->limits.discard_granularity = inode->i_sb->s_blocksize;
+	q->limits.discard_alignment = inode->i_sb->s_blocksize;
+	q->limits.max_discard_sectors = UINT_MAX >> 9;
+	q->limits.discard_zeroes_data = 1;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+}
+
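
loop_config_discard() only advertises discard when the backing file can punch holes. A userspace analogue of the per-range operation, as a sketch assuming a filesystem with punch-hole support:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

/* mirrors the kernel-side file->f_op->fallocate() call above */
static int punch_hole(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}
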
 static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 		       struct block_device *bdev, unsigned int arg)
 {
@@ -849,36 +834,24 @@
 	mapping = file->f_mapping;
 	inode = mapping->host;
 
-	if (!(file->f_mode & FMODE_WRITE))
-		lo_flags |= LO_FLAGS_READ_ONLY;
-
 	error = -EINVAL;
-	if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
-		const struct address_space_operations *aops = mapping->a_ops;
-
-		if (aops->write_begin)
-			lo_flags |= LO_FLAGS_USE_AOPS;
-		if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
-			lo_flags |= LO_FLAGS_READ_ONLY;
-
-		lo_blocksize = S_ISBLK(inode->i_mode) ?
-			inode->i_bdev->bd_block_size : PAGE_SIZE;
-
-		error = 0;
-	} else {
+	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
 		goto out_putf;
-	}
 
-	size = get_loop_size(lo, file);
-
-	if ((loff_t)(sector_t)size != size) {
-		error = -EFBIG;
-		goto out_putf;
-	}
-
-	if (!(mode & FMODE_WRITE))
+	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
+	    !file->f_op->write)
 		lo_flags |= LO_FLAGS_READ_ONLY;
 
+	lo_blocksize = S_ISBLK(inode->i_mode) ?
+		inode->i_bdev->bd_block_size : PAGE_SIZE;
+
+	error = -EFBIG;
+	size = get_loop_size(lo, file);
+	if ((loff_t)(sector_t)size != size)
+		goto out_putf;
+
+	error = 0;
+
 	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
 	lo->lo_blocksize = lo_blocksize;
@@ -919,7 +892,9 @@
 	}
 	lo->lo_state = Lo_bound;
 	wake_up_process(lo->lo_thread);
-	if (max_part > 0)
+	if (part_shift)
+		lo->lo_flags |= LO_FLAGS_PARTSCAN;
+	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
 		ioctl_by_bdev(bdev, BLKRRPART, 0);
 	return 0;
 
@@ -980,10 +955,11 @@
 	return err;
 }
 
-static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
+static int loop_clr_fd(struct loop_device *lo)
 {
 	struct file *filp = lo->lo_backing_file;
 	gfp_t gfp = lo->old_gfp_mask;
+	struct block_device *bdev = lo->lo_device;
 
 	if (lo->lo_state != Lo_bound)
 		return -ENXIO;
@@ -1012,7 +988,6 @@
 	lo->lo_offset = 0;
 	lo->lo_sizelimit = 0;
 	lo->lo_encrypt_key_size = 0;
-	lo->lo_flags = 0;
 	lo->lo_thread = NULL;
 	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
 	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
@@ -1030,8 +1005,11 @@
 	lo->lo_state = Lo_unbound;
 	/* This is safe: open() is still holding a reference. */
 	module_put(THIS_MODULE);
-	if (max_part > 0 && bdev)
+	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
 		ioctl_by_bdev(bdev, BLKRRPART, 0);
+	lo->lo_flags = 0;
+	if (!part_shift)
+		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
 	mutex_unlock(&lo->lo_ctl_mutex);
 	/*
 	 * Need not hold lo_ctl_mutex to fput backing file.
@@ -1085,6 +1063,7 @@
 		if (figure_loop_size(lo))
 			return -EFBIG;
 	}
+	loop_config_discard(lo);
 
 	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
 	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
@@ -1100,6 +1079,13 @@
 	     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
 		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
 
+	if ((info->lo_flags & LO_FLAGS_PARTSCAN) &&
+	     !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
+		lo->lo_flags |= LO_FLAGS_PARTSCAN;
+		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
+		ioctl_by_bdev(lo->lo_device, BLKRRPART, 0);
+	}
+
 	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
 	lo->lo_init[0] = info->lo_init[0];
 	lo->lo_init[1] = info->lo_init[1];
@@ -1293,7 +1279,7 @@
 		break;
 	case LOOP_CLR_FD:
 		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
-		err = loop_clr_fd(lo, bdev);
+		err = loop_clr_fd(lo);
 		if (!err)
 			goto out_unlocked;
 		break;
@@ -1513,7 +1499,7 @@
 		 * In autoclear mode, stop the loop thread
 		 * and remove configuration after last close.
 		 */
-		err = loop_clr_fd(lo, NULL);
+		err = loop_clr_fd(lo);
 		if (!err)
 			goto out_unlocked;
 	} else {
@@ -1635,6 +1621,27 @@
 	if (!disk)
 		goto out_free_queue;
 
+	/*
+	 * Disable partition scanning by default. The in-kernel partition
+	 * scanning can be requested individually per-device during its
+	 * setup. Userspace can always add and remove partitions from all
+	 * devices. The needed partition minors are allocated from the
+	 * extended minor space, the main loop device numbers will continue
+	 * to match the loop minors, regardless of the number of partitions
+	 * used.
+	 *
+	 * If max_part is given, partition scanning is globally enabled for
+	 * all loop devices. The minors for the main loop devices will be
+	 * multiples of max_part.
+	 *
+	 * Note: Global-for-all-devices, set-only-at-init, read-only module
+	 * parameters like 'max_loop' and 'max_part' make things needlessly
+	 * complicated, are too static and inflexible, and may surprise
+	 * userspace tools. Parameters like this should in general be avoided.
+	 */
+	if (!part_shift)
+		disk->flags |= GENHD_FL_NO_PART_SCAN;
+	disk->flags |= GENHD_FL_EXT_DEVT;
 	mutex_init(&lo->lo_ctl_mutex);
 	lo->lo_number		= i;
 	lo->lo_thread		= NULL;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index f533f33..c3f0ee1 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -127,8 +127,7 @@
 	if (lock)
 		mutex_lock(&lo->tx_lock);
 	if (lo->sock) {
-		printk(KERN_WARNING "%s: shutting down socket\n",
-			lo->disk->disk_name);
+		dev_warn(disk_to_dev(lo->disk), "shutting down socket\n");
 		kernel_sock_shutdown(lo->sock, SHUT_RDWR);
 		lo->sock = NULL;
 	}
@@ -158,8 +157,9 @@
 	sigset_t blocked, oldset;
 
 	if (unlikely(!sock)) {
-		printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
-		       lo->disk->disk_name, (send ? "send" : "recv"));
+		dev_err(disk_to_dev(lo->disk),
+			"Attempted %s on closed socket in sock_xmit\n",
+			(send ? "send" : "recv"));
 		return -EINVAL;
 	}
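
The rest of this nbd patch applies the same mechanical conversion; the before/after pattern is:

	/* before: device name open-coded into the format string */
	printk(KERN_ERR "%s: Send control failed (result %d)\n",
	       lo->disk->disk_name, result);

	/* after: the driver core prefixes the device name */
	dev_err(disk_to_dev(lo->disk),
		"Send control failed (result %d)\n", result);
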
 
@@ -250,8 +250,8 @@
 	result = sock_xmit(lo, 1, &request, sizeof(request),
 			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
 	if (result <= 0) {
-		printk(KERN_ERR "%s: Send control failed (result %d)\n",
-				lo->disk->disk_name, result);
+		dev_err(disk_to_dev(lo->disk),
+			"Send control failed (result %d)\n", result);
 		goto error_out;
 	}
 
@@ -270,8 +270,9 @@
 					lo->disk->disk_name, req, bvec->bv_len);
 			result = sock_send_bvec(lo, bvec, flags);
 			if (result <= 0) {
-				printk(KERN_ERR "%s: Send data failed (result %d)\n",
-						lo->disk->disk_name, result);
+				dev_err(disk_to_dev(lo->disk),
+					"Send data failed (result %d)\n",
+					result);
 				goto error_out;
 			}
 		}
@@ -328,14 +329,13 @@
 	reply.magic = 0;
 	result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL);
 	if (result <= 0) {
-		printk(KERN_ERR "%s: Receive control failed (result %d)\n",
-				lo->disk->disk_name, result);
+		dev_err(disk_to_dev(lo->disk),
+			"Receive control failed (result %d)\n", result);
 		goto harderror;
 	}
 
 	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
-		printk(KERN_ERR "%s: Wrong magic (0x%lx)\n",
-				lo->disk->disk_name,
+		dev_err(disk_to_dev(lo->disk), "Wrong magic (0x%lx)\n",
 				(unsigned long)ntohl(reply.magic));
 		result = -EPROTO;
 		goto harderror;
@@ -347,15 +347,15 @@
 		if (result != -ENOENT)
 			goto harderror;
 
-		printk(KERN_ERR "%s: Unexpected reply (%p)\n",
-				lo->disk->disk_name, reply.handle);
+		dev_err(disk_to_dev(lo->disk), "Unexpected reply (%p)\n",
+			reply.handle);
 		result = -EBADR;
 		goto harderror;
 	}
 
 	if (ntohl(reply.error)) {
-		printk(KERN_ERR "%s: Other side returned error (%d)\n",
-				lo->disk->disk_name, ntohl(reply.error));
+		dev_err(disk_to_dev(lo->disk), "Other side returned error (%d)\n",
+			ntohl(reply.error));
 		req->errors++;
 		return req;
 	}
@@ -369,8 +369,8 @@
 		rq_for_each_segment(bvec, req, iter) {
 			result = sock_recv_bvec(lo, bvec);
 			if (result <= 0) {
-				printk(KERN_ERR "%s: Receive data failed (result %d)\n",
-						lo->disk->disk_name, result);
+				dev_err(disk_to_dev(lo->disk), "Receive data failed (result %d)\n",
+					result);
 				req->errors++;
 				return req;
 			}
@@ -405,10 +405,10 @@
 
 	BUG_ON(lo->magic != LO_MAGIC);
 
-	lo->pid = current->pid;
-	ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
+	lo->pid = task_pid_nr(current);
+	ret = device_create_file(disk_to_dev(lo->disk), &pid_attr);
 	if (ret) {
-		printk(KERN_ERR "nbd: sysfs_create_file failed!");
+		dev_err(disk_to_dev(lo->disk), "device_create_file failed!\n");
 		lo->pid = 0;
 		return ret;
 	}
@@ -416,7 +416,7 @@
 	while ((req = nbd_read_stat(lo)) != NULL)
 		nbd_end_request(req);
 
-	sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
+	device_remove_file(disk_to_dev(lo->disk), &pid_attr);
 	lo->pid = 0;
 	return 0;
 }
@@ -457,8 +457,8 @@
 	if (rq_data_dir(req) == WRITE) {
 		nbd_cmd(req) = NBD_CMD_WRITE;
 		if (lo->flags & NBD_READ_ONLY) {
-			printk(KERN_ERR "%s: Write on read-only\n",
-					lo->disk->disk_name);
+			dev_err(disk_to_dev(lo->disk),
+				"Write on read-only\n");
 			goto error_out;
 		}
 	}
@@ -468,16 +468,15 @@
 	mutex_lock(&lo->tx_lock);
 	if (unlikely(!lo->sock)) {
 		mutex_unlock(&lo->tx_lock);
-		printk(KERN_ERR "%s: Attempted send on closed socket\n",
-		       lo->disk->disk_name);
+		dev_err(disk_to_dev(lo->disk),
+			"Attempted send on closed socket\n");
 		goto error_out;
 	}
 
 	lo->active_req = req;
 
 	if (nbd_send_req(lo, req) != 0) {
-		printk(KERN_ERR "%s: Request send failed\n",
-				lo->disk->disk_name);
+		dev_err(disk_to_dev(lo->disk), "Request send failed\n");
 		req->errors++;
 		nbd_end_request(req);
 	} else {
@@ -549,8 +548,8 @@
 		BUG_ON(lo->magic != LO_MAGIC);
 
 		if (unlikely(!lo->sock)) {
-			printk(KERN_ERR "%s: Attempted send on closed socket\n",
-				lo->disk->disk_name);
+			dev_err(disk_to_dev(lo->disk),
+				"Attempted send on closed socket\n");
 			req->errors++;
 			nbd_end_request(req);
 			spin_lock_irq(q->queue_lock);
@@ -576,7 +575,7 @@
 	case NBD_DISCONNECT: {
 		struct request sreq;
 
-	        printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
+		dev_info(disk_to_dev(lo->disk), "NBD_DISCONNECT\n");
 
 		blk_rq_init(NULL, &sreq);
 		sreq.cmd_type = REQ_TYPE_SPECIAL;
@@ -674,7 +673,7 @@
 		file = lo->file;
 		lo->file = NULL;
 		nbd_clear_que(lo);
-		printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
+		dev_warn(disk_to_dev(lo->disk), "queue cleared\n");
 		if (file)
 			fput(file);
 		lo->bytesize = 0;
@@ -694,8 +693,8 @@
 		return 0;
 
 	case NBD_PRINT_DEBUG:
-		printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
-			bdev->bd_disk->disk_name,
+		dev_info(disk_to_dev(lo->disk),
+			"next = %p, prev = %p, head = %p\n",
 			lo->queue_head.next, lo->queue_head.prev,
 			&lo->queue_head);
 		return 0;
@@ -745,7 +744,7 @@
 	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
 
 	if (max_part < 0) {
-		printk(KERN_CRIT "nbd: max_part must be >= 0\n");
+		printk(KERN_ERR "nbd: max_part must be >= 0\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index e133f09..a63b0a2 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2444,7 +2444,7 @@
 	pkt_bio_finished(pd);
 }
 
-static int pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct pktcdvd_device *pd;
 	char b[BDEVNAME_SIZE];
@@ -2473,7 +2473,7 @@
 		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
 		pd->stats.secs_r += bio->bi_size >> 9;
 		pkt_queue_bio(pd, cloned_bio);
-		return 0;
+		return;
 	}
 
 	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@@ -2509,7 +2509,7 @@
 			pkt_make_request(q, &bp->bio1);
 			pkt_make_request(q, &bp->bio2);
 			bio_pair_release(bp);
-			return 0;
+			return;
 		}
 	}
 
@@ -2533,7 +2533,7 @@
 				}
 				spin_unlock(&pkt->lock);
 				spin_unlock(&pd->cdrw.active_list_lock);
-				return 0;
+				return;
 			} else {
 				blocked_bio = 1;
 			}
@@ -2584,10 +2584,9 @@
 		 */
 		wake_up(&pd->wqueue);
 	}
-	return 0;
+	return;
 end_io:
 	bio_io_error(bio);
-	return 0;
 }
 
 
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index b3bdb8a..7fad7af 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -596,7 +596,7 @@
 	return next;
 }
 
-static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
+static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct ps3_system_bus_device *dev = q->queuedata;
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -610,13 +610,11 @@
 	spin_unlock_irq(&priv->lock);
 
 	if (busy)
-		return 0;
+		return;
 
 	do {
 		bio = ps3vram_do_bio(dev, bio);
 	} while (bio);
-
-	return 0;
 }
 
 static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 031ca72..aa27120 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,7 +513,7 @@
 	}
 }
 
-static int mm_make_request(struct request_queue *q, struct bio *bio)
+static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct cardinfo *card = q->queuedata;
 	pr_debug("mm_make_request %llu %u\n",
@@ -525,7 +525,7 @@
 	card->biotail = &bio->bi_next;
 	spin_unlock_irq(&card->lock);
 
-	return 0;
+	return;
 }
 
 static irqreturn_t mm_interrupt(int irq, void *__card)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 1540792..15ec4db 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -39,6 +39,9 @@
 #include <linux/list.h>
 #include <linux/delay.h>
 #include <linux/freezer.h>
+#include <linux/loop.h>
+#include <linux/falloc.h>
+#include <linux/fs.h>
 
 #include <xen/events.h>
 #include <xen/page.h>
@@ -258,13 +261,16 @@
 
 static void print_stats(struct xen_blkif *blkif)
 {
-	pr_info("xen-blkback (%s): oo %3d  |  rd %4d  |  wr %4d  |  f %4d\n",
+	pr_info("xen-blkback (%s): oo %3d  |  rd %4d  |  wr %4d  |  f %4d"
+		 "  |  ds %4d\n",
 		 current->comm, blkif->st_oo_req,
-		 blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req);
+		 blkif->st_rd_req, blkif->st_wr_req,
+		 blkif->st_f_req, blkif->st_ds_req);
 	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
 	blkif->st_rd_req = 0;
 	blkif->st_wr_req = 0;
 	blkif->st_oo_req = 0;
+	blkif->st_ds_req = 0;
 }
 
 int xen_blkif_schedule(void *arg)
@@ -410,6 +416,59 @@
 	return ret;
 }
 
+static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
+{
+	int err = 0;
+	int status = BLKIF_RSP_OKAY;
+	struct block_device *bdev = blkif->vbd.bdev;
+
+	if (blkif->blk_backend_type == BLKIF_BACKEND_PHY)
+		/* just forward the discard request */
+		err = blkdev_issue_discard(bdev,
+				req->u.discard.sector_number,
+				req->u.discard.nr_sectors,
+				GFP_KERNEL, 0);
+	else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
+		/* punch a hole in the backing file */
+		struct loop_device *lo = bdev->bd_disk->private_data;
+		struct file *file = lo->lo_backing_file;
+
+		if (file->f_op->fallocate)
+			err = file->f_op->fallocate(file,
+				FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+				req->u.discard.sector_number << 9,
+				req->u.discard.nr_sectors << 9);
+		else
+			err = -EOPNOTSUPP;
+	} else
+		err = -EOPNOTSUPP;
+
+	if (err == -EOPNOTSUPP) {
+		pr_debug(DRV_PFX "discard op failed, not supported\n");
+		status = BLKIF_RSP_EOPNOTSUPP;
+	} else if (err)
+		status = BLKIF_RSP_ERROR;
+
+	make_response(blkif, req->id, req->operation, status);
+}
+
+static void xen_blk_drain_io(struct xen_blkif *blkif)
+{
+	atomic_set(&blkif->drain, 1);
+	do {
+		/* The initial value is one, and one more refcnt is taken at
+		 * the start of the xen_blkif_schedule thread. */
+		if (atomic_read(&blkif->refcnt) <= 2)
+			break;
+		wait_for_completion_interruptible_timeout(
+				&blkif->drain_complete, HZ);
+
+		if (!atomic_read(&blkif->drain))
+			break;
+	} while (!kthread_should_stop());
+	atomic_set(&blkif->drain, 0);
+}
+
 /*
  * Completion callback on the bio's. Called as bh->b_end_io()
  */
@@ -422,6 +481,11 @@
 		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
+	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
+		    (error == -EOPNOTSUPP)) {
+		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
+		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
+		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
 	} else if (error) {
 		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
 			 " error=%d\n", error);
@@ -438,6 +502,10 @@
 		make_response(pending_req->blkif, pending_req->id,
 			      pending_req->operation, pending_req->status);
 		xen_blkif_put(pending_req->blkif);
+		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
+			if (atomic_read(&pending_req->blkif->drain))
+				complete(&pending_req->blkif->drain_complete);
+		}
 		free_req(pending_req);
 	}
 }
@@ -532,7 +600,6 @@
 
 	return more_to_do;
 }
-
 /*
  * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
  * and call the 'submit_bio' to pass it to the underlying storage.
@@ -549,6 +616,7 @@
 	int i, nbio = 0;
 	int operation;
 	struct blk_plug plug;
+	bool drain = false;
 
 	switch (req->operation) {
 	case BLKIF_OP_READ:
@@ -559,11 +627,16 @@
 		blkif->st_wr_req++;
 		operation = WRITE_ODIRECT;
 		break;
+	case BLKIF_OP_WRITE_BARRIER:
+		drain = true;
 	case BLKIF_OP_FLUSH_DISKCACHE:
 		blkif->st_f_req++;
 		operation = WRITE_FLUSH;
 		break;
-	case BLKIF_OP_WRITE_BARRIER:
+	case BLKIF_OP_DISCARD:
+		blkif->st_ds_req++;
+		operation = REQ_DISCARD;
+		break;
 	default:
 		operation = 0; /* make gcc happy */
 		goto fail_response;
@@ -572,7 +645,8 @@
 
 	/* Check that the number of segments is sane. */
 	nseg = req->nr_segments;
-	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
+	if (unlikely(nseg == 0 && operation != WRITE_FLUSH &&
+				operation != REQ_DISCARD) ||
 	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
 		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
 			 nseg);
@@ -621,16 +695,25 @@
 		}
 	}
 
+	/* Wait on all outstanding I/O's and once that has been completed
+	 * issue the WRITE_FLUSH.
+	 */
+	if (drain)
+		xen_blk_drain_io(pending_req->blkif);
+
 	/*
 	 * If we have failed at this point, we need to undo the M2P override,
 	 * set gnttab_set_unmap_op on all of the grant references and perform
 	 * the hypercall to unmap the grants - that is all done in
 	 * xen_blkbk_unmap.
 	 */
-	if (xen_blkbk_map(req, pending_req, seg))
+	if (operation != REQ_DISCARD && xen_blkbk_map(req, pending_req, seg))
 		goto fail_flush;
 
-	/* This corresponding xen_blkif_put is done in __end_block_io_op */
+	/*
+	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
+	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
+	 */
 	xen_blkif_get(blkif);
 
 	for (i = 0; i < nseg; i++) {
@@ -654,18 +737,25 @@
 		preq.sector_number += seg[i].nsec;
 	}
 
-	/* This will be hit if the operation was a flush. */
+	/* This will be hit if the operation was a flush or discard. */
 	if (!bio) {
-		BUG_ON(operation != WRITE_FLUSH);
+		BUG_ON(operation != WRITE_FLUSH && operation != REQ_DISCARD);
 
-		bio = bio_alloc(GFP_KERNEL, 0);
-		if (unlikely(bio == NULL))
-			goto fail_put_bio;
+		if (operation == WRITE_FLUSH) {
+			bio = bio_alloc(GFP_KERNEL, 0);
+			if (unlikely(bio == NULL))
+				goto fail_put_bio;
 
-		biolist[nbio++] = bio;
-		bio->bi_bdev    = preq.bdev;
-		bio->bi_private = pending_req;
-		bio->bi_end_io  = end_block_io_op;
+			biolist[nbio++] = bio;
+			bio->bi_bdev    = preq.bdev;
+			bio->bi_private = pending_req;
+			bio->bi_end_io  = end_block_io_op;
+		} else if (operation == REQ_DISCARD) {
+			xen_blk_discard(blkif, req);
+			xen_blkif_put(blkif);
+			free_req(pending_req);
+			return 0;
+		}
 	}
 
 	/*
@@ -685,7 +775,7 @@
 
 	if (operation == READ)
 		blkif->st_rd_sect += preq.nr_sects;
-	else if (operation == WRITE || operation == WRITE_FLUSH)
+	else if (operation & WRITE)
 		blkif->st_wr_sect += preq.nr_sects;
 
 	return 0;
@@ -765,9 +855,9 @@
 
 	mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-	blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
+	blkbk->pending_reqs          = kzalloc(sizeof(blkbk->pending_reqs[0]) *
 					xen_blkif_reqs, GFP_KERNEL);
-	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
+	blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
 					mmap_pages, GFP_KERNEL);
 	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
 					mmap_pages, GFP_KERNEL);
@@ -790,8 +880,6 @@
 	if (rc)
 		goto failed_init;
 
-	memset(blkbk->pending_reqs, 0, sizeof(blkbk->pending_reqs));
-
 	INIT_LIST_HEAD(&blkbk->pending_free);
 	spin_lock_init(&blkbk->pending_free_lock);
 	init_waitqueue_head(&blkbk->pending_free_wq);
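
Two details of the blkback changes above deserve a note. The drain loop treats
the queue as empty once blkif->refcnt falls to 2, because two references are
always held: the initial one set at blkif creation and the one taken by the
xen_blkif_schedule thread; anything beyond that is an in-flight request. And
discards against a file backend are translated into hole punching (reaching
through the loop device's private data to find the backing file is a layering
shortcut), which works on byte offsets rather than the 512-byte sectors carried
by the ring protocol, hence the << 9 shifts in xen_blk_discard(). A sketch of
that conversion (the helper name is invented, not part of the patch):

#include <linux/types.h>

/* Hypothetical helper: convert a discard request's 512-byte sector
 * units into the byte range expected by fallocate() and friends. */
static inline void blkif_discard_to_bytes(u64 sector_number, u64 nr_sectors,
					  loff_t *offset, loff_t *len)
{
	*offset = (loff_t)sector_number << 9;	/* sectors -> bytes */
	*len = (loff_t)nr_sectors << 9;
}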
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index c4bd340..de09f52 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -62,13 +62,26 @@
 
 /* i386 protocol version */
 #pragma pack(push, 4)
+
+struct blkif_x86_32_request_rw {
+	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct blkif_x86_32_request_discard {
+	blkif_sector_t sector_number;/* start sector idx on disk           */
+	uint64_t nr_sectors;
+};
+
 struct blkif_x86_32_request {
 	uint8_t        operation;    /* BLKIF_OP_???                         */
 	uint8_t        nr_segments;  /* number of segments                   */
 	blkif_vdev_t   handle;       /* only for read/write requests         */
 	uint64_t       id;           /* private guest value, echoed in resp  */
-	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	union {
+		struct blkif_x86_32_request_rw rw;
+		struct blkif_x86_32_request_discard discard;
+	} u;
 };
 struct blkif_x86_32_response {
 	uint64_t        id;              /* copied from request */
@@ -78,13 +91,26 @@
 #pragma pack(pop)
 
 /* x86_64 protocol version */
+
+struct blkif_x86_64_request_rw {
+	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct blkif_x86_64_request_discard {
+	blkif_sector_t sector_number;/* start sector idx on disk           */
+	uint64_t nr_sectors;
+};
+
 struct blkif_x86_64_request {
 	uint8_t        operation;    /* BLKIF_OP_???                         */
 	uint8_t        nr_segments;  /* number of segments                   */
 	blkif_vdev_t   handle;       /* only for read/write requests         */
 	uint64_t       __attribute__((__aligned__(8))) id;
-	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	union {
+		struct blkif_x86_64_request_rw rw;
+		struct blkif_x86_64_request_discard discard;
+	} u;
 };
 struct blkif_x86_64_response {
 	uint64_t       __attribute__((__aligned__(8))) id;
@@ -112,6 +138,11 @@
 	BLKIF_PROTOCOL_X86_64 = 3,
 };
 
+enum blkif_backend_type {
+	BLKIF_BACKEND_PHY  = 1,
+	BLKIF_BACKEND_FILE = 2,
+};
+
 struct xen_vbd {
 	/* What the domain refers to this vbd as. */
 	blkif_vdev_t		handle;
@@ -137,6 +168,7 @@
 	unsigned int		irq;
 	/* Comms information. */
 	enum blkif_protocol	blk_protocol;
+	enum blkif_backend_type blk_backend_type;
 	union blkif_back_rings	blk_rings;
 	struct vm_struct	*blk_ring_area;
 	/* The VBD attached to this interface. */
@@ -148,6 +180,9 @@
 	atomic_t		refcnt;
 
 	wait_queue_head_t	wq;
+	/* for barrier (drain) requests */
+	struct completion	drain_complete;
+	atomic_t		drain;
 	/* One thread per one blkif. */
 	struct task_struct	*xenblkd;
 	unsigned int		waiting_reqs;
@@ -158,6 +193,7 @@
 	int			st_wr_req;
 	int			st_oo_req;
 	int			st_f_req;
+	int			st_ds_req;
 	int			st_rd_sect;
 	int			st_wr_sect;
 
@@ -181,7 +217,7 @@
 
 struct phys_req {
 	unsigned short		dev;
-	unsigned short		nr_sects;
+	blkif_sector_t		nr_sects;
 	struct block_device	*bdev;
 	blkif_sector_t		sector_number;
 };
@@ -195,6 +231,8 @@
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
 			      struct backend_info *be, int state);
 
+int xen_blkbk_barrier(struct xenbus_transaction xbt,
+		      struct backend_info *be, int state);
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
 
 static inline void blkif_get_x86_32_req(struct blkif_request *dst,
@@ -205,12 +243,25 @@
 	dst->nr_segments = src->nr_segments;
 	dst->handle = src->handle;
 	dst->id = src->id;
-	dst->u.rw.sector_number = src->sector_number;
-	barrier();
-	if (n > dst->nr_segments)
-		n = dst->nr_segments;
-	for (i = 0; i < n; i++)
-		dst->u.rw.seg[i] = src->seg[i];
+	switch (src->operation) {
+	case BLKIF_OP_READ:
+	case BLKIF_OP_WRITE:
+	case BLKIF_OP_WRITE_BARRIER:
+	case BLKIF_OP_FLUSH_DISKCACHE:
+		dst->u.rw.sector_number = src->u.rw.sector_number;
+		barrier();
+		if (n > dst->nr_segments)
+			n = dst->nr_segments;
+		for (i = 0; i < n; i++)
+			dst->u.rw.seg[i] = src->u.rw.seg[i];
+		break;
+	case BLKIF_OP_DISCARD:
+		dst->u.discard.sector_number = src->u.discard.sector_number;
+		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+		break;
+	default:
+		break;
+	}
 }
 
 static inline void blkif_get_x86_64_req(struct blkif_request *dst,
@@ -221,12 +272,25 @@
 	dst->nr_segments = src->nr_segments;
 	dst->handle = src->handle;
 	dst->id = src->id;
-	dst->u.rw.sector_number = src->sector_number;
-	barrier();
-	if (n > dst->nr_segments)
-		n = dst->nr_segments;
-	for (i = 0; i < n; i++)
-		dst->u.rw.seg[i] = src->seg[i];
+	switch (src->operation) {
+	case BLKIF_OP_READ:
+	case BLKIF_OP_WRITE:
+	case BLKIF_OP_WRITE_BARRIER:
+	case BLKIF_OP_FLUSH_DISKCACHE:
+		dst->u.rw.sector_number = src->u.rw.sector_number;
+		barrier();
+		if (n > dst->nr_segments)
+			n = dst->nr_segments;
+		for (i = 0; i < n; i++)
+			dst->u.rw.seg[i] = src->u.rw.seg[i];
+		break;
+	case BLKIF_OP_DISCARD:
+		dst->u.discard.sector_number = src->u.discard.sector_number;
+		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+		break;
+	default:
+		break;
+	}
 }
 
 #endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
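
The common.h rework turns the tail of both the 32-bit and 64-bit ring requests
into a tagged union: the operation byte decides whether u.rw or u.discard is
valid, which is why the copy helpers above now switch on src->operation before
touching the payload. The barrier() is kept so nr_segments, which lives in
memory shared with an untrusted frontend, is fetched once before being used as
a loop bound. The same tagged-union pattern in miniature (demo_* names and the
tag values are made up for illustration):

#include <stdint.h>

struct demo_request {
	uint8_t operation;		/* tag selecting the union member */
	union {
		struct { uint64_t sector; uint8_t nr_segments; } rw;
		struct { uint64_t sector; uint64_t nr_sectors; } discard;
	} u;
};

static uint64_t demo_nr_sectors(const struct demo_request *req)
{
	/* Dispatch on the tag; reading the wrong member would
	 * misinterpret whatever the other operation stored there. */
	return req->operation == 2 /* e.g. a discard op */ ?
		req->u.discard.nr_sectors : 0;
}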
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 5fd2010..2c008af 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -114,6 +114,8 @@
 	spin_lock_init(&blkif->blk_ring_lock);
 	atomic_set(&blkif->refcnt, 1);
 	init_waitqueue_head(&blkif->wq);
+	init_completion(&blkif->drain_complete);
+	atomic_set(&blkif->drain, 0);
 	blkif->st_print = jiffies;
 	init_waitqueue_head(&blkif->waiting_to_free);
 
@@ -272,6 +274,7 @@
 VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
 VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
 VBD_SHOW(f_req,  "%d\n", be->blkif->st_f_req);
+VBD_SHOW(ds_req,  "%d\n", be->blkif->st_ds_req);
 VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
 VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
 
@@ -280,6 +283,7 @@
 	&dev_attr_rd_req.attr,
 	&dev_attr_wr_req.attr,
 	&dev_attr_f_req.attr,
+	&dev_attr_ds_req.attr,
 	&dev_attr_rd_sect.attr,
 	&dev_attr_wr_sect.attr,
 	NULL
@@ -419,6 +423,73 @@
 	return err;
 }
 
+int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
+{
+	struct xenbus_device *dev = be->dev;
+	struct xen_blkif *blkif = be->blkif;
+	char *type;
+	int err;
+	int state = 0;
+
+	type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL);
+	if (!IS_ERR(type)) {
+		if (strncmp(type, "file", 4) == 0) {
+			state = 1;
+			blkif->blk_backend_type = BLKIF_BACKEND_FILE;
+		}
+		if (strncmp(type, "phy", 3) == 0) {
+			struct block_device *bdev = be->blkif->vbd.bdev;
+			struct request_queue *q = bdev_get_queue(bdev);
+			if (blk_queue_discard(q)) {
+				err = xenbus_printf(xbt, dev->nodename,
+					"discard-granularity", "%u",
+					q->limits.discard_granularity);
+				if (err) {
+					xenbus_dev_fatal(dev, err,
+						"writing discard-granularity");
+					goto kfree;
+				}
+				err = xenbus_printf(xbt, dev->nodename,
+					"discard-alignment", "%u",
+					q->limits.discard_alignment);
+				if (err) {
+					xenbus_dev_fatal(dev, err,
+						"writing discard-alignment");
+					goto kfree;
+				}
+				state = 1;
+				blkif->blk_backend_type = BLKIF_BACKEND_PHY;
+			}
+		}
+	} else {
+		err = PTR_ERR(type);
+		xenbus_dev_fatal(dev, err, "reading type");
+		goto out;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
+			    "%d", state);
+	if (err)
+		xenbus_dev_fatal(dev, err, "writing feature-discard");
+kfree:
+	kfree(type);
+out:
+	return err;
+}
+
+int xen_blkbk_barrier(struct xenbus_transaction xbt,
+		      struct backend_info *be, int state)
+{
+	struct xenbus_device *dev = be->dev;
+	int err;
+
+	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
+			    "%d", state);
+	if (err)
+		xenbus_dev_fatal(dev, err, "writing feature-barrier");
+
+	return err;
+}
+
 /*
  * Entry point to this code when a new device is created.  Allocate the basic
  * structures, and watch the store waiting for the hotplug scripts to tell us
@@ -650,6 +721,11 @@
 	if (err)
 		goto abort;
 
+	err = xen_blkbk_discard(xbt, be);
+
+	/* It is OK if we cannot advertise these features. */
+	err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
+
 	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
 			    (unsigned long long)vbd_sz(&be->blkif->vbd));
 	if (err) {
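
The backend advertises discard support purely through the xenstore keys
written above; the frontend reads them back with xenbus_gather() in the
blkfront patch below. With hypothetical example values, the backend's nodes
end up looking roughly like this:

	<backend-path>/feature-discard     = "1"
	<backend-path>/discard-granularity = "4096"   (phy backends only)
	<backend-path>/discard-alignment   = "0"      (phy backends only)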
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9ea8c25..7b2ec59 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -98,6 +98,9 @@
 	unsigned long shadow_free;
 	unsigned int feature_flush;
 	unsigned int flush_op;
+	unsigned int feature_discard;
+	unsigned int discard_granularity;
+	unsigned int discard_alignment;
 	int is_ready;
 };
 
@@ -302,29 +305,36 @@
 		ring_req->operation = info->flush_op;
 	}
 
-	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
-	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	if (unlikely(req->cmd_flags & REQ_DISCARD)) {
+		/* id, sector_number and handle are set above. */
+		ring_req->operation = BLKIF_OP_DISCARD;
+		ring_req->nr_segments = 0;
+		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
+	} else {
+		ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
+		BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
-	for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
-		buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
-		fsect = sg->offset >> 9;
-		lsect = fsect + (sg->length >> 9) - 1;
-		/* install a grant reference. */
-		ref = gnttab_claim_grant_reference(&gref_head);
-		BUG_ON(ref == -ENOSPC);
+		for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
+			buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
+			fsect = sg->offset >> 9;
+			lsect = fsect + (sg->length >> 9) - 1;
+			/* install a grant reference. */
+			ref = gnttab_claim_grant_reference(&gref_head);
+			BUG_ON(ref == -ENOSPC);
 
-		gnttab_grant_foreign_access_ref(
-				ref,
-				info->xbdev->otherend_id,
-				buffer_mfn,
-				rq_data_dir(req) );
+			gnttab_grant_foreign_access_ref(
+					ref,
+					info->xbdev->otherend_id,
+					buffer_mfn,
+					rq_data_dir(req));
 
-		info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
-		ring_req->u.rw.seg[i] =
-				(struct blkif_request_segment) {
-					.gref       = ref,
-					.first_sect = fsect,
-					.last_sect  = lsect };
+			info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
+			ring_req->u.rw.seg[i] =
+					(struct blkif_request_segment) {
+						.gref       = ref,
+						.first_sect = fsect,
+						.last_sect  = lsect };
+		}
 	}
 
 	info->ring.req_prod_pvt++;
@@ -370,7 +380,9 @@
 
 		blk_start_request(req);
 
-		if (req->cmd_type != REQ_TYPE_FS) {
+		if ((req->cmd_type != REQ_TYPE_FS) ||
+		    ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
+		    !info->flush_op)) {
 			__blk_end_request_all(req, -EIO);
 			continue;
 		}
@@ -399,6 +411,7 @@
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 {
 	struct request_queue *rq;
+	struct blkfront_info *info = gd->private_data;
 
 	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
 	if (rq == NULL)
@@ -406,6 +419,13 @@
 
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
+	if (info->feature_discard) {
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_max_discard_sectors(rq, get_capacity(gd));
+		rq->limits.discard_granularity = info->discard_granularity;
+		rq->limits.discard_alignment = info->discard_alignment;
+	}
+
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
 	blk_queue_logical_block_size(rq, sector_size);
 	blk_queue_max_hw_sectors(rq, 512);
@@ -722,6 +742,17 @@
 
 		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
 		switch (bret->operation) {
+		case BLKIF_OP_DISCARD:
+			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+				struct request_queue *rq = info->rq;
+				printk(KERN_WARNING "blkfront: %s: discard op failed\n",
+					   info->gd->disk_name);
+				error = -EOPNOTSUPP;
+				info->feature_discard = 0;
+				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+			}
+			__blk_end_request_all(req, error);
+			break;
 		case BLKIF_OP_FLUSH_DISKCACHE:
 		case BLKIF_OP_WRITE_BARRIER:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
@@ -1098,6 +1129,33 @@
 	bdput(bdev);
 }
 
+static void blkfront_setup_discard(struct blkfront_info *info)
+{
+	int err;
+	char *type;
+	unsigned int discard_granularity;
+	unsigned int discard_alignment;
+
+	type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
+	if (IS_ERR(type))
+		return;
+
+	if (strncmp(type, "phy", 3) == 0) {
+		err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			"discard-granularity", "%u", &discard_granularity,
+			"discard-alignment", "%u", &discard_alignment,
+			NULL);
+		if (!err) {
+			info->feature_discard = 1;
+			info->discard_granularity = discard_granularity;
+			info->discard_alignment = discard_alignment;
+		}
+	} else if (strncmp(type, "file", 4) == 0)
+		info->feature_discard = 1;
+
+	kfree(type);
+}
+
 /*
  * Invoked when the backend is finally 'ready' (and has produced
  * the details about the physical device - #sectors, size, etc).
@@ -1108,7 +1166,7 @@
 	unsigned long sector_size;
 	unsigned int binfo;
 	int err;
-	int barrier, flush;
+	int barrier, flush, discard;
 
 	switch (info->connected) {
 	case BLKIF_STATE_CONNECTED:
@@ -1178,7 +1236,14 @@
 		info->feature_flush = REQ_FLUSH;
 		info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
 	}
-		
+
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			    "feature-discard", "%d", &discard,
+			    NULL);
+
+	if (!err && discard)
+		blkfront_setup_discard(info);
+
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {
 		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
@@ -1385,6 +1450,8 @@
 
 static int __init xlblk_init(void)
 {
+	int ret;
+
 	if (!xen_domain())
 		return -ENODEV;
 
@@ -1394,7 +1461,13 @@
 		return -ENODEV;
 	}
 
-	return xenbus_register_frontend(&blkfront);
+	ret = xenbus_register_frontend(&blkfront);
+	if (ret) {
+		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
+		return ret;
+	}
+
+	return 0;
 }
 module_init(xlblk_init);
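
The xlblk_init() hunk fixes an error-path leak: a failed
xenbus_register_frontend() used to leave the block major registered. This is
the usual unwind-in-reverse-order pattern; a sketch with hypothetical
example_* names and an arbitrary major number:

#include <linux/fs.h>
#include <linux/init.h>

#define EXAMPLE_MAJOR	240	/* hypothetical: a local/experimental major */

static int example_register_frontend(void) { return 0; }	/* stub */

static int __init example_init(void)
{
	int ret;

	ret = register_blkdev(EXAMPLE_MAJOR, "example");
	if (ret < 0)
		return ret;

	ret = example_register_frontend();
	if (ret) {
		/* undo the earlier step before propagating the error */
		unregister_blkdev(EXAMPLE_MAJOR, "example");
		return ret;
	}
	return 0;
}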
 
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2e3b3d3..ab8f469 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -193,7 +193,8 @@
 config PL330_DMA
 	tristate "DMA API Driver for PL330"
 	select DMA_ENGINE
-	depends on PL330
+	depends on ARM_AMBA
+	select PL330
 	help
 	  Select if your platform has one or more PL330 DMACs.
 	  You need to provide platform specific settings via
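
The Kconfig hunk above swaps a dependency for a selection: enabling PL330_DMA
now pulls in the core PL330 symbol automatically instead of requiring the user
to find and enable it first, and the user-visible dependency shrinks to the
AMBA bus. The general shape, sketched with placeholder FOO symbols:

config FOO_DMA
	tristate "DMA API driver for FOO"
	select DMA_ENGINE
	depends on ARM_AMBA	# must be satisfiable in the user's .config
	select FOO		# switched on automatically; note that select
				# does not check FOO's own dependencies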
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index be21e3f..b7cbd1a 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -66,32 +66,29 @@
  *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
  *    will then move to the next LLI entry.
  *
- * Only the former works sanely with scatter lists, so we only implement
- * the DMAC flow control method.  However, peripherals which use the LBREQ
- * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
- * these hardware restrictions prevents them from using scatter DMA.
- *
  * Global TODO:
  * - Break out common code from arch/arm/mach-s3c64xx and share
  */
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/dmaengine.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl08x.h>
 #include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
-
+#include <linux/slab.h>
 #include <asm/hardware/pl080.h>
 
 #define DRIVER_NAME	"pl08xdmac"
 
+static struct amba_driver pl08x_amba_driver;
+
 /**
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
@@ -126,7 +123,8 @@
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
  * @pool_ctr: counter of LLIs in the pool
- * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
+ * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
+ * fetches
  * @mem_buses: set to indicate memory transfers on AHB2.
  * @lock: a spinlock for this struct
  */
@@ -149,14 +147,6 @@
  * PL08X specific defines
  */
 
-/*
- * Memory boundaries: the manual for PL08x says that the controller
- * cannot read past a 1KiB boundary, so these defines are used to
- * create transfer LLIs that do not cross such boundaries.
- */
-#define PL08X_BOUNDARY_SHIFT		(10)	/* 1KB 0x400 */
-#define PL08X_BOUNDARY_SIZE		(1 << PL08X_BOUNDARY_SHIFT)
-
 /* Size (bytes) of each LLI buffer allocated for one transfer */
 # define PL08X_LLI_TSFR_SIZE	0x2000
 
@@ -272,7 +262,6 @@
 	writel(val, ch->base + PL080_CH_CONFIG);
 }
 
-
 /*
  * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
  * clears any pending interrupt status.  This should not be used for
@@ -363,7 +352,9 @@
 	if (!list_empty(&plchan->pend_list)) {
 		struct pl08x_txd *txdi;
 		list_for_each_entry(txdi, &plchan->pend_list, node) {
-			bytes += txdi->len;
+			struct pl08x_sg *dsg;
+			struct pl08x_sg *dsg;
+			list_for_each_entry(dsg, &txdi->dsg_list, node)
+				bytes += dsg->len;
 		}
 	}
 
@@ -407,6 +398,7 @@
 		return NULL;
 	}
 
+	pm_runtime_get_sync(&pl08x->adev->dev);
 	return ch;
 }
 
@@ -420,6 +412,8 @@
 	/* Stop the channel and clear its interrupts */
 	pl08x_terminate_phy_chan(pl08x, ch);
 
+	pm_runtime_put(&pl08x->adev->dev);
+
 	/* Mark it as free */
 	ch->serving = NULL;
 	spin_unlock_irqrestore(&ch->lock, flags);
@@ -499,36 +493,30 @@
 };
 
 /*
- * Autoselect a master bus to use for the transfer this prefers the
- * destination bus if both available if fixed address on one bus the
- * other will be chosen
+ * Autoselect a master bus to use for the transfer. The slave bus is chosen
+ * as the victim when src & dst are not similarly aligned, i.e. if, after
+ * aligning the master's address to the transfer width (by sending a few
+ * bytes one at a time), the slave is still not aligned, then its width is
+ * reduced to BYTE.
+ * - prefers the destination bus if both are available
+ * - prefers the bus with a fixed address (i.e. the peripheral)
  */
 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
 	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
 {
 	if (!(cctl & PL080_CONTROL_DST_INCR)) {
-		*mbus = &bd->srcbus;
-		*sbus = &bd->dstbus;
-	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
 		*mbus = &bd->dstbus;
 		*sbus = &bd->srcbus;
+	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
+		*mbus = &bd->srcbus;
+		*sbus = &bd->dstbus;
 	} else {
-		if (bd->dstbus.buswidth == 4) {
+		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
 			*mbus = &bd->dstbus;
 			*sbus = &bd->srcbus;
-		} else if (bd->srcbus.buswidth == 4) {
-			*mbus = &bd->srcbus;
-			*sbus = &bd->dstbus;
-		} else if (bd->dstbus.buswidth == 2) {
-			*mbus = &bd->dstbus;
-			*sbus = &bd->srcbus;
-		} else if (bd->srcbus.buswidth == 2) {
-			*mbus = &bd->srcbus;
-			*sbus = &bd->dstbus;
 		} else {
-			/* bd->srcbus.buswidth == 1 */
-			*mbus = &bd->dstbus;
-			*sbus = &bd->srcbus;
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
 		}
 	}
 }
@@ -547,7 +535,8 @@
 	llis_va[num_llis].cctl = cctl;
 	llis_va[num_llis].src = bd->srcbus.addr;
 	llis_va[num_llis].dst = bd->dstbus.addr;
-	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
+		sizeof(struct pl08x_lli);
 	llis_va[num_llis].lli |= bd->lli_bus;
 
 	if (cctl & PL080_CONTROL_SRC_INCR)
@@ -560,16 +549,12 @@
 	bd->remainder -= len;
 }
 
-/*
- * Return number of bytes to fill to boundary, or len.
- * This calculation works for any value of addr.
- */
-static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
+static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
+		u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
 {
-	size_t boundary_len = PL08X_BOUNDARY_SIZE -
-			(addr & (PL08X_BOUNDARY_SIZE - 1));
-
-	return min(boundary_len, len);
+	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
+	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+	(*total_bytes) += len;
 }
 
 /*
@@ -583,13 +568,12 @@
 	struct pl08x_bus_data *mbus, *sbus;
 	struct pl08x_lli_build_data bd;
 	int num_llis = 0;
-	u32 cctl;
-	size_t max_bytes_per_lli;
-	size_t total_bytes = 0;
+	u32 cctl, early_bytes = 0;
+	size_t max_bytes_per_lli, total_bytes;
 	struct pl08x_lli *llis_va;
+	struct pl08x_sg *dsg;
 
-	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
-				      &txd->llis_bus);
+	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
 	if (!txd->llis_va) {
 		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
 		return 0;
@@ -597,13 +581,9 @@
 
 	pl08x->pool_ctr++;
 
-	/* Get the default CCTL */
-	cctl = txd->cctl;
-
 	bd.txd = txd;
-	bd.srcbus.addr = txd->src_addr;
-	bd.dstbus.addr = txd->dst_addr;
 	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
+	cctl = txd->cctl;
 
 	/* Find maximum width of the source bus */
 	bd.srcbus.maxwidth =
@@ -615,215 +595,179 @@
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
 				       PL080_CONTROL_DWIDTH_SHIFT);
 
-	/* Set up the bus widths to the maximum */
-	bd.srcbus.buswidth = bd.srcbus.maxwidth;
-	bd.dstbus.buswidth = bd.dstbus.maxwidth;
+	list_for_each_entry(dsg, &txd->dsg_list, node) {
+		total_bytes = 0;
+		cctl = txd->cctl;
 
-	/*
-	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
-	 */
-	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
-		PL080_CONTROL_TRANSFER_SIZE_MASK;
+		bd.srcbus.addr = dsg->src_addr;
+		bd.dstbus.addr = dsg->dst_addr;
+		bd.remainder = dsg->len;
+		bd.srcbus.buswidth = bd.srcbus.maxwidth;
+		bd.dstbus.buswidth = bd.dstbus.maxwidth;
 
-	/* We need to count this down to zero */
-	bd.remainder = txd->len;
+		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
 
-	/*
-	 * Choose bus to align to
-	 * - prefers destination bus if both available
-	 * - if fixed address on one bus chooses other
-	 */
-	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
-
-	dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
-		 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
-		 bd.srcbus.buswidth,
-		 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
-		 bd.dstbus.buswidth,
-		 bd.remainder, max_bytes_per_lli);
-	dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
-		 mbus == &bd.srcbus ? "src" : "dst",
-		 sbus == &bd.srcbus ? "src" : "dst");
-
-	if (txd->len < mbus->buswidth) {
-		/* Less than a bus width available - send as single bytes */
-		while (bd.remainder) {
-			dev_vdbg(&pl08x->adev->dev,
-				 "%s single byte LLIs for a transfer of "
-				 "less than a bus width (remain 0x%08x)\n",
-				 __func__, bd.remainder);
-			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-			total_bytes++;
-		}
-	} else {
-		/* Make one byte LLIs until master bus is aligned */
-		while ((mbus->addr) % (mbus->buswidth)) {
-			dev_vdbg(&pl08x->adev->dev,
-				"%s adjustment lli for less than bus width "
-				 "(remain 0x%08x)\n",
-				 __func__, bd.remainder);
-			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-			total_bytes++;
-		}
+		dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
+			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+			bd.srcbus.buswidth,
+			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+			bd.dstbus.buswidth,
+			bd.remainder);
+		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
+			mbus == &bd.srcbus ? "src" : "dst",
+			sbus == &bd.srcbus ? "src" : "dst");
 
 		/*
-		 * Master now aligned
-		 * - if slave is not then we must set its width down
+		 * Zero length is only allowed if all these requirements are
+		 * met:
+		 * - flow controller is peripheral.
+		 * - src.addr is aligned to src.width
+		 * - dst.addr is aligned to dst.width
+		 *
+		 * sg_len == 1 should be true, as there are two cases here:
+		 *
+		 * - Memory addresses are contiguous, not scattered. The client
+		 *   driver passes a single sg with the memory address and zero
+		 *   length. We hand this to the controller; after the transfer
+		 *   it receives the last burst request from the peripheral and
+		 *   so the transfer finishes.
+		 *
+		 * - Memory addresses are scattered, not contiguous. Since the
+		 *   DMA controller cannot tell when an LLI's transfer is over,
+		 *   it cannot load the next LLI, so only a single LLI can be
+		 *   supported. Thus, we can't have scattered addresses.
 		 */
-		if (sbus->addr % sbus->buswidth) {
-			dev_dbg(&pl08x->adev->dev,
-				"%s set down bus width to one byte\n",
-				 __func__);
-
-			sbus->buswidth = 1;
-		}
-
-		/*
-		 * Make largest possible LLIs until less than one bus
-		 * width left
-		 */
-		while (bd.remainder > (mbus->buswidth - 1)) {
-			size_t lli_len, target_len, tsize, odd_bytes;
-
-			/*
-			 * If enough left try to send max possible,
-			 * otherwise try to send the remainder
-			 */
-			target_len = min(bd.remainder, max_bytes_per_lli);
-
-			/*
-			 * Set bus lengths for incrementing buses to the
-			 * number of bytes which fill to next memory boundary,
-			 * limiting on the target length calculated above.
-			 */
-			if (cctl & PL080_CONTROL_SRC_INCR)
-				bd.srcbus.fill_bytes =
-					pl08x_pre_boundary(bd.srcbus.addr,
-						target_len);
-			else
-				bd.srcbus.fill_bytes = target_len;
-
-			if (cctl & PL080_CONTROL_DST_INCR)
-				bd.dstbus.fill_bytes =
-					pl08x_pre_boundary(bd.dstbus.addr,
-						target_len);
-			else
-				bd.dstbus.fill_bytes = target_len;
-
-			/* Find the nearest */
-			lli_len	= min(bd.srcbus.fill_bytes,
-				      bd.dstbus.fill_bytes);
-
-			BUG_ON(lli_len > bd.remainder);
-
-			if (lli_len <= 0) {
-				dev_err(&pl08x->adev->dev,
-					"%s lli_len is %zu, <= 0\n",
-						__func__, lli_len);
+		if (!bd.remainder) {
+			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
+				PL080_CONFIG_FLOW_CONTROL_SHIFT;
+			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
+					(fc <= PL080_FLOW_SRC2DST_SRC))) {
+				dev_err(&pl08x->adev->dev, "%s sg len can't be zero\n",
+					__func__);
 				return 0;
 			}
 
-			if (lli_len == target_len) {
-				/*
-				 * Can send what we wanted.
-				 * Maintain alignment
-				 */
-				lli_len	= (lli_len/mbus->buswidth) *
-							mbus->buswidth;
-				odd_bytes = 0;
-			} else {
-				/*
-				 * So now we know how many bytes to transfer
-				 * to get to the nearest boundary.  The next
-				 * LLI will past the boundary.  However, we
-				 * may be working to a boundary on the slave
-				 * bus.  We need to ensure the master stays
-				 * aligned, and that we are working in
-				 * multiples of the bus widths.
-				 */
-				odd_bytes = lli_len % mbus->buswidth;
-				lli_len -= odd_bytes;
-
+			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
+					(bd.dstbus.addr % bd.dstbus.buswidth)) {
+				dev_err(&pl08x->adev->dev,
+					"%s src & dst address must be aligned to src"
+					" & dst width if peripheral is flow controller",
+					__func__);
+				return 0;
 			}
 
-			if (lli_len) {
-				/*
-				 * Check against minimum bus alignment:
-				 * Calculate actual transfer size in relation
-				 * to bus width an get a maximum remainder of
-				 * the smallest bus width - 1
-				 */
-				/* FIXME: use round_down()? */
-				tsize = lli_len / min(mbus->buswidth,
-						      sbus->buswidth);
-				lli_len	= tsize * min(mbus->buswidth,
-						      sbus->buswidth);
-
-				if (target_len != lli_len) {
-					dev_vdbg(&pl08x->adev->dev,
-					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
-					__func__, target_len, lli_len, txd->len);
-				}
-
-				cctl = pl08x_cctl_bits(cctl,
-						       bd.srcbus.buswidth,
-						       bd.dstbus.buswidth,
-						       tsize);
-
-				dev_vdbg(&pl08x->adev->dev,
-					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
-					__func__, lli_len, bd.remainder);
-				pl08x_fill_lli_for_desc(&bd, num_llis++,
-					lli_len, cctl);
-				total_bytes += lli_len;
-			}
-
-
-			if (odd_bytes) {
-				/*
-				 * Creep past the boundary, maintaining
-				 * master alignment
-				 */
-				int j;
-				for (j = 0; (j < mbus->buswidth)
-						&& (bd.remainder); j++) {
-					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-					dev_vdbg(&pl08x->adev->dev,
-						"%s align with boundary, single byte (remain 0x%08zx)\n",
-						__func__, bd.remainder);
-					pl08x_fill_lli_for_desc(&bd,
-						num_llis++, 1, cctl);
-					total_bytes++;
-				}
-			}
+			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+					bd.dstbus.buswidth, 0);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+			break;
 		}
 
 		/*
-		 * Send any odd bytes
+		 * Send byte by byte for following cases
+		 * - Less than a bus width available
+		 * - until master bus is aligned
 		 */
-		while (bd.remainder) {
-			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			dev_vdbg(&pl08x->adev->dev,
-				"%s align with boundary, single odd byte (remain %zu)\n",
-				__func__, bd.remainder);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-			total_bytes++;
+		if (bd.remainder < mbus->buswidth)
+			early_bytes = bd.remainder;
+		else if ((mbus->addr) % (mbus->buswidth)) {
+			early_bytes = mbus->buswidth - (mbus->addr) %
+				(mbus->buswidth);
+			if ((bd.remainder - early_bytes) < mbus->buswidth)
+				early_bytes = bd.remainder;
 		}
-	}
-	if (total_bytes != txd->len) {
-		dev_err(&pl08x->adev->dev,
-			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
-			__func__, total_bytes, txd->len);
-		return 0;
-	}
 
-	if (num_llis >= MAX_NUM_TSFR_LLIS) {
-		dev_err(&pl08x->adev->dev,
-			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
-			__func__, (u32) MAX_NUM_TSFR_LLIS);
-		return 0;
+		if (early_bytes) {
+			dev_vdbg(&pl08x->adev->dev,
+				"%s byte width LLIs (remain 0x%08x)\n",
+				__func__, bd.remainder);
+			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
+				&total_bytes);
+		}
+
+		if (bd.remainder) {
+			/*
+			 * Master now aligned
+			 * - if slave is not then we must set its width down
+			 */
+			if (sbus->addr % sbus->buswidth) {
+				dev_dbg(&pl08x->adev->dev,
+					"%s set down bus width to one byte\n",
+					__func__);
+
+				sbus->buswidth = 1;
+			}
+
+			/*
+			 * Bytes transferred = tsize * src width, not
+			 * MIN(buswidths)
+			 */
+			max_bytes_per_lli = bd.srcbus.buswidth *
+				PL080_CONTROL_TRANSFER_SIZE_MASK;
+			dev_vdbg(&pl08x->adev->dev,
+				"%s max bytes per lli = %zu\n",
+				__func__, max_bytes_per_lli);
+
+			/*
+			 * Make largest possible LLIs until less than one bus
+			 * width left
+			 */
+			while (bd.remainder > (mbus->buswidth - 1)) {
+				size_t lli_len, tsize, width;
+
+				/*
+				 * If enough left try to send max possible,
+				 * otherwise try to send the remainder
+				 */
+				lli_len = min(bd.remainder, max_bytes_per_lli);
+
+				/*
+				 * Check against maximum bus alignment:
+				 * Calculate actual transfer size in relation to
+				 * bus width and get a maximum remainder of the
+				 * highest bus width - 1
+				 */
+				width = max(mbus->buswidth, sbus->buswidth);
+				lli_len = (lli_len / width) * width;
+				tsize = lli_len / bd.srcbus.buswidth;
+
+				dev_vdbg(&pl08x->adev->dev,
+					"%s fill lli with single lli chunk of "
+					"size 0x%08zx (remainder 0x%08zx)\n",
+					__func__, lli_len, bd.remainder);
+
+				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+					bd.dstbus.buswidth, tsize);
+				pl08x_fill_lli_for_desc(&bd, num_llis++,
+						lli_len, cctl);
+				total_bytes += lli_len;
+			}
+
+			/*
+			 * Send any odd bytes
+			 */
+			if (bd.remainder) {
+				dev_vdbg(&pl08x->adev->dev,
+					"%s align with boundary, send odd bytes (remain %zu)\n",
+					__func__, bd.remainder);
+				prep_byte_width_lli(&bd, &cctl, bd.remainder,
+						num_llis++, &total_bytes);
+			}
+		}
+
+		if (total_bytes != dsg->len) {
+			dev_err(&pl08x->adev->dev,
+				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
+				__func__, total_bytes, dsg->len);
+			return 0;
+		}
+
+		if (num_llis >= MAX_NUM_TSFR_LLIS) {
+			dev_err(&pl08x->adev->dev,
+				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
+				__func__, (u32) MAX_NUM_TSFR_LLIS);
+			return 0;
+		}
 	}
 
 	llis_va = txd->llis_va;
@@ -856,11 +800,19 @@
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 			   struct pl08x_txd *txd)
 {
+	struct pl08x_sg *dsg, *_dsg;
+
 	/* Free the LLI */
-	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
+	if (txd->llis_va)
+		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
 	pl08x->pool_ctr--;
 
+	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
+		list_del(&dsg->node);
+		kfree(dsg);
+	}
+
 	kfree(txd);
 }
 
@@ -917,9 +869,7 @@
 	 * need, but for slaves the physical signals may be muxed!
 	 * Can the platform allow us to use this channel?
 	 */
-	if (plchan->slave &&
-	    ch->signal < 0 &&
-	    pl08x->pd->get_signal) {
+	if (plchan->slave && pl08x->pd->get_signal) {
 		ret = pl08x->pd->get_signal(plchan);
 		if (ret < 0) {
 			dev_dbg(&pl08x->adev->dev,
@@ -1008,10 +958,8 @@
  * If slaves are relying on interrupts to signal completion this function
  * must not be called with interrupts disabled.
  */
-static enum dma_status
-pl08x_dma_tx_status(struct dma_chan *chan,
-		    dma_cookie_t cookie,
-		    struct dma_tx_state *txstate)
+static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	dma_cookie_t last_used;
@@ -1253,7 +1201,9 @@
 
 	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
 	if (!num_llis) {
-		kfree(txd);
+		spin_lock_irqsave(&plchan->lock, flags);
+		pl08x_free_txd(pl08x, txd);
+		spin_unlock_irqrestore(&plchan->lock, flags);
 		return -EINVAL;
 	}
 
@@ -1301,13 +1251,14 @@
 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
 	unsigned long flags)
 {
-	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
 
 	if (txd) {
 		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
 		txd->tx.flags = flags;
 		txd->tx.tx_submit = pl08x_tx_submit;
 		INIT_LIST_HEAD(&txd->node);
+		INIT_LIST_HEAD(&txd->dsg_list);
 
 		/* Always enable error and terminal interrupts */
 		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
@@ -1326,6 +1277,7 @@
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
+	struct pl08x_sg *dsg;
 	int ret;
 
 	txd = pl08x_get_txd(plchan, flags);
@@ -1335,10 +1287,19 @@
 		return NULL;
 	}
 
+	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+	if (!dsg) {
+		pl08x_free_txd(pl08x, txd);
+		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
+				__func__);
+		return NULL;
+	}
+	list_add_tail(&dsg->node, &txd->dsg_list);
+
 	txd->direction = DMA_NONE;
-	txd->src_addr = src;
-	txd->dst_addr = dest;
-	txd->len = len;
+	dsg->src_addr = src;
+	dsg->dst_addr = dest;
+	dsg->len = len;
 
 	/* Set platform data for m2m */
 	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1367,19 +1328,13 @@
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
-	int ret;
-
-	/*
-	 * Current implementation ASSUMES only one sg
-	 */
-	if (sg_len != 1) {
-		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
-			__func__);
-		BUG();
-	}
+	struct pl08x_sg *dsg;
+	struct scatterlist *sg;
+	dma_addr_t slave_addr;
+	int ret, tmp;
 
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
-		__func__, sgl->length, plchan->name);
+			__func__, sgl->length, plchan->name);
 
 	txd = pl08x_get_txd(plchan, flags);
 	if (!txd) {
@@ -1398,24 +1353,49 @@
 	 * channel target address dynamically at runtime.
 	 */
 	txd->direction = direction;
-	txd->len = sgl->length;
 
 	if (direction == DMA_TO_DEVICE) {
-		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		txd->cctl = plchan->dst_cctl;
-		txd->src_addr = sgl->dma_address;
-		txd->dst_addr = plchan->dst_addr;
+		slave_addr = plchan->dst_addr;
 	} else if (direction == DMA_FROM_DEVICE) {
-		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		txd->cctl = plchan->src_cctl;
-		txd->src_addr = plchan->src_addr;
-		txd->dst_addr = sgl->dma_address;
+		slave_addr = plchan->src_addr;
 	} else {
+		pl08x_free_txd(pl08x, txd);
 		dev_err(&pl08x->adev->dev,
 			"%s direction unsupported\n", __func__);
 		return NULL;
 	}
 
+	if (plchan->cd->device_fc)
+		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+			PL080_FLOW_PER2MEM_PER;
+	else
+		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+			PL080_FLOW_PER2MEM;
+
+	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+
+	for_each_sg(sgl, sg, sg_len, tmp) {
+		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+		if (!dsg) {
+			pl08x_free_txd(pl08x, txd);
+			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
+					__func__);
+			return NULL;
+		}
+		list_add_tail(&dsg->node, &txd->dsg_list);
+
+		dsg->len = sg_dma_len(sg);
+		if (direction == DMA_TO_DEVICE) {
+			dsg->src_addr = sg_phys(sg);
+			dsg->dst_addr = slave_addr;
+		} else {
+			dsg->src_addr = slave_addr;
+			dsg->dst_addr = sg_phys(sg);
+		}
+	}
+
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
@@ -1489,9 +1469,15 @@
 
 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 {
-	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_dma_chan *plchan;
 	char *name = chan_id;
 
+	/* Reject channels for devices not bound to this driver */
+	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
+		return false;
+
+	plchan = to_pl08x_chan(chan);
+
 	/* Check that the channel is not taken! */
 	if (!strcmp(plchan->name, name))
 		return true;
@@ -1507,34 +1493,34 @@
  */
 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 {
-	u32 val;
-
-	val = readl(pl08x->base + PL080_CONFIG);
-	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
-	/* We implicitly clear bit 1 and that means little-endian mode */
-	val |= PL080_CONFIG_ENABLE;
-	writel(val, pl08x->base + PL080_CONFIG);
+	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }
 
 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
 {
 	struct device *dev = txd->tx.chan->device->dev;
+	struct pl08x_sg *dsg;
 
 	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			dma_unmap_single(dev, txd->src_addr, txd->len,
-				DMA_TO_DEVICE);
-		else
-			dma_unmap_page(dev, txd->src_addr, txd->len,
-				DMA_TO_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		else {
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		}
 	}
 	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			dma_unmap_single(dev, txd->dst_addr, txd->len,
-				DMA_FROM_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
 		else
-			dma_unmap_page(dev, txd->dst_addr, txd->len,
-				DMA_FROM_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
 	}
 }
 
@@ -1589,8 +1575,8 @@
 		 */
 		list_for_each_entry(waiting, &pl08x->memcpy.channels,
 				    chan.device_node) {
-		  if (waiting->state == PL08X_CHAN_WAITING &&
-			    waiting->waiting != NULL) {
+			if (waiting->state == PL08X_CHAN_WAITING &&
+				waiting->waiting != NULL) {
 				int ret;
 
 				/* This should REALLY not fail now */
@@ -1630,38 +1616,40 @@
 static irqreturn_t pl08x_irq(int irq, void *dev)
 {
 	struct pl08x_driver_data *pl08x = dev;
-	u32 mask = 0;
-	u32 val;
-	int i;
+	u32 mask = 0, err, tc, i;
 
-	val = readl(pl08x->base + PL080_ERR_STATUS);
-	if (val) {
-		/* An error interrupt (on one or more channels) */
-		dev_err(&pl08x->adev->dev,
-			"%s error interrupt, register value 0x%08x\n",
-				__func__, val);
-		/*
-		 * Simply clear ALL PL08X error interrupts,
-		 * regardless of channel and cause
-		 * FIXME: should be 0x00000003 on PL081 really.
-		 */
-		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+	/* check & clear - ERR & TC interrupts */
+	err = readl(pl08x->base + PL080_ERR_STATUS);
+	if (err) {
+		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
+			__func__, err);
+		writel(err, pl08x->base + PL080_ERR_CLEAR);
 	}
-	val = readl(pl08x->base + PL080_INT_STATUS);
+	tc = readl(pl08x->base + PL080_INT_STATUS);
+	if (tc)
+		writel(tc, pl08x->base + PL080_TC_CLEAR);
+
+	if (!err && !tc)
+		return IRQ_NONE;
+
 	for (i = 0; i < pl08x->vd->channels; i++) {
-		if ((1 << i) & val) {
+		if (((1 << i) & err) || ((1 << i) & tc)) {
 			/* Locate physical channel */
 			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
 			struct pl08x_dma_chan *plchan = phychan->serving;
 
+			if (!plchan) {
+				dev_err(&pl08x->adev->dev,
+					"%s Error TC interrupt on unused channel: 0x%08x\n",
+					__func__, i);
+				continue;
+			}
+
 			/* Schedule tasklet on this channel */
 			tasklet_schedule(&plchan->tasklet);
-
 			mask |= (1 << i);
 		}
 	}
-	/* Clear only the terminal interrupts on channels we processed */
-	writel(mask, pl08x->base + PL080_TC_CLEAR);
 
 	return mask ? IRQ_HANDLED : IRQ_NONE;
 }
@@ -1685,9 +1673,7 @@
  * Make a local wrapper to hold required data
  */
 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
-					   struct dma_device *dmadev,
-					   unsigned int channels,
-					   bool slave)
+		struct dma_device *dmadev, unsigned int channels, bool slave)
 {
 	struct pl08x_dma_chan *chan;
 	int i;
@@ -1700,7 +1686,7 @@
 	 * to cope with that situation.
 	 */
 	for (i = 0; i < channels; i++) {
-		chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
+		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
 		if (!chan) {
 			dev_err(&pl08x->adev->dev,
 				"%s no memory for channel\n", __func__);
@@ -1728,7 +1714,7 @@
 			kfree(chan);
 			continue;
 		}
-		dev_info(&pl08x->adev->dev,
+		dev_dbg(&pl08x->adev->dev,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);
 
@@ -1837,9 +1823,9 @@
 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
 {
 	/* Expose a simple debugfs interface to view all clocks */
-	(void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
-				   NULL, pl08x,
-				   &pl08x_debugfs_operations);
+	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
+			S_IFREG | S_IRUGO, NULL, pl08x,
+			&pl08x_debugfs_operations);
 }
 
 #else
@@ -1860,12 +1846,15 @@
 		return ret;
 
 	/* Create the driver state holder */
-	pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
+	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
 	if (!pl08x) {
 		ret = -ENOMEM;
 		goto out_no_pl08x;
 	}
 
+	pm_runtime_set_active(&adev->dev);
+	pm_runtime_enable(&adev->dev);
+
 	/* Initialize memcpy engine */
 	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
 	pl08x->memcpy.dev = &adev->dev;
@@ -1939,7 +1928,7 @@
 	}
 
 	/* Initialize physical channels */
-	pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
+	pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
 			GFP_KERNEL);
 	if (!pl08x->phy_chans) {
 		dev_err(&adev->dev, "%s failed to allocate "
@@ -1956,9 +1945,8 @@
 		spin_lock_init(&ch->lock);
 		ch->serving = NULL;
 		ch->signal = -1;
-		dev_info(&adev->dev,
-			 "physical channel %d is %s\n", i,
-			 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
+		dev_dbg(&adev->dev, "physical channel %d is %s\n",
+			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
 	}
 
 	/* Register as many memcpy channels as there are physical channels */
@@ -1974,8 +1962,7 @@
 
 	/* Register slave channels */
 	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
-					      pl08x->pd->num_slave_channels,
-					      true);
+			pl08x->pd->num_slave_channels, true);
 	if (ret <= 0) {
 		dev_warn(&pl08x->adev->dev,
 			"%s failed to enumerate slave channels - %d\n",
@@ -2005,6 +1992,8 @@
 	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
 		 amba_part(adev), amba_rev(adev),
 		 (unsigned long long)adev->res.start, adev->irq[0]);
+
+	pm_runtime_put(&adev->dev);
 	return 0;
 
 out_no_slave_reg:
@@ -2023,6 +2012,9 @@
 	dma_pool_destroy(pl08x->pool);
 out_no_lli_pool:
 out_no_platdata:
+	pm_runtime_put(&adev->dev);
+	pm_runtime_disable(&adev->dev);
+
 	kfree(pl08x);
 out_no_pl08x:
 	amba_release_regions(adev);
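
The biggest pl08x change replaces the single src/dst/len triple in a transfer
descriptor with a list of pl08x_sg entries, one per scatterlist element, so
prep_slave_sg() no longer needs to BUG() on sg_len != 1: the LLI builder just
iterates the list, and pl08x_free_txd() releases the nodes together with the
descriptor. That bookkeeping, stripped down to a sketch (demo_* names are
invented):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_sg {			/* one node per scatterlist entry */
	dma_addr_t	src_addr;
	dma_addr_t	dst_addr;
	size_t		len;
	struct list_head node;
};

struct demo_txd {
	struct list_head dsg_list;	/* every chunk of one transfer */
};

static void demo_free_txd(struct demo_txd *txd)
{
	struct demo_sg *dsg, *next;

	/* _safe variant: entries are unlinked while walking the list */
	list_for_each_entry_safe(dsg, next, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}
	kfree(txd);
}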
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 6a483ea..fcfa0a8 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -107,10 +107,11 @@
 {
 	struct at_desc *desc, *_desc;
 	struct at_desc *ret = NULL;
+	unsigned long flags;
 	unsigned int i = 0;
 	LIST_HEAD(tmp_list);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
 		i++;
 		if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@
 		dev_dbg(chan2dev(&atchan->chan_common),
 				"desc %p not ACKed\n", desc);
 	}
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"scanned %u descriptors on freelist\n", i);
 
@@ -129,9 +130,9 @@
 	if (!ret) {
 		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
 		if (ret) {
-			spin_lock_bh(&atchan->lock);
+			spin_lock_irqsave(&atchan->lock, flags);
 			atchan->descs_allocated++;
-			spin_unlock_bh(&atchan->lock);
+			spin_unlock_irqrestore(&atchan->lock, flags);
 		} else {
 			dev_err(chan2dev(&atchan->chan_common),
 					"not enough descriptors available\n");
@@ -150,8 +151,9 @@
 {
 	if (desc) {
 		struct at_desc *child;
+		unsigned long flags;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&atchan->chan_common),
 					"moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@
 		dev_vdbg(chan2dev(&atchan->chan_common),
 			 "moving desc %p to freelist\n", desc);
 		list_add(&desc->desc_node, &atchan->free_list);
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	}
 }
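
The recurring edit through the rest of at_hdmac.c is mechanical: every
spin_lock_bh()/spin_unlock_bh() pair on the channel lock becomes
spin_lock_irqsave()/spin_unlock_irqrestore(). The _bh variants only mask
softirqs, which is insufficient once the lock may also be taken from hard-irq
context; the irqsave form disables local interrupts and restores their
previous state on unlock. The shape of the conversion, as a sketch:

#include <linux/spinlock.h>

static void demo_locked_op(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* safe from any context */
	/* ... critical section ... */
	spin_unlock_irqrestore(lock, flags);
}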
 
@@ -299,7 +301,7 @@
 
 	/* for cyclic transfers,
 	 * no need to replay callback function while stopping */
-	if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+	if (!atc_chan_is_cyclic(atchan)) {
 		dma_async_tx_callback	callback = txd->callback;
 		void			*param = txd->callback_param;
 
@@ -471,16 +473,17 @@
 static void atc_tasklet(unsigned long data)
 {
 	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+	unsigned long flags;
 
-	spin_lock(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
 		atc_handle_error(atchan);
-	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+	else if (atc_chan_is_cyclic(atchan))
 		atc_handle_cyclic(atchan);
 	else
 		atc_advance_work(atchan);
 
-	spin_unlock(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@
 	struct at_desc		*desc = txd_to_at_desc(tx);
 	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
 	dma_cookie_t		cookie;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	cookie = atc_assign_cookie(atchan, desc);
 
 	if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@
 		list_add_tail(&desc->desc_node, &atchan->queue);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return cookie;
 }
@@ -927,28 +931,29 @@
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	int			chan_id = atchan->chan_common.chan_id;
+	unsigned long		flags;
 
 	LIST_HEAD(list);
 
 	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
 
 	if (cmd == DMA_PAUSE) {
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
 		set_bit(ATC_IS_PAUSED, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else if (cmd == DMA_RESUME) {
-		if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+		if (!atc_chan_is_paused(atchan))
 			return 0;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
 		clear_bit(ATC_IS_PAUSED, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else if (cmd == DMA_TERMINATE_ALL) {
 		struct at_desc	*desc, *_desc;
 		/*
@@ -957,7 +962,7 @@
 		 * channel. We still have to poll the channel enable bit due
 		 * to AHB/HSB limitations.
 		 */
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		/* disabling channel: must also remove suspend state */
 		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@
 		/* if channel dedicated to cyclic operations, free it */
 		clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else {
 		return -ENXIO;
 	}
@@ -1004,9 +1009,10 @@
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	dma_cookie_t		last_used;
 	dma_cookie_t		last_complete;
+	unsigned long		flags;
 	enum dma_status		ret;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	last_complete = atchan->completed_cookie;
 	last_used = chan->cookie;
@@ -1021,7 +1027,7 @@
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	if (ret != DMA_SUCCESS)
 		dma_set_tx_state(txstate, last_complete, last_used,
@@ -1029,7 +1035,7 @@
 	else
 		dma_set_tx_state(txstate, last_complete, last_used, 0);
 
-	if (test_bit(ATC_IS_PAUSED, &atchan->status))
+	if (atc_chan_is_paused(atchan))
 		ret = DMA_PAUSED;
 
 	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@@ -1046,18 +1052,19 @@
 static void atc_issue_pending(struct dma_chan *chan)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	unsigned long		flags;
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
 	/* Not needed for cyclic transfers */
-	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+	if (atc_chan_is_cyclic(atchan))
 		return;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (!atc_chan_is_enabled(atchan)) {
 		atc_advance_work(atchan);
 	}
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
@@ -1073,6 +1080,7 @@
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	struct at_desc		*desc;
 	struct at_dma_slave	*atslave;
+	unsigned long		flags;
 	int			i;
 	u32			cfg;
 	LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@
 		list_add_tail(&desc->desc_node, &tmp_list);
 	}
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	atchan->descs_allocated = i;
 	list_splice(&tmp_list, &atchan->free_list);
 	atchan->completed_cookie = chan->cookie = 1;
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	/* channel parameters */
 	channel_writel(atchan, CFG, cfg);
@@ -1260,12 +1268,11 @@
 
 	/* initialize channels related values */
 	INIT_LIST_HEAD(&atdma->dma_common.channels);
-	for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
+	for (i = 0; i < pdata->nr_channels; i++) {
 		struct at_dma_chan	*atchan = &atdma->chan[i];
 
 		atchan->chan_common.device = &atdma->dma_common;
 		atchan->chan_common.cookie = atchan->completed_cookie = 1;
-		atchan->chan_common.chan_id = i;
 		list_add_tail(&atchan->chan_common.device_node,
 				&atdma->dma_common.channels);
 
@@ -1293,22 +1300,20 @@
 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
 
-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-
-	if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+		/* controller can do slave DMA: can trigger cyclic transfers */
+		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
 		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-
-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
-	    dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_control = atc_control;
+	}
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
 	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
-	  atdma->dma_common.chancnt);
+	  pdata->nr_channels);
 
 	dma_async_device_register(&atdma->dma_common);
 
@@ -1377,27 +1382,112 @@
 	clk_disable(atdma->clk);
 }
 
+static int at_dma_prepare(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;
+
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+		/* wait for transaction completion (except in cyclic case) */
+		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
+			return -EAGAIN;
+	}
+	return 0;
+}
+
+static void atc_suspend_cyclic(struct at_dma_chan *atchan)
+{
+	struct dma_chan	*chan = &atchan->chan_common;
+
+	/* The channel should be paused by the user;
+	 * do it anyway if that has not been done already */
+	if (!atc_chan_is_paused(atchan)) {
+		dev_warn(chan2dev(chan),
+		"cyclic channel not paused, should be done by channel user\n");
+		atc_control(chan, DMA_PAUSE, 0);
+	}
+
+	/* now preserve additional data for cyclic operations */
+	/* next descriptor address in the cyclic list */
+	atchan->save_dscr = channel_readl(atchan, DSCR);
+
+	vdbg_dump_regs(atchan);
+}
+
 static int at_dma_suspend_noirq(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;
 
-	at_dma_off(platform_get_drvdata(pdev));
+	/* preserve data */
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+		if (atc_chan_is_cyclic(atchan))
+			atc_suspend_cyclic(atchan);
+		atchan->save_cfg = channel_readl(atchan, CFG);
+	}
+	atdma->save_imr = dma_readl(atdma, EBCIMR);
+
+	/* disable DMA controller */
+	at_dma_off(atdma);
 	clk_disable(atdma->clk);
 	return 0;
 }
 
+static void atc_resume_cyclic(struct at_dma_chan *atchan)
+{
+	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);
+
+	/* restore channel status for cyclic descriptors list:
+	 * next descriptor in the cyclic list at the time of suspend */
+	channel_writel(atchan, SADDR, 0);
+	channel_writel(atchan, DADDR, 0);
+	channel_writel(atchan, CTRLA, 0);
+	channel_writel(atchan, CTRLB, 0);
+	channel_writel(atchan, DSCR, atchan->save_dscr);
+	dma_writel(atdma, CHER, atchan->mask);
+
+	/* The channel pause status should be cleared by the channel user;
+	 * we cannot take the initiative to do it here */
+
+	vdbg_dump_regs(atchan);
+}
+
 static int at_dma_resume_noirq(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;
 
+	/* bring back DMA controller */
 	clk_enable(atdma->clk);
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
+
+	/* clear any pending interrupt */
+	while (dma_readl(atdma, EBCISR))
+		cpu_relax();
+
+	/* restore saved data */
+	dma_writel(atdma, EBCIER, atdma->save_imr);
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+		channel_writel(atchan, CFG, atchan->save_cfg);
+		if (atc_chan_is_cyclic(atchan))
+			atc_resume_cyclic(atchan);
+	}
 	return 0;
 }
 
 static const struct dev_pm_ops at_dma_dev_pm_ops = {
+	.prepare = at_dma_prepare,
 	.suspend_noirq = at_dma_suspend_noirq,
 	.resume_noirq = at_dma_resume_noirq,
 };
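
The new at_dma_prepare() callback relies on the fact that a non-zero return
from a .prepare handler aborts the system suspend in progress. A hedged
sketch of that idea; struct demo_dmac and demo_dmac_busy() are hypothetical
stand-ins for the driver's own state and busy check:

	#include <linux/device.h>
	#include <linux/pm.h>

	struct demo_dmac;					/* hypothetical */
	extern bool demo_dmac_busy(struct demo_dmac *dmac);	/* hypothetical */

	static int demo_prepare(struct device *dev)
	{
		struct demo_dmac *dmac = dev_get_drvdata(dev);

		/* Returning an error here aborts this suspend attempt;
		 * it can be retried once the transfers have drained. */
		if (demo_dmac_busy(dmac))
			return -EAGAIN;
		return 0;
	}
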
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 087dbf1..aa4c9ae 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -204,6 +204,9 @@
  * @status: transmit status information from irq/prep* functions
  *                to tasklet (use atomic operations)
  * @tasklet: bottom half to finish transaction work
+ * @save_cfg: configuration register that is saved on suspend/resume cycle
+ * @save_dscr: for cyclic operations, preserve next descriptor address in
+ *             the cyclic list on suspend/resume cycle
  * @lock: serializes enqueue/dequeue operations to descriptors lists
  * @completed_cookie: identifier for the most recently completed operation
  * @active_list: list of descriptors dmaengine is being running on
@@ -218,6 +221,8 @@
 	u8			mask;
 	unsigned long		status;
 	struct tasklet_struct	tasklet;
+	u32			save_cfg;
+	u32			save_dscr;
 
 	spinlock_t		lock;
 
@@ -248,6 +253,7 @@
  * @chan_common: common dmaengine dma_device object members
  * @ch_regs: memory mapped register base
  * @clk: dma controller clock
+ * @save_imr: interrupt mask register that is saved on suspend/resume cycle
  * @all_chan_mask: all channels available in a mask
  * @dma_desc_pool: base of DMA descriptor region (DMA address)
  * @chan: channels table to store at_dma_chan structures
@@ -256,6 +262,7 @@
 	struct dma_device	dma_common;
 	void __iomem		*regs;
 	struct clk		*clk;
+	u32			save_imr;
 
 	u8			all_chan_mask;
 
@@ -355,6 +362,23 @@
 	return !!(dma_readl(atdma, CHSR) & atchan->mask);
 }
 
+/**
+ * atc_chan_is_paused - test channel pause/resume status
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
+{
+	return test_bit(ATC_IS_PAUSED, &atchan->status);
+}
+
+/**
+ * atc_chan_is_cyclic - test if given channel has cyclic property set
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
+{
+	return test_bit(ATC_IS_CYCLIC, &atchan->status);
+}
 
 /**
  * set_desc_eol - set end-of-link to descriptor so it will end transfer
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 765f5ff..eb1d864 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -10,6 +10,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
+#include <linux/freezer.h>
 #include <linux/init.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -251,6 +252,7 @@
 	int			i;
 
 	thread_name = current->comm;
+	set_freezable_with_signal();
 
 	ret = -ENOMEM;
 
@@ -305,7 +307,8 @@
 		dma_addr_t dma_srcs[src_cnt];
 		dma_addr_t dma_dsts[dst_cnt];
 		struct completion cmp;
-		unsigned long tmo = msecs_to_jiffies(timeout);
+		unsigned long start, tmo, end = 0 /* compiler... */;
+		bool reload = true;
 		u8 align = 0;
 
 		total_tests++;
@@ -404,7 +407,17 @@
 		}
 		dma_async_issue_pending(chan);
 
-		tmo = wait_for_completion_timeout(&cmp, tmo);
+		do {
+			start = jiffies;
+			if (reload)
+				end = start + msecs_to_jiffies(timeout);
+			else if (end <= start)
+				end = start + 1;
+			tmo = wait_for_completion_interruptible_timeout(&cmp,
+								end - start);
+			reload = try_to_freeze();
+		} while (tmo == -ERESTARTSYS);
+
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
 		if (tmo == 0) {
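
The replacement loop makes the dmatest thread freezable without letting time
spent frozen count against the transfer timeout: the deadline is recomputed
after every thaw, and the wait restarts whenever the freezer's fake signal
interrupts it. The same pattern as a hypothetical helper:

	#include <linux/completion.h>
	#include <linux/freezer.h>
	#include <linux/jiffies.h>

	/* Wait up to timeout_ms for @done, restarting the wait after each
	 * freeze/thaw cycle so frozen time is not charged to the deadline. */
	static long demo_wait_freezable(struct completion *done,
					unsigned int timeout_ms)
	{
		unsigned long start, end = 0;
		bool reload = true;
		long tmo;

		do {
			start = jiffies;
			if (reload)
				end = start + msecs_to_jiffies(timeout_ms);
			else if (end <= start)
				end = start + 1; /* never ask for a zero wait */
			tmo = wait_for_completion_interruptible_timeout(done,
								end - start);
			reload = try_to_freeze();
		} while (tmo == -ERESTARTSYS);

		return tmo;	/* 0 on timeout, >0 jiffies left otherwise */
	}
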
@@ -477,6 +490,8 @@
 	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
 			thread_name, total_tests, failed_tests, ret);
 
+	/* terminate all transfers on specified channels */
+	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 	if (iterations > 0)
 		while (!kthread_should_stop()) {
 			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -499,6 +514,10 @@
 		list_del(&thread->node);
 		kfree(thread);
 	}
+
+	/* terminate all transfers on specified channels */
+	dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
+
 	kfree(dtc);
 }
 
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 4d180ca..9bfd6d3 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1407,12 +1407,11 @@
 	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
 
 	INIT_LIST_HEAD(&dw->dma.channels);
-	for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
+	for (i = 0; i < pdata->nr_channels; i++) {
 		struct dw_dma_chan	*dwc = &dw->chan[i];
 
 		dwc->chan.device = &dw->dma;
 		dwc->chan.cookie = dwc->completed = 1;
-		dwc->chan.chan_id = i;
 		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
 			list_add_tail(&dwc->chan.device_node,
 					&dw->dma.channels);
@@ -1468,7 +1467,7 @@
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
 
 	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
-			dev_name(&pdev->dev), dw->dma.chancnt);
+			dev_name(&pdev->dev), pdata->nr_channels);
 
 	dma_async_device_register(&dw->dma);
 
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 5d7a49b..b47e2b8 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index d99f71c..d746899 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -14,6 +14,7 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7bd7e98..eab1fe7 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
@@ -318,6 +319,7 @@
 	dma_addr_t			context_phys;
 	struct dma_device		dma_device;
 	struct clk			*clk;
+	struct mutex			channel_0_lock;
 	struct sdma_script_start_addrs	*script_addrs;
 };
 
@@ -415,11 +417,15 @@
 	dma_addr_t buf_phys;
 	int ret;
 
+	mutex_lock(&sdma->channel_0_lock);
+
 	buf_virt = dma_alloc_coherent(NULL,
 			size,
 			&buf_phys, GFP_KERNEL);
-	if (!buf_virt)
-		return -ENOMEM;
+	if (!buf_virt) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
 
 	bd0->mode.command = C0_SETPM;
 	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
@@ -433,6 +439,9 @@
 
 	dma_free_coherent(NULL, size, buf_virt, buf_phys);
 
+err_out:
+	mutex_unlock(&sdma->channel_0_lock);
+
 	return ret;
 }
 
@@ -656,6 +665,8 @@
 	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
 	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
 
+	mutex_lock(&sdma->channel_0_lock);
+
 	memset(context, 0, sizeof(*context));
 	context->channel_state.pc = load_address;
 
@@ -676,6 +687,8 @@
 
 	ret = sdma_run_channel(&sdma->channel[0]);
 
+	mutex_unlock(&sdma->channel_0_lock);
+
 	return ret;
 }
 
@@ -1131,18 +1144,17 @@
 			saddr_arr[i] = addr_arr[i];
 }
 
-static int __init sdma_get_firmware(struct sdma_engine *sdma,
-		const char *fw_name)
+static void sdma_load_firmware(const struct firmware *fw, void *context)
 {
-	const struct firmware *fw;
+	struct sdma_engine *sdma = context;
 	const struct sdma_firmware_header *header;
-	int ret;
 	const struct sdma_script_start_addrs *addr;
 	unsigned short *ram_code;
 
-	ret = request_firmware(&fw, fw_name, sdma->dev);
-	if (ret)
-		return ret;
+	if (!fw) {
+		dev_err(sdma->dev, "firmware not found\n");
+		return;
+	}
 
 	if (fw->size < sizeof(*header))
 		goto err_firmware;
@@ -1172,6 +1184,16 @@
 
 err_firmware:
 	release_firmware(fw);
+}
+
+static int __init sdma_get_firmware(struct sdma_engine *sdma,
+		const char *fw_name)
+{
+	int ret;
+
+	ret = request_firmware_nowait(THIS_MODULE,
+			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
+			GFP_KERNEL, sdma, sdma_load_firmware);
 
 	return ret;
 }
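
request_firmware_nowait() returns immediately and invokes the completion
callback once the image is available (or with fw == NULL when the lookup
fails), so probe no longer blocks waiting for user space. A minimal sketch of
the asynchronous pattern; demo_fw_cont() and demo_fw_load() are hypothetical
names:

	#include <linux/device.h>
	#include <linux/firmware.h>
	#include <linux/module.h>

	static void demo_fw_cont(const struct firmware *fw, void *context)
	{
		struct device *dev = context;

		if (!fw) {			/* lookup failed */
			dev_err(dev, "firmware not found\n");
			return;
		}
		/* ... parse fw->data / fw->size here ... */
		release_firmware(fw);
	}

	static int demo_fw_load(struct device *dev, const char *name)
	{
		return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					       name, dev, GFP_KERNEL, dev,
					       demo_fw_cont);
	}
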
@@ -1269,11 +1291,14 @@
 	struct sdma_platform_data *pdata = pdev->dev.platform_data;
 	int i;
 	struct sdma_engine *sdma;
+	s32 *saddr_arr;
 
 	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
 	if (!sdma)
 		return -ENOMEM;
 
+	mutex_init(&sdma->channel_0_lock);
+
 	sdma->dev = &pdev->dev;
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1310,6 +1335,11 @@
 		goto err_alloc;
 	}
 
+	/* initially no scripts available */
+	saddr_arr = (s32 *)sdma->script_addrs;
+	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+		saddr_arr[i] = -EINVAL;
+
 	if (of_id)
 		pdev->id_entry = of_id->data;
 	sdma->devtype = pdev->id_entry->driver_data;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 8a3fdd8..9e96c43 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -115,16 +115,15 @@
 
 /**
  * dmac1_mask_periphral_intr -	mask the peripheral interrupt
- * @midc: dma channel for which masking is required
+ * @mid: dma device for which masking is required
  *
  * Masks the DMA peripheral interrupt.
  * This is valid for DMAC1 family controllers only.
  * The controller should have the peripheral mask registers already mapped.
  */
-static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
+static void dmac1_mask_periphral_intr(struct middma_device *mid)
 {
 	u32 pimr;
-	struct middma_device *mid = to_middma_device(midc->chan.device);
 
 	if (mid->pimr_mask) {
 		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
@@ -184,7 +183,6 @@
 static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
 {
 	/* Check LPE PISR, make sure fwd is disabled */
-	dmac1_mask_periphral_intr(midc);
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
@@ -1114,7 +1112,6 @@
 
 		midch->chan.device = &dma->common;
 		midch->chan.cookie =  1;
-		midch->chan.chan_id = i;
 		midch->ch_id = dma->chan_base + i;
 		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
 
@@ -1150,7 +1147,6 @@
 	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
 	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
 	dma->common.dev = &pdev->dev;
-	dma->common.chancnt = dma->max_chan;
 
 	dma->common.device_alloc_chan_resources =
 					intel_mid_dma_alloc_chan_resources;
@@ -1350,6 +1346,7 @@
 		if (device->ch[i].in_use)
 			return -EAGAIN;
 	}
+	dmac1_mask_periphral_intr(device);
 	device->state = SUSPENDED;
 	pci_save_state(pci);
 	pci_disable_device(pci);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index b9bae94..8ba4edc 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -741,7 +741,6 @@
 		mchan = &mdma->channels[i];
 
 		mchan->chan.device = dma;
-		mchan->chan.chan_id = i;
 		mchan->chan.cookie = 1;
 		mchan->completed_cookie = mchan->chan.cookie;
 
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index be641cb..b4588bd 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -130,6 +130,23 @@
 	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
 };
 
+static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
+{
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	int chan_id = mxs_chan->chan.chan_id;
+	int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
+
+	/* enable apbh channel clock */
+	if (dma_is_apbh()) {
+		if (apbh_is_old())
+			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+		else
+			writel(1 << chan_id,
+				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+	}
+}
+
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -148,38 +165,21 @@
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_id = mxs_chan->chan.chan_id;
 
+	/* clkgate needs to be enabled before writing other registers */
+	mxs_dma_clkgate(mxs_chan, 1);
+
 	/* set cmd_addr up */
 	writel(mxs_chan->ccw_phys,
 		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
 
-	/* enable apbh channel clock */
-	if (dma_is_apbh()) {
-		if (apbh_is_old())
-			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
-		else
-			writel(1 << chan_id,
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
-	}
-
 	/* write 1 to SEMA to kick off the channel */
 	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
 }
 
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
 {
-	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-	int chan_id = mxs_chan->chan.chan_id;
-
 	/* disable apbh channel clock */
-	if (dma_is_apbh()) {
-		if (apbh_is_old())
-			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
-		else
-			writel(1 << chan_id,
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
-	}
+	mxs_dma_clkgate(mxs_chan, 0);
 
 	mxs_chan->status = DMA_SUCCESS;
 }
@@ -338,7 +338,10 @@
 	if (ret)
 		goto err_clk;
 
+	/* clkgate needs to be enabled for reset to finish */
+	mxs_dma_clkgate(mxs_chan, 1);
 	mxs_dma_reset_chan(mxs_chan);
+	mxs_dma_clkgate(mxs_chan, 0);
 
 	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
 	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1ac8d4b..a6d0e3d 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -60,7 +60,7 @@
 #define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
 #define DMA_DESC_FOLLOW_WITH_IRQ	0x3
 
-#define MAX_CHAN_NR			8
+#define MAX_CHAN_NR			12
 
 #define DMA_MASK_CTL0_MODE	0x33333333
 #define DMA_MASK_CTL2_MODE	0x00003333
@@ -872,8 +872,7 @@
 	int i;
 
 	nr_channels = id->driver_data;
-	pd = kzalloc(sizeof(struct pch_dma)+
-		sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
 		return -ENOMEM;
 
@@ -926,7 +925,6 @@
 	}
 
 	pd->dma.dev = &pdev->dev;
-	pd->dma.chancnt = nr_channels;
 
 	INIT_LIST_HEAD(&pd->dma.channels);
 
@@ -935,7 +933,6 @@
 
 		pd_chan->chan.device = &pd->dma;
 		pd_chan->chan.cookie = 1;
-		pd_chan->chan.chan_id = i;
 
 		pd_chan->membase = &regs->desc[i];
 
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 00eee59..57104147 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -17,6 +17,8 @@
 #include <linux/interrupt.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl330.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
 
 #define NR_DEFAULT_DESC	16
 
@@ -68,6 +70,14 @@
 	 * NULL if the channel is available to be acquired.
 	 */
 	void *pl330_chid;
+
+	/* For D-to-M and M-to-D channels */
+	int burst_sz; /* the peripheral fifo width */
+	int burst_len; /* the number of burst */
+	dma_addr_t fifo_addr;
+
+	/* for cyclic capability */
+	bool cyclic;
 };
 
 struct dma_pl330_dmac {
@@ -83,6 +93,8 @@
 
 	/* Peripheral channels connected to this DMAC */
 	struct dma_pl330_chan *peripherals; /* keep at end */
+
+	struct clk *clk;
 };
 
 struct dma_pl330_desc {
@@ -152,6 +164,31 @@
 	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
 }
 
+static inline void handle_cyclic_desc_list(struct list_head *list)
+{
+	struct dma_pl330_desc *desc;
+	struct dma_pl330_chan *pch;
+	unsigned long flags;
+
+	if (list_empty(list))
+		return;
+
+	list_for_each_entry(desc, list, node) {
+		dma_async_tx_callback callback;
+
+		/* Change status to reload it */
+		desc->status = PREP;
+		pch = desc->pchan;
+		callback = desc->txd.callback;
+		if (callback)
+			callback(desc->txd.callback_param);
+	}
+
+	spin_lock_irqsave(&pch->lock, flags);
+	list_splice_tail_init(list, &pch->work_list);
+	spin_unlock_irqrestore(&pch->lock, flags);
+}
+
 static inline void fill_queue(struct dma_pl330_chan *pch)
 {
 	struct dma_pl330_desc *desc;
@@ -205,7 +242,10 @@
 
 	spin_unlock_irqrestore(&pch->lock, flags);
 
-	free_desc_list(&list);
+	if (pch->cyclic)
+		handle_cyclic_desc_list(&list);
+	else
+		free_desc_list(&list);
 }
 
 static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -236,6 +276,7 @@
 	spin_lock_irqsave(&pch->lock, flags);
 
 	pch->completed = chan->cookie = 1;
+	pch->cyclic = false;
 
 	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
 	if (!pch->pl330_chid) {
@@ -253,25 +294,52 @@
 static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
-	struct dma_pl330_desc *desc;
+	struct dma_pl330_desc *desc, *_dt;
 	unsigned long flags;
+	struct dma_pl330_dmac *pdmac = pch->dmac;
+	struct dma_slave_config *slave_config;
+	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		spin_lock_irqsave(&pch->lock, flags);
+
+		/* FLUSH the PL330 Channel thread */
+		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+
+		/* Mark all desc done */
+		list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
+			desc->status = DONE;
+			pch->completed = desc->txd.cookie;
+			list_move_tail(&desc->node, &list);
+		}
+
+		list_splice_tail_init(&list, &pdmac->desc_pool);
+		spin_unlock_irqrestore(&pch->lock, flags);
+		break;
+	case DMA_SLAVE_CONFIG:
+		slave_config = (struct dma_slave_config *)arg;
+
+		if (slave_config->direction == DMA_TO_DEVICE) {
+			if (slave_config->dst_addr)
+				pch->fifo_addr = slave_config->dst_addr;
+			if (slave_config->dst_addr_width)
+				pch->burst_sz = __ffs(slave_config->dst_addr_width);
+			if (slave_config->dst_maxburst)
+				pch->burst_len = slave_config->dst_maxburst;
+		} else if (slave_config->direction == DMA_FROM_DEVICE) {
+			if (slave_config->src_addr)
+				pch->fifo_addr = slave_config->src_addr;
+			if (slave_config->src_addr_width)
+				pch->burst_sz = __ffs(slave_config->src_addr_width);
+			if (slave_config->src_maxburst)
+				pch->burst_len = slave_config->src_maxburst;
+		}
+		break;
+	default:
+		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
 		return -ENXIO;
-
-	spin_lock_irqsave(&pch->lock, flags);
-
-	/* FLUSH the PL330 Channel thread */
-	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
-
-	/* Mark all desc done */
-	list_for_each_entry(desc, &pch->work_list, node)
-		desc->status = DONE;
-
-	spin_unlock_irqrestore(&pch->lock, flags);
-
-	pl330_tasklet((unsigned long) pch);
+	}
 
 	return 0;
 }
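
With DMA_SLAVE_CONFIG handled in pl330_control(), clients can describe the
peripheral FIFO at runtime rather than through chan->private. A hedged
client-side sketch; the FIFO address, bus width and burst length below are
placeholder values:

	#include <linux/dmaengine.h>

	static int demo_config_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_TO_DEVICE, /* mem -> peripheral */
			.dst_addr	= fifo_addr,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst	= 4,
		};

		/* resolves to device_control(chan, DMA_SLAVE_CONFIG, &cfg) */
		return dmaengine_slave_config(chan, &cfg);
	}
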
@@ -288,6 +356,9 @@
 	pl330_release_channel(pch->pl330_chid);
 	pch->pl330_chid = NULL;
 
+	if (pch->cyclic)
+		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
+
 	spin_unlock_irqrestore(&pch->lock, flags);
 }
 
@@ -453,7 +524,7 @@
 
 	if (peri) {
 		desc->req.rqtype = peri->rqtype;
-		desc->req.peri = peri->peri_id;
+		desc->req.peri = pch->chan.chan_id;
 	} else {
 		desc->req.rqtype = MEMTOMEM;
 		desc->req.peri = 0;
@@ -524,6 +595,51 @@
 	return burst_len;
 }
 
+static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
+		size_t period_len, enum dma_data_direction direction)
+{
+	struct dma_pl330_desc *desc;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	dma_addr_t dst;
+	dma_addr_t src;
+
+	desc = pl330_get_desc(pch);
+	if (!desc) {
+		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+			__func__, __LINE__);
+		return NULL;
+	}
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		desc->rqcfg.src_inc = 1;
+		desc->rqcfg.dst_inc = 0;
+		src = dma_addr;
+		dst = pch->fifo_addr;
+		break;
+	case DMA_FROM_DEVICE:
+		desc->rqcfg.src_inc = 0;
+		desc->rqcfg.dst_inc = 1;
+		src = pch->fifo_addr;
+		dst = dma_addr;
+		break;
+	default:
+		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
+		__func__, __LINE__);
+		return NULL;
+	}
+
+	desc->rqcfg.brst_size = pch->burst_sz;
+	desc->rqcfg.brst_len = 1;
+
+	pch->cyclic = true;
+
+	fill_px(&desc->px, dst, src, period_len);
+
+	return &desc->txd;
+}
+
 static struct dma_async_tx_descriptor *
 pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 		dma_addr_t src, size_t len, unsigned long flags)
@@ -579,7 +695,7 @@
 	struct dma_pl330_peri *peri = chan->private;
 	struct scatterlist *sg;
 	unsigned long flags;
-	int i, burst_size;
+	int i;
 	dma_addr_t addr;
 
 	if (unlikely(!pch || !sgl || !sg_len || !peri))
@@ -595,8 +711,7 @@
 		return NULL;
 	}
 
-	addr = peri->fifo_addr;
-	burst_size = peri->burst_sz;
+	addr = pch->fifo_addr;
 
 	first = NULL;
 
@@ -644,7 +759,7 @@
 				sg_dma_address(sg), addr, sg_dma_len(sg));
 		}
 
-		desc->rqcfg.brst_size = burst_size;
+		desc->rqcfg.brst_size = pch->burst_sz;
 		desc->rqcfg.brst_len = 1;
 	}
 
@@ -696,6 +811,30 @@
 		goto probe_err1;
 	}
 
+	pdmac->clk = clk_get(&adev->dev, "dma");
+	if (IS_ERR(pdmac->clk)) {
+		dev_err(&adev->dev, "Cannot get operation clock.\n");
+		ret = -EINVAL;
+		goto probe_err1;
+	}
+
+	amba_set_drvdata(adev, pdmac);
+
+#ifdef CONFIG_PM_RUNTIME
+	/* to use the runtime PM helper functions */
+	pm_runtime_enable(&adev->dev);
+
+	/* enable the power domain */
+	if (pm_runtime_get_sync(&adev->dev)) {
+		dev_err(&adev->dev, "failed to get runtime pm\n");
+		ret = -ENODEV;
+		goto probe_err1;
+	}
+#else
+	/* enable dma clk */
+	clk_enable(pdmac->clk);
+#endif
+
 	irq = adev->irq[0];
 	ret = request_irq(irq, pl330_irq_handler, 0,
 			dev_name(&adev->dev), pi);
@@ -732,6 +871,7 @@
 			case MEMTODEV:
 			case DEVTOMEM:
 				dma_cap_set(DMA_SLAVE, pd->cap_mask);
+				dma_cap_set(DMA_CYCLIC, pd->cap_mask);
 				break;
 			default:
 				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
@@ -747,11 +887,9 @@
 		spin_lock_init(&pch->lock);
 		pch->pl330_chid = NULL;
 		pch->chan.device = pd;
-		pch->chan.chan_id = i;
 		pch->dmac = pdmac;
 
 		/* Add the channel to the DMAC list */
-		pd->chancnt++;
 		list_add_tail(&pch->chan.device_node, &pd->channels);
 	}
 
@@ -760,6 +898,7 @@
 	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
 	pd->device_free_chan_resources = pl330_free_chan_resources;
 	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
+	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
 	pd->device_tx_status = pl330_tx_status;
 	pd->device_prep_slave_sg = pl330_prep_slave_sg;
 	pd->device_control = pl330_control;
@@ -771,8 +910,6 @@
 		goto probe_err4;
 	}
 
-	amba_set_drvdata(adev, pdmac);
-
 	dev_info(&adev->dev,
 		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
 	dev_info(&adev->dev,
@@ -833,6 +970,13 @@
 	res = &adev->res;
 	release_mem_region(res->start, resource_size(res));
 
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_put(&adev->dev);
+	pm_runtime_disable(&adev->dev);
+#else
+	clk_disable(pdmac->clk);
+#endif
+
 	kfree(pdmac);
 
 	return 0;
@@ -846,10 +990,49 @@
 	{ 0, 0 },
 };
 
+#ifdef CONFIG_PM_RUNTIME
+static int pl330_runtime_suspend(struct device *dev)
+{
+	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+	if (!pdmac) {
+		dev_err(dev, "failed to get dmac\n");
+		return -ENODEV;
+	}
+
+	clk_disable(pdmac->clk);
+
+	return 0;
+}
+
+static int pl330_runtime_resume(struct device *dev)
+{
+	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+	if (!pdmac) {
+		dev_err(dev, "failed to get dmac\n");
+		return -ENODEV;
+	}
+
+	clk_enable(pdmac->clk);
+
+	return 0;
+}
+#else
+#define pl330_runtime_suspend	NULL
+#define pl330_runtime_resume	NULL
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops pl330_pm_ops = {
+	.runtime_suspend = pl330_runtime_suspend,
+	.runtime_resume = pl330_runtime_resume,
+};
+
 static struct amba_driver pl330_driver = {
 	.drv = {
 		.owner = THIS_MODULE,
 		.name = "dma-pl330",
+		.pm = &pl330_pm_ops,
 	},
 	.id_table = pl330_ids,
 	.probe = pl330_probe,
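
The pl330 probe path now takes a runtime-PM reference when CONFIG_PM_RUNTIME
is set (and enables the clock directly otherwise), with the new dev_pm_ops
gating the clock whenever the controller idles. The shape of that pairing,
sketched around a hypothetical demo_dmac:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	struct demo_dmac {			/* hypothetical driver data */
		struct clk *clk;
	};

	static int demo_runtime_suspend(struct device *dev)
	{
		struct demo_dmac *dmac = dev_get_drvdata(dev);

		clk_disable(dmac->clk);		/* gate the clock while idle */
		return 0;
	}

	static int demo_runtime_resume(struct device *dev)
	{
		struct demo_dmac *dmac = dev_get_drvdata(dev);

		clk_enable(dmac->clk);
		return 0;
	}

	static const struct dev_pm_ops demo_pm_ops = {
		.runtime_suspend = demo_runtime_suspend,
		.runtime_resume	 = demo_runtime_resume,
	};
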
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 7f49235..81809c2 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -259,14 +259,23 @@
 	return 0;
 }
 
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
 	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+	struct sh_dmae_slave *param = tx->chan->private;
 	dma_async_tx_callback callback = tx->callback;
 	dma_cookie_t cookie;
+	bool power_up;
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irq(&sh_chan->desc_lock);
+
+	if (list_empty(&sh_chan->ld_queue))
+		power_up = true;
+	else
+		power_up = false;
 
 	cookie = sh_chan->common.cookie;
 	cookie++;
@@ -302,7 +311,38 @@
 		tx->cookie, &last->async_tx, sh_chan->id,
 		desc->hw.sar, desc->hw.tcr, desc->hw.dar);
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	if (power_up) {
+		sh_chan->pm_state = DMAE_PM_BUSY;
+
+		pm_runtime_get(sh_chan->dev);
+
+		spin_unlock_irq(&sh_chan->desc_lock);
+
+		pm_runtime_barrier(sh_chan->dev);
+
+		spin_lock_irq(&sh_chan->desc_lock);
+
+		/* Have we been reset, while waiting? */
+		if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
+			dev_dbg(sh_chan->dev, "Bring up channel %d\n",
+				sh_chan->id);
+			if (param) {
+				const struct sh_dmae_slave_config *cfg =
+					param->config;
+
+				dmae_set_dmars(sh_chan, cfg->mid_rid);
+				dmae_set_chcr(sh_chan, cfg->chcr);
+			} else {
+				dmae_init(sh_chan);
+			}
+
+			if (sh_chan->pm_state == DMAE_PM_PENDING)
+				sh_chan_xfer_ld_queue(sh_chan);
+			sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+		}
+	}
+
+	spin_unlock_irq(&sh_chan->desc_lock);
 
 	return cookie;
 }
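
The subtle part of the shdma submit path is that the runtime-PM reference is
taken only on the empty-to-non-empty queue transition, and the channel lock
must be dropped around pm_runtime_barrier() so the asynchronous resume can
complete. A condensed sketch under those assumptions; struct demo_chan and
its fields are hypothetical:

	#include <linux/list.h>
	#include <linux/pm_runtime.h>
	#include <linux/spinlock.h>

	struct demo_chan {			/* hypothetical channel */
		spinlock_t lock;
		struct list_head queue;
		struct device *dev;
	};

	static void demo_submit(struct demo_chan *dc, struct list_head *node)
	{
		spin_lock_irq(&dc->lock);
		if (list_empty(&dc->queue)) {
			pm_runtime_get(dc->dev);	/* asynchronous get */
			spin_unlock_irq(&dc->lock);
			/* wait for any pending runtime-PM work to finish */
			pm_runtime_barrier(dc->dev);
			spin_lock_irq(&dc->lock);
			/* reconfigure the hardware here if a reset
			 * raced with us while the lock was dropped */
		}
		list_add_tail(node, &dc->queue);
		spin_unlock_irq(&dc->lock);
	}
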
@@ -346,8 +386,6 @@
 	struct sh_dmae_slave *param = chan->private;
 	int ret;
 
-	pm_runtime_get_sync(sh_chan->dev);
-
 	/*
 	 * This relies on the guarantee from dmaengine that alloc_chan_resources
 	 * never runs concurrently with itself or free_chan_resources.
@@ -367,31 +405,20 @@
 		}
 
 		param->config = cfg;
-
-		dmae_set_dmars(sh_chan, cfg->mid_rid);
-		dmae_set_chcr(sh_chan, cfg->chcr);
-	} else {
-		dmae_init(sh_chan);
 	}
 
-	spin_lock_bh(&sh_chan->desc_lock);
 	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
-		spin_unlock_bh(&sh_chan->desc_lock);
 		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
-		if (!desc) {
-			spin_lock_bh(&sh_chan->desc_lock);
+		if (!desc)
 			break;
-		}
 		dma_async_tx_descriptor_init(&desc->async_tx,
 					&sh_chan->common);
 		desc->async_tx.tx_submit = sh_dmae_tx_submit;
 		desc->mark = DESC_IDLE;
 
-		spin_lock_bh(&sh_chan->desc_lock);
 		list_add(&desc->node, &sh_chan->ld_free);
 		sh_chan->descs_allocated++;
 	}
-	spin_unlock_bh(&sh_chan->desc_lock);
 
 	if (!sh_chan->descs_allocated) {
 		ret = -ENOMEM;
@@ -405,7 +432,7 @@
 		clear_bit(param->slave_id, sh_dmae_slave_used);
 etestused:
 efindslave:
-	pm_runtime_put(sh_chan->dev);
+	chan->private = NULL;
 	return ret;
 }
 
@@ -417,7 +444,6 @@
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);
-	int descs = sh_chan->descs_allocated;
 
 	/* Protect against ISR */
 	spin_lock_irq(&sh_chan->desc_lock);
@@ -437,15 +463,12 @@
 		chan->private = NULL;
 	}
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irq(&sh_chan->desc_lock);
 
 	list_splice_init(&sh_chan->ld_free, &list);
 	sh_chan->descs_allocated = 0;
 
-	spin_unlock_bh(&sh_chan->desc_lock);
-
-	if (descs > 0)
-		pm_runtime_put(sh_chan->dev);
+	spin_unlock_irq(&sh_chan->desc_lock);
 
 	list_for_each_entry_safe(desc, _desc, &list, node)
 		kfree(desc);
@@ -534,6 +557,7 @@
 	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
 	LIST_HEAD(tx_list);
 	int chunks = 0;
+	unsigned long irq_flags;
 	int i;
 
 	if (!sg_len)
@@ -544,7 +568,7 @@
 			(SH_DMA_TCR_MAX + 1);
 
 	/* Have to lock the whole loop to protect against concurrent release */
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
 
 	/*
 	 * Chaining:
@@ -590,7 +614,7 @@
 	/* Put them back on the free list, so, they don't get lost */
 	list_splice_tail(&tx_list, &sh_chan->ld_free);
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
 
 	return &first->async_tx;
 
@@ -599,7 +623,7 @@
 		new->mark = DESC_IDLE;
 	list_splice(&tx_list, &sh_chan->ld_free);
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
 
 	return NULL;
 }
@@ -661,6 +685,7 @@
 			   unsigned long arg)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+	unsigned long flags;
 
 	/* Only supports DMA_TERMINATE_ALL */
 	if (cmd != DMA_TERMINATE_ALL)
@@ -669,7 +694,7 @@
 	if (!chan)
 		return -EINVAL;
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, flags);
 	dmae_halt(sh_chan);
 
 	if (!list_empty(&sh_chan->ld_queue)) {
@@ -678,9 +703,8 @@
 						  struct sh_desc, node);
 		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
 			sh_chan->xmit_shift;
-
 	}
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
 	sh_dmae_chan_ld_cleanup(sh_chan, true);
 
@@ -695,8 +719,9 @@
 	dma_cookie_t cookie = 0;
 	dma_async_tx_callback callback = NULL;
 	void *param = NULL;
+	unsigned long flags;
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, flags);
 	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
 		struct dma_async_tx_descriptor *tx = &desc->async_tx;
 
@@ -762,7 +787,13 @@
 		     async_tx_test_ack(&desc->async_tx)) || all) {
 			/* Remove from ld_queue list */
 			desc->mark = DESC_IDLE;
+
 			list_move(&desc->node, &sh_chan->ld_free);
+
+			if (list_empty(&sh_chan->ld_queue)) {
+				dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+				pm_runtime_put(sh_chan->dev);
+			}
 		}
 	}
 
@@ -773,7 +804,7 @@
 		 */
 		sh_chan->completed_cookie = sh_chan->common.cookie;
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
 	if (callback)
 		callback(param);
@@ -792,14 +823,14 @@
 		;
 }
 
+/* Called under spin_lock_irq(&sh_chan->desc_lock) */
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 {
 	struct sh_desc *desc;
 
-	spin_lock_bh(&sh_chan->desc_lock);
 	/* DMA work check */
 	if (dmae_is_busy(sh_chan))
-		goto sh_chan_xfer_ld_queue_end;
+		return;
 
 	/* Find the first not transferred descriptor */
 	list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -812,15 +843,18 @@
 			dmae_start(sh_chan);
 			break;
 		}
-
-sh_chan_xfer_ld_queue_end:
-	spin_unlock_bh(&sh_chan->desc_lock);
 }
 
 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
-	sh_chan_xfer_ld_queue(sh_chan);
+
+	spin_lock_irq(&sh_chan->desc_lock);
+	if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
+		sh_chan_xfer_ld_queue(sh_chan);
+	else
+		sh_chan->pm_state = DMAE_PM_PENDING;
+	spin_unlock_irq(&sh_chan->desc_lock);
 }
 
 static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
@@ -831,6 +865,7 @@
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
 	enum dma_status status;
+	unsigned long flags;
 
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
@@ -841,7 +876,7 @@
 	BUG_ON(last_complete < 0);
 	dma_set_tx_state(txstate, last_complete, last_used, 0);
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, flags);
 
 	status = dma_async_is_complete(cookie, last_complete, last_used);
 
@@ -859,7 +894,7 @@
 			}
 	}
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
 	return status;
 }
@@ -912,6 +947,12 @@
 
 		list_splice_init(&sh_chan->ld_queue, &dl);
 
+		if (!list_empty(&dl)) {
+			dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+			pm_runtime_put(sh_chan->dev);
+		}
+		sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
 		spin_unlock(&sh_chan->desc_lock);
 
 		/* Complete all  */
@@ -952,7 +993,7 @@
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
 	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
 
-	spin_lock(&sh_chan->desc_lock);
+	spin_lock_irq(&sh_chan->desc_lock);
 	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
 		if (desc->mark == DESC_SUBMITTED &&
 		    ((desc->direction == DMA_FROM_DEVICE &&
@@ -965,10 +1006,10 @@
 			break;
 		}
 	}
-	spin_unlock(&sh_chan->desc_lock);
-
 	/* Next desc */
 	sh_chan_xfer_ld_queue(sh_chan);
+	spin_unlock_irq(&sh_chan->desc_lock);
+
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 }
 
@@ -1036,7 +1077,9 @@
 		return -ENOMEM;
 	}
 
-	/* copy struct dma_device */
+	new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
+	/* reference struct dma_device */
 	new_sh_chan->common.device = &shdev->common;
 
 	new_sh_chan->dev = shdev->common.dev;
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index dc56576..2b55a27 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -23,6 +23,12 @@
 
 struct device;
 
+enum dmae_pm_state {
+	DMAE_PM_ESTABLISHED,
+	DMAE_PM_BUSY,
+	DMAE_PM_PENDING,
+};
+
 struct sh_dmae_chan {
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
@@ -38,6 +44,7 @@
 	u32 __iomem *base;
 	char dev_id[16];		/* channel name, unique per DMAC */
 	int pm_error;
+	enum dmae_pm_state pm_state;
 };
 
 struct sh_dmae_device {
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index f69f90a6..a4a398f 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -753,7 +753,7 @@
 
 	INIT_LIST_HEAD(&td->dma.channels);
 
-	for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
+	for (i = 0; i < pdata->nr_channels; i++) {
 		struct timb_dma_chan *td_chan = &td->channels[i];
 		struct timb_dma_platform_data_channel *pchan =
 			pdata->channels + i;
@@ -762,12 +762,11 @@
 		if ((i % 2) == pchan->rx) {
 			dev_err(&pdev->dev, "Wrong channel configuration\n");
 			err = -EINVAL;
-			goto err_tasklet_kill;
+			goto err_free_irq;
 		}
 
 		td_chan->chan.device = &td->dma;
 		td_chan->chan.cookie = 1;
-		td_chan->chan.chan_id = i;
 		spin_lock_init(&td_chan->lock);
 		INIT_LIST_HEAD(&td_chan->active_list);
 		INIT_LIST_HEAD(&td_chan->queue);
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index f1b7f65..e229576 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -151,7 +151,8 @@
 		p += scnprintf(p, left, "\tbase_address: %x\n",
 			     info->params.interface_path.isa.base_address);
 	} else if (!strncmp(info->params.host_bus_type, "PCIX", 4) ||
-		   !strncmp(info->params.host_bus_type, "PCI", 3)) {
+		   !strncmp(info->params.host_bus_type, "PCI", 3) ||
+		   !strncmp(info->params.host_bus_type, "XPRS", 4)) {
 		p += scnprintf(p, left,
 			     "\t%02x:%02x.%d  channel: %u\n",
 			     info->params.interface_path.pci.bus,
@@ -159,7 +160,6 @@
 			     info->params.interface_path.pci.function,
 			     info->params.interface_path.pci.channel);
 	} else if (!strncmp(info->params.host_bus_type, "IBND", 4) ||
-		   !strncmp(info->params.host_bus_type, "XPRS", 4) ||
 		   !strncmp(info->params.host_bus_type, "HTPT", 4)) {
 		p += scnprintf(p, left,
 			     "\tTBD: %llx\n",
@@ -668,7 +668,7 @@
 {
 	struct edd_info *info = edd_dev_get_info(edev);
 
-	if (edd_dev_is_type(edev, "PCI")) {
+	if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) {
 		return pci_get_bus_and_slot(info->params.interface_path.pci.bus,
 				     PCI_DEVFN(info->params.interface_path.pci.slot,
 					       info->params.interface_path.pci.
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6b6616a..4720f68 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -192,9 +192,6 @@
 	/* forced geometry settings */
 	struct hd_geometry geometry;
 
-	/* For saving the address of __make_request for request based dm */
-	make_request_fn *saved_make_request_fn;
-
 	/* sysfs handle */
 	struct kobject kobj;
 
@@ -1403,7 +1400,7 @@
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static int _dm_request(struct request_queue *q, struct bio *bio)
+static void _dm_request(struct request_queue *q, struct bio *bio)
 {
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
@@ -1424,19 +1421,12 @@
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
-		return 0;
+		return;
 	}
 
 	__split_and_process_bio(md, bio);
 	up_read(&md->io_lock);
-	return 0;
-}
-
-static int dm_make_request(struct request_queue *q, struct bio *bio)
-{
-	struct mapped_device *md = q->queuedata;
-
-	return md->saved_make_request_fn(q, bio); /* call __make_request() */
+	return;
 }
 
 static int dm_request_based(struct mapped_device *md)
@@ -1444,14 +1434,14 @@
 	return blk_queue_stackable(md->queue);
 }
 
-static int dm_request(struct request_queue *q, struct bio *bio)
+static void dm_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
 
 	if (dm_request_based(md))
-		return dm_make_request(q, bio);
-
-	return _dm_request(q, bio);
+		blk_queue_bio(q, bio);
+	else
+		_dm_request(q, bio);
 }
 
 void dm_dispatch_request(struct request *rq)
@@ -2191,7 +2181,6 @@
 		return 0;
 
 	md->queue = q;
-	md->saved_make_request_fn = md->queue->make_request_fn;
 	dm_init_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
 	blk_queue_prep_rq(md->queue, dm_prep_fn);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 60816b1..918fb8a 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -169,7 +169,7 @@
 		conf->nfaults = n+1;
 }
 
-static int make_request(struct mddev *mddev, struct bio *bio)
+static void make_request(struct mddev *mddev, struct bio *bio)
 {
 	struct faulty_conf *conf = mddev->private;
 	int failit = 0;
@@ -181,7 +181,7 @@
 			 * just fail immediately
 			 */
 			bio_endio(bio, -EIO);
-			return 0;
+			return;
 		}
 
 		if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
@@ -211,15 +211,15 @@
 	}
 	if (failit) {
 		struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);
+
 		b->bi_bdev = conf->rdev->bdev;
 		b->bi_private = bio;
 		b->bi_end_io = faulty_fail;
-		generic_make_request(b);
-		return 0;
-	} else {
+		bio = b;
+	} else
 		bio->bi_bdev = conf->rdev->bdev;
-		return 1;
-	}
+
+	generic_make_request(bio);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 10c5844..a820358 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -264,14 +264,14 @@
 	return 0;
 }
 
-static int linear_make_request (struct mddev *mddev, struct bio *bio)
+static void linear_make_request(struct mddev *mddev, struct bio *bio)
 {
 	struct dev_info *tmp_dev;
 	sector_t start_sector;
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	rcu_read_lock();
@@ -293,7 +293,7 @@
 		       (unsigned long long)start_sector);
 		rcu_read_unlock();
 		bio_io_error(bio);
-		return 0;
+		return;
 	}
 	if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
 		     tmp_dev->end_sector)) {
@@ -307,20 +307,17 @@
 
 		bp = bio_split(bio, end_sector - bio->bi_sector);
 
-		if (linear_make_request(mddev, &bp->bio1))
-			generic_make_request(&bp->bio1);
-		if (linear_make_request(mddev, &bp->bio2))
-			generic_make_request(&bp->bio2);
+		linear_make_request(mddev, &bp->bio1);
+		linear_make_request(mddev, &bp->bio2);
 		bio_pair_release(bp);
-		return 0;
+		return;
 	}
 		    
 	bio->bi_bdev = tmp_dev->rdev->bdev;
 	bio->bi_sector = bio->bi_sector - start_sector
 		+ tmp_dev->rdev->data_offset;
 	rcu_read_unlock();
-
-	return 1;
+	generic_make_request(bio);
 }
 
 static void linear_status (struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 266e82e..2acb328 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -332,18 +332,17 @@
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
-static int md_make_request(struct request_queue *q, struct bio *bio)
+static void md_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	struct mddev *mddev = q->queuedata;
-	int rv;
 	int cpu;
 	unsigned int sectors;
 
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
 		bio_io_error(bio);
-		return 0;
+		return;
 	}
 	smp_rmb(); /* Ensure implications of  'active' are visible */
 	rcu_read_lock();
@@ -368,7 +367,7 @@
 	 * go away inside make_request
 	 */
 	sectors = bio_sectors(bio);
-	rv = mddev->pers->make_request(mddev, bio);
+	mddev->pers->make_request(mddev, bio);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
@@ -377,8 +376,6 @@
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
 		wake_up(&mddev->sb_wait);
-
-	return rv;
 }
 
 /* mddev_suspend makes sure no new requests are submitted
@@ -477,8 +474,7 @@
 		bio_endio(bio, 0);
 	else {
 		bio->bi_rw &= ~REQ_FLUSH;
-		if (mddev->pers->make_request(mddev, bio))
-			generic_make_request(bio);
+		mddev->pers->make_request(mddev, bio);
 	}
 
 	mddev->flush_bio = NULL;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 51c1d91..cf742d9 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -419,7 +419,7 @@
 	int level;
 	struct list_head list;
 	struct module *owner;
-	int (*make_request)(struct mddev *mddev, struct bio *bio);
+	void (*make_request)(struct mddev *mddev, struct bio *bio);
 	int (*run)(struct mddev *mddev);
 	int (*stop)(struct mddev *mddev);
 	void (*status)(struct seq_file *seq, struct mddev *mddev);
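
With the md_personality method returning void, each personality now submits
its remapped or split bios itself via generic_make_request() instead of
returning 1 to ask the caller to do so, as the surrounding linear, raid0 and
raid10 hunks illustrate. A schematic of the new contract; demo_pick_rdev() is
a hypothetical stand-in for the personality's mapping logic:

	#include <linux/bio.h>
	#include <linux/blkdev.h>
	#include "md.h"		/* struct mddev, md_flush_request() */

	extern struct block_device *demo_pick_rdev(struct mddev *mddev,
						   struct bio *bio);

	static void demo_make_request(struct mddev *mddev, struct bio *bio)
	{
		if (unlikely(bio->bi_rw & REQ_FLUSH)) {
			md_flush_request(mddev, bio);	/* core handles it */
			return;
		}

		bio->bi_bdev = demo_pick_rdev(mddev, bio);
		generic_make_request(bio);	/* submit it ourselves */
	}
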
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index d32c785..ad20a28 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,7 +106,7 @@
 	rdev_dec_pending(rdev, conf->mddev);
 }
 
-static int multipath_make_request(struct mddev *mddev, struct bio * bio)
+static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 {
 	struct mpconf *conf = mddev->private;
 	struct multipath_bh * mp_bh;
@@ -114,7 +114,7 @@
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
@@ -126,7 +126,7 @@
 	if (mp_bh->path < 0) {
 		bio_endio(bio, -EIO);
 		mempool_free(mp_bh, conf->pool);
-		return 0;
+		return;
 	}
 	multipath = conf->multipaths + mp_bh->path;
 
@@ -137,7 +137,7 @@
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
-	return 0;
+	return;
 }
 
 static void multipath_status (struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 0eb08a4..27e19e2 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -468,7 +468,7 @@
 	}
 }
 
-static int raid0_make_request(struct mddev *mddev, struct bio *bio)
+static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
 	unsigned int chunk_sects;
 	sector_t sector_offset;
@@ -477,7 +477,7 @@
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	chunk_sects = mddev->chunk_sectors;
@@ -497,13 +497,10 @@
 		else
 			bp = bio_split(bio, chunk_sects -
 				       sector_div(sector, chunk_sects));
-		if (raid0_make_request(mddev, &bp->bio1))
-			generic_make_request(&bp->bio1);
-		if (raid0_make_request(mddev, &bp->bio2))
-			generic_make_request(&bp->bio2);
-
+		raid0_make_request(mddev, &bp->bio1);
+		raid0_make_request(mddev, &bp->bio2);
 		bio_pair_release(bp);
-		return 0;
+		return;
 	}
 
 	sector_offset = bio->bi_sector;
@@ -513,10 +510,9 @@
 	bio->bi_bdev = tmp_dev->bdev;
 	bio->bi_sector = sector_offset + zone->dev_start +
 		tmp_dev->data_offset;
-	/*
-	 * Let the main block layer submit the IO and resolve recursion:
-	 */
-	return 1;
+
+	generic_make_request(bio);
+	return;
 
 bad_map:
 	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
@@ -525,7 +521,7 @@
 	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
 	bio_io_error(bio);
-	return 0;
+	return;
 }
 
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4602fc5..cae8746 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -807,7 +807,7 @@
 	pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 }
 
-static int make_request(struct mddev *mddev, struct bio * bio)
+static void make_request(struct mddev *mddev, struct bio * bio)
 {
 	struct r1conf *conf = mddev->private;
 	struct mirror_info *mirror;
@@ -892,7 +892,7 @@
 		if (rdisk < 0) {
 			/* couldn't find anywhere to read from */
 			raid_end_bio_io(r1_bio);
-			return 0;
+			return;
 		}
 		mirror = conf->mirrors + rdisk;
 
@@ -950,7 +950,7 @@
 			goto read_again;
 		} else
 			generic_make_request(read_bio);
-		return 0;
+		return;
 	}
 
 	/*
@@ -1151,8 +1151,6 @@
 
 	if (do_sync || !bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
-
-	return 0;
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2193,7 +2191,6 @@
 		bio->bi_next = NULL;
 		bio->bi_flags &= ~(BIO_POOL_MASK-1);
 		bio->bi_flags |= 1 << BIO_UPTODATE;
-		bio->bi_comp_cpu = -1;
 		bio->bi_rw = READ;
 		bio->bi_vcnt = 0;
 		bio->bi_idx = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c025a82..dde6dd4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -842,7 +842,7 @@
 	spin_unlock_irq(&conf->resync_lock);
 }
 
-static int make_request(struct mddev *mddev, struct bio * bio)
+static void make_request(struct mddev *mddev, struct bio * bio)
 {
 	struct r10conf *conf = mddev->private;
 	struct mirror_info *mirror;
@@ -861,7 +861,7 @@
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	/* If this request crosses a chunk boundary, we need to
@@ -893,10 +893,8 @@
 		conf->nr_waiting++;
 		spin_unlock_irq(&conf->resync_lock);
 
-		if (make_request(mddev, &bp->bio1))
-			generic_make_request(&bp->bio1);
-		if (make_request(mddev, &bp->bio2))
-			generic_make_request(&bp->bio2);
+		make_request(mddev, &bp->bio1);
+		make_request(mddev, &bp->bio2);
 
 		spin_lock_irq(&conf->resync_lock);
 		conf->nr_waiting--;
@@ -904,14 +902,14 @@
 		spin_unlock_irq(&conf->resync_lock);
 
 		bio_pair_release(bp);
-		return 0;
+		return;
 	bad_map:
 		printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 		       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
 		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
 		bio_io_error(bio);
-		return 0;
+		return;
 	}
 
 	md_write_start(mddev, bio);
@@ -954,7 +952,7 @@
 		slot = r10_bio->read_slot;
 		if (disk < 0) {
 			raid_end_bio_io(r10_bio);
-			return 0;
+			return;
 		}
 		mirror = conf->mirrors + disk;
 
@@ -1002,7 +1000,7 @@
 			goto read_again;
 		} else
 			generic_make_request(read_bio);
-		return 0;
+		return;
 	}
 
 	/*
@@ -1176,7 +1174,6 @@
 
 	if (do_sync || !mddev->bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
-	return 0;
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f6fe053..bb1b461 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3688,7 +3688,7 @@
 	return sh;
 }
 
-static int make_request(struct mddev *mddev, struct bio * bi)
+static void make_request(struct mddev *mddev, struct bio * bi)
 {
 	struct r5conf *conf = mddev->private;
 	int dd_idx;
@@ -3701,7 +3701,7 @@
 
 	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bi);
-		return 0;
+		return;
 	}
 
 	md_write_start(mddev, bi);
@@ -3709,7 +3709,7 @@
 	if (rw == READ &&
 	     mddev->reshape_position == MaxSector &&
 	     chunk_aligned_read(mddev,bi))
-		return 0;
+		return;
 
 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -3844,8 +3844,6 @@
 
 		bio_endio(bi, 0);
 	}
-
-	return 0;
 }
 
 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index d2856b6..720f993 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -913,9 +913,9 @@
 }
 
 static void s3cmci_dma_setup(struct s3cmci_host *host,
-			     enum s3c2410_dmasrc source)
+			     enum dma_data_direction source)
 {
-	static enum s3c2410_dmasrc last_source = -1;
+	static enum dma_data_direction last_source = -1;
 	static int setup_ok;
 
 	if (last_source == source)
@@ -1087,7 +1087,7 @@
 
 	BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
 
-	s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW);
+	s3cmci_dma_setup(host, rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
 
 	dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 9b43ae9..a5a55da2 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -27,7 +27,7 @@
 
 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
 static int dcssblk_release(struct gendisk *disk, fmode_t mode);
-static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
+static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
 static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
 				 void **kaddr, unsigned long *pfn);
 
@@ -814,7 +814,7 @@
 	return rc;
 }
 
-static int
+static void
 dcssblk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct dcssblk_dev_info *dev_info;
@@ -871,10 +871,9 @@
 		bytes_done += bvec->bv_len;
 	}
 	bio_endio(bio, 0);
-	return 0;
+	return;
 fail:
 	bio_io_error(bio);
-	return 0;
 }
 
 static int
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 1f6a4d8..98f3e4a 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -181,7 +181,7 @@
 /*
  * Block device make request function.
  */
-static int xpram_make_request(struct request_queue *q, struct bio *bio)
+static void xpram_make_request(struct request_queue *q, struct bio *bio)
 {
 	xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
 	struct bio_vec *bvec;
@@ -221,10 +221,9 @@
 	}
 	set_bit(BIO_UPTODATE, &bio->bi_flags);
 	bio_endio(bio, 0);
-	return 0;
+	return;
 fail:
 	bio_io_error(bio);
-	return 0;
 }
 
 static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 63de1c7..049ea90 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.8"
+#define BNX2FC_VERSION		"1.0.9"
 
 #define PFX			"bnx2fc: "
 
@@ -145,6 +145,9 @@
 #define REC_RETRY_COUNT			1
 #define BNX2FC_NUM_ERR_BITS		63
 
+#define BNX2FC_RELOGIN_WAIT_TIME	200
+#define BNX2FC_RELOGIN_WAIT_CNT		10
+
 /* bnx2fc driver uses only one instance of fcoe_percpu_s */
 extern struct fcoe_percpu_s bnx2fc_global;
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index fd382fe..ce0ce3e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -268,17 +268,6 @@
 
 	orig_io_req = cb_arg->aborted_io_req;
 	srr_req = cb_arg->io_req;
-	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
-		BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
-			orig_io_req->xid);
-		goto srr_compl_done;
-	}
-	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
-		BNX2FC_IO_DBG(srr_req, "rec abts in prog "
-		       "orig_io - 0x%x\n",
-			orig_io_req->xid);
-		goto srr_compl_done;
-	}
 	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
 		/* SRR timed out */
 		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
@@ -290,6 +279,12 @@
 				"failed. issue cleanup\n");
 			bnx2fc_initiate_cleanup(srr_req);
 		}
+		if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
+		    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+			BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
+				      orig_io_req->xid, orig_io_req->req_flags);
+			goto srr_compl_done;
+		}
 		orig_io_req->srr_retry++;
 		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
 			struct bnx2fc_rport *tgt = orig_io_req->tgt;
@@ -311,6 +306,12 @@
 		}
 		goto srr_compl_done;
 	}
+	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
+	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+		BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
+			      orig_io_req->xid, orig_io_req->req_flags);
+		goto srr_compl_done;
+	}
 	mp_req = &(srr_req->mp_req);
 	fc_hdr = &(mp_req->resp_fc_hdr);
 	resp_len = mp_req->resp_len;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 85bcc4b..8c6156a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Oct 02, 2011"
+#define DRV_MODULE_RELDATE	"Oct 21, 2011"
 
 
 static char version[] __devinitdata =
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 0c64d18..84a78af 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1103,7 +1103,10 @@
 	struct fc_rport_libfc_priv *rp = rport->dd_data;
 	struct bnx2fc_cmd *io_req;
 	struct fc_lport *lport;
+	struct fc_rport_priv *rdata;
 	struct bnx2fc_rport *tgt;
+	int logo_issued;
+	int wait_cnt = 0;
 	int rc = FAILED;
 
 
@@ -1192,8 +1195,40 @@
 	} else {
 		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
 				"already in abts processing\n", io_req->xid);
+		if (cancel_delayed_work(&io_req->timeout_work))
+			kref_put(&io_req->refcount,
+				 bnx2fc_cmd_release); /* drop timer hold */
+		bnx2fc_initiate_cleanup(io_req);
+
+		spin_unlock_bh(&tgt->tgt_lock);
+
+		wait_for_completion(&io_req->tm_done);
+
+		spin_lock_bh(&tgt->tgt_lock);
+		io_req->wait_for_comp = 0;
+		rdata = io_req->tgt->rdata;
+		logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+					       &tgt->flags);
 		kref_put(&io_req->refcount, bnx2fc_cmd_release);
 		spin_unlock_bh(&tgt->tgt_lock);
+
+		if (!logo_issued) {
+			BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+				      tgt->flags);
+			mutex_lock(&lport->disc.disc_mutex);
+			lport->tt.rport_logoff(rdata);
+			mutex_unlock(&lport->disc.disc_mutex);
+			do {
+				msleep(BNX2FC_RELOGIN_WAIT_TIME);
+				/*
+				 * If session not recovered, let SCSI-ml
+				 * escalate error recovery.
+				 */
+				if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
+					return FAILED;
+			} while (!test_bit(BNX2FC_FLAG_SESSION_READY,
+					   &tgt->flags));
+		}
 		return SUCCESS;
 	}
 	if (rc == FAILED) {
@@ -1275,6 +1310,8 @@
 		   io_req->refcount.refcount.counter, io_req->cmd_type);
 	bnx2fc_scsi_done(io_req, DID_ERROR);
 	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	if (io_req->wait_for_comp)
+		complete(&io_req->tm_done);
 }
 
 void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
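
The eh_abort change above logs the rport out once (guarded by the BNX2FC_FLAG_EXPL_LOGO test-and-set) and then polls for the session to recover, bounded by BNX2FC_RELOGIN_WAIT_TIME (200 ms per step) and BNX2FC_RELOGIN_WAIT_CNT (10 steps), i.e. roughly two seconds before SCSI-ml error recovery is allowed to escalate. A condensed sketch of that bounded wait (example_wait_for_relogin is hypothetical; the constants and flag come from the hunks):

	/* Sketch only: bounded polling wait for session recovery. */
	static int example_wait_for_relogin(struct bnx2fc_rport *tgt)
	{
		int wait_cnt = 0;

		do {
			msleep(BNX2FC_RELOGIN_WAIT_TIME);
			/* Not recovered in time: let SCSI-ml escalate. */
			if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
				return FAILED;
		} while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));

		return SUCCESS;
	}
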
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 7c05fd9..339ea23 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -441,7 +441,15 @@
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	sdev = q->queuedata;
-	if (sdev && sdev->scsi_dh_data)
+	if (!sdev) {
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		err = SCSI_DH_NOSYS;
+		if (fn)
+			fn(data, err);
+		return err;
+	}
+
+	if (sdev->scsi_dh_data)
 		scsi_dh = sdev->scsi_dh_data->scsi_dh;
 	dev = get_device(&sdev->sdev_gendev);
 	if (!scsi_dh || !dev ||
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 627f4b5..fe4df2d 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -507,7 +507,7 @@
 	int len, k, off, valid_states = 0;
 	unsigned char *ucp;
 	unsigned err;
-	unsigned long expiry, interval = 1;
+	unsigned long expiry, interval = 1000;
 
 	expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
  retry:
@@ -734,6 +734,7 @@
 	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
 	sdev->scsi_dh_data = scsi_dh_data;
 	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+	sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME);
 
 	return 0;
 
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 61384ee4..cefbe44 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2347,14 +2347,11 @@
 		goto done;
 
 	mac = fr_cb(fp)->granted_mac;
-	if (is_zero_ether_addr(mac)) {
-		/* pre-FIP */
-		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
-			fc_frame_free(fp);
-			return;
-		}
-	}
-	fcoe_update_src_mac(lport, mac);
+	/* pre-FIP */
+	if (is_zero_ether_addr(mac))
+		fcoe_ctlr_recv_flogi(fip, lport, fp);
+	if (!is_zero_ether_addr(mac))
+		fcoe_update_src_mac(lport, mac);
 done:
 	fc_lport_flogi_resp(seq, fp, lport);
 }
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 4f7a582..351dc0b 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -286,6 +286,7 @@
 {
 	struct Scsi_Host *shost = dev_to_shost(dev);
 	struct device *parent = dev->parent;
+	struct request_queue *q;
 
 	scsi_proc_hostdir_rm(shost->hostt);
 
@@ -293,9 +294,11 @@
 		kthread_stop(shost->ehandler);
 	if (shost->work_q)
 		destroy_workqueue(shost->work_q);
-	if (shost->uspace_req_q) {
-		kfree(shost->uspace_req_q->queuedata);
-		scsi_free_queue(shost->uspace_req_q);
+	q = shost->uspace_req_q;
+	if (q) {
+		kfree(q->queuedata);
+		q->queuedata = NULL;
+		scsi_free_queue(q);
 	}
 
 	scsi_destroy_command_freelist(shost);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 9825ecf..e76107b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -48,6 +48,7 @@
 #include <linux/bitmap.h>
 #include <linux/atomic.h>
 #include <linux/kthread.h>
+#include <linux/jiffies.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"
 
@@ -127,6 +128,10 @@
 
 static int number_of_controllers;
 
+static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
+static spinlock_t lockup_detector_lock;
+static struct task_struct *hpsa_lockup_detector;
+
 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
@@ -484,6 +489,7 @@
 #endif
 	.sdev_attrs = hpsa_sdev_attrs,
 	.shost_attrs = hpsa_shost_attrs,
+	.max_sectors = 8192,
 };
 
 
@@ -566,16 +572,16 @@
 	 * assumes h->devlock is held
 	 */
 	int i, found = 0;
-	DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
+	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
 
-	memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
+	bitmap_zero(lun_taken, HPSA_MAX_DEVICES); /* ">> 3" would truncate 2081 bits */
 
 	for (i = 0; i < h->ndevices; i++) {
 		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
 			set_bit(h->dev[i]->target, lun_taken);
 	}
 
-	for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
+	for (i = 0; i < HPSA_MAX_DEVICES; i++) {
 		if (!test_bit(i, lun_taken)) {
 			/* *bus = 1; */
 			*target = i;
@@ -598,7 +604,7 @@
 	unsigned char addr1[8], addr2[8];
 	struct hpsa_scsi_dev_t *sd;
 
-	if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
+	if (n >= HPSA_MAX_DEVICES) {
 		dev_err(&h->pdev->dev, "too many devices, some will be "
 			"inaccessible.\n");
 		return -1;
@@ -673,7 +679,7 @@
 	struct hpsa_scsi_dev_t *removed[], int *nremoved)
 {
 	/* assumes h->devlock is held */
-	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
+	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
 	removed[*nremoved] = h->dev[entry];
 	(*nremoved)++;
 
@@ -702,7 +708,7 @@
 	int i;
 	struct hpsa_scsi_dev_t *sd;
 
-	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
+	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
 
 	sd = h->dev[entry];
 	removed[*nremoved] = h->dev[entry];
@@ -814,10 +820,8 @@
 	int nadded, nremoved;
 	struct Scsi_Host *sh = NULL;
 
-	added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
-		GFP_KERNEL);
-	removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
-		GFP_KERNEL);
+	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
+	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
 
 	if (!added || !removed) {
 		dev_warn(&h->pdev->dev, "out of memory in "
@@ -1338,6 +1342,22 @@
 	wait_for_completion(&wait);
 }
 
+static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	unsigned long flags;
+
+	/* If controller lockup detected, fake a hardware error. */
+	spin_lock_irqsave(&h->lock, flags);
+	if (unlikely(h->lockup_detected)) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+	} else {
+		spin_unlock_irqrestore(&h->lock, flags);
+		hpsa_scsi_do_simple_cmd_core(h, c);
+	}
+}
+
 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
 	struct CommandList *c, int data_direction)
 {
@@ -1735,7 +1755,6 @@
 	if (is_scsi_rev_5(h))
 		return 0; /* p1210m doesn't need to do this. */
 
-#define MAX_MSA2XXX_ENCLOSURES 32
 	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
 		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
 			"enclosures exceeded.  Check your hardware "
@@ -1846,8 +1865,7 @@
 	int raid_ctlr_position;
 	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
 
-	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
-		GFP_KERNEL);
+	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
 	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
 	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
 	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
@@ -1870,6 +1888,13 @@
 
 	/* Allocate the per device structures */
 	for (i = 0; i < ndevs_to_allocate; i++) {
+		if (i >= HPSA_MAX_DEVICES) {
+			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
+				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
+				ndevs_to_allocate - HPSA_MAX_DEVICES);
+			break;
+		}
+
 		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
 		if (!currentsd[i]) {
 			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
@@ -1956,7 +1981,7 @@
 		default:
 			break;
 		}
-		if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
+		if (ncurrent >= HPSA_MAX_DEVICES)
 			break;
 	}
 	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
@@ -2048,8 +2073,14 @@
 	}
 	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
 
-	/* Need a lock as this is being allocated from the pool */
 	spin_lock_irqsave(&h->lock, flags);
+	if (unlikely(h->lockup_detected)) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		cmd->result = DID_ERROR << 16;
+		done(cmd);
+		return 0;
+	}
+	/* Need a lock as this is being allocated from the pool */
 	c = cmd_alloc(h);
 	spin_unlock_irqrestore(&h->lock, flags);
 	if (c == NULL) {			/* trouble... */
@@ -2601,7 +2632,7 @@
 		c->SG[0].Len = iocommand.buf_size;
 		c->SG[0].Ext = 0; /* we are not chaining*/
 	}
-	hpsa_scsi_do_simple_cmd_core(h, c);
+	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
 	if (iocommand.buf_size > 0)
 		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
@@ -2724,7 +2755,7 @@
 			c->SG[i].Ext = 0;
 		}
 	}
-	hpsa_scsi_do_simple_cmd_core(h, c);
+	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
 	if (sg_used)
 		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
@@ -2872,6 +2903,8 @@
 			c->Request.Timeout = 0;
 			c->Request.CDB[0] = BMIC_WRITE;
 			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+			c->Request.CDB[7] = (size >> 8) & 0xFF;
+			c->Request.CDB[8] = size & 0xFF;
 			break;
 		case TEST_UNIT_READY:
 			c->Request.CDBLen = 6;
@@ -3091,6 +3124,7 @@
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
 	spin_lock_irqsave(&h->lock, flags);
+	h->last_intr_timestamp = get_jiffies_64();
 	while (interrupt_pending(h)) {
 		raw_tag = get_next_completion(h);
 		while (raw_tag != FIFO_EMPTY)
@@ -3110,6 +3144,7 @@
 		return IRQ_NONE;
 
 	spin_lock_irqsave(&h->lock, flags);
+	h->last_intr_timestamp = get_jiffies_64();
 	raw_tag = get_next_completion(h);
 	while (raw_tag != FIFO_EMPTY)
 		raw_tag = next_command(h);
@@ -3126,6 +3161,7 @@
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
 	spin_lock_irqsave(&h->lock, flags);
+	h->last_intr_timestamp = get_jiffies_64();
 	while (interrupt_pending(h)) {
 		raw_tag = get_next_completion(h);
 		while (raw_tag != FIFO_EMPTY) {
@@ -3146,6 +3182,7 @@
 	u32 raw_tag;
 
 	spin_lock_irqsave(&h->lock, flags);
+	h->last_intr_timestamp = get_jiffies_64();
 	raw_tag = get_next_completion(h);
 	while (raw_tag != FIFO_EMPTY) {
 		if (hpsa_tag_contains_index(raw_tag))
@@ -3300,6 +3337,13 @@
 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 		pmcsr |= PCI_D0;
 		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+		/*
+		 * The P600 requires a small delay when changing states.
+		 * Otherwise we may think the board did not reset and we bail.
+		 * This is for kdump only and is particular to the P600.
+		 */
+		msleep(500);
 	}
 	return 0;
 }
@@ -4083,6 +4127,149 @@
 	kfree(h);
 }
 
+static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
+{
+	assert_spin_locked(&lockup_detector_lock);
+	if (!hpsa_lockup_detector)
+		return;
+	if (h->lockup_detected)
+		return; /* already stopped the lockup detector */
+	list_del(&h->lockup_list);
+}
+
+/* Called when controller lockup detected. */
+static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
+{
+	struct CommandList *c = NULL;
+
+	assert_spin_locked(&h->lock);
+	/* Mark all outstanding commands as failed and complete them. */
+	while (!list_empty(list)) {
+		c = list_entry(list->next, struct CommandList, list);
+		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+		finish_cmd(c, c->Header.Tag.lower);
+	}
+}
+
+static void controller_lockup_detected(struct ctlr_info *h)
+{
+	unsigned long flags;
+
+	assert_spin_locked(&lockup_detector_lock);
+	remove_ctlr_from_lockup_detector_list(h);
+	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+	spin_lock_irqsave(&h->lock, flags);
+	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+	spin_unlock_irqrestore(&h->lock, flags);
+	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
+			h->lockup_detected);
+	pci_disable_device(h->pdev);
+	spin_lock_irqsave(&h->lock, flags);
+	fail_all_cmds_on_list(h, &h->cmpQ);
+	fail_all_cmds_on_list(h, &h->reqQ);
+	spin_unlock_irqrestore(&h->lock, flags);
+}
+
+#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
+#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
+
+static void detect_controller_lockup(struct ctlr_info *h)
+{
+	u64 now;
+	u32 heartbeat;
+	unsigned long flags;
+
+	assert_spin_locked(&lockup_detector_lock);
+	now = get_jiffies_64();
+	/* If we've received an interrupt recently, we're ok. */
+	if (time_after64(h->last_intr_timestamp +
+				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+		return;
+
+	/*
+	 * If we've already checked the heartbeat recently, we're ok.
+	 * This could happen if someone sends us a signal. We
+	 * otherwise don't care about signals in this thread.
+	 */
+	if (time_after64(h->last_heartbeat_timestamp +
+				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+		return;
+
+	/* If heartbeat has not changed since we last looked, we're not ok. */
+	spin_lock_irqsave(&h->lock, flags);
+	heartbeat = readl(&h->cfgtable->HeartBeat);
+	spin_unlock_irqrestore(&h->lock, flags);
+	if (h->last_heartbeat == heartbeat) {
+		controller_lockup_detected(h);
+		return;
+	}
+
+	/* We're ok. */
+	h->last_heartbeat = heartbeat;
+	h->last_heartbeat_timestamp = now;
+}
+
+static int detect_controller_lockup_thread(void *notused)
+{
+	struct ctlr_info *h;
+	unsigned long flags;
+
+	while (1) {
+		struct list_head *this, *tmp;
+
+		schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
+		if (kthread_should_stop())
+			break;
+		spin_lock_irqsave(&lockup_detector_lock, flags);
+		list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
+			h = list_entry(this, struct ctlr_info, lockup_list);
+			detect_controller_lockup(h);
+		}
+		spin_unlock_irqrestore(&lockup_detector_lock, flags);
+	}
+	return 0;
+}
+
+static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&lockup_detector_lock, flags);
+	list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
+	spin_unlock_irqrestore(&lockup_detector_lock, flags);
+}
+
+static void start_controller_lockup_detector(struct ctlr_info *h)
+{
+	/* Start the lockup detector thread if not already started */
+	if (!hpsa_lockup_detector) {
+		spin_lock_init(&lockup_detector_lock);
+		hpsa_lockup_detector =
+			kthread_run(detect_controller_lockup_thread,
+						NULL, "hpsa");
+	}
+	if (!hpsa_lockup_detector) {
+		dev_warn(&h->pdev->dev,
+			"Could not start lockup detector thread\n");
+		return;
+	}
+	add_ctlr_to_lockup_detector_list(h);
+}
+
+static void stop_controller_lockup_detector(struct ctlr_info *h)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&lockup_detector_lock, flags);
+	remove_ctlr_from_lockup_detector_list(h);
+	/* If the list of ctlr's to monitor is empty, stop the thread */
+	if (list_empty(&hpsa_ctlr_list)) {
+		kthread_stop(hpsa_lockup_detector);
+		hpsa_lockup_detector = NULL;
+	}
+	spin_unlock_irqrestore(&lockup_detector_lock, flags);
+}
+
 static int __devinit hpsa_init_one(struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
 {
@@ -4120,7 +4307,6 @@
 		return -ENOMEM;
 
 	h->pdev = pdev;
-	h->busy_initializing = 1;
 	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
 	INIT_LIST_HEAD(&h->cmpQ);
 	INIT_LIST_HEAD(&h->reqQ);
@@ -4229,7 +4415,7 @@
 
 	hpsa_hba_inquiry(h);
 	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
-	h->busy_initializing = 0;
+	start_controller_lockup_detector(h);
 	return 1;
 
 clean4:
@@ -4238,7 +4424,6 @@
 	free_irq(h->intr[h->intr_mode], h);
 clean2:
 clean1:
-	h->busy_initializing = 0;
 	kfree(h);
 	return rc;
 }
@@ -4293,10 +4478,11 @@
 	struct ctlr_info *h;
 
 	if (pci_get_drvdata(pdev) == NULL) {
-		dev_err(&pdev->dev, "unable to remove device \n");
+		dev_err(&pdev->dev, "unable to remove device\n");
 		return;
 	}
 	h = pci_get_drvdata(pdev);
+	stop_controller_lockup_detector(h);
 	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
 	hpsa_shutdown(pdev);
 	iounmap(h->vaddr);
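
The new hpsa lockup detector is one kthread shared by all controllers: it wakes every HEARTBEAT_SAMPLE_INTERVAL (10 s), skips any controller that has taken an interrupt or been sampled within the last half interval, and declares a lockup when the firmware HeartBeat counter stops advancing, at which point interrupts are masked and every queued command is completed with CMD_HARDWARE_ERR. A condensed sketch of the per-controller decision (example_check_lockup is hypothetical; fields and helpers follow the hunks):

	/* Sketch only: heartbeat-based lockup check for one controller. */
	static void example_check_lockup(struct ctlr_info *h)
	{
		u64 now = get_jiffies_64();
		unsigned long flags;
		u32 heartbeat;

		/* Recent interrupt or recent sample: assume the board is alive. */
		if (time_after64(h->last_intr_timestamp +
				 HEARTBEAT_CHECK_MINIMUM_INTERVAL, now))
			return;
		if (time_after64(h->last_heartbeat_timestamp +
				 HEARTBEAT_CHECK_MINIMUM_INTERVAL, now))
			return;

		spin_lock_irqsave(&h->lock, flags);
		heartbeat = readl(&h->cfgtable->HeartBeat);
		spin_unlock_irqrestore(&h->lock, flags);

		if (h->last_heartbeat == heartbeat) {
			controller_lockup_detected(h);	/* fail all queued cmds */
			return;
		}
		h->last_heartbeat = heartbeat;
		h->last_heartbeat_timestamp = now;
	}
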
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 7f53ceaa..91edafb 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -95,8 +95,6 @@
 	unsigned long  		*cmd_pool_bits;
 	int			nr_allocs;
 	int			nr_frees;
-	int			busy_initializing;
-	int			busy_scanning;
 	int			scan_finished;
 	spinlock_t		scan_lock;
 	wait_queue_head_t	scan_wait_queue;
@@ -104,8 +102,7 @@
 	struct Scsi_Host *scsi_host;
 	spinlock_t devlock; /* to protect hba[ctlr]->dev[];  */
 	int ndevices; /* number of used elements in .dev[] array. */
-#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
-	struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
+	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
 	/*
 	 * Performant mode tables.
 	 */
@@ -124,6 +121,11 @@
 	unsigned char reply_pool_wraparound;
 	u32 *blockFetchTable;
 	unsigned char *hba_inquiry_data;
+	u64 last_intr_timestamp;
+	u32 last_heartbeat;
+	u64 last_heartbeat_timestamp;
+	u32 lockup_detected;
+	struct list_head lockup_list;
 };
 #define HPSA_ABORT_MSG 0
 #define HPSA_DEVICE_RESET_MSG 1
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 55d741b..3fd4715 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -123,8 +123,11 @@
 
 /* FIXME this is a per controller value (barf!) */
 #define HPSA_MAX_TARGETS_PER_CTLR 16
-#define HPSA_MAX_LUN 256
+#define HPSA_MAX_LUN 1024
 #define HPSA_MAX_PHYS_LUN 1024
+#define MAX_MSA2XXX_ENCLOSURES 32
+#define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \
+	MAX_MSA2XXX_ENCLOSURES + 1) /* + 1 is for the controller itself */
 
 /* SCSI-3 Commands */
 #pragma pack(1)
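
The new HPSA_MAX_DEVICES bound replaces the fixed 256-slot HPSA_MAX_SCSI_DEVS_PER_HBA used throughout hpsa.c; with the values above it works out to 1024 + 1024 + 32 + 1 = 2081 entries (logical LUNs, physical LUNs, MSA2xxx enclosures, plus the controller itself). A trivial restatement of that arithmetic (the EXAMPLE_* name is illustrative only):

	/* Sketch only: the derived device bound spelled out. */
	#define EXAMPLE_MAX_DEVICES (1024 /* logical */ + 1024 /* physical */ + \
				     32 /* MSA2xxx enclosures */ + 1 /* ctlr */)
	/* EXAMPLE_MAX_DEVICES == 2081, the new size of h->dev[]. */
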
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 73e24b4..fd860d9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9123,6 +9123,8 @@
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 6d257e0..ac84736 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -82,6 +82,7 @@
 
 #define IPR_SUBS_DEV_ID_57B4    0x033B
 #define IPR_SUBS_DEV_ID_57B2    0x035F
+#define IPR_SUBS_DEV_ID_57C3    0x0353
 #define IPR_SUBS_DEV_ID_57C4    0x0354
 #define IPR_SUBS_DEV_ID_57C6    0x0357
 #define IPR_SUBS_DEV_ID_57CC    0x035C
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index f07f30f..e7fe9c4 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -1350,7 +1350,7 @@
 	u->stp_max_occupancy_timeout = stp_max_occ_to;
 	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
 	u->no_outbound_task_timeout = no_outbound_task_to;
-	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
+	u->max_concurr_spinup = max_concurr_spinup;
 }
 
 static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
@@ -1661,7 +1661,7 @@
 	ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
 
 	/* Default to APC mode. */
-	ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
+	ihost->oem_parameters.controller.max_concurr_spin_up = 1;
 
 	/* Default to no SSC operation. */
 	ihost->oem_parameters.controller.do_enable_ssc = false;
@@ -1787,7 +1787,8 @@
 	} else
 		return -EINVAL;
 
-	if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
+	if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
+	    oem->controller.max_concurr_spin_up < 1)
 		return -EINVAL;
 
 	return 0;
@@ -1810,6 +1811,16 @@
 	return SCI_FAILURE_INVALID_STATE;
 }
 
+static u8 max_spin_up(struct isci_host *ihost)
+{
+	if (ihost->user_parameters.max_concurr_spinup)
+		return min_t(u8, ihost->user_parameters.max_concurr_spinup,
+			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+	else
+		return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
+			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+}
+
 static void power_control_timeout(unsigned long data)
 {
 	struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1839,8 +1850,7 @@
 		if (iphy == NULL)
 			continue;
 
-		if (ihost->power_control.phys_granted_power >=
-		    ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
+		if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
 			break;
 
 		ihost->power_control.requesters[i] = NULL;
@@ -1865,8 +1875,7 @@
 {
 	BUG_ON(iphy == NULL);
 
-	if (ihost->power_control.phys_granted_power <
-	    ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
+	if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
 		ihost->power_control.phys_granted_power++;
 		sci_phy_consume_power_handler(iphy);
 
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 43fe840..a97edab 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -118,7 +118,7 @@
 module_param(phy_gen, byte, 0);
 MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
 
-unsigned char max_concurr_spinup = 1;
+unsigned char max_concurr_spinup;
 module_param(max_concurr_spinup, byte, 0);
 MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
 
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 8e59c88..ac7f277 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -145,48 +145,15 @@
 	}
 }
 
-/* called under sci_lock to stabilize phy:port associations */
-void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
-{
-	int i;
-
-	clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
-	wake_up(&ihost->eventq);
-
-	if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
-		return;
-
-	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
-		struct isci_phy *iphy = iport->phy_table[i];
-
-		if (!iphy)
-			continue;
-
-		ihost->sas_ha.notify_port_event(&iphy->sas_phy,
-						PORTE_BROADCAST_RCVD);
-		break;
-	}
-}
-
 static void isci_port_bc_change_received(struct isci_host *ihost,
 					 struct isci_port *iport,
 					 struct isci_phy *iphy)
 {
-	if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
-		dev_dbg(&ihost->pdev->dev,
-			"%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
-			__func__, iphy, &iphy->sas_phy);
-		set_bit(IPORT_BCN_PENDING, &iport->flags);
-		atomic_inc(&iport->event);
-		wake_up(&ihost->eventq);
-	} else {
-		dev_dbg(&ihost->pdev->dev,
-			"%s: isci_phy = %p, sas_phy = %p\n",
-			__func__, iphy, &iphy->sas_phy);
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_phy = %p, sas_phy = %p\n",
+		__func__, iphy, &iphy->sas_phy);
 
-		ihost->sas_ha.notify_port_event(&iphy->sas_phy,
-						PORTE_BROADCAST_RCVD);
-	}
+	ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
 	sci_port_bcn_enable(iport);
 }
 
@@ -278,9 +245,6 @@
 		/* check to see if this is the last phy on this port. */
 		if (isci_phy->sas_phy.port &&
 		    isci_phy->sas_phy.port->num_phys == 1) {
-			atomic_inc(&isci_port->event);
-			isci_port_bcn_enable(isci_host, isci_port);
-
 			/* change the state for all devices on this port.  The
 			 * next task sent to this device will be returned as
 			 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
@@ -350,6 +314,34 @@
 	dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
 }
 
+
+static bool is_port_ready_state(enum sci_port_states state)
+{
+	switch (state) {
+	case SCI_PORT_READY:
+	case SCI_PORT_SUB_WAITING:
+	case SCI_PORT_SUB_OPERATIONAL:
+	case SCI_PORT_SUB_CONFIGURING:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/* flag dummy rnc handling when exiting a ready state */
+static void port_state_machine_change(struct isci_port *iport,
+				      enum sci_port_states state)
+{
+	struct sci_base_state_machine *sm = &iport->sm;
+	enum sci_port_states old_state = sm->current_state_id;
+
+	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
+		iport->ready_exit = true;
+
+	sci_change_state(sm, state);
+	iport->ready_exit = false;
+}
+
 /**
  * isci_port_hard_reset_complete() - This function is called by the sci core
  *    when the hard reset complete notification has been received.
@@ -368,6 +360,26 @@
 	/* Save the status of the hard reset from the port. */
 	isci_port->hard_reset_status = completion_status;
 
+	if (completion_status != SCI_SUCCESS) {
+
+		/* The reset failed.  The port state is now SCI_PORT_FAILED. */
+		if (isci_port->active_phy_mask == 0) {
+
+			/* Generate the link down now to the host, since it
+			 * was intercepted by the hard reset state machine when
+			 * it really happened.
+			 */
+			isci_port_link_down(isci_port->isci_host,
+					    &isci_port->isci_host->phys[
+						   isci_port->last_active_phy],
+					    isci_port);
+		}
+		/* Advance the port state so that link state changes will be
+		 * noticed.
+		 */
+		port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
+
+	}
 	complete_all(&isci_port->hard_reset_complete);
 }
 
@@ -657,6 +669,8 @@
 	struct isci_host *ihost = iport->owning_controller;
 
 	iport->active_phy_mask &= ~(1 << iphy->phy_index);
+	if (!iport->active_phy_mask)
+		iport->last_active_phy = iphy->phy_index;
 
 	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
 
@@ -683,33 +697,6 @@
 	}
 }
 
-static bool is_port_ready_state(enum sci_port_states state)
-{
-	switch (state) {
-	case SCI_PORT_READY:
-	case SCI_PORT_SUB_WAITING:
-	case SCI_PORT_SUB_OPERATIONAL:
-	case SCI_PORT_SUB_CONFIGURING:
-		return true;
-	default:
-		return false;
-	}
-}
-
-/* flag dummy rnc hanling when exiting a ready state */
-static void port_state_machine_change(struct isci_port *iport,
-				      enum sci_port_states state)
-{
-	struct sci_base_state_machine *sm = &iport->sm;
-	enum sci_port_states old_state = sm->current_state_id;
-
-	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
-		iport->ready_exit = true;
-
-	sci_change_state(sm, state);
-	iport->ready_exit = false;
-}
-
 /**
  * sci_port_general_link_up_handler - phy can be assigned to port?
  * @sci_port: sci_port object for which has a phy that has gone link up.
@@ -1622,7 +1609,8 @@
 	iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
 	iport->physical_port_index = index;
 	iport->active_phy_mask     = 0;
-	iport->ready_exit	      = false;
+	iport->last_active_phy     = 0;
+	iport->ready_exit	   = false;
 
 	iport->owning_controller = ihost;
 
@@ -1648,7 +1636,6 @@
 	init_completion(&iport->start_complete);
 	iport->isci_host = ihost;
 	isci_port_change_state(iport, isci_freed);
-	atomic_set(&iport->event, 0);
 }
 
 /**
@@ -1676,7 +1663,7 @@
 {
 	unsigned long flags;
 	enum sci_status status;
-	int idx, ret = TMF_RESP_FUNC_COMPLETE;
+	int ret = TMF_RESP_FUNC_COMPLETE;
 
 	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
 		__func__, iport);
@@ -1697,8 +1684,13 @@
 			"%s: iport = %p; hard reset completion\n",
 			__func__, iport);
 
-		if (iport->hard_reset_status != SCI_SUCCESS)
+		if (iport->hard_reset_status != SCI_SUCCESS) {
 			ret = TMF_RESP_FUNC_FAILED;
+
+			dev_err(&ihost->pdev->dev,
+				"%s: iport = %p; hard reset failed (0x%x)\n",
+				__func__, iport, iport->hard_reset_status);
+		}
 	} else {
 		ret = TMF_RESP_FUNC_FAILED;
 
@@ -1718,18 +1710,6 @@
 			"%s: iport = %p; hard reset failed "
 			"(0x%x) - driving explicit link fail for all phys\n",
 			__func__, iport, iport->hard_reset_status);
-
-		/* Down all phys in the port. */
-		spin_lock_irqsave(&ihost->scic_lock, flags);
-		for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
-			struct isci_phy *iphy = iport->phy_table[idx];
-
-			if (!iphy)
-				continue;
-			sci_phy_stop(iphy);
-			sci_phy_start(iphy);
-		}
-		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 	}
 	return ret;
 }
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index b50ecd4e..cb5ffbc 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -77,7 +77,6 @@
 
 /**
  * struct isci_port - isci direct attached sas port object
- * @event: counts bcns and port stop events (for bcn filtering)
  * @ready_exit: several states constitute 'ready'. When exiting ready we
  *              need to take extra port-teardown actions that are
  *              skipped when exiting to another 'ready' state.
@@ -92,10 +91,6 @@
  */
 struct isci_port {
 	enum isci_status status;
-	#define IPORT_BCN_BLOCKED 0
-	#define IPORT_BCN_PENDING 1
-	unsigned long flags;
-	atomic_t event;
 	struct isci_host *isci_host;
 	struct asd_sas_port sas_port;
 	struct list_head remote_dev_list;
@@ -109,6 +104,7 @@
 	u8 logical_port_index;
 	u8 physical_port_index;
 	u8 active_phy_mask;
+	u8 last_active_phy;
 	u16 reserved_rni;
 	u16 reserved_tag;
 	u32 started_request_count;
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index dc007e6..2c75248 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -112,7 +112,7 @@
 	 * This field specifies the maximum number of direct attached devices
 	 * that can have power supplied to them simultaneously.
 	 */
-	u8 max_number_concurrent_device_spin_up;
+	u8 max_concurr_spinup;
 
 	/**
 	 * This field specifies the number of seconds to allow a phy to consume
@@ -219,7 +219,7 @@
 struct sci_oem_params {
 	struct {
 		uint8_t mode_type;
-		uint8_t max_concurrent_dev_spin_up;
+		uint8_t max_concurr_spin_up;
 		uint8_t do_enable_ssc;
 		uint8_t reserved;
 	} controller;
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index fbf9ce2..b207cd3 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1438,88 +1438,3 @@
 
 	return status == SCI_SUCCESS ? 0 : -ENODEV;
 }
-/**
- * isci_device_is_reset_pending() - This function will check if there is any
- *    pending reset condition on the device.
- * @request: This parameter is the isci_device object.
- *
- * true if there is a reset pending for the device.
- */
-bool isci_device_is_reset_pending(
-	struct isci_host *isci_host,
-	struct isci_remote_device *isci_device)
-{
-	struct isci_request *isci_request;
-	struct isci_request *tmp_req;
-	bool reset_is_pending = false;
-	unsigned long flags;
-
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: isci_device = %p\n", __func__, isci_device);
-
-	spin_lock_irqsave(&isci_host->scic_lock, flags);
-
-	/* Check for reset on all pending requests. */
-	list_for_each_entry_safe(isci_request, tmp_req,
-				 &isci_device->reqs_in_process, dev_node) {
-		dev_dbg(&isci_host->pdev->dev,
-			"%s: isci_device = %p request = %p\n",
-			__func__, isci_device, isci_request);
-
-		if (isci_request->ttype == io_task) {
-			struct sas_task *task = isci_request_access_task(
-				isci_request);
-
-			spin_lock(&task->task_state_lock);
-			if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
-				reset_is_pending = true;
-			spin_unlock(&task->task_state_lock);
-		}
-	}
-
-	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
-
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: isci_device = %p reset_is_pending = %d\n",
-		__func__, isci_device, reset_is_pending);
-
-	return reset_is_pending;
-}
-
-/**
- * isci_device_clear_reset_pending() - This function will clear if any pending
- *    reset condition flags on the device.
- * @request: This parameter is the isci_device object.
- *
- * true if there is a reset pending for the device.
- */
-void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
-{
-	struct isci_request *isci_request;
-	struct isci_request *tmp_req;
-	unsigned long flags = 0;
-
-	dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
-		 __func__, idev, ihost);
-
-	spin_lock_irqsave(&ihost->scic_lock, flags);
-
-	/* Clear reset pending on all pending requests. */
-	list_for_each_entry_safe(isci_request, tmp_req,
-				 &idev->reqs_in_process, dev_node) {
-		dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
-			 __func__, idev, isci_request);
-
-		if (isci_request->ttype == io_task) {
-
-			unsigned long flags2;
-			struct sas_task *task = isci_request_access_task(
-				isci_request);
-
-			spin_lock_irqsave(&task->task_state_lock, flags2);
-			task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
-			spin_unlock_irqrestore(&task->task_state_lock, flags2);
-		}
-	}
-	spin_unlock_irqrestore(&ihost->scic_lock, flags);
-}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index e1747ea..483ee501 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -132,10 +132,7 @@
 				      struct isci_remote_device *idev);
 void isci_remote_device_gone(struct domain_device *domain_dev);
 int isci_remote_device_found(struct domain_device *domain_dev);
-bool isci_device_is_reset_pending(struct isci_host *ihost,
-				  struct isci_remote_device *idev);
-void isci_device_clear_reset_pending(struct isci_host *ihost,
-				     struct isci_remote_device *idev);
+
 /**
  * sci_remote_device_stop() - This method will stop both transmission and
  *    reception of link activity for the supplied remote device.  This method
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 565a9f0..192cb48 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -191,7 +191,7 @@
 
 	task_iu->task_func = isci_tmf->tmf_code;
 	task_iu->task_tag =
-		(ireq->ttype == tmf_task) ?
+		(test_bit(IREQ_TMF, &ireq->flags)) ?
 		isci_tmf->io_tag :
 		SCI_CONTROLLER_INVALID_IO_TAG;
 }
@@ -516,7 +516,7 @@
 	struct domain_device *dev = ireq->target_device->domain_dev;
 
 	/* check for management protocols */
-	if (ireq->ttype == tmf_task) {
+	if (test_bit(IREQ_TMF, &ireq->flags)) {
 		struct isci_tmf *tmf = isci_request_access_tmf(ireq);
 
 		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
@@ -632,7 +632,7 @@
 	enum sci_status status = SCI_SUCCESS;
 
 	/* check for management protocols */
-	if (ireq->ttype == tmf_task) {
+	if (test_bit(IREQ_TMF, &ireq->flags)) {
 		struct isci_tmf *tmf = isci_request_access_tmf(ireq);
 
 		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
@@ -2630,14 +2630,8 @@
 	switch (task_notification_selection) {
 
 	case isci_perform_normal_io_completion:
-
 		/* Normal notification (task_done) */
-		dev_dbg(&host->pdev->dev,
-			"%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
-			__func__,
-			task,
-			task->task_status.resp, response,
-			task->task_status.stat, status);
+
 		/* Add to the completed list. */
 		list_add(&request->completed_node,
 			 &host->requests_to_complete);
@@ -2650,13 +2644,6 @@
 		/* No notification to libsas because this request is
 		 * already in the abort path.
 		 */
-		dev_dbg(&host->pdev->dev,
-			 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
-			 __func__,
-			 task,
-			 task->task_status.resp, response,
-			 task->task_status.stat, status);
-
 		/* Wake up whatever process was waiting for this
 		 * request to complete.
 		 */
@@ -2673,30 +2660,22 @@
 
 	case isci_perform_error_io_completion:
 		/* Use sas_task_abort */
-		dev_dbg(&host->pdev->dev,
-			 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
-			 __func__,
-			 task,
-			 task->task_status.resp, response,
-			 task->task_status.stat, status);
 		/* Add to the aborted list. */
 		list_add(&request->completed_node,
 			 &host->requests_to_errorback);
 		break;
 
 	default:
-		dev_dbg(&host->pdev->dev,
-			 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
-			 __func__,
-			 task,
-			 task->task_status.resp, response,
-			 task->task_status.stat, status);
-
 		/* Add to the error to libsas list. */
 		list_add(&request->completed_node,
 			 &host->requests_to_errorback);
 		break;
 	}
+	dev_dbg(&host->pdev->dev,
+		"%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
+		__func__, task_notification_selection, task,
+		(task) ? task->task_status.resp : 0, response,
+		(task) ? task->task_status.stat : 0, status);
 }
 
 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
@@ -2728,9 +2707,9 @@
 	struct sas_task *task = isci_request_access_task(request);
 	struct ssp_response_iu *resp_iu;
 	unsigned long task_flags;
-	struct isci_remote_device *idev = isci_lookup_device(task->dev);
-	enum service_response response       = SAS_TASK_UNDELIVERED;
-	enum exec_status status         = SAS_ABORTED_TASK;
+	struct isci_remote_device *idev = request->target_device;
+	enum service_response response = SAS_TASK_UNDELIVERED;
+	enum exec_status status = SAS_ABORTED_TASK;
 	enum isci_request_status request_status;
 	enum isci_completion_selection complete_to_host
 		= isci_perform_normal_io_completion;
@@ -3061,7 +3040,6 @@
 
 	/* complete the io request to the core. */
 	sci_controller_complete_io(ihost, request->target_device, request);
-	isci_put_device(idev);
 
 	/* set terminated handle so it cannot be completed or
 	 * terminated again, and to cause any calls into abort
@@ -3080,7 +3058,7 @@
 	/* XXX as hch said always creating an internal sas_task for tmf
 	 * requests would simplify the driver
 	 */
-	task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
+	task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
 
 	/* all unaccelerated request types (non ssp or ncq) handled with
 	 * substates
@@ -3564,7 +3542,7 @@
 
 	ireq = isci_request_from_tag(ihost, tag);
 	ireq->ttype_ptr.io_task_ptr = task;
-	ireq->ttype = io_task;
+	clear_bit(IREQ_TMF, &ireq->flags);
 	task->lldd_task = ireq;
 
 	return ireq;
@@ -3578,7 +3556,7 @@
 
 	ireq = isci_request_from_tag(ihost, tag);
 	ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
-	ireq->ttype = tmf_task;
+	set_bit(IREQ_TMF, &ireq->flags);
 
 	return ireq;
 }
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index f720b97..be38933 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -77,11 +77,6 @@
 	dead        = 0x07
 };
 
-enum task_type {
-	io_task  = 0,
-	tmf_task = 1
-};
-
 enum sci_request_protocol {
 	SCIC_NO_PROTOCOL,
 	SCIC_SMP_PROTOCOL,
@@ -116,7 +111,6 @@
 	#define IREQ_ACTIVE 3
 	unsigned long flags;
 	/* XXX kill ttype and ttype_ptr, allocate full sas_task */
-	enum task_type ttype;
 	union ttype_ptr_union {
 		struct sas_task *io_task_ptr;   /* When ttype==io_task  */
 		struct isci_tmf *tmf_task_ptr;  /* When ttype==tmf_task */
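
The request.c/request.h hunks retire the separate "enum task_type ttype" member: whether a request is a TMF is now a single IREQ_TMF bit in the existing flags word, manipulated with the ordinary bitops. A small sketch of that flag-bit pattern (the EXAMPLE_* names are hypothetical; IREQ_TMF itself is defined in request.h):

	/* Sketch only: one flag bit replaces a dedicated enum member. */
	#include <linux/bitops.h>

	#define EXAMPLE_IREQ_TMF	5	/* illustrative bit index */

	static void example_mark_tmf(unsigned long *flags)
	{
		set_bit(EXAMPLE_IREQ_TMF, flags);	/* built as a TMF */
	}

	static bool example_is_tmf(const unsigned long *flags)
	{
		return test_bit(EXAMPLE_IREQ_TMF, flags);
	}
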
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index e2d9418..66ad3dc 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -212,16 +212,27 @@
 					task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
 					spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-					/* Indicate QUEUE_FULL so that the scsi
-					* midlayer retries. if the request
-					* failed for remote device reasons,
-					* it gets returned as
-					* SAS_TASK_UNDELIVERED next time
-					* through.
-					*/
-					isci_task_refuse(ihost, task,
-							 SAS_TASK_COMPLETE,
-							 SAS_QUEUE_FULL);
+					if (test_bit(IDEV_GONE, &idev->flags)) {
+
+						/* Indicate that the device
+						 * is gone.
+						 */
+						isci_task_refuse(ihost, task,
+							SAS_TASK_UNDELIVERED,
+							SAS_DEVICE_UNKNOWN);
+					} else {
+						/* Indicate QUEUE_FULL so that
+						 * the scsi midlayer retries.
+						 * If the request failed for
+						 * remote device reasons, it
+						 * gets returned as
+						 * SAS_TASK_UNDELIVERED next
+						 * time through.
+						 */
+						isci_task_refuse(ihost, task,
+							SAS_TASK_COMPLETE,
+							SAS_QUEUE_FULL);
+					}
 				}
 			}
 		}
@@ -243,7 +254,7 @@
 	struct isci_tmf *isci_tmf;
 	enum sci_status status;
 
-	if (tmf_task != ireq->ttype)
+	if (!test_bit(IREQ_TMF, &ireq->flags))
 		return SCI_FAILURE;
 
 	isci_tmf = isci_request_access_tmf(ireq);
@@ -327,6 +338,60 @@
 	return ireq;
 }
 
+/**
+ * isci_request_mark_zombie() - This function must be called with scic_lock held.
+ */
+static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
+{
+	struct completion *tmf_completion = NULL;
+	struct completion *req_completion;
+
+	/* Set the request state to "dead". */
+	ireq->status = dead;
+
+	req_completion = ireq->io_request_completion;
+	ireq->io_request_completion = NULL;
+
+	if (test_bit(IREQ_TMF, &ireq->flags)) {
+		/* Break links with the TMF request. */
+		struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+		/* In the case where a task request is dying,
+		 * the thread waiting on the complete will sit and
+		 * time out unless we wake it now.  Since the TMF
+		 * has a default error status, complete it here
+		 * to wake the waiting thread.
+		 */
+		if (tmf) {
+			tmf_completion = tmf->complete;
+			tmf->complete = NULL;
+			dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
+				__func__, tmf->tmf_code, tmf->io_tag);
+		}
+		ireq->ttype_ptr.tmf_task_ptr = NULL;
+	} else {
+		/* Break links with the sas_task - the callback is done
+		 * elsewhere.
+		 */
+		struct sas_task *task = isci_request_access_task(ireq);
+
+		if (task)
+			task->lldd_task = NULL;
+
+		ireq->ttype_ptr.io_task_ptr = NULL;
+	}
+
+	dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
+		 ireq->io_tag);
+
+	/* Don't force waiting threads to timeout. */
+	if (req_completion)
+		complete(req_completion);
+
+	if (tmf_completion != NULL)
+		complete(tmf_completion);
+}
+
 static int isci_task_execute_tmf(struct isci_host *ihost,
 				 struct isci_remote_device *idev,
 				 struct isci_tmf *tmf, unsigned long timeout_ms)
@@ -364,6 +429,7 @@
 
 	/* Assign the pointer to the TMF's completion kernel wait structure. */
 	tmf->complete = &completion;
+	tmf->status = SCI_FAILURE_TIMEOUT;
 
 	ireq = isci_task_request_build(ihost, idev, tag, tmf);
 	if (!ireq)
@@ -399,18 +465,35 @@
 					       msecs_to_jiffies(timeout_ms));
 
 	if (timeleft == 0) {
+		/* The TMF did not complete - this could be because
+		 * of an unplug.  Terminate the TMF request now.
+		 */
 		spin_lock_irqsave(&ihost->scic_lock, flags);
 
 		if (tmf->cb_state_func != NULL)
-			tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
+			tmf->cb_state_func(isci_tmf_timed_out, tmf,
+					   tmf->cb_data);
 
-		sci_controller_terminate_request(ihost,
-						  idev,
-						  ireq);
+		sci_controller_terminate_request(ihost, idev, ireq);
 
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-		wait_for_completion(tmf->complete);
+		timeleft = wait_for_completion_timeout(
+			&completion,
+			msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
+
+		if (!timeleft) {
+			/* Strange condition - the termination of the TMF
+			 * request timed out.
+			 */
+			spin_lock_irqsave(&ihost->scic_lock, flags);
+
+			/* If the TMF status has not changed, kill it. */
+			if (tmf->status == SCI_FAILURE_TIMEOUT)
+				isci_request_mark_zombie(ihost, ireq);
+
+			spin_unlock_irqrestore(&ihost->scic_lock, flags);
+		}
 	}
 
 	isci_print_tmf(tmf);
@@ -501,48 +584,17 @@
 	return old_state;
 }
 
-/**
-* isci_request_cleanup_completed_loiterer() - This function will take care of
-*    the final cleanup on any request which has been explicitly terminated.
-* @isci_host: This parameter specifies the ISCI host object
-* @isci_device: This is the device to which the request is pending.
-* @isci_request: This parameter specifies the terminated request object.
-* @task: This parameter is the libsas I/O request.
-*/
-static void isci_request_cleanup_completed_loiterer(
-	struct isci_host          *isci_host,
-	struct isci_remote_device *isci_device,
-	struct isci_request       *isci_request,
-	struct sas_task           *task)
+static int isci_request_is_dealloc_managed(enum isci_request_status stat)
 {
-	unsigned long flags;
-
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: isci_device=%p, request=%p, task=%p\n",
-		__func__, isci_device, isci_request, task);
-
-	if (task != NULL) {
-
-		spin_lock_irqsave(&task->task_state_lock, flags);
-		task->lldd_task = NULL;
-
-		task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
-
-		isci_set_task_doneflags(task);
-
-		/* If this task is not in the abort path, call task_done. */
-		if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-
-			spin_unlock_irqrestore(&task->task_state_lock, flags);
-			task->task_done(task);
-		} else
-			spin_unlock_irqrestore(&task->task_state_lock, flags);
-	}
-
-	if (isci_request != NULL) {
-		spin_lock_irqsave(&isci_host->scic_lock, flags);
-		list_del_init(&isci_request->dev_node);
-		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+	switch (stat) {
+	case aborted:
+	case aborting:
+	case terminating:
+	case completed:
+	case dead:
+		return true;
+	default:
+		return false;
 	}
 }
 
@@ -563,11 +615,9 @@
 	enum sci_status status      = SCI_SUCCESS;
 	bool was_terminated         = false;
 	bool needs_cleanup_handling = false;
-	enum isci_request_status request_status;
 	unsigned long     flags;
 	unsigned long     termination_completed = 1;
 	struct completion *io_request_completion;
-	struct sas_task   *task;
 
 	dev_dbg(&ihost->pdev->dev,
 		"%s: device = %p; request = %p\n",
@@ -577,10 +627,6 @@
 
 	io_request_completion = isci_request->io_request_completion;
 
-	task = (isci_request->ttype == io_task)
-		? isci_request_access_task(isci_request)
-		: NULL;
-
 	/* Note that we are not going to control
 	 * the target to abort the request.
 	 */
@@ -619,42 +665,27 @@
 				__func__, isci_request, io_request_completion);
 
 			/* Wait here for the request to complete. */
-			#define TERMINATION_TIMEOUT_MSEC 500
 			termination_completed
 				= wait_for_completion_timeout(
 				   io_request_completion,
-				   msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
+				   msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
 
 			if (!termination_completed) {
 
 				/* The request to terminate has timed out.  */
-				spin_lock_irqsave(&ihost->scic_lock,
-						  flags);
+				spin_lock_irqsave(&ihost->scic_lock, flags);
 
 				/* Check for state changes. */
-				if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+				if (!test_bit(IREQ_TERMINATED,
+					      &isci_request->flags)) {
 
 					/* The best we can do is to have the
 					 * request die a silent death if it
 					 * ever really completes.
-					 *
-					 * Set the request state to "dead",
-					 * and clear the task pointer so that
-					 * an actual completion event callback
-					 * doesn't do anything.
 					 */
-					isci_request->status = dead;
-					isci_request->io_request_completion
-						= NULL;
-
-					if (isci_request->ttype == io_task) {
-
-						/* Break links with the
-						* sas_task.
-						*/
-						isci_request->ttype_ptr.io_task_ptr
-							= NULL;
-					}
+					isci_request_mark_zombie(ihost,
+								 isci_request);
+					needs_cleanup_handling = true;
 				} else
 					termination_completed = 1;
 
@@ -691,29 +722,28 @@
 			 * needs to be detached and freed here.
 			 */
 			spin_lock_irqsave(&isci_request->state_lock, flags);
-			request_status = isci_request->status;
 
-			if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
-			    && ((request_status == aborted)
-				|| (request_status == aborting)
-				|| (request_status == terminating)
-				|| (request_status == completed)
-				|| (request_status == dead)
-				)
-			    ) {
+			needs_cleanup_handling
+				= isci_request_is_dealloc_managed(
+					isci_request->status);
 
-				/* The completion routine won't free a request in
-				 * the aborted/aborting/etc. states, so we do
-				 * it here.
-				 */
-				needs_cleanup_handling = true;
-			}
 			spin_unlock_irqrestore(&isci_request->state_lock, flags);
 
 		}
-		if (needs_cleanup_handling)
-			isci_request_cleanup_completed_loiterer(
-				ihost, idev, isci_request, task);
+		if (needs_cleanup_handling) {
+
+			dev_dbg(&ihost->pdev->dev,
+				"%s: cleanup isci_device=%p, request=%p\n",
+				__func__, idev, isci_request);
+
+			if (isci_request != NULL) {
+				spin_lock_irqsave(&ihost->scic_lock, flags);
+				isci_free_tag(ihost, isci_request->io_tag);
+				isci_request_change_state(isci_request, unallocated);
+				list_del_init(&isci_request->dev_node);
+				spin_unlock_irqrestore(&ihost->scic_lock, flags);
+			}
+		}
 	}
 }
 
@@ -772,7 +802,9 @@
 		dev_dbg(&ihost->pdev->dev,
 			 "%s: idev=%p request=%p; task=%p old_state=%d\n",
 			 __func__, idev, ireq,
-			ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
+			(!test_bit(IREQ_TMF, &ireq->flags)
+				? isci_request_access_task(ireq)
+				: NULL),
 			old_state);
 
 		/* If the old_state is started:
@@ -889,22 +921,14 @@
 		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
 		 __func__, domain_device, isci_host, isci_device);
 
-	if (isci_device)
-		set_bit(IDEV_EH, &isci_device->flags);
+	if (!isci_device) {
+		/* If the device is gone, stop the escalations. */
+		dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);
 
-	/* If there is a device reset pending on any request in the
-	 * device's list, fail this LUN reset request in order to
-	 * escalate to the device reset.
-	 */
-	if (!isci_device ||
-	    isci_device_is_reset_pending(isci_host, isci_device)) {
-		dev_dbg(&isci_host->pdev->dev,
-			 "%s: No dev (%p), or "
-			 "RESET PENDING: domain_device=%p\n",
-			 __func__, isci_device, domain_device);
-		ret = TMF_RESP_FUNC_FAILED;
+		ret = TMF_RESP_FUNC_COMPLETE;
 		goto out;
 	}
+	set_bit(IDEV_EH, &isci_device->flags);
 
 	/* Send the task management part of the reset. */
 	if (sas_protocol_ata(domain_device->tproto)) {
@@ -1013,7 +1037,7 @@
 	struct isci_tmf           tmf;
 	int                       ret = TMF_RESP_FUNC_FAILED;
 	unsigned long             flags;
-	bool                      any_dev_reset = false;
+	int                       perform_termination = 0;
 
 	/* Get the isci_request reference from the task.  Note that
 	 * this check does not depend on the pending request list
@@ -1035,89 +1059,34 @@
 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
 	dev_dbg(&isci_host->pdev->dev,
-		"%s: task = %p\n", __func__, task);
+		"%s: dev = %p, task = %p, old_request == %p\n",
+		__func__, isci_device, task, old_request);
 
-	if (!isci_device || !old_request)
-		goto out;
+	if (isci_device)
+		set_bit(IDEV_EH, &isci_device->flags);
 
-	set_bit(IDEV_EH, &isci_device->flags);
-
-	/* This version of the driver will fail abort requests for
-	 * SATA/STP.  Failing the abort request this way will cause the
-	 * SCSI error handler thread to escalate to LUN reset
+	/* Device reset conditions signalled in task_state_flags are the
+	 * responsibility of libsas to observe at the start of the error
+	 * handler thread.
 	 */
-	if (sas_protocol_ata(task->task_proto)) {
-		dev_dbg(&isci_host->pdev->dev,
-			    " task %p is for a STP/SATA device;"
-			    " returning TMF_RESP_FUNC_FAILED\n"
-			    " to cause a LUN reset...\n", task);
-		goto out;
-	}
-
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: old_request == %p\n", __func__, old_request);
-
-	any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
-
-	spin_lock_irqsave(&task->task_state_lock, flags);
-
-	any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
-
-	/* If the extraction of the request reference from the task
-	 * failed, then the request has been completed (or if there is a
-	 * pending reset then this abort request function must be failed
-	 * in order to escalate to the target reset).
-	 */
-	if ((old_request == NULL) || any_dev_reset) {
-
-		/* If the device reset task flag is set, fail the task
-		 * management request.  Otherwise, the original request
-		 * has completed.
-		 */
-		if (any_dev_reset) {
-
-			/* Turn off the task's DONE to make sure this
-			 * task is escalated to a target reset.
-			 */
-			task->task_state_flags &= ~SAS_TASK_STATE_DONE;
-
-			/* Make the reset happen as soon as possible. */
-			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
-
-			spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-			/* Fail the task management request in order to
-			 * escalate to the target reset.
-			 */
-			ret = TMF_RESP_FUNC_FAILED;
-
-			dev_dbg(&isci_host->pdev->dev,
-				"%s: Failing task abort in order to "
-				"escalate to target reset because\n"
-				"SAS_TASK_NEED_DEV_RESET is set for "
-				"task %p on dev %p\n",
-				__func__, task, isci_device);
-
-
-		} else {
-			/* The request has already completed and there
-			 * is nothing to do here other than to set the task
-			 * done bit, and indicate that the task abort function
-			 * was sucessful.
-			 */
-			isci_set_task_doneflags(task);
-
-			spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-			ret = TMF_RESP_FUNC_COMPLETE;
-
-			dev_dbg(&isci_host->pdev->dev,
-				"%s: abort task not needed for %p\n",
-				__func__, task);
-		}
-		goto out;
-	} else {
+	if (!isci_device || !old_request) {
+		/* The request has already completed and there
+		 * is nothing to do here other than to set the task
+		 * done bit, and indicate that the task abort function
+		 * was successful.
+		 */
+		spin_lock_irqsave(&task->task_state_lock, flags);
+		task->task_state_flags |= SAS_TASK_STATE_DONE;
+		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+					    SAS_TASK_STATE_PENDING);
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+		ret = TMF_RESP_FUNC_COMPLETE;
+
+		dev_dbg(&isci_host->pdev->dev,
+			"%s: abort task not needed for %p\n",
+			__func__, task);
+		goto out;
 	}
 
 	spin_lock_irqsave(&isci_host->scic_lock, flags);
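
Every task_state_flags update in this function now happens inline under
task_state_lock (the old isci_task_all_done()/isci_set_task_doneflags()
helpers are removed further down). The idiom, isolated into a standalone
sketch (hypothetical helper name; flag constants and locking are libsas):

	#include <scsi/libsas.h>

	/* Hypothetical helper: mark a sas_task done from the LLDD side. */
	static void sketch_mark_task_done(struct sas_task *task)
	{
		unsigned long flags;

		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
					    SAS_TASK_STATE_PENDING);
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}
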
@@ -1146,24 +1115,44 @@
 		goto out;
 	}
 	if (task->task_proto == SAS_PROTOCOL_SMP ||
+	    sas_protocol_ata(task->task_proto) ||
 	    test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
 
 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
 		dev_dbg(&isci_host->pdev->dev,
-			"%s: SMP request (%d)"
+			"%s: %s request"
 			" or complete_in_target (%d), thus no TMF\n",
-			__func__, (task->task_proto == SAS_PROTOCOL_SMP),
+			__func__,
+			((task->task_proto == SAS_PROTOCOL_SMP)
+				? "SMP"
+				: (sas_protocol_ata(task->task_proto)
+					? "SATA/STP"
+					: "<other>")
+			 ),
 			test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
 
-		/* Set the state on the task. */
-		isci_task_all_done(task);
+		if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+			spin_lock_irqsave(&task->task_state_lock, flags);
+			task->task_state_flags |= SAS_TASK_STATE_DONE;
+			task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+						    SAS_TASK_STATE_PENDING);
+			spin_unlock_irqrestore(&task->task_state_lock, flags);
+			ret = TMF_RESP_FUNC_COMPLETE;
+		} else {
+			spin_lock_irqsave(&task->task_state_lock, flags);
+			task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+						    SAS_TASK_STATE_PENDING);
+			spin_unlock_irqrestore(&task->task_state_lock, flags);
+		}
 
-		ret = TMF_RESP_FUNC_COMPLETE;
-
-		/* Stopping and SMP devices are not sent a TMF, and are not
-		 * reset, but the outstanding I/O request is terminated below.
+		/* STP and SMP devices are not sent a TMF, but the
+		 * outstanding I/O request is terminated below.  This is
+		 * because SATA/STP and SMP discovery path timeouts directly
+		 * call the abort task interface for cleanup.
 		 */
+		perform_termination = 1;
+
 	} else {
 		/* Fill in the tmf structure */
 		isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
@@ -1172,22 +1161,24 @@
 
 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
-		#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
+		#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
 		ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
 					    ISCI_ABORT_TASK_TIMEOUT_MS);
 
-		if (ret != TMF_RESP_FUNC_COMPLETE)
+		if (ret == TMF_RESP_FUNC_COMPLETE)
+			perform_termination = 1;
+		else
 			dev_dbg(&isci_host->pdev->dev,
-				"%s: isci_task_send_tmf failed\n",
-				__func__);
+				"%s: isci_task_send_tmf failed\n", __func__);
 	}
-	if (ret == TMF_RESP_FUNC_COMPLETE) {
+	if (perform_termination) {
 		set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
 
 		/* Clean up the request on our side, and wait for the aborted
 		 * I/O to complete.
 		 */
-		isci_terminate_request_core(isci_host, isci_device, old_request);
+		isci_terminate_request_core(isci_host, isci_device,
+					    old_request);
 	}
 
 	/* Make sure we do not leave a reference to aborted_io_completion */
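
The rework above decouples "the TMF succeeded" from "terminate the request":
termination now runs both for SMP and SATA/STP requests that never get a TMF
and for SSP requests whose abort TMF completed. A condensed, illustrative
sketch of that gate (function and flag names are from this file; the wrapper
itself is hypothetical):

	/* Illustrative: funnel the no-TMF paths (SMP, SATA/STP, already
	 * complete in target) and the successful-TMF path into a single
	 * termination site.
	 */
	static void sketch_abort_cleanup(struct isci_host *ihost,
					 struct isci_remote_device *idev,
					 struct isci_request *ireq,
					 bool tmf_needed, int tmf_status)
	{
		int perform_termination = !tmf_needed ||
					  tmf_status == TMF_RESP_FUNC_COMPLETE;

		if (perform_termination) {
			set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
			isci_terminate_request_core(ihost, idev, ireq);
		}
	}
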
@@ -1288,7 +1279,8 @@
 			   enum sci_task_status completion_status)
 {
 	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
-	struct completion *tmf_complete;
+	struct completion *tmf_complete = NULL;
+	struct completion *request_complete = ireq->io_request_completion;
 
 	dev_dbg(&ihost->pdev->dev,
 		"%s: request = %p, status=%d\n",
@@ -1296,255 +1288,53 @@
 
 	isci_request_change_state(ireq, completed);
 
-	tmf->status = completion_status;
 	set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
 
-	if (tmf->proto == SAS_PROTOCOL_SSP) {
-		memcpy(&tmf->resp.resp_iu,
-		       &ireq->ssp.rsp,
-		       SSP_RESP_IU_MAX_SIZE);
-	} else if (tmf->proto == SAS_PROTOCOL_SATA) {
-		memcpy(&tmf->resp.d2h_fis,
-		       &ireq->stp.rsp,
-		       sizeof(struct dev_to_host_fis));
+	if (tmf) {
+		tmf->status = completion_status;
+
+		if (tmf->proto == SAS_PROTOCOL_SSP) {
+			memcpy(&tmf->resp.resp_iu,
+			       &ireq->ssp.rsp,
+			       SSP_RESP_IU_MAX_SIZE);
+		} else if (tmf->proto == SAS_PROTOCOL_SATA) {
+			memcpy(&tmf->resp.d2h_fis,
+			       &ireq->stp.rsp,
+			       sizeof(struct dev_to_host_fis));
+		}
+		/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
+		tmf_complete = tmf->complete;
 	}
-
-	/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
-	tmf_complete = tmf->complete;
-
 	sci_controller_complete_io(ihost, ireq->target_device, ireq);
 	/* set the 'terminated' flag handle to make sure it cannot be terminated
 	 *  or completed again.
 	 */
 	set_bit(IREQ_TERMINATED, &ireq->flags);
 
-	isci_request_change_state(ireq, unallocated);
-	list_del_init(&ireq->dev_node);
+	/* As soon as something is in the terminate path, deallocation is
+	 * managed there.  Note that the final non-managed state of a task
+	 * request is "completed".
+	 */
+	if ((ireq->status == completed) ||
+	    !isci_request_is_dealloc_managed(ireq->status)) {
+		isci_request_change_state(ireq, unallocated);
+		isci_free_tag(ihost, ireq->io_tag);
+		list_del_init(&ireq->dev_node);
+	}
+
+	/* "request_complete" is set if the task was being terminated. */
+	if (request_complete)
+		complete(request_complete);
 
 	/* The task management part completes last. */
-	complete(tmf_complete);
-}
-
-static void isci_smp_task_timedout(unsigned long _task)
-{
-	struct sas_task *task = (void *) _task;
-	unsigned long flags;
-
-	spin_lock_irqsave(&task->task_state_lock, flags);
-	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
-		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	complete(&task->completion);
-}
-
-static void isci_smp_task_done(struct sas_task *task)
-{
-	if (!del_timer(&task->timer))
-		return;
-	complete(&task->completion);
-}
-
-static int isci_smp_execute_task(struct isci_host *ihost,
-				 struct domain_device *dev, void *req,
-				 int req_size, void *resp, int resp_size)
-{
-	int res, retry;
-	struct sas_task *task = NULL;
-
-	for (retry = 0; retry < 3; retry++) {
-		task = sas_alloc_task(GFP_KERNEL);
-		if (!task)
-			return -ENOMEM;
-
-		task->dev = dev;
-		task->task_proto = dev->tproto;
-		sg_init_one(&task->smp_task.smp_req, req, req_size);
-		sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
-
-		task->task_done = isci_smp_task_done;
-
-		task->timer.data = (unsigned long) task;
-		task->timer.function = isci_smp_task_timedout;
-		task->timer.expires = jiffies + 10*HZ;
-		add_timer(&task->timer);
-
-		res = isci_task_execute_task(task, 1, GFP_KERNEL);
-
-		if (res) {
-			del_timer(&task->timer);
-			dev_dbg(&ihost->pdev->dev,
-				"%s: executing SMP task failed:%d\n",
-				__func__, res);
-			goto ex_err;
-		}
-
-		wait_for_completion(&task->completion);
-		res = -ECOMM;
-		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-			dev_dbg(&ihost->pdev->dev,
-				"%s: smp task timed out or aborted\n",
-				__func__);
-			isci_task_abort_task(task);
-			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
-				dev_dbg(&ihost->pdev->dev,
-					"%s: SMP task aborted and not done\n",
-					__func__);
-				goto ex_err;
-			}
-		}
-		if (task->task_status.resp == SAS_TASK_COMPLETE &&
-		    task->task_status.stat == SAM_STAT_GOOD) {
-			res = 0;
-			break;
-		}
-		if (task->task_status.resp == SAS_TASK_COMPLETE &&
-		      task->task_status.stat == SAS_DATA_UNDERRUN) {
-			/* no error, but return the number of bytes of
-			* underrun */
-			res = task->task_status.residual;
-			break;
-		}
-		if (task->task_status.resp == SAS_TASK_COMPLETE &&
-		      task->task_status.stat == SAS_DATA_OVERRUN) {
-			res = -EMSGSIZE;
-			break;
-		} else {
-			dev_dbg(&ihost->pdev->dev,
-				"%s: task to dev %016llx response: 0x%x "
-				"status 0x%x\n", __func__,
-				SAS_ADDR(dev->sas_addr),
-				task->task_status.resp,
-				task->task_status.stat);
-			sas_free_task(task);
-			task = NULL;
-		}
-	}
-ex_err:
-	BUG_ON(retry == 3 && task != NULL);
-	sas_free_task(task);
-	return res;
-}
-
-#define DISCOVER_REQ_SIZE  16
-#define DISCOVER_RESP_SIZE 56
-
-int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
-				       struct domain_device *dev,
-				       int phy_id, int *adt)
-{
-	struct smp_resp *disc_resp;
-	u8 *disc_req;
-	int res;
-
-	disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
-	if (!disc_resp)
-		return -ENOMEM;
-
-	disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
-	if (disc_req) {
-		disc_req[0] = SMP_REQUEST;
-		disc_req[1] = SMP_DISCOVER;
-		disc_req[9] = phy_id;
-	} else {
-		kfree(disc_resp);
-		return -ENOMEM;
-	}
-	res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
-				    disc_resp, DISCOVER_RESP_SIZE);
-	if (!res) {
-		if (disc_resp->result != SMP_RESP_FUNC_ACC)
-			res = disc_resp->result;
-		else
-			*adt = disc_resp->disc.attached_dev_type;
-	}
-	kfree(disc_req);
-	kfree(disc_resp);
-
-	return res;
-}
-
-static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
-{
-	struct domain_device *dev = idev->domain_dev;
-	struct isci_port *iport = idev->isci_port;
-	struct isci_host *ihost = iport->isci_host;
-	int res, iteration = 0, attached_device_type;
-	#define STP_WAIT_MSECS 25000
-	unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
-	unsigned long deadline = jiffies + tmo;
-	enum {
-		SMP_PHYWAIT_PHYDOWN,
-		SMP_PHYWAIT_PHYUP,
-		SMP_PHYWAIT_DONE
-	} phy_state = SMP_PHYWAIT_PHYDOWN;
-
-	/* While there is time, wait for the phy to go away and come back */
-	while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
-		int event = atomic_read(&iport->event);
-
-		++iteration;
-
-		tmo = wait_event_timeout(ihost->eventq,
-					 event != atomic_read(&iport->event) ||
-					 !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
-					 tmo);
-		/* link down, stop polling */
-		if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
-			break;
-
-		dev_dbg(&ihost->pdev->dev,
-			"%s: iport %p, iteration %d,"
-			" phase %d: time_remaining %lu, bcns = %d\n",
-			__func__, iport, iteration, phy_state,
-			tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
-
-		res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
-							 &attached_device_type);
-		tmo = deadline - jiffies;
-
-		if (res) {
-			dev_dbg(&ihost->pdev->dev,
-				 "%s: iteration %d, phase %d:"
-				 " SMP error=%d, time_remaining=%lu\n",
-				 __func__, iteration, phy_state, res, tmo);
-			break;
-		}
-		dev_dbg(&ihost->pdev->dev,
-			"%s: iport %p, iteration %d,"
-			" phase %d: time_remaining %lu, bcns = %d, "
-			"attdevtype = %x\n",
-			__func__, iport, iteration, phy_state,
-			tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
-			attached_device_type);
-
-		switch (phy_state) {
-		case SMP_PHYWAIT_PHYDOWN:
-			/* Has the device gone away? */
-			if (!attached_device_type)
-				phy_state = SMP_PHYWAIT_PHYUP;
-
-			break;
-
-		case SMP_PHYWAIT_PHYUP:
-			/* Has the device come back? */
-			if (attached_device_type)
-				phy_state = SMP_PHYWAIT_DONE;
-			break;
-
-		case SMP_PHYWAIT_DONE:
-			break;
-		}
-
-	}
-	dev_dbg(&ihost->pdev->dev, "%s: done\n",  __func__);
+	if (tmf_complete)
+		complete(tmf_complete);
 }
 
 static int isci_reset_device(struct isci_host *ihost,
 			     struct isci_remote_device *idev)
 {
 	struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
-	struct isci_port *iport = idev->isci_port;
 	enum sci_status status;
 	unsigned long flags;
 	int rc;
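
isci_task_request_complete() below now tolerates a NULL tmf and signals up to
two optional waiters, each captured before the request can be freed. The shape
of that, reduced to its essentials (hypothetical helper name; standard
<linux/completion.h> API):

	#include <linux/completion.h>

	/* Signal optional waiters; each completion pointer is only non-NULL
	 * if someone is actually blocked on it ("request_complete" is set
	 * while the I/O is being terminated, "tmf_complete" only exists for
	 * TMF requests).
	 */
	static void sketch_signal_waiters(struct completion *request_complete,
					  struct completion *tmf_complete)
	{
		if (request_complete)
			complete(request_complete);
		if (tmf_complete)
			complete(tmf_complete);
	}
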
@@ -1564,13 +1354,6 @@
 	}
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-	/* Make sure all pending requests are able to be fully terminated. */
-	isci_device_clear_reset_pending(ihost, idev);
-
-	/* If this is a device on an expander, disable BCN processing. */
-	if (!scsi_is_sas_phy_local(phy))
-		set_bit(IPORT_BCN_BLOCKED, &iport->flags);
-
 	rc = sas_phy_reset(phy, true);
 
 	/* Terminate in-progress I/O now. */
@@ -1581,21 +1364,6 @@
 	status = sci_remote_device_reset_complete(idev);
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-	/* If this is a device on an expander, bring the phy back up. */
-	if (!scsi_is_sas_phy_local(phy)) {
-		/* A phy reset will cause the device to go away then reappear.
-		 * Since libsas will take action on incoming BCNs (eg. remove
-		 * a device going through an SMP phy-control driven reset),
-		 * we need to wait until the phy comes back up before letting
-		 * discovery proceed in libsas.
-		 */
-		isci_wait_for_smp_phy_reset(idev, phy->number);
-
-		spin_lock_irqsave(&ihost->scic_lock, flags);
-		isci_port_bcn_enable(ihost, idev->isci_port);
-		spin_unlock_irqrestore(&ihost->scic_lock, flags);
-	}
-
 	if (status != SCI_SUCCESS) {
 		dev_dbg(&ihost->pdev->dev,
 			 "%s: sci_remote_device_reset_complete(%p) "
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 15b18d1..bc78c0a 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -58,6 +58,8 @@
 #include <scsi/sas_ata.h>
 #include "host.h"
 
+#define ISCI_TERMINATION_TIMEOUT_MSEC 500
+
 struct isci_request;
 
 /**
@@ -224,35 +226,6 @@
 	isci_perform_error_io_completion        /* Use sas_task_abort */
 };
 
-static inline void isci_set_task_doneflags(
-	struct sas_task *task)
-{
-	/* Since no futher action will be taken on this task,
-	 * make sure to mark it complete from the lldd perspective.
-	 */
-	task->task_state_flags |= SAS_TASK_STATE_DONE;
-	task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
-	task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
-}
-/**
- * isci_task_all_done() - This function clears the task bits to indicate the
- *    LLDD is done with the task.
- *
- *
- */
-static inline void isci_task_all_done(
-	struct sas_task *task)
-{
-	unsigned long flags;
-
-	/* Since no futher action will be taken on this task,
-	 * make sure to mark it complete from the lldd perspective.
-	 */
-	spin_lock_irqsave(&task->task_state_lock, flags);
-	isci_set_task_doneflags(task);
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
-}
-
 /**
  * isci_task_set_completion_status() - This function sets the completion status
  *    for the request.
@@ -334,7 +307,9 @@
 		/* Fall through to the normal case... */
 	case isci_perform_normal_io_completion:
 		/* Normal notification (task_done) */
-		isci_set_task_doneflags(task);
+		task->task_state_flags |= SAS_TASK_STATE_DONE;
+		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+					    SAS_TASK_STATE_PENDING);
 		break;
 	default:
 		WARN_ONCE(1, "unknown task_notification_selection: %d\n",
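
The default arm above uses WARN_ONCE() so an unexpected enum value is
reported with a backtrace once rather than on every completion. The idiom in
isolation (illustrative function and values):

	#include <linux/kernel.h>

	static void sketch_handle_selection(int sel)
	{
		switch (sel) {
		case 0:
			/* normal completion path */
			break;
		default:
			/* Prints the message plus a stack trace on the first
			 * hit only; later hits are suppressed.
			 */
			WARN_ONCE(1, "unknown selection: %d\n", sel);
			break;
		}
	}
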
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 7c055fd..1b22130 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -469,6 +469,7 @@
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
 	int error;
 	u32 f_ctl;
+	u8 fh_type = fh->fh_type;
 
 	ep = fc_seq_exch(sp);
 	WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
@@ -493,7 +494,7 @@
 	 */
 	error = lport->tt.frame_send(lport, fp);
 
-	if (fh->fh_type == FC_TYPE_BLS)
+	if (fh_type == FC_TYPE_BLS)
 		return error;
 
 	/*
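
The fix above caches fh->fh_type before calling frame_send(), because the
transmit path may consume and free the frame, leaving fh a dangling pointer.
The general pattern (hypothetical function; libfc accessors as used above):

	#include <scsi/libfc.h>

	/* Illustrative: copy out any header field needed after transmission
	 * *before* handing the frame to a routine that may free it.
	 */
	static int sketch_send(struct fc_lport *lport, struct fc_frame *fp)
	{
		struct fc_frame_header *fh = fc_frame_header_get(fp);
		u8 fh_type = fh->fh_type;	/* cache: fp may be freed */
		int error;

		error = lport->tt.frame_send(lport, fp);
		/* fp (and fh) must not be dereferenced past this point. */

		if (fh_type == FC_TYPE_BLS)
			return error;	/* no exchange timer for BLS */
		/* ... otherwise continue with post-send bookkeeping ... */
		return error;
	}
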
@@ -1792,6 +1793,9 @@
 			goto restart;
 		}
 	}
+	pool->next_index = 0;
+	pool->left = FC_XID_UNKNOWN;
+	pool->right = FC_XID_UNKNOWN;
 	spin_unlock_bh(&pool->lock);
 }
 
@@ -2280,6 +2284,7 @@
 		goto free_mempool;
 	for_each_possible_cpu(cpu) {
 		pool = per_cpu_ptr(mp->pool, cpu);
+		pool->next_index = 0;
 		pool->left = FC_XID_UNKNOWN;
 		pool->right = FC_XID_UNKNOWN;
 		spin_lock_init(&pool->lock);
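
With next_index now reset alongside left/right both at allocation and in the
reset path above, each per-CPU pool restarts its XID search from a clean
state. A sketch of the per-CPU initialization loop (field names from this
file; the function name is hypothetical):

	#include <linux/percpu.h>

	static void sketch_init_pools(struct fc_exch_mgr *mp)
	{
		struct fc_exch_pool *pool;
		unsigned int cpu;

		for_each_possible_cpu(cpu) {
			pool = per_cpu_ptr(mp->pool, cpu);
			pool->next_index = 0;
			pool->left = FC_XID_UNKNOWN;	/* no cached free XID */
			pool->right = FC_XID_UNKNOWN;
			spin_lock_init(&pool->lock);
		}
	}
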
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 628f347..2cb12b9 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1030,16 +1030,8 @@
 			   FCH_EVT_LIPRESET, 0);
 	fc_vports_linkchange(lport);
 	fc_lport_reset_locked(lport);
-	if (lport->link_up) {
-		/*
-		 * Wait upto resource allocation time out before
-		 * doing re-login since incomplete FIP exchanged
-		 * from last session may collide with exchanges
-		 * in new session.
-		 */
-		msleep(lport->r_a_tov);
+	if (lport->link_up)
 		fc_lport_enter_flogi(lport);
-	}
 }
 
 /**
@@ -1481,6 +1473,7 @@
 			 void *lp_arg)
 {
 	struct fc_lport *lport = lp_arg;
+	struct fc_frame_header *fh;
 	struct fc_els_flogi *flp;
 	u32 did;
 	u16 csp_flags;
@@ -1508,49 +1501,56 @@
 		goto err;
 	}
 
+	fh = fc_frame_header_get(fp);
 	did = fc_frame_did(fp);
-	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
-		flp = fc_frame_payload_get(fp, sizeof(*flp));
-		if (flp) {
-			mfs = ntohs(flp->fl_csp.sp_bb_data) &
-				FC_SP_BB_DATA_MASK;
-			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
-			    mfs < lport->mfs)
-				lport->mfs = mfs;
-			csp_flags = ntohs(flp->fl_csp.sp_features);
-			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
-			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
-			if (csp_flags & FC_SP_FT_EDTR)
-				e_d_tov /= 1000000;
-
-			lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
-
-			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
-				if (e_d_tov > lport->e_d_tov)
-					lport->e_d_tov = e_d_tov;
-				lport->r_a_tov = 2 * e_d_tov;
-				fc_lport_set_port_id(lport, did, fp);
-				printk(KERN_INFO "host%d: libfc: "
-				       "Port (%6.6x) entered "
-				       "point-to-point mode\n",
-				       lport->host->host_no, did);
-				fc_lport_ptp_setup(lport, fc_frame_sid(fp),
-						   get_unaligned_be64(
-							   &flp->fl_wwpn),
-						   get_unaligned_be64(
-							   &flp->fl_wwnn));
-			} else {
-				lport->e_d_tov = e_d_tov;
-				lport->r_a_tov = r_a_tov;
-				fc_host_fabric_name(lport->host) =
-					get_unaligned_be64(&flp->fl_wwnn);
-				fc_lport_set_port_id(lport, did, fp);
-				fc_lport_enter_dns(lport);
-			}
-		}
-	} else {
-		FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
+	if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
+	    fc_frame_payload_op(fp) != ELS_LS_ACC) {
+		FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
 		fc_lport_error(lport, fp);
+		goto err;
+	}
+
+	flp = fc_frame_payload_get(fp, sizeof(*flp));
+	if (!flp) {
+		FC_LPORT_DBG(lport, "FLOGI bad response\n");
+		fc_lport_error(lport, fp);
+		goto err;
+	}
+
+	mfs = ntohs(flp->fl_csp.sp_bb_data) &
+		FC_SP_BB_DATA_MASK;
+	if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
+	    mfs < lport->mfs)
+		lport->mfs = mfs;
+	csp_flags = ntohs(flp->fl_csp.sp_features);
+	r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
+	e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
+	if (csp_flags & FC_SP_FT_EDTR)
+		e_d_tov /= 1000000;
+
+	lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
+
+	if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+		if (e_d_tov > lport->e_d_tov)
+			lport->e_d_tov = e_d_tov;
+		lport->r_a_tov = 2 * e_d_tov;
+		fc_lport_set_port_id(lport, did, fp);
+		printk(KERN_INFO "host%d: libfc: "
+		       "Port (%6.6x) entered "
+		       "point-to-point mode\n",
+		       lport->host->host_no, did);
+		fc_lport_ptp_setup(lport, fc_frame_sid(fp),
+				   get_unaligned_be64(
+					   &flp->fl_wwpn),
+				   get_unaligned_be64(
+					   &flp->fl_wwnn));
+	} else {
+		lport->e_d_tov = e_d_tov;
+		lport->r_a_tov = r_a_tov;
+		fc_host_fabric_name(lport->host) =
+			get_unaligned_be64(&flp->fl_wwnn);
+		fc_lport_set_port_id(lport, did, fp);
+		fc_lport_enter_dns(lport);
 	}
 
 out:
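
Restructuring the FLOGI handler into early-exit checks flattens the old
triple nesting: the R_CTL field, the DID, and the payload are each validated
before any CSP parsing. Reduced to its skeleton (libfc accessors as used
above; error and cleanup paths simplified for illustration):

	static void sketch_flogi_resp(struct fc_lport *lport, struct fc_frame *fp)
	{
		struct fc_frame_header *fh = fc_frame_header_get(fp);
		struct fc_els_flogi *flp;

		/* Reject anything that is not an ELS reply ACC with a DID. */
		if (fh->fh_r_ctl != FC_RCTL_ELS_REP || !fc_frame_did(fp) ||
		    fc_frame_payload_op(fp) != ELS_LS_ACC) {
			fc_lport_error(lport, fp);
			return;
		}

		flp = fc_frame_payload_get(fp, sizeof(*flp));
		if (!flp) {
			fc_lport_error(lport, fp);
			return;
		}

		/* ... parse flp->fl_csp service parameters here ... */
	}
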
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 3105d5e..8dc1b32 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2010 LSI Corporation.
+ *  Copyright (c) 2000-2011 LSI Corporation.
  *
  *
  *           Name:  mpi2.h
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.18
+ *  mpi2.h Version:  02.00.20
  *
  *  Version History
  *  ---------------
@@ -66,6 +66,9 @@
  *  08-11-10  02.00.17  Bumped MPI2_HEADER_VERSION_UNIT.
  *  11-10-10  02.00.18  Bumped MPI2_HEADER_VERSION_UNIT.
  *                      Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ *  02-23-11  02.00.19  Bumped MPI2_HEADER_VERSION_UNIT.
+ *                      Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ *  03-09-11  02.00.20  Bumped MPI2_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -91,7 +94,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x12)
+#define MPI2_HEADER_VERSION_UNIT            (0x14)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -515,6 +518,8 @@
 #define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION   (0x2F)
 /* Power Management Control */
 #define MPI2_FUNCTION_PWR_MGMT_CONTROL              (0x30)
+/* Send Host Message */
+#define MPI2_FUNCTION_SEND_HOST_MESSAGE             (0x31)
 /* beginning of product-specific range */
 #define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC          (0xF0)
 /* end of product-specific range */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 61475a6..cfd95b4 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2010 LSI Corporation.
+ *  Copyright (c) 2000-2011 LSI Corporation.
  *
  *
  *           Name:  mpi2_cnfg.h
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.17
+ *    mpi2_cnfg.h Version:  02.00.19
  *
  *  Version History
  *  ---------------
@@ -134,6 +134,12 @@
  *                      to MPI2_CONFIG_PAGE_IO_UNIT_7.
  *                      Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
  *                      and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
+ *  02-23-11  02.00.18  Added ProxyVF_ID field to MPI2_CONFIG_REQUEST.
+ *                      Added IO Unit Page 8, IO Unit Page 9,
+ *                      and IO Unit Page 10.
+ *                      Added SASNotifyPrimitiveMasks field to
+ *                      MPI2_CONFIG_PAGE_IOC_7.
+ *  03-09-11  02.00.19  Fixed IO Unit Page 10 (to match the spec).
  *  --------------------------------------------------------------------------
  */
 
@@ -329,7 +335,9 @@
     U8                      VP_ID;                      /* 0x08 */
     U8                      VF_ID;                      /* 0x09 */
     U16                     Reserved1;                  /* 0x0A */
-    U32                     Reserved2;                  /* 0x0C */
+	U8                      Reserved2;                  /* 0x0C */
+	U8                      ProxyVF_ID;                 /* 0x0D */
+	U16                     Reserved4;                  /* 0x0E */
     U32                     Reserved3;                  /* 0x10 */
     MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x14 */
     U32                     PageAddress;                /* 0x18 */
@@ -915,6 +923,120 @@
 #define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT      (0x01)
 #define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS         (0x02)
 
+/* IO Unit Page 8 */
+
+#define MPI2_IOUNIT8_NUM_THRESHOLDS     (4)
+
+typedef struct _MPI2_IOUNIT8_SENSOR {
+	U16                     Flags;                /* 0x00 */
+	U16                     Reserved1;            /* 0x02 */
+	U16
+		Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /* 0x04 */
+	U32                     Reserved2;            /* 0x0C */
+	U32                     Reserved3;            /* 0x10 */
+	U32                     Reserved4;            /* 0x14 */
+} MPI2_IOUNIT8_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT8_SENSOR,
+Mpi2IOUnit8Sensor_t, MPI2_POINTER pMpi2IOUnit8Sensor_t;
+
+/* defines for IO Unit Page 8 Sensor Flags field */
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE         (0x0008)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE         (0x0004)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE         (0x0002)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE         (0x0001)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES     (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 {
+	MPI2_CONFIG_PAGE_HEADER Header;               /* 0x00 */
+	U32                     Reserved1;            /* 0x04 */
+	U32                     Reserved2;            /* 0x08 */
+	U8                      NumSensors;           /* 0x0C */
+	U8                      PollingInterval;      /* 0x0D */
+	U16                     Reserved3;            /* 0x0E */
+	MPI2_IOUNIT8_SENSOR
+			Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_8,
+Mpi2IOUnitPage8_t, MPI2_POINTER pMpi2IOUnitPage8_t;
+
+#define MPI2_IOUNITPAGE8_PAGEVERSION                    (0x00)
+
+
+/* IO Unit Page 9 */
+
+typedef struct _MPI2_IOUNIT9_SENSOR {
+	U16                     CurrentTemperature;     /* 0x00 */
+	U16                     Reserved1;              /* 0x02 */
+	U8                      Flags;                  /* 0x04 */
+	U8                      Reserved2;              /* 0x05 */
+	U16                     Reserved3;              /* 0x06 */
+	U32                     Reserved4;              /* 0x08 */
+	U32                     Reserved5;              /* 0x0C */
+} MPI2_IOUNIT9_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT9_SENSOR,
+Mpi2IOUnit9Sensor_t, MPI2_POINTER pMpi2IOUnit9Sensor_t;
+
+/* defines for IO Unit Page 9 Sensor Flags field */
+#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID        (0x01)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES     (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 {
+	MPI2_CONFIG_PAGE_HEADER Header;                /* 0x00 */
+	U32                     Reserved1;             /* 0x04 */
+	U32                     Reserved2;             /* 0x08 */
+	U8                      NumSensors;            /* 0x0C */
+	U8                      Reserved4;             /* 0x0D */
+	U16                     Reserved3;             /* 0x0E */
+	MPI2_IOUNIT9_SENSOR
+			Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_9, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_9,
+Mpi2IOUnitPage9_t, MPI2_POINTER pMpi2IOUnitPage9_t;
+
+#define MPI2_IOUNITPAGE9_PAGEVERSION                    (0x00)
+
+
+/* IO Unit Page 10 */
+
+typedef struct _MPI2_IOUNIT10_FUNCTION {
+	U8                      CreditPercent;      /* 0x00 */
+	U8                      Reserved1;          /* 0x01 */
+	U16                     Reserved2;          /* 0x02 */
+} MPI2_IOUNIT10_FUNCTION, MPI2_POINTER PTR_MPI2_IOUNIT10_FUNCTION,
+Mpi2IOUnit10Function_t, MPI2_POINTER pMpi2IOUnit10Function_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumFunctions at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES
+#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES      (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
+	MPI2_CONFIG_PAGE_HEADER Header;                    /* 0x00 */
+	U8                      NumFunctions;             /* 0x04 */
+	U8                      Reserved1;              /* 0x05 */
+	U16                     Reserved2;              /* 0x06 */
+	U32                     Reserved3;              /* 0x08 */
+	U32                     Reserved4;		/* 0x0C */
+	MPI2_IOUNIT10_FUNCTION
+		Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_10, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_10,
+Mpi2IOUnitPage10_t, MPI2_POINTER pMpi2IOUnitPage10_t;
+
+#define MPI2_IOUNITPAGE10_PAGEVERSION                   (0x01)
+
 
 
 /****************************************************************************
@@ -1022,12 +1144,12 @@
     U32                     Reserved1;                  /* 0x04 */
     U32                     EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/* 0x08 */
     U16                     SASBroadcastPrimitiveMasks; /* 0x18 */
-    U16                     Reserved2;                  /* 0x1A */
+	U16                     SASNotifyPrimitiveMasks;    /* 0x1A */
     U32                     Reserved3;                  /* 0x1C */
 } MPI2_CONFIG_PAGE_IOC_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_7,
   Mpi2IOCPage7_t, MPI2_POINTER pMpi2IOCPage7_t;
 
-#define MPI2_IOCPAGE7_PAGEVERSION                       (0x01)
+#define MPI2_IOCPAGE7_PAGEVERSION                       (0x02)
 
 
 /* IOC Page 8 */
@@ -2070,16 +2192,16 @@
 #define MPI2_SASIOUNITPAGE8_PAGEVERSION     (0x00)
 
 /* defines for PowerManagementCapabilities field */
-#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD          (0x000001000)
-#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE        (0x000000800)
-#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE        (0x000000400)
-#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE       (0x000000200)
-#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE       (0x000000100)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD        (0x000000010)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE      (0x000000008)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE      (0x000000004)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE     (0x000000002)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE     (0x000000001)
+#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD          (0x00001000)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE        (0x00000800)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE        (0x00000400)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE       (0x00000200)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE       (0x00000100)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD        (0x00000010)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE      (0x00000008)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE      (0x00000004)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE     (0x00000002)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE     (0x00000001)
 
 
 
@@ -2266,6 +2388,7 @@
 /* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */
 
 /* values for SAS Device Page 0 Flags field */
+#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE          (0x8000)
 #define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE           (0x1000)
 #define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE           (0x0800)
 #define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY     (0x0400)
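
IO Unit Pages 8, 9 and 10 above are declared with one-element trailing
arrays; as their comments say, the real element count comes back at runtime
(NumSensors/NumFunctions), so a reader must allocate by the reported page
size and iterate by the runtime count, not the declared array bound. A hedged
sketch for Page 9 (types and defines from this header; "pg" is assumed to
point at a full-sized buffer returned by a config page read):

	static void sketch_read_temps(MPI2_CONFIG_PAGE_IO_UNIT_9 *pg)
	{
		U8 i;

		for (i = 0; i < pg->NumSensors; i++) {
			Mpi2IOUnit9Sensor_t *s = &pg->Sensor[i];

			if (s->Flags & MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID)
				printk(KERN_INFO "sensor %d: temp %d\n", i,
				    le16_to_cpu(s->CurrentTemperature));
		}
	}
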
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 1f0c190..93d9b69 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2010 LSI Corporation.
+ *  Copyright (c) 2000-2011 LSI Corporation.
  *
  *
  *           Name:  mpi2_ioc.h
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.16
+ *  mpi2_ioc.h Version:  02.00.17
  *
  *  Version History
  *  ---------------
@@ -104,6 +104,12 @@
  *  05-12-10  02.00.15  Marked Task Set Full Event as obsolete.
  *                      Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
  *  11-10-10  02.00.16  Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ *  02-23-11  02.00.17  Added SAS NOTIFY Primitive event, and added
+ *                      SASNotifyPrimitiveMasks field to
+ *                      MPI2_EVENT_NOTIFICATION_REQUEST.
+ *                      Added Temperature Threshold Event.
+ *                      Added Host Message Event.
+ *                      Added Send Host Message request and reply.
  *  --------------------------------------------------------------------------
  */
 
@@ -421,7 +427,7 @@
     U32                     Reserved6;                      /* 0x10 */
     U32                     EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];/* 0x14 */
     U16                     SASBroadcastPrimitiveMasks;     /* 0x24 */
-    U16                     Reserved7;                      /* 0x26 */
+	 U16                     SASNotifyPrimitiveMasks;        /* 0x26 */
     U32                     Reserved8;                      /* 0x28 */
 } MPI2_EVENT_NOTIFICATION_REQUEST,
   MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REQUEST,
@@ -476,6 +482,9 @@
 #define MPI2_EVENT_GPIO_INTERRUPT                   (0x0023)
 #define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY         (0x0024)
 #define MPI2_EVENT_SAS_QUIESCE                      (0x0025)
+#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE             (0x0026)
+#define MPI2_EVENT_TEMP_THRESHOLD                   (0x0027)
+#define MPI2_EVENT_HOST_MESSAGE                     (0x0028)
 
 
 /* Log Entry Added Event data */
@@ -507,6 +516,39 @@
   MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT,
   Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t;
 
+/* Temperature Threshold Event data */
+
+typedef struct _MPI2_EVENT_DATA_TEMPERATURE {
+	U16         Status;                             /* 0x00 */
+	U8          SensorNum;                          /* 0x02 */
+	U8          Reserved1;                          /* 0x03 */
+	U16         CurrentTemperature;                 /* 0x04 */
+	U16         Reserved2;                          /* 0x06 */
+	U32         Reserved3;                          /* 0x08 */
+	U32         Reserved4;                          /* 0x0C */
+} MPI2_EVENT_DATA_TEMPERATURE,
+MPI2_POINTER PTR_MPI2_EVENT_DATA_TEMPERATURE,
+Mpi2EventDataTemperature_t, MPI2_POINTER pMpi2EventDataTemperature_t;
+
+/* Temperature Threshold Event data Status bits */
+#define MPI2_EVENT_TEMPERATURE3_EXCEEDED            (0x0008)
+#define MPI2_EVENT_TEMPERATURE2_EXCEEDED            (0x0004)
+#define MPI2_EVENT_TEMPERATURE1_EXCEEDED            (0x0002)
+#define MPI2_EVENT_TEMPERATURE0_EXCEEDED            (0x0001)
+
+
+/* Host Message Event data */
+
+typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE {
+	U8          SourceVF_ID;                        /* 0x00 */
+	U8          Reserved1;                          /* 0x01 */
+	U16         Reserved2;                          /* 0x02 */
+	U32         Reserved3;                          /* 0x04 */
+	U32         HostData[1];                        /* 0x08 */
+} MPI2_EVENT_DATA_HOST_MESSAGE, MPI2_POINTER PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
+Mpi2EventDataHostMessage_t, MPI2_POINTER pMpi2EventDataHostMessage_t;
+
+
 /* Hard Reset Received Event data */
 
 typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED
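
The Status word of the new Temperature Threshold Event is a bitmask, one bit
per exceeded threshold, so a handler tests each bit independently rather than
switching on the value. Sketch (event struct and defines from the hunk above;
the handler name is hypothetical):

	static void sketch_temp_event(Mpi2EventDataTemperature_t *ev)
	{
		u16 status = le16_to_cpu(ev->Status);

		if (status & MPI2_EVENT_TEMPERATURE0_EXCEEDED)
			printk(KERN_WARNING "sensor %d passed threshold 0\n",
			    ev->SensorNum);
		if (status & MPI2_EVENT_TEMPERATURE3_EXCEEDED)
			printk(KERN_CRIT "sensor %d passed threshold 3\n",
			    ev->SensorNum);
	}
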
@@ -749,6 +791,24 @@
 #define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED               (0x07)
 #define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED               (0x08)
 
+/* SAS Notify Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE {
+	U8                      PhyNum;                     /* 0x00 */
+	U8                      Port;                       /* 0x01 */
+	U8                      Reserved1;                  /* 0x02 */
+	U8                      Primitive;                  /* 0x03 */
+} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+Mpi2EventDataSasNotifyPrimitive_t,
+MPI2_POINTER pMpi2EventDataSasNotifyPrimitive_t;
+
+/* defines for the Primitive field */
+#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP                     (0x01)
+#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED               (0x02)
+#define MPI2_EVENT_NOTIFY_RESERVED1                         (0x03)
+#define MPI2_EVENT_NOTIFY_RESERVED2                         (0x04)
+
 
 /* SAS Initiator Device Status Change Event data */
 
@@ -1001,6 +1061,53 @@
 
 
 /****************************************************************************
+*  SendHostMessage message
+****************************************************************************/
+
+/* SendHostMessage Request message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST {
+	U16                     HostDataLength;                 /* 0x00 */
+	U8                      ChainOffset;                    /* 0x02 */
+	U8                      Function;                       /* 0x03 */
+	U16                     Reserved1;                      /* 0x04 */
+	U8                      Reserved2;                      /* 0x06 */
+	U8                      MsgFlags;                       /* 0x07 */
+	U8                      VP_ID;                          /* 0x08 */
+	U8                      VF_ID;                          /* 0x09 */
+	U16                     Reserved3;                      /* 0x0A */
+	U8                      Reserved4;                      /* 0x0C */
+	U8                      DestVF_ID;                      /* 0x0D */
+	U16                     Reserved5;                      /* 0x0E */
+	U32                     Reserved6;                      /* 0x10 */
+	U32                     Reserved7;                      /* 0x14 */
+	U32                     Reserved8;                      /* 0x18 */
+	U32                     Reserved9;                      /* 0x1C */
+	U32                     Reserved10;                     /* 0x20 */
+	U32                     HostData[1];                    /* 0x24 */
+} MPI2_SEND_HOST_MESSAGE_REQUEST,
+MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
+Mpi2SendHostMessageRequest_t, MPI2_POINTER pMpi2SendHostMessageRequest_t;
+
+
+/* SendHostMessage Reply message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY {
+	U16                     HostDataLength;                 /* 0x00 */
+	U8                      MsgLength;                      /* 0x02 */
+	U8                      Function;                       /* 0x03 */
+	U16                     Reserved1;                      /* 0x04 */
+	U8                      Reserved2;                      /* 0x06 */
+	U8                      MsgFlags;                       /* 0x07 */
+	U8                      VP_ID;                          /* 0x08 */
+	U8                      VF_ID;                          /* 0x09 */
+	U16                     Reserved3;                      /* 0x0A */
+	U16                     Reserved4;                      /* 0x0C */
+	U16                     IOCStatus;                      /* 0x0E */
+	U32                     IOCLogInfo;                     /* 0x10 */
+} MPI2_SEND_HOST_MESSAGE_REPLY, MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REPLY,
+Mpi2SendHostMessageReply_t, MPI2_POINTER pMpi2SendHostMessageReply_t;
+
+
+/****************************************************************************
 *  FWDownload message
 ****************************************************************************/
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 81209ca..beda04a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -81,6 +81,15 @@
 module_param_array(missing_delay, int, NULL, 0);
 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
 
+static int mpt2sas_fwfault_debug;
+MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
+	"and halt firmware - (default=0)");
+
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+
+
 /* diag_buffer_enable is bitwise
  * bit 0 set = TRACE
  * bit 1 set = SNAPSHOT
@@ -93,14 +102,6 @@
 MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
     "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
 
-static int mpt2sas_fwfault_debug;
-MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
-    "and halt firmware - (default=0)");
-
-static int disable_discovery = -1;
-module_param(disable_discovery, int, 0);
-MODULE_PARM_DESC(disable_discovery, " disable discovery ");
-
 /**
  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
  *
@@ -691,6 +692,7 @@
 		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
 	}
 	ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
+
 	complete(&ioc->base_cmds.done);
 	return 1;
 }
@@ -3470,6 +3472,58 @@
 }
 
 /**
+ * mpt2sas_port_enable_done - command completion routine for port enable
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ *        0 means the mf is freed from this function.
+ */
+u8
+mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+	u32 reply)
+{
+	MPI2DefaultReply_t *mpi_reply;
+	u16 ioc_status;
+
+	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
+		return 1;
+
+	if (ioc->port_enable_cmds.status == MPT2_CMD_NOT_USED)
+		return 1;
+
+	ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE;
+	if (mpi_reply) {
+		ioc->port_enable_cmds.status |= MPT2_CMD_REPLY_VALID;
+		memcpy(ioc->port_enable_cmds.reply, mpi_reply,
+		    mpi_reply->MsgLength*4);
+	}
+	ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING;
+
+	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+		ioc->port_enable_failed = 1;
+
+	if (ioc->is_driver_loading) {
+		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+			mpt2sas_port_enable_complete(ioc);
+			return 1;
+		} else {
+			ioc->start_scan_failed = ioc_status;
+			ioc->start_scan = 0;
+			return 1;
+		}
+	}
+	complete(&ioc->port_enable_cmds.done);
+	return 1;
+}
+
+
+/**
  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
  * @ioc: per adapter object
  * @sleep_flag: CAN_SLEEP or NO_SLEEP
@@ -3480,67 +3534,151 @@
 _base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 {
 	Mpi2PortEnableRequest_t *mpi_request;
-	u32 ioc_state;
+	Mpi2PortEnableReply_t *mpi_reply;
 	unsigned long timeleft;
 	int r = 0;
 	u16 smid;
+	u16 ioc_status;
 
 	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
 
-	if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
+	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
 		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
 		    ioc->name, __func__);
 		return -EAGAIN;
 	}
 
-	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
+	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
 	if (!smid) {
 		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
 		    ioc->name, __func__);
 		return -EAGAIN;
 	}
 
-	ioc->base_cmds.status = MPT2_CMD_PENDING;
+	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
 	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
-	ioc->base_cmds.smid = smid;
+	ioc->port_enable_cmds.smid = smid;
 	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
-	mpi_request->VF_ID = 0; /* TODO */
-	mpi_request->VP_ID = 0;
 
+	init_completion(&ioc->port_enable_cmds.done);
 	mpt2sas_base_put_smid_default(ioc, smid);
-	init_completion(&ioc->base_cmds.done);
-	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+	timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
 	    300*HZ);
-	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
+	if (!(ioc->port_enable_cmds.status & MPT2_CMD_COMPLETE)) {
 		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
 		    ioc->name, __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2PortEnableRequest_t)/4);
-		if (ioc->base_cmds.status & MPT2_CMD_RESET)
+		if (ioc->port_enable_cmds.status & MPT2_CMD_RESET)
 			r = -EFAULT;
 		else
 			r = -ETIME;
 		goto out;
-	} else
-		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
-		    ioc->name, __func__));
+	}
+	mpi_reply = ioc->port_enable_cmds.reply;
 
-	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_OPERATIONAL,
-	    60, sleep_flag);
-	if (ioc_state) {
-		printk(MPT2SAS_ERR_FMT "%s: failed going to operational state "
-		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
+	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		printk(MPT2SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n",
+		    ioc->name, __func__, ioc_status);
 		r = -EFAULT;
+		goto out;
 	}
  out:
-	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
-	printk(MPT2SAS_INFO_FMT "port enable: %s\n",
-	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
+	printk(MPT2SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
+	    "SUCCESS" : "FAILED"));
 	return r;
 }
 
 /**
+ * mpt2sas_port_enable - initiate firmware discovery (don't wait for reply)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc)
+{
+	Mpi2PortEnableRequest_t *mpi_request;
+	u16 smid;
+
+	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
+
+	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
+		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
+		    ioc->name, __func__);
+		return -EAGAIN;
+	}
+
+	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+	if (!smid) {
+		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+		    ioc->name, __func__);
+		return -EAGAIN;
+	}
+
+	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
+	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+	ioc->port_enable_cmds.smid = smid;
+	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+	mpt2sas_base_put_smid_default(ioc, smid);
+	return 0;
+}
+
+/**
+ * _base_determine_wait_on_discovery - discovery wait disposition
+ * @ioc: per adapter object
+ *
+ * Decide whether to wait on discovery to complete. Used to either
+ * locate boot device, or report volumes ahead of physical devices.
+ *
+ * Returns 1 for wait, 0 for don't wait
+ */
+static int
+_base_determine_wait_on_discovery(struct MPT2SAS_ADAPTER *ioc)
+{
+	/* We wait for discovery to complete if IR firmware is loaded.
+	 * The sas topology events arrive before PD events, so we need time to
+	 * turn on the bit in ioc->pd_handles to indicate a PD.
+	 * Also, it may be required to report Volumes ahead of physical
+	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
+	 */
+	if (ioc->ir_firmware)
+		return 1;
+
+	/* if no Bios, then we don't need to wait */
+	if (!ioc->bios_pg3.BiosVersion)
+		return 0;
+
+	/* Bios is present, so we drop down here.
+	 *
+	 * If there are any entries in the Bios Page 2, then we wait
+	 * for discovery to complete.
+	 */
+
+	/* Current Boot Device */
+	if ((ioc->bios_pg2.CurrentBootDeviceForm &
+	    MPI2_BIOSPAGE2_FORM_MASK) ==
+	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+	/* Request Boot Device */
+	   (ioc->bios_pg2.ReqBootDeviceForm &
+	    MPI2_BIOSPAGE2_FORM_MASK) ==
+	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+	/* Alternate Request Boot Device */
+	   (ioc->bios_pg2.ReqAltBootDeviceForm &
+	    MPI2_BIOSPAGE2_FORM_MASK) ==
+	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
+		return 0;
+
+	return 1;
+}
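
Splitting port enable into the fire-and-forget mpt2sas_port_enable() plus the
mpt2sas_port_enable_done() callback is what enables asynchronous SCSI host
scanning: scan_start kicks off discovery and scan_finished polls a flag the
callback clears. A condensed sketch of that contract (ioc fields and
functions from this patch; error handling simplified):

	static void sketch_scan_start(struct Scsi_Host *shost)
	{
		struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

		ioc->start_scan = 1;
		if (mpt2sas_port_enable(ioc))	/* returns without waiting */
			ioc->start_scan = 0;	/* could not even start */
	}

	static int sketch_scan_finished(struct Scsi_Host *shost,
					unsigned long time)
	{
		struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

		/* start_scan is cleared by mpt2sas_port_enable_done(),
		 * with any failure code left in ioc->start_scan_failed.
		 */
		return !ioc->start_scan;
	}
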
+
+
+/**
  * _base_unmask_events - turn on notification for this event
  * @ioc: per adapter object
  * @event: firmware event
@@ -3962,6 +4100,7 @@
  skip_init_reply_post_host_index:
 
 	_base_unmask_interrupts(ioc);
+
 	r = _base_event_notification(ioc, sleep_flag);
 	if (r)
 		return r;
@@ -3969,7 +4108,18 @@
 	if (sleep_flag == CAN_SLEEP)
 		_base_static_config_pages(ioc);
 
-	if (ioc->wait_for_port_enable_to_complete && ioc->is_warpdrive) {
+
+	if (ioc->is_driver_loading) {
+		ioc->wait_for_discovery_to_complete =
+		    _base_determine_wait_on_discovery(ioc);
+		return r; /* scan_start and scan_finished support */
+	}
+
+	if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
 		if (ioc->manu_pg10.OEMIdentifier  == 0x80) {
 			hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
 			    MFG_PAGE10_HIDE_SSDS_MASK);
@@ -3978,13 +4128,6 @@
 		}
 	}
 
-	if (ioc->wait_for_port_enable_to_complete) {
-		if (diag_buffer_enable != 0)
-			mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
-		if (disable_discovery > 0)
-			return r;
-	}
-
 	r = _base_send_port_enable(ioc, sleep_flag);
 	if (r)
 		return r;
@@ -4121,6 +4264,10 @@
 	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
 	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
 
+	/* port_enable command bits */
+	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
+
 	/* transport internal command bits */
 	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
 	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
@@ -4162,8 +4309,6 @@
 		goto out_free_resources;
 	}
 
-	init_completion(&ioc->shost_recovery_done);
-
 	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
 		ioc->event_masks[i] = -1;
 
@@ -4186,7 +4331,6 @@
 		_base_update_missing_delay(ioc, missing_delay[0],
 		    missing_delay[1]);
 
-	mpt2sas_base_start_watchdog(ioc);
 	return 0;
 
  out_free_resources:
@@ -4204,6 +4348,7 @@
 	kfree(ioc->scsih_cmds.reply);
 	kfree(ioc->config_cmds.reply);
 	kfree(ioc->base_cmds.reply);
+	kfree(ioc->port_enable_cmds.reply);
 	kfree(ioc->ctl_cmds.reply);
 	kfree(ioc->ctl_cmds.sense);
 	kfree(ioc->pfacts);
@@ -4243,6 +4388,7 @@
 	kfree(ioc->ctl_cmds.reply);
 	kfree(ioc->ctl_cmds.sense);
 	kfree(ioc->base_cmds.reply);
+	kfree(ioc->port_enable_cmds.reply);
 	kfree(ioc->tm_cmds.reply);
 	kfree(ioc->transport_cmds.reply);
 	kfree(ioc->scsih_cmds.reply);
@@ -4284,6 +4430,20 @@
 			mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
 			complete(&ioc->base_cmds.done);
 		}
+		if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
+			ioc->port_enable_failed = 1;
+			ioc->port_enable_cmds.status |= MPT2_CMD_RESET;
+			mpt2sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+			if (ioc->is_driver_loading) {
+				ioc->start_scan_failed =
+				    MPI2_IOCSTATUS_INTERNAL_ERROR;
+				ioc->start_scan = 0;
+				ioc->port_enable_cmds.status =
+						MPT2_CMD_NOT_USED;
+			} else
+				complete(&ioc->port_enable_cmds.done);
+
+		}
 		if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
 			ioc->config_cmds.status |= MPT2_CMD_RESET;
 			mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
@@ -4349,7 +4509,6 @@
 {
 	int r;
 	unsigned long flags;
-	u8 pe_complete = ioc->wait_for_port_enable_to_complete;
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
@@ -4396,7 +4555,8 @@
 	/* If this hard reset is called while port enable is active, then
 	 * there is no reason to call make_ioc_operational
 	 */
-	if (pe_complete) {
+	if (ioc->is_driver_loading && ioc->port_enable_failed) {
+		ioc->remove_host = 1;
 		r = -EFAULT;
 		goto out;
 	}
@@ -4410,7 +4570,6 @@
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 	ioc->ioc_reset_in_progress_status = r;
 	ioc->shost_recovery = 0;
-	complete(&ioc->shost_recovery_done);
 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 	mutex_unlock(&ioc->reset_in_progress_mutex);
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 59354db..3c3babc 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"09.100.00.01"
-#define MPT2SAS_MAJOR_VERSION		09
+#define MPT2SAS_DRIVER_VERSION		"10.100.00.00"
+#define MPT2SAS_MAJOR_VERSION		10
 #define MPT2SAS_MINOR_VERSION		100
 #define MPT2SAS_BUILD_VERSION		00
-#define MPT2SAS_RELEASE_VERSION		01
+#define MPT2SAS_RELEASE_VERSION		00
 
 /*
  * Set MPT2SAS_SG_DEPTH value based on user input.
@@ -655,7 +655,12 @@
  * @ignore_loginfos: ignore loginfos during task management
  * @remove_host: flag for when driver unloads, to avoid sending dev resets
  * @pci_error_recovery: flag to prevent ioc access until slot reset completes
- * @wait_for_port_enable_to_complete:
+ * @wait_for_discovery_to_complete: flag set at driver load time when
+ *      waiting on reporting devices
+ * @is_driver_loading: flag set at driver load time
+ * @port_enable_failed: flag set when port enable has failed
+ * @start_scan: flag set from scan_start callback, cleared from _mpt2sas_fw_work
+ * @start_scan_failed: set when port enable fails; holds the failing ioc_status
  * @msix_enable: flag indicating msix is enabled
  * @msix_vector_count: number msix vectors
  * @cpu_msix_table: table for mapping cpus to msix index
@@ -790,15 +795,20 @@
 	u8		shost_recovery;
 
 	struct mutex	reset_in_progress_mutex;
-	struct completion	shost_recovery_done;
 	spinlock_t 	ioc_reset_in_progress_lock;
 	u8		ioc_link_reset_in_progress;
-	int		ioc_reset_in_progress_status;
+	u8		ioc_reset_in_progress_status;
 
 	u8		ignore_loginfos;
 	u8		remove_host;
 	u8		pci_error_recovery;
-	u8		wait_for_port_enable_to_complete;
+	u8		wait_for_discovery_to_complete;
+	struct completion	port_enable_done;
+	u8		is_driver_loading;
+	u8		port_enable_failed;
+
+	u8		start_scan;
+	u16		start_scan_failed;
 
 	u8		msix_enable;
 	u16		msix_vector_count;
@@ -814,11 +824,13 @@
 	u8		scsih_cb_idx;
 	u8		ctl_cb_idx;
 	u8		base_cb_idx;
+	u8		port_enable_cb_idx;
 	u8		config_cb_idx;
 	u8		tm_tr_cb_idx;
 	u8		tm_tr_volume_cb_idx;
 	u8		tm_sas_control_cb_idx;
 	struct _internal_cmd base_cmds;
+	struct _internal_cmd port_enable_cmds;
 	struct _internal_cmd transport_cmds;
 	struct _internal_cmd scsih_cmds;
 	struct _internal_cmd tm_cmds;
@@ -1001,6 +1013,8 @@
 
 u8 mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
     u32 reply);
+u8 mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid,
+	u8 msix_index,	u32 reply);
 void *mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr);
 
 u32 mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked);
@@ -1015,6 +1029,8 @@
 
 void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
 
+int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
+
 /* scsih shared API */
 u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
     u32 reply);
@@ -1032,6 +1048,8 @@
 struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address(
     struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
 
+void mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc);
+
 void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
 
 /* config shared API */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 2b11010..36ea0b2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -1356,6 +1356,9 @@
 	Mpi2ConfigReply_t mpi_reply;
 	int r, i, config_page_sz;
 	u16 ioc_status;
+	int config_num;
+	u16 element_type;
+	u16 phys_disk_dev_handle;
 
 	*volume_handle = 0;
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
@@ -1371,35 +1374,53 @@
 	if (r)
 		goto out;
 
-	mpi_request.PageAddress =
-	    cpu_to_le32(MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
 	config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
 	config_page = kmalloc(config_page_sz, GFP_KERNEL);
-	if (!config_page)
+	if (!config_page) {
+		r = -1;
 		goto out;
-	r = _config_request(ioc, &mpi_request, &mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
-	    config_page_sz);
-	if (r)
-		goto out;
-
-	r = -1;
-	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
-	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
-		goto out;
-	for (i = 0; i < config_page->NumElements; i++) {
-		if ((le16_to_cpu(config_page->ConfigElement[i].ElementFlags) &
-		    MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) !=
-		    MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT)
-			continue;
-		if (le16_to_cpu(config_page->ConfigElement[i].
-		    PhysDiskDevHandle) == pd_handle) {
-			*volume_handle = le16_to_cpu(config_page->
-			    ConfigElement[i].VolDevHandle);
-			r = 0;
+	}
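+	/* walk all RAID configs; 0xff + GET_NEXT_CONFIGNUM yields the first */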
+	config_num = 0xff;
+	while (1) {
+		mpi_request.PageAddress = cpu_to_le32(config_num +
+		    MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
+		r = _config_request(ioc, &mpi_request, &mpi_reply,
+		    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+		    config_page_sz);
+		if (r)
 			goto out;
+		r = -1;
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+			goto out;
+		for (i = 0; i < config_page->NumElements; i++) {
+			element_type = le16_to_cpu(config_page->
+			    ConfigElement[i].ElementFlags) &
+			    MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
+			if (element_type ==
+			    MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT ||
+			    element_type ==
+			    MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
+				phys_disk_dev_handle =
+				    le16_to_cpu(config_page->ConfigElement[i].
+				    PhysDiskDevHandle);
+				if (phys_disk_dev_handle == pd_handle) {
+					*volume_handle =
+					    le16_to_cpu(config_page->
+					    ConfigElement[i].VolDevHandle);
+					r = 0;
+					goto out;
+				}
+			} else if (element_type ==
+			    MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
+				*volume_handle = 0;
+				r = 0;
+				goto out;
+			}
 		}
+		config_num = config_page->ConfigNum;
 	}
  out:
 	kfree(config_page);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 9adb013..aabcb91 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -1207,6 +1207,9 @@
 	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
 		return -ENODEV;
 
+	if (ioc->shost_recovery || ioc->pci_error_recovery ||
+		ioc->is_driver_loading)
+		return -EAGAIN;
 	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
 
@@ -2178,7 +2181,8 @@
 		    !ioc)
 			return -ENODEV;
 
-		if (ioc->shost_recovery || ioc->pci_error_recovery)
+		if (ioc->shost_recovery || ioc->pci_error_recovery ||
+				ioc->is_driver_loading)
 			return -EAGAIN;
 
 		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
@@ -2297,7 +2301,8 @@
 	if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
 		return -ENODEV;
 
-	if (ioc->shost_recovery || ioc->pci_error_recovery)
+	if (ioc->shost_recovery || ioc->pci_error_recovery ||
+			ioc->is_driver_loading)
 		return -EAGAIN;
 
 	memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 1da1aa1..8889b1b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -71,6 +71,9 @@
 
 static u8 _scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid);
 
+static void _scsih_scan_start(struct Scsi_Host *shost);
+static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);
+
 /* global parameters */
 LIST_HEAD(mpt2sas_ioc_list);
 
@@ -79,6 +82,7 @@
 static u8 tm_cb_idx = -1;
 static u8 ctl_cb_idx = -1;
 static u8 base_cb_idx = -1;
+static u8 port_enable_cb_idx = -1;
 static u8 transport_cb_idx = -1;
 static u8 scsih_cb_idx = -1;
 static u8 config_cb_idx = -1;
@@ -103,6 +107,18 @@
 module_param(max_lun, int, 0);
 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
 
+/* diag_buffer_enable is bitwise
+ * bit 0 set = TRACE
+ * bit 1 set = SNAPSHOT
+ * bit 2 set = EXTENDED
+ *
+ * Any combination of these bits can be set
+ */
+static int diag_buffer_enable = -1;
+module_param(diag_buffer_enable, int, 0);
+MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
+	"(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+
 /**
  * struct sense_info - common structure for obtaining sense keys
  * @skey: sense key
@@ -117,8 +133,8 @@
 
 
 #define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
-#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
-
+#define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD)
+#define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
 /**
  * struct fw_event_work - firmware event struct
  * @list: link list framework
@@ -372,31 +388,34 @@
 	Mpi2SasDevicePage0_t sas_device_pg0;
 	Mpi2ConfigReply_t mpi_reply;
 	u32 ioc_status;
+	*sas_address = 0;
 
 	if (handle <= ioc->sas_hba.num_phys) {
 		*sas_address = ioc->sas_hba.sas_address;
 		return 0;
-	} else
-		*sas_address = 0;
+	}
 
 	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+		    __FILE__, __LINE__, __func__);
 		return -ENXIO;
 	}
 
-	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
-	    MPI2_IOCSTATUS_MASK;
-	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
-		    "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
-		     __FILE__, __LINE__, __func__);
-		return -EIO;
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+		*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+		return 0;
 	}
 
-	*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
-	return 0;
+	/* we hit this because the given parent handle doesn't exist */
+	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+		return -ENXIO;
+	/* else error case */
+	printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x), "
+	    "failure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
+	     __FILE__, __LINE__, __func__);
+	return -EIO;
 }
 
 /**
@@ -424,7 +443,11 @@
 	u16 slot;
 
 	 /* only process this function when driver loads */
-	if (!ioc->wait_for_port_enable_to_complete)
+	if (!ioc->is_driver_loading)
+		return;
+
+	 /* no BIOS, return immediately */
+	if (!ioc->bios_pg3.BiosVersion)
 		return;
 
 	if (!is_raid) {
@@ -587,8 +610,15 @@
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
 	if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
-	     sas_device->sas_address_parent))
+	     sas_device->sas_address_parent)) {
 		_scsih_sas_device_remove(ioc, sas_device);
+	} else if (!sas_device->starget) {
+		if (!ioc->is_driver_loading)
+			mpt2sas_transport_port_remove(ioc,
+			    sas_device->sas_address,
+			    sas_device->sas_address_parent);
+		_scsih_sas_device_remove(ioc, sas_device);
+	}
 }
 
 /**
@@ -1400,6 +1430,10 @@
 {
 	struct MPT2SAS_TARGET *sas_target_priv_data;
 	struct scsi_target *starget;
+	struct Scsi_Host *shost;
+	struct MPT2SAS_ADAPTER *ioc;
+	struct _sas_device *sas_device;
+	unsigned long flags;
 
 	if (!sdev->hostdata)
 		return;
@@ -1407,6 +1441,19 @@
 	starget = scsi_target(sdev);
 	sas_target_priv_data = starget->hostdata;
 	sas_target_priv_data->num_luns--;
+
+	shost = dev_to_shost(&starget->dev);
+	ioc = shost_priv(shost);
+
+	if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+		spin_lock_irqsave(&ioc->sas_device_lock, flags);
+		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+		   sas_target_priv_data->sas_address);
+		if (sas_device)
+			sas_device->starget = NULL;
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	}
+
 	kfree(sdev->hostdata);
 	sdev->hostdata = NULL;
 }
@@ -1598,8 +1645,10 @@
  * _scsih_get_volume_capabilities - volume capabilities
  * @ioc: per adapter object
  * @sas_device: the raid_device object
+ *
+ * Returns 0 for success, else 1
  */
-static void
+static int
 _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
     struct _raid_device *raid_device)
 {
@@ -1612,9 +1661,10 @@
 
 	if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle,
 	    &num_pds)) || !num_pds) {
-		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
-		return;
+		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+		    __func__));
+		return 1;
 	}
 
 	raid_device->num_pds = num_pds;
@@ -1622,17 +1672,19 @@
 	    sizeof(Mpi2RaidVol0PhysDisk_t));
 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
 	if (!vol_pg0) {
-		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
-		return;
+		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+		    __func__));
+		return 1;
 	}
 
 	if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
-		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+		    __func__));
 		kfree(vol_pg0);
-		return;
+		return 1;
 	}
 
 	raid_device->volume_type = vol_pg0->VolumeType;
@@ -1652,6 +1704,7 @@
 	}
 
 	kfree(vol_pg0);
+	return 0;
 }
 /**
  * _scsih_disable_ddio - Disable direct I/O for all the volumes
@@ -1922,13 +1975,19 @@
 		     sas_target_priv_data->handle);
 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 		if (!raid_device) {
-			printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
-			return 0;
+			dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+			    "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+			    __LINE__, __func__));
+			return 1;
 		}
 
-		_scsih_get_volume_capabilities(ioc, raid_device);
 
+		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
+			dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+			    "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+			    __LINE__, __func__));
+			return 1;
+		}
 		/*
 		 * WARPDRIVE: Initialize the required data for Direct IO
 		 */
@@ -2002,11 +2062,22 @@
 	if (sas_device) {
 		if (sas_target_priv_data->flags &
 		    MPT_TARGET_FLAGS_RAID_COMPONENT) {
-			mpt2sas_config_get_volume_handle(ioc,
-			    sas_device->handle, &sas_device->volume_handle);
-			mpt2sas_config_get_volume_wwid(ioc,
+			if (mpt2sas_config_get_volume_handle(ioc,
+			    sas_device->handle, &sas_device->volume_handle)) {
+				dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+				    "failure at %s:%d/%s()!\n", ioc->name,
+				    __FILE__, __LINE__, __func__));
+				return 1;
+			}
+			if (sas_device->volume_handle &&
+			    mpt2sas_config_get_volume_wwid(ioc,
 			    sas_device->volume_handle,
-			    &sas_device->volume_wwid);
+			    &sas_device->volume_wwid)) {
+				dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+				    "failure at %s:%d/%s()!\n", ioc->name,
+				    __FILE__, __LINE__, __func__));
+				return 1;
+			}
 		}
 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
 			qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
@@ -2035,6 +2106,11 @@
 
 		if (!ssp_target)
 			_scsih_display_sata_capabilities(ioc, sas_device, sdev);
+	} else {
+		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+		    __func__));
+		return 1;
 	}
 
 	_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
@@ -2714,22 +2790,41 @@
 
 
 /**
- * _scsih_queue_rescan - queue a topology rescan from user context
+ * _scsih_error_recovery_delete_devices - remove devices not responding
  * @ioc: per adapter object
  *
  * Return nothing.
  */
 static void
-_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
+_scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
 {
 	struct fw_event_work *fw_event;
 
-	if (ioc->wait_for_port_enable_to_complete)
+	if (ioc->is_driver_loading)
 		return;
+	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+	if (!fw_event)
+		return;
+	fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
+	fw_event->ioc = ioc;
+	_scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * mpt2sas_port_enable_complete - port enable completed (fake event)
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc)
+{
+	struct fw_event_work *fw_event;
+
 	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
 	if (!fw_event)
 		return;
-	fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
+	fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE;
 	fw_event->ioc = ioc;
 	_scsih_fw_event_add(ioc, fw_event);
 }
@@ -2977,14 +3069,27 @@
 	Mpi2SCSITaskManagementRequest_t *mpi_request;
 	u16 smid;
 	struct _sas_device *sas_device;
-	struct MPT2SAS_TARGET *sas_target_priv_data;
+	struct MPT2SAS_TARGET *sas_target_priv_data = NULL;
+	u64 sas_address = 0;
 	unsigned long flags;
 	struct _tr_list *delayed_tr;
+	u32 ioc_state;
 
-	if (ioc->shost_recovery || ioc->remove_host ||
-	    ioc->pci_error_recovery) {
-		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
-		   "progress!\n", __func__, ioc->name));
+	if (ioc->remove_host) {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been "
+		    "removed: handle(0x%04x)\n", ioc->name, __func__, handle));
+		return;
+	} else if (ioc->pci_error_recovery) {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci "
+		    "error recovery: handle(0x%04x)\n", ioc->name, __func__,
+		    handle));
+		return;
+	}
+	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not "
+		    "operational: handle(0x%04x)\n", ioc->name, __func__,
+		    handle));
 		return;
 	}
 
@@ -2998,13 +3103,19 @@
 	     sas_device->starget->hostdata) {
 		sas_target_priv_data = sas_device->starget->hostdata;
 		sas_target_priv_data->deleted = 1;
-		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
-		    "setting delete flag: handle(0x%04x), "
-		    "sas_addr(0x%016llx)\n", ioc->name, handle,
-		    (unsigned long long) sas_device->sas_address));
+		sas_address = sas_device->sas_address;
 	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
+	if (sas_target_priv_data) {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "setting delete flag: "
+		    "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle,
+		    (unsigned long long)sas_address));
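+		/* deleted flag is set, so unblock and let pending I/O fail back */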
+		_scsih_ublock_io_device(ioc, handle);
+		sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE;
+	}
+
 	smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
 	if (!smid) {
 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
@@ -3185,11 +3295,21 @@
 	    mpt2sas_base_get_reply_virt_addr(ioc, reply);
 	Mpi2SasIoUnitControlRequest_t *mpi_request;
 	u16 smid_sas_ctrl;
+	u32 ioc_state;
 
-	if (ioc->shost_recovery || ioc->remove_host ||
-	    ioc->pci_error_recovery) {
-		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
-		   "progress!\n", __func__, ioc->name));
+	if (ioc->remove_host) {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been "
+		    "removed\n", ioc->name, __func__));
+		return 1;
+	} else if (ioc->pci_error_recovery) {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci "
+		    "error recovery\n", ioc->name, __func__));
+		return 1;
+	}
+	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not "
+		    "operational\n", ioc->name, __func__));
 		return 1;
 	}
 
@@ -5099,7 +5219,7 @@
 	/* get device name */
 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
 
-	if (ioc->wait_for_port_enable_to_complete)
+	if (ioc->wait_for_discovery_to_complete)
 		_scsih_sas_device_init_add(ioc, sas_device);
 	else
 		_scsih_sas_device_add(ioc, sas_device);
@@ -5135,6 +5255,9 @@
 	if (sas_device_backup.starget && sas_device_backup.starget->hostdata) {
 		sas_target_priv_data = sas_device_backup.starget->hostdata;
 		sas_target_priv_data->deleted = 1;
+		_scsih_ublock_io_device(ioc, sas_device_backup.handle);
+		sas_target_priv_data->handle =
+		     MPT2SAS_INVALID_DEVICE_HANDLE;
 	}
 
 	_scsih_ublock_io_device(ioc, sas_device_backup.handle);
@@ -5288,7 +5411,7 @@
 		_scsih_sas_topology_change_event_debug(ioc, event_data);
 #endif
 
-	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
+	if (ioc->remove_host || ioc->pci_error_recovery)
 		return;
 
 	if (!ioc->sas_hba.num_phys)
@@ -5349,6 +5472,9 @@
 		switch (reason_code) {
 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
 
+			if (ioc->shost_recovery)
+				break;
+
 			if (link_rate == prev_link_rate)
 				break;
 
@@ -5362,6 +5488,9 @@
 			break;
 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
 
+			if (ioc->shost_recovery)
+				break;
+
 			mpt2sas_transport_update_links(ioc, sas_address,
 			    handle, phy_number, link_rate);
 
@@ -5622,7 +5751,7 @@
 	termination_count = 0;
 	query_count = 0;
 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
-		if (ioc->ioc_reset_in_progress_status)
+		if (ioc->shost_recovery)
 			goto out;
 		scmd = _scsih_scsi_lookup_get(ioc, smid);
 		if (!scmd)
@@ -5644,7 +5773,7 @@
 		lun = sas_device_priv_data->lun;
 		query_count++;
 
-		if (ioc->ioc_reset_in_progress_status)
+		if (ioc->shost_recovery)
 			goto out;
 
 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -5686,7 +5815,7 @@
 			goto broadcast_aen_retry;
 		}
 
-		if (ioc->ioc_reset_in_progress_status)
+		if (ioc->shost_recovery)
 			goto out_no_lock;
 
 		r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
@@ -5725,7 +5854,7 @@
 	    ioc->name, __func__, query_count, termination_count));
 
 	ioc->broadcast_aen_busy = 0;
-	if (!ioc->ioc_reset_in_progress_status)
+	if (!ioc->shost_recovery)
 		_scsih_ublock_io_all_device(ioc);
 	mutex_unlock(&ioc->tm_cmds.mutex);
 }
@@ -5789,8 +5918,11 @@
 static void
 _scsih_reprobe_target(struct scsi_target *starget, int no_uld_attach)
 {
-	struct MPT2SAS_TARGET *sas_target_priv_data = starget->hostdata;
+	struct MPT2SAS_TARGET *sas_target_priv_data;
 
+	if (starget == NULL)
+		return;
+	sas_target_priv_data = starget->hostdata;
 	if (no_uld_attach)
 		sas_target_priv_data->flags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
 	else
@@ -5845,7 +5977,7 @@
 	raid_device->handle = handle;
 	raid_device->wwid = wwid;
 	_scsih_raid_device_add(ioc, raid_device);
-	if (!ioc->wait_for_port_enable_to_complete) {
+	if (!ioc->wait_for_discovery_to_complete) {
 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
 		    raid_device->id, 0);
 		if (rc)
@@ -6127,6 +6259,10 @@
 		_scsih_sas_ir_config_change_event_debug(ioc, event_data);
 
 #endif
+
+	if (ioc->shost_recovery)
+		return;
+
 	foreign_config = (le32_to_cpu(event_data->Flags) &
 	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
 
@@ -6185,6 +6321,9 @@
 	int rc;
 	Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
 
+	if (ioc->shost_recovery)
+		return;
+
 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
 		return;
 
@@ -6267,6 +6406,9 @@
 	Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
 	u64 sas_address;
 
+	if (ioc->shost_recovery)
+		return;
+
 	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
 		return;
 
@@ -6510,10 +6652,10 @@
 	u32 device_info;
 	u16 slot;
 
-	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
+	printk(MPT2SAS_INFO_FMT "search for end-devices: start\n", ioc->name);
 
 	if (list_empty(&ioc->sas_device_list))
-		return;
+		goto out;
 
 	handle = 0xFFFF;
 	while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
@@ -6532,6 +6674,9 @@
 		_scsih_mark_responding_sas_device(ioc, sas_address, slot,
 		    handle);
 	}
+out:
+	printk(MPT2SAS_INFO_FMT "search for end-devices: complete\n",
+	    ioc->name);
 }
 
 /**
@@ -6607,10 +6752,14 @@
 	u16 handle;
 	u8 phys_disk_num;
 
-	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
+	if (!ioc->ir_firmware)
+		return;
+
+	printk(MPT2SAS_INFO_FMT "search for raid volumes: start\n",
+	    ioc->name);
 
 	if (list_empty(&ioc->raid_device_list))
-		return;
+		goto out;
 
 	handle = 0xFFFF;
 	while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
@@ -6649,6 +6798,9 @@
 			set_bit(handle, ioc->pd_handles);
 		}
 	}
+out:
+	printk(MPT2SAS_INFO_FMT "search for raid volumes: complete\n",
+	    ioc->name);
 }
 
 /**
@@ -6708,10 +6860,10 @@
 	u64 sas_address;
 	u16 handle;
 
-	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
+	printk(MPT2SAS_INFO_FMT "search for expanders: start\n", ioc->name);
 
 	if (list_empty(&ioc->sas_expander_list))
-		return;
+		goto out;
 
 	handle = 0xFFFF;
 	while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
@@ -6730,6 +6882,8 @@
 		_scsih_mark_responding_expander(ioc, sas_address, handle);
 	}
 
+ out:
+	printk(MPT2SAS_INFO_FMT "search for expanders: complete\n", ioc->name);
 }
 
 /**
@@ -6745,6 +6899,8 @@
 	struct _sas_node *sas_expander;
 	struct _raid_device *raid_device, *raid_device_next;
 
+	printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n",
+	    ioc->name);
 
 	list_for_each_entry_safe(sas_device, sas_device_next,
 	    &ioc->sas_device_list, list) {
@@ -6764,6 +6920,9 @@
 		_scsih_remove_device(ioc, sas_device);
 	}
 
+	if (!ioc->ir_firmware)
+		goto retry_expander_search;
+
 	list_for_each_entry_safe(raid_device, raid_device_next,
 	    &ioc->raid_device_list, list) {
 		if (raid_device->responding) {
@@ -6790,52 +6949,177 @@
 		mpt2sas_expander_remove(ioc, sas_expander->sas_address);
 		goto retry_expander_search;
 	}
+	printk(MPT2SAS_INFO_FMT "removing unresponding devices: complete\n",
+	    ioc->name);
+	/* unblock devices */
+	_scsih_ublock_io_all_device(ioc);
+}
+
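+/**
+ * _scsih_refresh_expander_links - refresh phy links of an existing expander
+ * @ioc: per adapter object
+ * @sas_expander: the sas_node object
+ * @handle: expander device handle
+ *
+ * Reads expander page 1 per phy and updates the transport layer links.
+ */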
+static void
+_scsih_refresh_expander_links(struct MPT2SAS_ADAPTER *ioc,
+	struct _sas_node *sas_expander, u16 handle)
+{
+	Mpi2ExpanderPage1_t expander_pg1;
+	Mpi2ConfigReply_t mpi_reply;
+	int i;
+
+	for (i = 0 ; i < sas_expander->num_phys ; i++) {
+		if ((mpt2sas_config_get_expander_pg1(ioc, &mpi_reply,
+		    &expander_pg1, i, handle))) {
+			printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+			    ioc->name, __FILE__, __LINE__, __func__);
+			return;
+		}
+
+		mpt2sas_transport_update_links(ioc, sas_expander->sas_address,
+		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
+		    expander_pg1.NegotiatedLinkRate >> 4);
+	}
 }
 
 /**
- * _scsih_hide_unhide_sas_devices - add/remove device to/from OS
+ * _scsih_scan_for_devices_after_reset - scan for devices after host reset
  * @ioc: per adapter object
  *
  * Return nothing.
  */
 static void
-_scsih_hide_unhide_sas_devices(struct MPT2SAS_ADAPTER *ioc)
+_scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 {
-	struct _sas_device *sas_device, *sas_device_next;
+	Mpi2ExpanderPage0_t expander_pg0;
+	Mpi2SasDevicePage0_t sas_device_pg0;
+	Mpi2RaidVolPage1_t volume_pg1;
+	Mpi2RaidVolPage0_t volume_pg0;
+	Mpi2RaidPhysDiskPage0_t pd_pg0;
+	Mpi2EventIrConfigElement_t element;
+	Mpi2ConfigReply_t mpi_reply;
+	u8 phys_disk_num;
+	u16 ioc_status;
+	u16 handle, parent_handle;
+	u64 sas_address;
+	struct _sas_device *sas_device;
+	struct _sas_node *expander_device;
+	struct _raid_device *raid_device;
 
-	if (!ioc->is_warpdrive || ioc->mfg_pg10_hide_flag !=
-	    MFG_PAGE10_HIDE_IF_VOL_PRESENT)
-		return;
+	printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
 
-	if (ioc->hide_drives) {
-		if (_scsih_get_num_volumes(ioc))
-			return;
-		ioc->hide_drives = 0;
-		list_for_each_entry_safe(sas_device, sas_device_next,
-		    &ioc->sas_device_list, list) {
-			if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
-				sas_device->sas_address_parent)) {
-				_scsih_sas_device_remove(ioc, sas_device);
-			} else if (!sas_device->starget) {
-				mpt2sas_transport_port_remove(ioc,
-				    sas_device->sas_address,
-				    sas_device->sas_address_parent);
-				_scsih_sas_device_remove(ioc, sas_device);
-			}
-		}
-	} else {
-		if (!_scsih_get_num_volumes(ioc))
-			return;
-		ioc->hide_drives = 1;
-		list_for_each_entry_safe(sas_device, sas_device_next,
-		    &ioc->sas_device_list, list) {
-			mpt2sas_transport_port_remove(ioc,
-			    sas_device->sas_address,
-			    sas_device->sas_address_parent);
+	_scsih_sas_host_refresh(ioc);
+
+	/* expanders */
+	handle = 0xFFFF;
+	while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+			break;
+		handle = le16_to_cpu(expander_pg0.DevHandle);
+		expander_device = mpt2sas_scsih_expander_find_by_sas_address(
+		    ioc, le64_to_cpu(expander_pg0.SASAddress));
+		if (expander_device)
+			_scsih_refresh_expander_links(ioc, expander_device,
+			    handle);
+		else
+			_scsih_expander_add(ioc, handle);
+	}
+
+	if (!ioc->ir_firmware)
+		goto skip_to_sas;
+
+	/* phys disk */
+	phys_disk_num = 0xFF;
+	while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+	    phys_disk_num))) {
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+			break;
+		phys_disk_num = pd_pg0.PhysDiskNum;
+		handle = le16_to_cpu(pd_pg0.DevHandle);
+		sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+		if (sas_device)
+			continue;
+		if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+		    handle) != 0)
+			continue;
+		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+		if (!_scsih_get_sas_address(ioc, parent_handle,
+		    &sas_address)) {
+			mpt2sas_transport_update_links(ioc, sas_address,
+			    handle, sas_device_pg0.PhyNum,
+			    MPI2_SAS_NEG_LINK_RATE_1_5);
+			set_bit(handle, ioc->pd_handles);
+			_scsih_add_device(ioc, handle, 0, 1);
 		}
 	}
+
+	/* volumes */
+	handle = 0xFFFF;
+	while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+			break;
+		handle = le16_to_cpu(volume_pg1.DevHandle);
+		raid_device = _scsih_raid_device_find_by_wwid(ioc,
+		    le64_to_cpu(volume_pg1.WWID));
+		if (raid_device)
+			continue;
+		if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+		     sizeof(Mpi2RaidVolPage0_t)))
+			continue;
+		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
+			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
+			element.VolDevHandle = volume_pg1.DevHandle;
+			_scsih_sas_volume_add(ioc, &element);
+		}
+	}
+
+ skip_to_sas:
+
+	/* sas devices */
+	handle = 0xFFFF;
+	while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+	    handle))) {
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+			break;
+		handle = le16_to_cpu(sas_device_pg0.DevHandle);
+		if (!(_scsih_is_end_device(
+		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
+			continue;
+		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+		    le64_to_cpu(sas_device_pg0.SASAddress));
+		if (sas_device)
+			continue;
+		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
+			mpt2sas_transport_update_links(ioc, sas_address, handle,
+			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+			_scsih_add_device(ioc, handle, 0, 0);
+		}
+	}
+
+	printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name);
 }
 
 /**
  * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
  * @ioc: per adapter object
@@ -6871,7 +7148,6 @@
 		}
 		_scsih_fw_event_cleanup_queue(ioc);
 		_scsih_flush_running_cmds(ioc);
-		_scsih_queue_rescan(ioc);
 		break;
 	case MPT2_IOC_DONE_RESET:
 		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
@@ -6881,6 +7157,10 @@
-		_scsih_search_responding_sas_devices(ioc);
-		_scsih_search_responding_raid_devices(ioc);
-		_scsih_search_responding_expanders(ioc);
+		if (!ioc->is_driver_loading) {
+			_scsih_prep_device_scan(ioc);
+			_scsih_search_responding_sas_devices(ioc);
+			_scsih_search_responding_raid_devices(ioc);
+			_scsih_search_responding_expanders(ioc);
+			_scsih_error_recovery_delete_devices(ioc);
+		}
 		break;
 	}
 }
@@ -6898,7 +7181,6 @@
 {
 	struct fw_event_work *fw_event = container_of(work,
 	    struct fw_event_work, delayed_work.work);
-	unsigned long flags;
 	struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
 
 	/* the queue is being flushed so ignore this event */
@@ -6908,23 +7190,19 @@
 		return;
 	}
 
-	if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
-		_scsih_fw_event_free(ioc, fw_event);
-		spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-		if (ioc->shost_recovery) {
-			init_completion(&ioc->shost_recovery_done);
-			spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
-			    flags);
-			wait_for_completion(&ioc->shost_recovery_done);
-		} else
-			spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
-			    flags);
-		_scsih_remove_unresponding_sas_devices(ioc);
-		_scsih_hide_unhide_sas_devices(ioc);
-		return;
-	}
-
 	switch (fw_event->event) {
+	case MPT2SAS_REMOVE_UNRESPONDING_DEVICES:
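+		/* let SCSI error handling settle before rediscovering devices */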
+		while (scsi_host_in_recovery(ioc->shost))
+			ssleep(1);
+		_scsih_remove_unresponding_sas_devices(ioc);
+		_scsih_scan_for_devices_after_reset(ioc);
+		break;
+	case MPT2SAS_PORT_ENABLE_COMPLETE:
+		ioc->start_scan = 0;
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
+		    "from worker thread\n", ioc->name));
+		break;
 	case MPT2SAS_TURN_ON_FAULT_LED:
 		_scsih_turn_on_fault_led(ioc, fw_event->device_handle);
 		break;
@@ -7121,6 +7401,8 @@
 	.slave_configure		= _scsih_slave_configure,
 	.target_destroy			= _scsih_target_destroy,
 	.slave_destroy			= _scsih_slave_destroy,
+	.scan_finished			= _scsih_scan_finished,
+	.scan_start			= _scsih_scan_start,
 	.change_queue_depth 		= _scsih_change_queue_depth,
 	.change_queue_type		= _scsih_change_queue_type,
 	.eh_abort_handler		= _scsih_abort,
@@ -7381,7 +7663,12 @@
 	unsigned long flags;
 	int rc;
 
+	 /* no BIOS, return immediately */
+	if (!ioc->bios_pg3.BiosVersion)
+		return;
+
 	device = NULL;
+	is_raid = 0;
 	if (ioc->req_boot_device.device) {
 		device =  ioc->req_boot_device.device;
 		is_raid = ioc->req_boot_device.is_raid;
@@ -7417,8 +7704,9 @@
 		    sas_device->sas_address_parent)) {
 			_scsih_sas_device_remove(ioc, sas_device);
 		} else if (!sas_device->starget) {
-			mpt2sas_transport_port_remove(ioc, sas_address,
-			    sas_address_parent);
+			if (!ioc->is_driver_loading)
+				mpt2sas_transport_port_remove(ioc, sas_address,
+					sas_address_parent);
 			_scsih_sas_device_remove(ioc, sas_device);
 		}
 	}
@@ -7462,22 +7750,27 @@
 	/* SAS Device List */
 	list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
 	    list) {
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		list_move_tail(&sas_device->list, &ioc->sas_device_list);
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
 		if (ioc->hide_drives)
 			continue;
 
 		if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
 		    sas_device->sas_address_parent)) {
-			_scsih_sas_device_remove(ioc, sas_device);
+			list_del(&sas_device->list);
+			kfree(sas_device);
+			continue;
 		} else if (!sas_device->starget) {
-			mpt2sas_transport_port_remove(ioc,
-			    sas_device->sas_address,
-			    sas_device->sas_address_parent);
-			_scsih_sas_device_remove(ioc, sas_device);
+			if (!ioc->is_driver_loading)
+				mpt2sas_transport_port_remove(ioc,
+					sas_device->sas_address,
+					sas_device->sas_address_parent);
+			list_del(&sas_device->list);
+			kfree(sas_device);
+			continue;
 		}
+		spin_lock_irqsave(&ioc->sas_device_lock, flags);
+		list_move_tail(&sas_device->list, &ioc->sas_device_list);
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	}
 }
 
@@ -7490,9 +7784,7 @@
 static void
 _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
 {
-	u16 volume_mapping_flags =
-	    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
-	    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+	u16 volume_mapping_flags;
 
 	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
 		return;  /* return when IOC doesn't support initiator mode */
@@ -7500,18 +7792,92 @@
 	_scsih_probe_boot_devices(ioc);
 
 	if (ioc->ir_firmware) {
-		if ((volume_mapping_flags &
-		     MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING)) {
-			_scsih_probe_sas(ioc);
+		volume_mapping_flags =
+		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
+		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+		if (volume_mapping_flags ==
+		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
 			_scsih_probe_raid(ioc);
+			_scsih_probe_sas(ioc);
 		} else {
-			_scsih_probe_raid(ioc);
 			_scsih_probe_sas(ioc);
+			_scsih_probe_raid(ioc);
 		}
 	} else
 		_scsih_probe_sas(ioc);
 }
 
+/**
+ * _scsih_scan_start - scsi lld callback for .scan_start
+ * @shost: SCSI host pointer
+ *
+ * The shost has the ability to discover targets on its own instead
+ * of scanning the entire bus.  In our implemention, we will kick off
+ * firmware discovery.
+ */
+static void
+_scsih_scan_start(struct Scsi_Host *shost)
+{
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+	int rc;
+
+	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
+		mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
+
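+	/* scan_finished polls start_scan; cleared once port enable completes */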
+	ioc->start_scan = 1;
+	rc = mpt2sas_port_enable(ioc);
+
+	if (rc != 0)
+		printk(MPT2SAS_INFO_FMT "port enable: FAILED\n", ioc->name);
+}
+
+/**
+ * _scsih_scan_finished - scsi lld callback for .scan_finished
+ * @shost: SCSI host pointer
+ * @time: elapsed time of the scan in jiffies
+ *
+ * This function will be called periodically until it returns 1 with the
+ * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
+ * we wait for firmware discovery to complete, then return 1.
+ */
+static int
+_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+	if (time >= (300 * HZ)) {
+		ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+		printk(MPT2SAS_INFO_FMT "port enable: FAILED with timeout "
+		    "(timeout=300s)\n", ioc->name);
+		ioc->is_driver_loading = 0;
+		return 1;
+	}
+
+	if (ioc->start_scan)
+		return 0;
+
+	if (ioc->start_scan_failed) {
+		printk(MPT2SAS_INFO_FMT "port enable: FAILED with "
+		    "(ioc_status=0x%08x)\n", ioc->name, ioc->start_scan_failed);
+		ioc->is_driver_loading = 0;
+		ioc->wait_for_discovery_to_complete = 0;
+		ioc->remove_host = 1;
+		return 1;
+	}
+
+	printk(MPT2SAS_INFO_FMT "port enable: SUCCESS\n", ioc->name);
+	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+
+	if (ioc->wait_for_discovery_to_complete) {
+		ioc->wait_for_discovery_to_complete = 0;
+		_scsih_probe_devices(ioc);
+	}
+	mpt2sas_base_start_watchdog(ioc);
+	ioc->is_driver_loading = 0;
+	return 1;
+}
+
 /**
  * _scsih_probe - attach and add scsi host
  * @pdev: PCI device struct
@@ -7548,6 +7915,7 @@
 	ioc->tm_cb_idx = tm_cb_idx;
 	ioc->ctl_cb_idx = ctl_cb_idx;
 	ioc->base_cb_idx = base_cb_idx;
+	ioc->port_enable_cb_idx = port_enable_cb_idx;
 	ioc->transport_cb_idx = transport_cb_idx;
 	ioc->scsih_cb_idx = scsih_cb_idx;
 	ioc->config_cb_idx = config_cb_idx;
@@ -7620,14 +7988,14 @@
 		goto out_thread_fail;
 	}
 
-	ioc->wait_for_port_enable_to_complete = 1;
+	ioc->is_driver_loading = 1;
 	if ((mpt2sas_base_attach(ioc))) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
 		goto out_attach_fail;
 	}
 
-	ioc->wait_for_port_enable_to_complete = 0;
+	scsi_scan_host(shost);
 	if (ioc->is_warpdrive) {
 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
 			ioc->hide_drives = 0;
@@ -7650,6 +8018,7 @@
  out_thread_fail:
 	list_del(&ioc->list);
 	scsi_remove_host(shost);
+	scsi_host_put(shost);
  out_add_shost_fail:
 	return -ENODEV;
 }
@@ -7896,6 +8265,8 @@
 
 	/* base internal commands callback handler */
 	base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
+	port_enable_cb_idx = mpt2sas_base_register_callback_handler(
+		mpt2sas_port_enable_done);
 
 	/* transport internal commands callback handler */
 	transport_cb_idx = mpt2sas_base_register_callback_handler(
@@ -7950,6 +8321,7 @@
 	mpt2sas_base_release_callback_handler(scsi_io_cb_idx);
 	mpt2sas_base_release_callback_handler(tm_cb_idx);
 	mpt2sas_base_release_callback_handler(base_cb_idx);
+	mpt2sas_base_release_callback_handler(port_enable_cb_idx);
 	mpt2sas_base_release_callback_handler(transport_cb_idx);
 	mpt2sas_base_release_callback_handler(scsih_cb_idx);
 	mpt2sas_base_release_callback_handler(config_cb_idx);
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 621b5e0..6f58919 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -732,6 +732,16 @@
 		.class_mask	= 0,
 		.driver_data	= chip_9485,
 	},
+	{ PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */
+	{ PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+	{ PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+	{ PCI_VDEVICE(OCZ, 0x1041), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+	{ PCI_VDEVICE(OCZ, 0x1042), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+	{ PCI_VDEVICE(OCZ, 0x1043), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+	{ PCI_VDEVICE(OCZ, 0x1044), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+	{ PCI_VDEVICE(OCZ, 0x1080), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+	{ PCI_VDEVICE(OCZ, 0x1083), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+	{ PCI_VDEVICE(OCZ, 0x1084), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
 
 	{ }	/* terminate list */
 };
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b86db84..5163edb 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4102,7 +4102,7 @@
 	struct pmcraid_ioctl_header *hdr = NULL;
 	int retval = -ENOTTY;
 
-	hdr = kmalloc(GFP_KERNEL, sizeof(struct pmcraid_ioctl_header));
+	hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
 
 	if (!hdr) {
 		pmcraid_err("failed to allocate memory for ioctl header\n");
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 3474e86..2516adf 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2279,7 +2279,7 @@
 	ha = rsp->hw;
 
 	/* Clear the interrupt, if enabled, for this response queue */
-	if (rsp->options & ~BIT_6) {
+	if (!ha->flags.disable_msix_handshake) {
 		reg = &ha->iobase->isp24;
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fc3f168..b4d43ae 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1698,6 +1698,16 @@
 
 void scsi_free_queue(struct request_queue *q)
 {
+	unsigned long flags;
+
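+	/* the sdev owning this queue must already be gone (queuedata cleared) */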
+	WARN_ON(q->queuedata);
+
+	/* cause scsi_request_fn() to kill all non-finished requests */
+	spin_lock_irqsave(q->queue_lock, flags);
+	q->request_fn(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
 	blk_cleanup_queue(q);
 }
 
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 44e8ca3..72273a0 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -322,6 +322,8 @@
 	scsi_device_set_state(sdev, SDEV_DEL);
 	transport_destroy_device(&sdev->sdev_gendev);
 	put_device(&sdev->sdev_dev);
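+	/* the sdev never made it to registration; drop its request queue */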
+	scsi_free_queue(sdev->request_queue);
 	put_device(&sdev->sdev_gendev);
 out:
 	if (display_failure_msg)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 1bcd65a..96029e6 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -520,7 +520,7 @@
 /**
  * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests
  * @shost: shost for iscsi_host
- * @cls_host: iscsi_cls_host adding the structures to
+ * @ihost: iscsi_cls_host adding the structures to
  */
 static int
 iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a7942e5..fa3a591 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2590,18 +2590,16 @@
 		spin_unlock(&sd_index_lock);
 	} while (error == -EAGAIN);
 
-	if (error)
+	if (error) {
+		sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
 		goto out_put;
-
-	if (index >= SD_MAX_DISKS) {
-		error = -ENODEV;
-		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n");
-		goto out_free_index;
 	}
 
 	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
-	if (error)
+	if (error) {
+		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
 		goto out_free_index;
+	}
 
 	sdkp->device = sdp;
 	sdkp->driver = &sd_template;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 6ad798b..4163f29 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -9,12 +9,6 @@
 #define SD_MAJORS	16
 
 /*
- * This is limited by the naming scheme enforced in sd_probe,
- * add another character to it if you really need more disks.
- */
-#define SD_MAX_DISKS	(((26 * 26) + 26 + 1) * 26)
-
-/*
  * Time out in seconds for disks and Magneto-opticals (which are slower).
  */
 #define SD_TIMEOUT		(30 * HZ)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 1871b8a..9b28f39 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -462,14 +462,17 @@
 {
 	struct st_request *SRpnt = req->end_io_data;
 	struct scsi_tape *STp = SRpnt->stp;
+	struct bio *tmp;
 
 	STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
 	STp->buffer->cmdstat.residual = req->resid_len;
 
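+	/* cache the bio: complete() may wake a waiter that frees SRpnt */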
+	tmp = SRpnt->bio;
 	if (SRpnt->waiting)
 		complete(SRpnt->waiting);
 
-	blk_rq_unmap_user(SRpnt->bio);
+	blk_rq_unmap_user(tmp);
 	__blk_put_request(req->q, req);
 }
 
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 595dacc..019a716 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -131,6 +131,12 @@
 #define RXBUSY    (1<<2)
 #define TXBUSY    (1<<3)
 
+struct s3c64xx_spi_dma_data {
+	unsigned		ch;
+	enum dma_data_direction direction;
+	enum dma_ch	dmach;
+};
+
 /**
  * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
  * @clk: Pointer to the spi clock.
@@ -164,13 +170,14 @@
 	struct work_struct              work;
 	struct list_head                queue;
 	spinlock_t                      lock;
-	enum dma_ch                     rx_dmach;
-	enum dma_ch                     tx_dmach;
 	unsigned long                   sfr_start;
 	struct completion               xfer_completion;
 	unsigned                        state;
 	unsigned                        cur_mode, cur_bpw;
 	unsigned                        cur_speed;
+	struct s3c64xx_spi_dma_data	rx_dma;
+	struct s3c64xx_spi_dma_data	tx_dma;
+	struct samsung_dma_ops		*ops;
 };
 
 static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
@@ -226,6 +233,81 @@
 	writel(val, regs + S3C64XX_SPI_CH_CFG);
 }
 
+static void s3c64xx_spi_dmacb(void *data)
+{
+	struct s3c64xx_spi_driver_data *sdd;
+	struct s3c64xx_spi_dma_data *dma = data;
+	unsigned long flags;
+
+	if (dma->direction == DMA_FROM_DEVICE)
+		sdd = container_of(data,
+			struct s3c64xx_spi_driver_data, rx_dma);
+	else
+		sdd = container_of(data,
+			struct s3c64xx_spi_driver_data, tx_dma);
+
+	spin_lock_irqsave(&sdd->lock, flags);
+
+	if (dma->direction == DMA_FROM_DEVICE) {
+		sdd->state &= ~RXBUSY;
+		if (!(sdd->state & TXBUSY))
+			complete(&sdd->xfer_completion);
+	} else {
+		sdd->state &= ~TXBUSY;
+		if (!(sdd->state & RXBUSY))
+			complete(&sdd->xfer_completion);
+	}
+
+	spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+					unsigned len, dma_addr_t buf)
+{
+	struct s3c64xx_spi_driver_data *sdd;
+	struct samsung_dma_prep_info info;
+
+	if (dma->direction == DMA_FROM_DEVICE)
+		sdd = container_of((void *)dma,
+			struct s3c64xx_spi_driver_data, rx_dma);
+	else
+		sdd = container_of((void *)dma,
+			struct s3c64xx_spi_driver_data, tx_dma);
+
+	info.cap = DMA_SLAVE;
+	info.len = len;
+	info.fp = s3c64xx_spi_dmacb;
+	info.fp_param = dma;
+	info.direction = dma->direction;
+	info.buf = buf;
+
+	sdd->ops->prepare(dma->ch, &info);
+	sdd->ops->trigger(dma->ch);
+}
+
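+/* request the RX and TX DMA channels via the samsung DMA ops wrapper */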
+static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
+{
+	struct samsung_dma_info info;
+
+	sdd->ops = samsung_dma_get_ops();
+
+	info.cap = DMA_SLAVE;
+	info.client = &s3c64xx_spi_dma_client;
+	info.width = sdd->cur_bpw / 8;
+
+	info.direction = sdd->rx_dma.direction;
+	info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+	sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &info);
+	info.direction =  sdd->tx_dma.direction;
+	info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+	sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &info);
+
+	return 1;
+}
+
 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 				struct spi_device *spi,
 				struct spi_transfer *xfer, int dma_mode)
@@ -258,10 +337,7 @@
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
-			s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8);
-			s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
-						xfer->tx_dma, xfer->len);
-			s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
+			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -293,10 +369,7 @@
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 					| S3C64XX_SPI_PACKET_CNT_EN,
 					regs + S3C64XX_SPI_PACKET_CNT);
-			s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8);
-			s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
-						xfer->rx_dma, xfer->len);
-			s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
+			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
 		}
 	}
 
@@ -482,46 +555,6 @@
 	}
 }
 
-static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
-				 int size, enum s3c2410_dma_buffresult res)
-{
-	struct s3c64xx_spi_driver_data *sdd = buf_id;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdd->lock, flags);
-
-	if (res == S3C2410_RES_OK)
-		sdd->state &= ~RXBUSY;
-	else
-		dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
-
-	/* If the other done */
-	if (!(sdd->state & TXBUSY))
-		complete(&sdd->xfer_completion);
-
-	spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
-static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
-				 int size, enum s3c2410_dma_buffresult res)
-{
-	struct s3c64xx_spi_driver_data *sdd = buf_id;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdd->lock, flags);
-
-	if (res == S3C2410_RES_OK)
-		sdd->state &= ~TXBUSY;
-	else
-		dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
-
-	/* If the other done */
-	if (!(sdd->state & RXBUSY))
-		complete(&sdd->xfer_completion);
-
-	spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
 
 static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
@@ -696,12 +729,10 @@
 			if (use_dma) {
 				if (xfer->tx_buf != NULL
 						&& (sdd->state & TXBUSY))
-					s3c2410_dma_ctrl(sdd->tx_dmach,
-							S3C2410_DMAOP_FLUSH);
+					sdd->ops->stop(sdd->tx_dma.ch);
 				if (xfer->rx_buf != NULL
 						&& (sdd->state & RXBUSY))
-					s3c2410_dma_ctrl(sdd->rx_dmach,
-							S3C2410_DMAOP_FLUSH);
+					sdd->ops->stop(sdd->rx_dma.ch);
 			}
 
 			goto out;
@@ -739,30 +770,6 @@
 		msg->complete(msg->context);
 }
 
-static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
-{
-	if (s3c2410_dma_request(sdd->rx_dmach,
-					&s3c64xx_spi_dma_client, NULL) < 0) {
-		dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
-		return 0;
-	}
-	s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
-	s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
-					sdd->sfr_start + S3C64XX_SPI_RX_DATA);
-
-	if (s3c2410_dma_request(sdd->tx_dmach,
-					&s3c64xx_spi_dma_client, NULL) < 0) {
-		dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
-		s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
-		return 0;
-	}
-	s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
-	s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
-					sdd->sfr_start + S3C64XX_SPI_TX_DATA);
-
-	return 1;
-}
-
 static void s3c64xx_spi_work(struct work_struct *work)
 {
 	struct s3c64xx_spi_driver_data *sdd = container_of(work,
@@ -799,8 +806,8 @@
 	spin_unlock_irqrestore(&sdd->lock, flags);
 
 	/* Free DMA channels */
-	s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
-	s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
+	sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
+	sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
 }
 
 static int s3c64xx_spi_transfer(struct spi_device *spi,
@@ -1017,8 +1024,10 @@
 	sdd->cntrlr_info = sci;
 	sdd->pdev = pdev;
 	sdd->sfr_start = mem_res->start;
-	sdd->tx_dmach = dmatx_res->start;
-	sdd->rx_dmach = dmarx_res->start;
+	sdd->tx_dma.dmach = dmatx_res->start;
+	sdd->tx_dma.direction = DMA_TO_DEVICE;
+	sdd->rx_dma.dmach = dmarx_res->start;
+	sdd->rx_dma.direction = DMA_FROM_DEVICE;
 
 	sdd->cur_bpw = 8;
 
@@ -1106,7 +1115,7 @@
 					pdev->id, master->num_chipselect);
 	dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
 					mem_res->end, mem_res->start,
-					sdd->rx_dmach, sdd->tx_dmach);
+					sdd->rx_dma.dmach, sdd->tx_dma.dmach);
 
 	return 0;
 
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index b9926ee..09de99f 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -556,7 +556,7 @@
 /*
  * Handler function for all zram I/O requests.
  */
-static int zram_make_request(struct request_queue *queue, struct bio *bio)
+static void zram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct zram *zram = queue->queuedata;
 
@@ -575,13 +575,12 @@
 	__zram_make_request(zram, bio, bio_data_dir(bio));
 	up_read(&zram->init_lock);
 
-	return 0;
+	return;
 
 error_unlock:
 	up_read(&zram->init_lock);
 error:
 	bio_io_error(bio);
-	return 0;
 }
 
 void __zram_reset_device(struct zram *zram)
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 9871c57..1945c70 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1446,12 +1446,8 @@
 	dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
 		param->slave_id);
 
-	if (param->dma_dev == chan->device->dev) {
-		chan->private = param;
-		return true;
-	} else {
-		return false;
-	}
+	chan->private = param;
+	return true;
 }
 
 static void rx_timer_fn(unsigned long arg)
@@ -1477,10 +1473,10 @@
 	dma_cap_mask_t mask;
 	int nent;
 
-	dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
-		port->line, s->cfg->dma_dev);
+	dev_dbg(port->dev, "%s: port %d\n", __func__,
+		port->line);
 
-	if (!s->cfg->dma_dev)
+	if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0)
 		return;
 
 	dma_cap_zero(mask);
@@ -1490,7 +1486,6 @@
 
 	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
 	param->slave_id = s->cfg->dma_slave_tx;
-	param->dma_dev = s->cfg->dma_dev;
 
 	s->cookie_tx = -EINVAL;
 	chan = dma_request_channel(mask, filter, param);
@@ -1519,7 +1514,6 @@
 
 	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
 	param->slave_id = s->cfg->dma_slave_rx;
-	param->dma_dev = s->cfg->dma_dev;
 
 	chan = dma_request_channel(mask, filter, param);
 	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
@@ -1564,9 +1558,6 @@
 {
 	struct sci_port *s = to_sci_port(port);
 
-	if (!s->cfg->dma_dev)
-		return;
-
 	if (s->chan_tx)
 		sci_tx_dma_release(s, false);
 	if (s->chan_rx)
@@ -1981,9 +1972,9 @@
 	port->serial_in		= sci_serial_in;
 	port->serial_out	= sci_serial_out;
 
-	if (p->dma_dev)
-		dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
-			p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
+	if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0)
+		dev_dbg(port->dev, "DMA tx %d, rx %d\n",
+			p->dma_slave_tx, p->dma_slave_rx);
 
 	return 0;
 }
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 64c6752..6285867 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -66,6 +66,7 @@
 config WM831X_WATCHDOG
 	tristate "WM831x watchdog"
 	depends on MFD_WM831X
+	select WATCHDOG_CORE
 	help
 	  Support for the watchdog in the WM831x AudioPlus PMICs.  When
 	  the watchdog triggers the system will be reset.
@@ -170,6 +171,7 @@
 config S3C2410_WATCHDOG
 	tristate "S3C2410 Watchdog"
 	depends on ARCH_S3C2410 || HAVE_S3C2410_WATCHDOG
+	select WATCHDOG_CORE
 	help
 	  Watchdog timer block in the Samsung SoCs. This will reboot
 	  the system when the timer expires with the watchdog enabled.
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 9291506..03f449a 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -429,7 +429,7 @@
 	writew(U300_WDOG_SR_RESET_STATUS_RESET, virtbase + U300_WDOG_SR);
 
 	irq = platform_get_irq(pdev, 0);
-	if (request_irq(irq, coh901327_interrupt, IRQF_DISABLED,
+	if (request_irq(irq, coh901327_interrupt, 0,
 			DRV_NAME " Bark", pdev)) {
 		ret = -EIO;
 		goto out_no_irq;
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index f1d1da6..41018d4 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -427,7 +427,7 @@
 {
 	int ret;
 
-	ret = request_irq(irq, eurwdt_interrupt, IRQF_DISABLED, "eurwdt", NULL);
+	ret = request_irq(irq, eurwdt_interrupt, 0, "eurwdt", NULL);
 	if (ret) {
 		printk(KERN_ERR "eurwdt: IRQ %d is not free.\n", irq);
 		goto out;
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 751a591..ba6ad66 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -1,7 +1,7 @@
 /*
  *	intel TCO Watchdog Driver
  *
- *	(c) Copyright 2006-2010 Wim Van Sebroeck <wim@iguana.be>.
+ *	(c) Copyright 2006-2011 Wim Van Sebroeck <wim@iguana.be>.
  *
  *	This program is free software; you can redistribute it and/or
  *	modify it under the terms of the GNU General Public License
@@ -44,7 +44,7 @@
 
 /* Module and version information */
 #define DRV_NAME	"iTCO_wdt"
-#define DRV_VERSION	"1.06"
+#define DRV_VERSION	"1.07"
 #define PFX		DRV_NAME ": "
 
 /* Includes */
@@ -384,6 +384,11 @@
 	"Watchdog cannot be stopped once started (default="
 				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
+static int turn_SMI_watchdog_clear_off = 0;
+module_param(turn_SMI_watchdog_clear_off, int, 0);
+MODULE_PARM_DESC(turn_SMI_watchdog_clear_off,
+	"Turn off SMI clearing watchdog (default=0)");
+
 /*
  * Some TCO specific functions
  */
@@ -808,10 +813,12 @@
 		ret = -EIO;
 		goto out_unmap;
 	}
-	/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
-	val32 = inl(SMI_EN);
-	val32 &= 0xffffdfff;	/* Turn off SMI clearing watchdog */
-	outl(val32, SMI_EN);
+	if (turn_SMI_watchdog_clear_off) {
+		/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
+		val32 = inl(SMI_EN);
+		val32 &= 0xffffdfff;	/* Turn off SMI clearing watchdog */
+		outl(val32, SMI_EN);
+	}
 
 	/* The TCO I/O registers reside in a 32-byte range pointed to
 	   by the TCOBASE value */
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 4dc3102..82ccd36 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -367,8 +367,7 @@
 		goto err_misc;
 	}
 
-	ret = request_irq(wdt->irq, mpcore_wdt_fire, IRQF_DISABLED,
-							"mpcore_wdt", wdt);
+	ret = request_irq(wdt->irq, mpcore_wdt_fire, 0, "mpcore_wdt", wdt);
 	if (ret) {
 		dev_printk(KERN_ERR, wdt->dev,
 			"cannot register IRQ%d for watchdog\n", wdt->irq);
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 945ee83..7c0d863 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -402,7 +402,7 @@
 	irq = OCTEON_IRQ_WDOG0 + core;
 
 	if (request_irq(irq, octeon_wdt_poke_irq,
-			IRQF_DISABLED, "octeon_wdt", octeon_wdt_poke_irq))
+			IRQF_NO_THREAD, "octeon_wdt", octeon_wdt_poke_irq))
 		panic("octeon_wdt: Couldn't obtain irq %d", irq);
 
 	cpumask_set_cpu(cpu, &irq_enabled_cpus);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 30da88f..5de7e4f 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -27,9 +27,8 @@
 #include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/timer.h>
-#include <linux/miscdevice.h>
+#include <linux/miscdevice.h> /* for MODULE_ALIAS_MISCDEV */
 #include <linux/watchdog.h>
-#include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
@@ -38,6 +37,7 @@
 #include <linux/io.h>
 #include <linux/cpufreq.h>
 #include <linux/slab.h>
+#include <linux/err.h>
 
 #include <mach/map.h>
 
@@ -74,14 +74,12 @@
 			"0 to reboot (default 0)");
 MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)");
 
-static unsigned long open_lock;
 static struct device    *wdt_dev;	/* platform device attached to */
 static struct resource	*wdt_mem;
 static struct resource	*wdt_irq;
 static struct clk	*wdt_clock;
 static void __iomem	*wdt_base;
 static unsigned int	 wdt_count;
-static char		 expect_close;
 static DEFINE_SPINLOCK(wdt_lock);
 
 /* watchdog control routines */
@@ -93,11 +91,13 @@
 
 /* functions */
 
-static void s3c2410wdt_keepalive(void)
+static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
 {
 	spin_lock(&wdt_lock);
 	writel(wdt_count, wdt_base + S3C2410_WTCNT);
 	spin_unlock(&wdt_lock);
+
+	return 0;
 }
 
 static void __s3c2410wdt_stop(void)
@@ -109,14 +109,16 @@
 	writel(wtcon, wdt_base + S3C2410_WTCON);
 }
 
-static void s3c2410wdt_stop(void)
+static int s3c2410wdt_stop(struct watchdog_device *wdd)
 {
 	spin_lock(&wdt_lock);
 	__s3c2410wdt_stop();
 	spin_unlock(&wdt_lock);
+
+	return 0;
 }
 
-static void s3c2410wdt_start(void)
+static int s3c2410wdt_start(struct watchdog_device *wdd)
 {
 	unsigned long wtcon;
 
@@ -142,6 +144,8 @@
 	writel(wdt_count, wdt_base + S3C2410_WTCNT);
 	writel(wtcon, wdt_base + S3C2410_WTCON);
 	spin_unlock(&wdt_lock);
+
+	return 0;
 }
 
 static inline int s3c2410wdt_is_running(void)
@@ -149,7 +153,7 @@
 	return readl(wdt_base + S3C2410_WTCON) & S3C2410_WTCON_ENABLE;
 }
 
-static int s3c2410wdt_set_heartbeat(int timeout)
+static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeout)
 {
 	unsigned long freq = clk_get_rate(wdt_clock);
 	unsigned int count;
@@ -182,8 +186,6 @@
 		}
 	}
 
-	tmr_margin = timeout;
-
 	DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n",
 	    __func__, timeout, divisor, count, count/divisor);
 
@@ -201,70 +203,6 @@
 	return 0;
 }
 
-/*
- *	/dev/watchdog handling
- */
-
-static int s3c2410wdt_open(struct inode *inode, struct file *file)
-{
-	if (test_and_set_bit(0, &open_lock))
-		return -EBUSY;
-
-	if (nowayout)
-		__module_get(THIS_MODULE);
-
-	expect_close = 0;
-
-	/* start the timer */
-	s3c2410wdt_start();
-	return nonseekable_open(inode, file);
-}
-
-static int s3c2410wdt_release(struct inode *inode, struct file *file)
-{
-	/*
-	 *	Shut off the timer.
-	 *	Lock it in if it's a module and we set nowayout
-	 */
-
-	if (expect_close == 42)
-		s3c2410wdt_stop();
-	else {
-		dev_err(wdt_dev, "Unexpected close, not stopping watchdog\n");
-		s3c2410wdt_keepalive();
-	}
-	expect_close = 0;
-	clear_bit(0, &open_lock);
-	return 0;
-}
-
-static ssize_t s3c2410wdt_write(struct file *file, const char __user *data,
-				size_t len, loff_t *ppos)
-{
-	/*
-	 *	Refresh the timer.
-	 */
-	if (len) {
-		if (!nowayout) {
-			size_t i;
-
-			/* In case it was set long ago */
-			expect_close = 0;
-
-			for (i = 0; i != len; i++) {
-				char c;
-
-				if (get_user(c, data + i))
-					return -EFAULT;
-				if (c == 'V')
-					expect_close = 42;
-			}
-		}
-		s3c2410wdt_keepalive();
-	}
-	return len;
-}
-
 #define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE)
 
 static const struct watchdog_info s3c2410_wdt_ident = {
@@ -273,53 +211,17 @@
 	.identity         =	"S3C2410 Watchdog",
 };
 
-
-static long s3c2410wdt_ioctl(struct file *file,	unsigned int cmd,
-							unsigned long arg)
-{
-	void __user *argp = (void __user *)arg;
-	int __user *p = argp;
-	int new_margin;
-
-	switch (cmd) {
-	case WDIOC_GETSUPPORT:
-		return copy_to_user(argp, &s3c2410_wdt_ident,
-			sizeof(s3c2410_wdt_ident)) ? -EFAULT : 0;
-	case WDIOC_GETSTATUS:
-	case WDIOC_GETBOOTSTATUS:
-		return put_user(0, p);
-	case WDIOC_KEEPALIVE:
-		s3c2410wdt_keepalive();
-		return 0;
-	case WDIOC_SETTIMEOUT:
-		if (get_user(new_margin, p))
-			return -EFAULT;
-		if (s3c2410wdt_set_heartbeat(new_margin))
-			return -EINVAL;
-		s3c2410wdt_keepalive();
-		return put_user(tmr_margin, p);
-	case WDIOC_GETTIMEOUT:
-		return put_user(tmr_margin, p);
-	default:
-		return -ENOTTY;
-	}
-}
-
-/* kernel interface */
-
-static const struct file_operations s3c2410wdt_fops = {
-	.owner		= THIS_MODULE,
-	.llseek		= no_llseek,
-	.write		= s3c2410wdt_write,
-	.unlocked_ioctl	= s3c2410wdt_ioctl,
-	.open		= s3c2410wdt_open,
-	.release	= s3c2410wdt_release,
+static struct watchdog_ops s3c2410wdt_ops = {
+	.owner = THIS_MODULE,
+	.start = s3c2410wdt_start,
+	.stop = s3c2410wdt_stop,
+	.ping = s3c2410wdt_keepalive,
+	.set_timeout = s3c2410wdt_set_heartbeat,
 };
 
-static struct miscdevice s3c2410wdt_miscdev = {
-	.minor		= WATCHDOG_MINOR,
-	.name		= "watchdog",
-	.fops		= &s3c2410wdt_fops,
+static struct watchdog_device s3c2410_wdd = {
+	.info = &s3c2410_wdt_ident,
+	.ops = &s3c2410wdt_ops,
 };
 
 /* interrupt handler code */
@@ -328,7 +230,7 @@
 {
 	dev_info(wdt_dev, "watchdog timer expired (irq)\n");
 
-	s3c2410wdt_keepalive();
+	s3c2410wdt_keepalive(&s3c2410_wdd);
 	return IRQ_HANDLED;
 }
 
@@ -349,14 +251,14 @@
 		 * the watchdog is running.
 		 */
 
-		s3c2410wdt_keepalive();
+		s3c2410wdt_keepalive(&s3c2410_wdd);
 	} else if (val == CPUFREQ_POSTCHANGE) {
-		s3c2410wdt_stop();
+		s3c2410wdt_stop(&s3c2410_wdd);
 
-		ret = s3c2410wdt_set_heartbeat(tmr_margin);
+		ret = s3c2410wdt_set_heartbeat(&s3c2410_wdd, s3c2410_wdd.timeout);
 
 		if (ret >= 0)
-			s3c2410wdt_start();
+			s3c2410wdt_start(&s3c2410_wdd);
 		else
 			goto err;
 	}
@@ -365,7 +267,8 @@
 	return 0;
 
  err:
-	dev_err(wdt_dev, "cannot set new value for timeout %d\n", tmr_margin);
+	dev_err(wdt_dev, "cannot set new value for timeout %d\n",
+				s3c2410_wdd.timeout);
 	return ret;
 }
 
@@ -396,10 +299,6 @@
 }
 #endif
 
-
-
-/* device interface */
-
 static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
 {
 	struct device *dev;
@@ -466,8 +365,8 @@
 	/* see if we can actually set the requested timer margin, and if
 	 * not, try the default value */
 
-	if (s3c2410wdt_set_heartbeat(tmr_margin)) {
-		started = s3c2410wdt_set_heartbeat(
+	if (s3c2410wdt_set_heartbeat(&s3c2410_wdd, tmr_margin)) {
+		started = s3c2410wdt_set_heartbeat(&s3c2410_wdd,
 					CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
 
 		if (started == 0)
@@ -479,22 +378,21 @@
 							"cannot start\n");
 	}
 
-	ret = misc_register(&s3c2410wdt_miscdev);
+	ret = watchdog_register_device(&s3c2410_wdd);
 	if (ret) {
-		dev_err(dev, "cannot register miscdev on minor=%d (%d)\n",
-			WATCHDOG_MINOR, ret);
+		dev_err(dev, "cannot register watchdog (%d)\n", ret);
 		goto err_cpufreq;
 	}
 
 	if (tmr_atboot && started == 0) {
 		dev_info(dev, "starting watchdog timer\n");
-		s3c2410wdt_start();
+		s3c2410wdt_start(&s3c2410_wdd);
 	} else if (!tmr_atboot) {
 		/* if we're not enabling the watchdog, then ensure it is
 		 * disabled if it has been left running from the bootloader
 		 * or other source */
 
-		s3c2410wdt_stop();
+		s3c2410wdt_stop(&s3c2410_wdd);
 	}
 
 	/* print out a statement of readiness */
@@ -530,7 +428,7 @@
 
 static int __devexit s3c2410wdt_remove(struct platform_device *dev)
 {
-	misc_deregister(&s3c2410wdt_miscdev);
+	watchdog_unregister_device(&s3c2410_wdd);
 
 	s3c2410wdt_cpufreq_deregister();
 
@@ -550,7 +448,7 @@
 
 static void s3c2410wdt_shutdown(struct platform_device *dev)
 {
-	s3c2410wdt_stop();
+	s3c2410wdt_stop(&s3c2410_wdd);
 }
 
 #ifdef CONFIG_PM
@@ -565,7 +463,7 @@
 	wtdat_save = readl(wdt_base + S3C2410_WTDAT);
 
 	/* Note that WTCNT doesn't need to be saved. */
-	s3c2410wdt_stop();
+	s3c2410wdt_stop(&s3c2410_wdd);
 
 	return 0;
 }
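
The s3c2410 conversion above distills to the following skeleton (a sketch only; the foo_* names are placeholders, not part of the patch — the real driver keeps its hardware accessors):

#include <linux/module.h>
#include <linux/watchdog.h>

static int foo_wdt_start(struct watchdog_device *wdd)
{
	/* enable the hardware timer here */
	return 0;
}

static int foo_wdt_stop(struct watchdog_device *wdd)
{
	/* disable the hardware timer here */
	return 0;
}

static int foo_wdt_ping(struct watchdog_device *wdd)
{
	/* reload the hardware counter here */
	return 0;
}

static const struct watchdog_info foo_wdt_info = {
	.options  = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity = "Foo Watchdog",
};

static const struct watchdog_ops foo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = foo_wdt_start,
	.stop  = foo_wdt_stop,
	.ping  = foo_wdt_ping,
};

static struct watchdog_device foo_wdd = {
	.info = &foo_wdt_info,
	.ops  = &foo_wdt_ops,
};

/* probe:  ret = watchdog_register_device(&foo_wdd);
 * remove: watchdog_unregister_device(&foo_wdd); */

The framework supplies the open/write/ioctl plumbing, so the driver body shrinks to the hardware operations, exactly as in the diff above.
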
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index f31493e..b01a30e 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -300,7 +300,7 @@
 	 * get the resources
 	 */
 
-	ret = request_irq(1, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED,
+	ret = request_irq(1, sbwdog_interrupt, IRQF_SHARED,
 		ident.identity, (void *)user_dog);
 	if (ret) {
 		printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
@@ -350,7 +350,7 @@
 {
 	int ret;
 
-	ret = request_irq(1, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED,
+	ret = request_irq(1, sbwdog_interrupt, IRQF_SHARED,
 		"Kernel Watchdog", IOADDR(A_SCD_WDOG_CFG_0));
 	if (ret) {
 		printk(KERN_CRIT
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index 52b63f2..b284040 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -398,7 +398,7 @@
 							WATCHDOG_TIMEOUT);
 	}
 
-	wdtmrctl = ioremap((unsigned long)(MMCR_BASE + OFFS_WDTMRCTL), 2);
+	wdtmrctl = ioremap(MMCR_BASE + OFFS_WDTMRCTL, 2);
 	if (!wdtmrctl) {
 		printk(KERN_ERR PFX "Unable to remap memory\n");
 		rc = -ENOMEM;
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index e5c91d4..dd5d675 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -142,7 +142,7 @@
 	w83627hf_unselect_wd_register();
 }
 
-static void wdt_ctrl(int timeout)
+static void wdt_set_time(int timeout)
 {
 	spin_lock(&io_lock);
 
@@ -158,13 +158,13 @@
 
 static int wdt_ping(void)
 {
-	wdt_ctrl(timeout);
+	wdt_set_time(timeout);
 	return 0;
 }
 
 static int wdt_disable(void)
 {
-	wdt_ctrl(0);
+	wdt_set_time(0);
 	return 0;
 }
 
@@ -176,6 +176,24 @@
 	return 0;
 }
 
+static int wdt_get_time(void)
+{
+	int timeleft;
+
+	spin_lock(&io_lock);
+
+	w83627hf_select_wd_register();
+
+	outb_p(0xF6, WDT_EFER);    /* Select CRF6 */
+	timeleft = inb_p(WDT_EFDR); /* Read Timeout counter from CRF6 */
+
+	w83627hf_unselect_wd_register();
+
+	spin_unlock(&io_lock);
+
+	return timeleft;
+}
+
 static ssize_t wdt_write(struct file *file, const char __user *buf,
 						size_t count, loff_t *ppos)
 {
@@ -202,7 +220,7 @@
 {
 	void __user *argp = (void __user *)arg;
 	int __user *p = argp;
-	int new_timeout;
+	int timeval;
 	static const struct watchdog_info ident = {
 		.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
 							WDIOF_MAGICCLOSE,
@@ -238,14 +256,17 @@
 		wdt_ping();
 		break;
 	case WDIOC_SETTIMEOUT:
-		if (get_user(new_timeout, p))
+		if (get_user(timeval, p))
 			return -EFAULT;
-		if (wdt_set_heartbeat(new_timeout))
+		if (wdt_set_heartbeat(timeval))
 			return -EINVAL;
 		wdt_ping();
 		/* Fall */
 	case WDIOC_GETTIMEOUT:
 		return put_user(timeout, p);
+	case WDIOC_GETTIMELEFT:
+		timeval = wdt_get_time();
+		return put_user(timeval, p);
 	default:
 		return -ENOTTY;
 	}
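
With WDIOC_GETTIMELEFT wired up, the remaining time becomes visible through the standard watchdog ioctl interface. A minimal userspace sketch (the /dev/watchdog path is the usual default, not something this patch establishes):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeleft;
	int fd = open("/dev/watchdog", O_WRONLY);	/* opening starts the dog */

	if (fd < 0)
		return 1;
	if (ioctl(fd, WDIOC_GETTIMELEFT, &timeleft) == 0)
		printf("seconds until reset: %d\n", timeleft);
	write(fd, "V", 1);	/* magic close; the driver sets WDIOF_MAGICCLOSE */
	close(fd);
	return 0;
}
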
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index bb03e15..d2ef002 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -612,7 +612,7 @@
 		goto out;
 	}
 
-	ret = request_irq(irq, wdt_interrupt, IRQF_DISABLED, "wdt501p", NULL);
+	ret = request_irq(irq, wdt_interrupt, 0, "wdt501p", NULL);
 	if (ret) {
 		printk(KERN_ERR "wdt: IRQ %d is not free.\n", irq);
 		goto outreg;
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 172dad6..e0fc3baa 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -643,7 +643,7 @@
 	irq = dev->irq;
 	io = pci_resource_start(dev, 2);
 
-	if (request_irq(irq, wdtpci_interrupt, IRQF_DISABLED | IRQF_SHARED,
+	if (request_irq(irq, wdtpci_interrupt, IRQF_SHARED,
 			 "wdt_pci", &wdtpci_miscdev)) {
 		printk(KERN_ERR PFX "IRQ %d is not free\n", irq);
 		goto out_reg;
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 871caea..7be3855 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -12,8 +12,7 @@
 #include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
+#include <linux/slab.h>
 #include <linux/platform_device.h>
 #include <linux/watchdog.h>
 #include <linux/uaccess.h>
@@ -29,19 +28,19 @@
 		 "Watchdog cannot be stopped once started (default="
 		 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static unsigned long wm831x_wdt_users;
-static struct miscdevice wm831x_wdt_miscdev;
-static int wm831x_wdt_expect_close;
-static DEFINE_MUTEX(wdt_mutex);
-static struct wm831x *wm831x;
-static unsigned int update_gpio;
-static unsigned int update_state;
+struct wm831x_wdt_drvdata {
+	struct watchdog_device wdt;
+	struct wm831x *wm831x;
+	struct mutex lock;
+	int update_gpio;
+	int update_state;
+};
 
 /* We can't use the sub-second values here but they're included
  * for completeness.  */
 static struct {
-	int time;  /* Seconds */
-	u16 val;   /* WDOG_TO value */
+	unsigned int time;  /* Seconds */
+	u16 val;            /* WDOG_TO value */
 } wm831x_wdt_cfgs[] = {
 	{  1, 2 },
 	{  2, 3 },
@@ -52,32 +51,13 @@
 	{ 33, 7 },  /* Actually 32.768s so include both, others round down */
 };
 
-static int wm831x_wdt_set_timeout(struct wm831x *wm831x, u16 value)
+static int wm831x_wdt_start(struct watchdog_device *wdt_dev)
 {
+	struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
+	struct wm831x *wm831x = driver_data->wm831x;
 	int ret;
 
-	mutex_lock(&wdt_mutex);
-
-	ret = wm831x_reg_unlock(wm831x);
-	if (ret == 0) {
-		ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
-				      WM831X_WDOG_TO_MASK, value);
-		wm831x_reg_lock(wm831x);
-	} else {
-		dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
-			ret);
-	}
-
-	mutex_unlock(&wdt_mutex);
-
-	return ret;
-}
-
-static int wm831x_wdt_start(struct wm831x *wm831x)
-{
-	int ret;
-
-	mutex_lock(&wdt_mutex);
+	mutex_lock(&driver_data->lock);
 
 	ret = wm831x_reg_unlock(wm831x);
 	if (ret == 0) {
@@ -89,16 +69,18 @@
 			ret);
 	}
 
-	mutex_unlock(&wdt_mutex);
+	mutex_unlock(&driver_data->lock);
 
 	return ret;
 }
 
-static int wm831x_wdt_stop(struct wm831x *wm831x)
+static int wm831x_wdt_stop(struct watchdog_device *wdt_dev)
 {
+	struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
+	struct wm831x *wm831x = driver_data->wm831x;
 	int ret;
 
-	mutex_lock(&wdt_mutex);
+	mutex_lock(&driver_data->lock);
 
 	ret = wm831x_reg_unlock(wm831x);
 	if (ret == 0) {
@@ -110,26 +92,28 @@
 			ret);
 	}
 
-	mutex_unlock(&wdt_mutex);
+	mutex_unlock(&driver_data->lock);
 
 	return ret;
 }
 
-static int wm831x_wdt_kick(struct wm831x *wm831x)
+static int wm831x_wdt_ping(struct watchdog_device *wdt_dev)
 {
+	struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
+	struct wm831x *wm831x = driver_data->wm831x;
 	int ret;
 	u16 reg;
 
-	mutex_lock(&wdt_mutex);
+	mutex_lock(&driver_data->lock);
 
-	if (update_gpio) {
-		gpio_set_value_cansleep(update_gpio, update_state);
-		update_state = !update_state;
+	if (driver_data->update_gpio) {
+		gpio_set_value_cansleep(driver_data->update_gpio,
+					driver_data->update_state);
+		driver_data->update_state = !driver_data->update_state;
 		ret = 0;
 		goto out;
 	}
 
-
 	reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
 
 	if (!(reg & WM831X_WDOG_RST_SRC)) {
@@ -150,182 +134,59 @@
 	}
 
 out:
-	mutex_unlock(&wdt_mutex);
+	mutex_unlock(&driver_data->lock);
 
 	return ret;
 }
 
-static int wm831x_wdt_open(struct inode *inode, struct file *file)
+static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev,
+				  unsigned int timeout)
 {
-	int ret;
+	struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
+	struct wm831x *wm831x = driver_data->wm831x;
+	int ret, i;
 
-	if (!wm831x)
-		return -ENODEV;
+	for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
+		if (wm831x_wdt_cfgs[i].time == timeout)
+			break;
+	if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
+		return -EINVAL;
 
-	if (test_and_set_bit(0, &wm831x_wdt_users))
-		return -EBUSY;
-
-	ret = wm831x_wdt_start(wm831x);
-	if (ret != 0)
-		return ret;
-
-	return nonseekable_open(inode, file);
-}
-
-static int wm831x_wdt_release(struct inode *inode, struct file *file)
-{
-	if (wm831x_wdt_expect_close)
-		wm831x_wdt_stop(wm831x);
-	else {
-		dev_warn(wm831x->dev, "Watchdog device closed uncleanly\n");
-		wm831x_wdt_kick(wm831x);
+	ret = wm831x_reg_unlock(wm831x);
+	if (ret == 0) {
+		ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
+				      WM831X_WDOG_TO_MASK,
+				      wm831x_wdt_cfgs[i].val);
+		wm831x_reg_lock(wm831x);
+	} else {
+		dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
+			ret);
 	}
 
-	clear_bit(0, &wm831x_wdt_users);
-
-	return 0;
+	return ret;
 }
 
-static ssize_t wm831x_wdt_write(struct file *file,
-				const char __user *data, size_t count,
-				loff_t *ppos)
-{
-	size_t i;
-
-	if (count) {
-		wm831x_wdt_kick(wm831x);
-
-		if (!nowayout) {
-			/* In case it was set long ago */
-			wm831x_wdt_expect_close = 0;
-
-			/* scan to see whether or not we got the magic
-			   character */
-			for (i = 0; i != count; i++) {
-				char c;
-				if (get_user(c, data + i))
-					return -EFAULT;
-				if (c == 'V')
-					wm831x_wdt_expect_close = 42;
-			}
-		}
-	}
-	return count;
-}
-
-static const struct watchdog_info ident = {
+static const struct watchdog_info wm831x_wdt_info = {
 	.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
 	.identity = "WM831x Watchdog",
 };
 
-static long wm831x_wdt_ioctl(struct file *file, unsigned int cmd,
-			     unsigned long arg)
-{
-	int ret = -ENOTTY, time, i;
-	void __user *argp = (void __user *)arg;
-	int __user *p = argp;
-	u16 reg;
-
-	switch (cmd) {
-	case WDIOC_GETSUPPORT:
-		ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
-		break;
-
-	case WDIOC_GETSTATUS:
-	case WDIOC_GETBOOTSTATUS:
-		ret = put_user(0, p);
-		break;
-
-	case WDIOC_SETOPTIONS:
-	{
-		int options;
-
-		if (get_user(options, p))
-			return -EFAULT;
-
-		ret = -EINVAL;
-
-		/* Setting both simultaneously means at least one must fail */
-		if (options == WDIOS_DISABLECARD)
-			ret = wm831x_wdt_start(wm831x);
-
-		if (options == WDIOS_ENABLECARD)
-			ret = wm831x_wdt_stop(wm831x);
-		break;
-	}
-
-	case WDIOC_KEEPALIVE:
-		ret = wm831x_wdt_kick(wm831x);
-		break;
-
-	case WDIOC_SETTIMEOUT:
-		ret = get_user(time, p);
-		if (ret)
-			break;
-
-		if (time == 0) {
-			if (nowayout)
-				ret = -EINVAL;
-			else
-				wm831x_wdt_stop(wm831x);
-			break;
-		}
-
-		for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
-			if (wm831x_wdt_cfgs[i].time == time)
-				break;
-		if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
-			ret = -EINVAL;
-		else
-			ret = wm831x_wdt_set_timeout(wm831x,
-						     wm831x_wdt_cfgs[i].val);
-		break;
-
-	case WDIOC_GETTIMEOUT:
-		reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
-		reg &= WM831X_WDOG_TO_MASK;
-		for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
-			if (wm831x_wdt_cfgs[i].val == reg)
-				break;
-		if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) {
-			dev_warn(wm831x->dev,
-				 "Unknown watchdog configuration: %x\n", reg);
-			ret = -EINVAL;
-		} else
-			ret = put_user(wm831x_wdt_cfgs[i].time, p);
-
-	}
-
-	return ret;
-}
-
-static const struct file_operations wm831x_wdt_fops = {
+static const struct watchdog_ops wm831x_wdt_ops = {
 	.owner = THIS_MODULE,
-	.llseek = no_llseek,
-	.write = wm831x_wdt_write,
-	.unlocked_ioctl = wm831x_wdt_ioctl,
-	.open = wm831x_wdt_open,
-	.release = wm831x_wdt_release,
-};
-
-static struct miscdevice wm831x_wdt_miscdev = {
-	.minor = WATCHDOG_MINOR,
-	.name = "watchdog",
-	.fops = &wm831x_wdt_fops,
+	.start = wm831x_wdt_start,
+	.stop = wm831x_wdt_stop,
+	.ping = wm831x_wdt_ping,
+	.set_timeout = wm831x_wdt_set_timeout,
 };
 
 static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
 {
+	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
 	struct wm831x_pdata *chip_pdata;
 	struct wm831x_watchdog_pdata *pdata;
-	int reg, ret;
-
-	if (wm831x) {
-		dev_err(&pdev->dev, "wm831x watchdog already registered\n");
-		return -EBUSY;
-	}
-
-	wm831x = dev_get_drvdata(pdev->dev.parent);
+	struct wm831x_wdt_drvdata *driver_data;
+	struct watchdog_device *wm831x_wdt;
+	int reg, ret, i;
 
 	ret = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
 	if (ret < 0) {
@@ -338,6 +199,36 @@
 	if (reg & WM831X_WDOG_DEBUG)
 		dev_warn(wm831x->dev, "Watchdog is paused\n");
 
+	driver_data = kzalloc(sizeof(*driver_data), GFP_KERNEL);
+	if (!driver_data) {
+		dev_err(wm831x->dev, "Unable to allocate watchdog device\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	mutex_init(&driver_data->lock);
+	driver_data->wm831x = wm831x;
+
+	wm831x_wdt = &driver_data->wdt;
+
+	wm831x_wdt->info = &wm831x_wdt_info;
+	wm831x_wdt->ops = &wm831x_wdt_ops;
+	watchdog_set_drvdata(wm831x_wdt, driver_data);
+
+	if (nowayout)
+		wm831x_wdt->status |= WDOG_NO_WAY_OUT;
+
+	reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
+	reg &= WM831X_WDOG_TO_MASK;
+	for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
+		if (wm831x_wdt_cfgs[i].val == reg)
+			break;
+	if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
+		dev_warn(wm831x->dev,
+			 "Unknown watchdog timeout: %x\n", reg);
+	else
+		wm831x_wdt->timeout = wm831x_wdt_cfgs[i].time;
+
 	/* Apply any configuration */
 	if (pdev->dev.parent->platform_data) {
 		chip_pdata = pdev->dev.parent->platform_data;
@@ -361,7 +252,7 @@
 				dev_err(wm831x->dev,
 					"Failed to request update GPIO: %d\n",
 					ret);
-				goto err;
+				goto err_alloc;
 			}
 
 			ret = gpio_direction_output(pdata->update_gpio, 0);
@@ -372,7 +263,7 @@
 				goto err_gpio;
 			}
 
-			update_gpio = pdata->update_gpio;
+			driver_data->update_gpio = pdata->update_gpio;
 
 			/* Make sure the watchdog takes hardware updates */
 			reg |= WM831X_WDOG_RST_SRC;
@@ -389,33 +280,34 @@
 		}
 	}
 
-	wm831x_wdt_miscdev.parent = &pdev->dev;
-
-	ret = misc_register(&wm831x_wdt_miscdev);
+	ret = watchdog_register_device(&driver_data->wdt);
 	if (ret != 0) {
-		dev_err(wm831x->dev, "Failed to register miscdev: %d\n", ret);
+		dev_err(wm831x->dev, "watchdog_register_device() failed: %d\n",
+			ret);
 		goto err_gpio;
 	}
 
+	dev_set_drvdata(&pdev->dev, driver_data);
+
 	return 0;
 
 err_gpio:
-	if (update_gpio) {
-		gpio_free(update_gpio);
-		update_gpio = 0;
-	}
+	if (driver_data->update_gpio)
+		gpio_free(driver_data->update_gpio);
+err_alloc:
+	kfree(driver_data);
 err:
 	return ret;
 }
 
 static int __devexit wm831x_wdt_remove(struct platform_device *pdev)
 {
-	if (update_gpio) {
-		gpio_free(update_gpio);
-		update_gpio = 0;
-	}
+	struct wm831x_wdt_drvdata *driver_data = dev_get_drvdata(&pdev->dev);
 
-	misc_deregister(&wm831x_wdt_miscdev);
+	watchdog_unregister_device(&driver_data->wdt);
+
+	if (driver_data->update_gpio)
+		gpio_free(driver_data->update_gpio);
 
 	return 0;
 }
diff --git a/fs/bio.c b/fs/bio.c
index 9bfade8..41c93c7 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -255,7 +255,6 @@
 {
 	memset(bio, 0, sizeof(*bio));
 	bio->bi_flags = 1 << BIO_UPTODATE;
-	bio->bi_comp_cpu = -1;
 	atomic_set(&bio->bi_cnt, 1);
 }
 EXPORT_SYMBOL(bio_init);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 95f786e..b07f1da 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -971,7 +971,7 @@
 
 	if (!bdev->bd_disk)
 		return;
-	if (disk_partitionable(bdev->bd_disk))
+	if (disk_part_scan_enabled(bdev->bd_disk))
 		bdev->bd_invalidated = 1;
 }
 
@@ -1085,6 +1085,7 @@
 static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 {
 	struct gendisk *disk;
+	struct module *owner;
 	int ret;
 	int partno;
 	int perm = 0;
@@ -1110,6 +1111,7 @@
 	disk = get_gendisk(bdev->bd_dev, &partno);
 	if (!disk)
 		goto out;
+	owner = disk->fops->owner;
 
 	disk_block_events(disk);
 	mutex_lock_nested(&bdev->bd_mutex, for_part);
@@ -1137,8 +1139,8 @@
 					bdev->bd_disk = NULL;
 					mutex_unlock(&bdev->bd_mutex);
 					disk_unblock_events(disk);
-					module_put(disk->fops->owner);
 					put_disk(disk);
+					module_put(owner);
 					goto restart;
 				}
 			}
@@ -1194,8 +1196,8 @@
 				goto out_unlock_bdev;
 		}
 		/* only one opener holds refs to the module and disk */
-		module_put(disk->fops->owner);
 		put_disk(disk);
+		module_put(owner);
 	}
 	bdev->bd_openers++;
 	if (for_part)
@@ -1215,8 +1217,8 @@
  out_unlock_bdev:
 	mutex_unlock(&bdev->bd_mutex);
 	disk_unblock_events(disk);
-	module_put(disk->fops->owner);
 	put_disk(disk);
+	module_put(owner);
  out:
 	bdput(bdev);
 
@@ -1442,14 +1444,15 @@
 	if (!bdev->bd_openers) {
 		struct module *owner = disk->fops->owner;
 
-		put_disk(disk);
-		module_put(owner);
 		disk_put_part(bdev->bd_part);
 		bdev->bd_part = NULL;
 		bdev->bd_disk = NULL;
 		if (bdev != bdev->bd_contains)
 			victim = bdev->bd_contains;
 		bdev->bd_contains = NULL;
+
+		put_disk(disk);
+		module_put(owner);
 	}
 	mutex_unlock(&bdev->bd_mutex);
 	bdput(bdev);
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index 048b59d..c70111e 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -78,6 +78,28 @@
 
 	  If unsure, say N.
 
+config SQUASHFS_4K_DEVBLK_SIZE
+	bool "Use 4K device block size?"
+	depends on SQUASHFS
+	help
+	  By default Squashfs sets the dev block size (sb_min_blocksize)
+	  to 1K or the smallest block size supported by the block device
+	  (if larger).  Because blocks are packed together and unaligned
+	  in Squashfs, this should reduce latency.
+
+	  This, however, gives poor performance on MTD NAND devices where
+	  the optimal I/O size is 4K (even though the devices can support
+	  smaller block sizes).
+
+	  Using a 4K device block size may also improve overall I/O
+	  performance for some file access patterns (e.g. sequential
+	  accesses of files in filesystem order) on all media.
+
+	  Setting this option will force Squashfs to use a 4K device block
+	  size by default.
+
+	  If unsure, say N.
+
 config SQUASHFS_EMBEDDED
 	bool "Additional option for memory-constrained systems"
 	depends on SQUASHFS
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index b4a4e53..e8e1464 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -36,6 +36,13 @@
 #define SQUASHFS_FILE_SIZE		131072
 #define SQUASHFS_FILE_LOG		17
 
+/* default size of block device I/O */
+#ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
+#define SQUASHFS_DEVBLK_SIZE 4096
+#else
+#define SQUASHFS_DEVBLK_SIZE 1024
+#endif
+
 #define SQUASHFS_FILE_MAX_SIZE		1048576
 #define SQUASHFS_FILE_MAX_LOG		20
 
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 7438850..2da1715 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -95,7 +95,7 @@
 	}
 	msblk = sb->s_fs_info;
 
-	msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE);
+	msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
 	msblk->devblksize_log2 = ffz(~msblk->devblksize);
 
 	mutex_init(&msblk->read_data_mutex);
diff --git a/fs/statfs.c b/fs/statfs.c
index 8244924..9cf04a1 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -76,7 +76,7 @@
 int user_statfs(const char __user *pathname, struct kstatfs *st)
 {
 	struct path path;
-	int error = user_path(pathname, &path);
+	int error = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
 	if (!error) {
 		error = vfs_statfs(&path, st);
 		path_put(&path);
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index e6e28f3..9eabffb 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -47,6 +47,9 @@
 * @muxval: a number usually used to poke into some mux register to
  * mux in the signal to this channel
  * @cctl_opt: default options for the channel control register
+ * @device_fc: flow controller settings for the ccfg register. Only valid for
+ * slave channels. Set to 'true' if the peripheral should be the flow
+ * controller. The direction is selected at runtime.
  * @addr: source/target address in physical memory for this DMA channel,
  * can be the address of a FIFO register for burst requests for example.
  * This can be left undefined if the PrimeCell API is used for configuring
@@ -65,6 +68,7 @@
 	int max_signal;
 	u32 muxval;
 	u32 cctl;
+	bool device_fc;
 	dma_addr_t addr;
 	bool circular_buffer;
 	bool single;
@@ -77,13 +81,11 @@
  * @addr: current address
  * @maxwidth: the maximum width of a transfer on this bus
  * @buswidth: the width of this bus in bytes: 1, 2 or 4
- * @fill_bytes: bytes required to fill to the next bus memory boundary
  */
 struct pl08x_bus_data {
 	dma_addr_t addr;
 	u8 maxwidth;
 	u8 buswidth;
-	size_t fill_bytes;
 };
 
 /**
@@ -104,17 +106,35 @@
 };
 
 /**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
+	struct list_head node;
+};
+
+/**
  * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @tx: async tx descriptor
+ * @node: node for txd list for channels
+ * @dsg_list: list of children sg's
+ * @direction: direction of transfer
  * @llis_bus: DMA memory address (physical) start for the LLIs
  * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
  */
 struct pl08x_txd {
 	struct dma_async_tx_descriptor tx;
 	struct list_head node;
+	struct list_head dsg_list;
 	enum dma_data_direction	direction;
-	dma_addr_t src_addr;
-	dma_addr_t dst_addr;
-	size_t len;
 	dma_addr_t llis_bus;
 	struct pl08x_lli *llis_va;
 	/* Default cctl value for LLIs */
diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h
index cbee7de..d12f077 100644
--- a/include/linux/amba/pl330.h
+++ b/include/linux/amba/pl330.h
@@ -19,12 +19,8 @@
 	 * Peri_Req i/f of the DMAC that is
 	 * peripheral could be reached from.
 	 */
-	u8 peri_id; /* {0, 31} */
+	u8 peri_id; /* specific dma id */
 	enum pl330_reqtype rqtype;
-
-	/* For M->D and D->M Channels */
-	int burst_sz; /* in power of 2 */
-	dma_addr_t fifo_addr;
 };
 
 struct dma_pl330_platdata {
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ce33e68..a3c071c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -269,14 +269,6 @@
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 /*
- * Allow queuer to specify a completion CPU for this bio
- */
-static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
-{
-	bio->bi_comp_cpu = cpu;
-}
-
-/*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
  * These memory pools in turn all allocate from the bio_slab
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 71fc53b..4053cbd 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -59,8 +59,6 @@
 
 	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */
 
-	unsigned int		bi_comp_cpu;	/* completion CPU */
-
 	atomic_t		bi_cnt;		/* pin count */
 
 	struct bio_vec		*bi_io_vec;	/* the actual vec list */
@@ -93,11 +91,10 @@
 #define BIO_BOUNCED	5	/* bio is a bounce bio */
 #define BIO_USER_MAPPED 6	/* contains user pages */
 #define BIO_EOPNOTSUPP	7	/* not supported */
-#define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
-#define BIO_NULL_MAPPED 9	/* contains invalid user pages */
-#define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
-#define BIO_QUIET	11	/* Make BIO Quiet */
-#define BIO_MAPPED_INTEGRITY 12/* integrity metadata has been remapped */
+#define BIO_NULL_MAPPED 8	/* contains invalid user pages */
+#define BIO_FS_INTEGRITY 9	/* fs owns integrity data, not block layer */
+#define BIO_QUIET	10	/* Make BIO Quiet */
+#define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
 
 /*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7fbaa91..5267cd2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -195,7 +195,7 @@
 #include <linux/elevator.h>
 
 typedef void (request_fn_proc) (struct request_queue *q);
-typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 
@@ -680,6 +680,8 @@
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			 struct scsi_ioctl_command __user *);
 
+extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
+
 /*
  * A queue has just exitted congestion.  Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
@@ -863,16 +865,22 @@
 extern void blk_put_queue(struct request_queue *);
 
 /*
- * Note: Code in between changing the blk_plug list/cb_list or element of such
- * lists is preemptable, but such code can't do sleep (or be very careful),
- * otherwise data is corrupted. For details, please check schedule() where
- * blk_schedule_flush_plug() is called.
+ * blk_plug permits building a queue of related requests by holding the I/O
+ * fragments for a short period. This allows merging of sequential requests
+ * into single larger request. As the requests are moved from a per-task list to
+ * the device's request_queue in a batch, this results in improved scalability
+ * as the lock contention for request_queue lock is reduced.
+ *
+ * It is ok not to disable preemption when adding the request to the plug list
+ * or when attempting a merge, because blk_schedule_flush_plug() will only
+ * flush the plug list when the task itself sleeps. For details, please see
+ * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	unsigned long magic;
-	struct list_head list;
-	struct list_head cb_list;
-	unsigned int should_sort;
+	unsigned long magic; /* detect uninitialized use-cases */
+	struct list_head list; /* requests */
+	struct list_head cb_list; /* md requires an unplug callback */
+	unsigned int should_sort; /* list to be sorted before flushing? */
 };
 #define BLK_MAX_REQUEST_COUNT 16
 
@@ -1189,20 +1197,6 @@
 }
 #endif
 
-#ifdef CONFIG_BLK_DEV_THROTTLING
-extern int blk_throtl_init(struct request_queue *q);
-extern void blk_throtl_exit(struct request_queue *q);
-extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
-#else /* CONFIG_BLK_DEV_THROTTLING */
-static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
-{
-	return 0;
-}
-
-static inline int blk_throtl_init(struct request_queue *q) { return 0; }
-static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
-#endif /* CONFIG_BLK_DEV_THROTTLING */
-
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8fbf40e..ace51af 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -24,8 +24,7 @@
 #include <linux/device.h>
 #include <linux/uio.h>
 #include <linux/dma-direction.h>
-
-struct scatterlist;
+#include <linux/scatterlist.h>
 
 /**
  * typedef dma_cookie_t - an opaque DMA cookie
@@ -519,6 +518,16 @@
 			(unsigned long)config);
 }
 
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
+	struct dma_chan *chan, void *buf, size_t len,
+	enum dma_data_direction dir, unsigned long flags)
+{
+	struct scatterlist sg;
+	sg_init_one(&sg, buf, len);
+
+	return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags);
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
 	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index d800d51..1d0f7a2 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -38,6 +38,12 @@
 	elevator_merged_fn *elevator_merged_fn;
 	elevator_merge_req_fn *elevator_merge_req_fn;
 	elevator_allow_merge_fn *elevator_allow_merge_fn;
+
+	/*
+	 * Used for both plugged list and elevator merging and in the
+	 * former case called without queue_lock.  Read comment on top of
+	 * attempt_plug_merge() for details.
+	 */
 	elevator_bio_merged_fn *elevator_bio_merged_fn;
 
 	elevator_dispatch_fn *elevator_dispatch_fn;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 6957350..9de31bc 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -131,6 +131,7 @@
 #define GENHD_FL_EXT_DEVT			64 /* allow extended devt */
 #define GENHD_FL_NATIVE_CAPACITY		128
 #define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE	256
+#define GENHD_FL_NO_PART_SCAN			512
 
 enum {
 	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
@@ -238,9 +239,10 @@
 	return disk->minors;
 }
 
-static inline bool disk_partitionable(struct gendisk *disk)
+static inline bool disk_part_scan_enabled(struct gendisk *disk)
 {
-	return disk_max_parts(disk) > 1;
+	return disk_max_parts(disk) > 1 &&
+		!(disk->flags & GENHD_FL_NO_PART_SCAN);
 }
 
 static inline dev_t disk_devt(struct gendisk *disk)
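
A driver opting out of partition scanning only needs to set the new flag before add_disk(); a one-line sketch:

disk->flags |= GENHD_FL_NO_PART_SCAN;	/* suppress partition rescans */
add_disk(disk);
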
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 683d698..11a41a8 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -73,8 +73,8 @@
  */
 enum {
 	LO_FLAGS_READ_ONLY	= 1,
-	LO_FLAGS_USE_AOPS	= 2,
 	LO_FLAGS_AUTOCLEAR	= 4,
+	LO_FLAGS_PARTSCAN	= 8,
 };
 
 #include <asm/posix_types.h>	/* for __kernel_old_dev_t */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 1679ff6..3fdf251 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2873,3 +2873,5 @@
 
 #define PCI_VENDOR_ID_XEN		0x5853
 #define PCI_DEVICE_ID_XEN_PLATFORM	0x0001
+
+#define PCI_VENDOR_ID_OCZ		0x1b85
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 8bffe9a..0efa1f1 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -131,8 +131,6 @@
 
 	struct plat_sci_port_ops	*ops;
 
-	struct device	*dma_dev;
-
 	unsigned int	dma_slave_tx;
 	unsigned int	dma_slave_rx;
 };
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 3d5d6db..9324488 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -57,6 +57,36 @@
  * "feature-flush-cache" node!
  */
 #define BLKIF_OP_FLUSH_DISKCACHE   3
+
+/*
+ * Recognised only if "feature-discard" is present in backend xenbus info.
+ * The "feature-discard" node contains a boolean indicating whether trim
+ * (ATA) or unmap (SCSI) - conveniently called discard - requests are
+ * likely to succeed or fail. Either way, a discard request
+ * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
+ * the underlying block-device hardware. The boolean simply indicates whether
+ * or not it is worthwhile for the frontend to attempt discard requests.
+ * If a backend does not recognise BLKIF_OP_DISCARD, it should *not*
+ * create the "feature-discard" node!
+ *
+ * Discard operation is a request for the underlying block device to mark
+ * extents to be erased. However, discard does not guarantee that the blocks
+ * will be erased from the device - it is just a hint to the device
+ * controller that these blocks are no longer in use. What the device
+ * controller does with that information is left to the controller.
+ * Discard operations are passed with sector_number as the
+ * sector index to begin discard operations at and nr_sectors as the number of
+ * sectors to be discarded. The specified sectors should be discarded if the
+ * underlying block device supports trim (ATA) or unmap (SCSI) operations,
+ * or a BLKIF_RSP_EOPNOTSUPP should be returned.
+ * More information about trim/unmap operations at:
+ * http://t13.org/Documents/UploadedDocuments/docs2008/
+ *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
+ * http://www.seagate.com/staticfiles/support/disc/manuals/
+ *     Interface%20manuals/100293068c.pdf
+ */
+#define BLKIF_OP_DISCARD           5
+
 /*
  * Maximum scatter/gather segments per request.
  * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
@@ -74,6 +104,11 @@
 	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
+struct blkif_request_discard {
+	blkif_sector_t sector_number;
+	uint64_t nr_sectors;
+};
+
 struct blkif_request {
 	uint8_t        operation;    /* BLKIF_OP_???                         */
 	uint8_t        nr_segments;  /* number of segments                   */
@@ -81,6 +116,7 @@
 	uint64_t       id;           /* private guest value, echoed in resp  */
 	union {
 		struct blkif_request_rw rw;
+		struct blkif_request_discard discard;
 	} u;
 };
 
diff --git a/mm/bounce.c b/mm/bounce.c
index 1481de6..434fb4f 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
+#include <linux/bootmem.h>
 #include <asm/tlbflush.h>
 
 #include <trace/events/block.h>
@@ -26,12 +27,10 @@
 #ifdef CONFIG_HIGHMEM
 static __init int init_emergency_pool(void)
 {
-	struct sysinfo i;
-	si_meminfo(&i);
-	si_swapinfo(&i);
-
-	if (!i.totalhigh)
+#ifndef CONFIG_MEMORY_HOTPLUG
+	if (max_pfn <= max_low_pfn)
 		return 0;
+#endif
 
 	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
 	BUG_ON(!page_pool);
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index b5e922f..bad91b4 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -271,7 +271,10 @@
 
 	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
 
-	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
+	if (!dma_data->ops)
+		dma_data->ops = samsung_dma_get_ops();
+
+	dma_data->ops->started(dma_data->channel);
 
 	return 0;
 }
@@ -317,7 +320,10 @@
 
 	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
 
-	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
+	if (!dma_data->ops)
+		dma_data->ops = samsung_dma_get_ops();
+
+	dma_data->ops->started(dma_data->channel);
 
 	return 0;
 }
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
index 9465588..851346f 100644
--- a/sound/soc/samsung/dma.c
+++ b/sound/soc/samsung/dma.c
@@ -54,7 +54,6 @@
 	spinlock_t lock;
 	int state;
 	unsigned int dma_loaded;
-	unsigned int dma_limit;
 	unsigned int dma_period;
 	dma_addr_t dma_start;
 	dma_addr_t dma_pos;
@@ -62,77 +61,79 @@
 	struct s3c_dma_params *params;
 };
 
+static void audio_buffdone(void *data);
+
 /* dma_enqueue
  *
  * place a dma buffer onto the queue for the dma system
  * to handle.
-*/
+ */
 static void dma_enqueue(struct snd_pcm_substream *substream)
 {
 	struct runtime_data *prtd = substream->runtime->private_data;
 	dma_addr_t pos = prtd->dma_pos;
 	unsigned int limit;
-	int ret;
+	struct samsung_dma_prep_info dma_info;
 
 	pr_debug("Entered %s\n", __func__);
 
-	if (s3c_dma_has_circular())
-		limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period;
-	else
-		limit = prtd->dma_limit;
+	limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period;
 
 	pr_debug("%s: loaded %d, limit %d\n",
 				__func__, prtd->dma_loaded, limit);
 
-	while (prtd->dma_loaded < limit) {
-		unsigned long len = prtd->dma_period;
+	dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE);
+	dma_info.direction =
+		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
+		? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	dma_info.fp = audio_buffdone;
+	dma_info.fp_param = substream;
+	dma_info.period = prtd->dma_period;
+	dma_info.len = prtd->dma_period*limit;
 
+	while (prtd->dma_loaded < limit) {
 		pr_debug("dma_loaded: %d\n", prtd->dma_loaded);
 
-		if ((pos + len) > prtd->dma_end) {
-			len  = prtd->dma_end - pos;
-			pr_debug("%s: corrected dma len %ld\n", __func__, len);
+		if ((pos + dma_info.period) > prtd->dma_end) {
+			dma_info.period  = prtd->dma_end - pos;
+			pr_debug("%s: corrected dma len %ld\n",
+					__func__, dma_info.period);
 		}
 
-		ret = s3c2410_dma_enqueue(prtd->params->channel,
-			substream, pos, len);
+		dma_info.buf = pos;
+		prtd->params->ops->prepare(prtd->params->ch, &dma_info);
 
-		if (ret == 0) {
-			prtd->dma_loaded++;
-			pos += prtd->dma_period;
-			if (pos >= prtd->dma_end)
-				pos = prtd->dma_start;
-		} else
-			break;
+		prtd->dma_loaded++;
+		pos += prtd->dma_period;
+		if (pos >= prtd->dma_end)
+			pos = prtd->dma_start;
 	}
 
 	prtd->dma_pos = pos;
 }
 
-static void audio_buffdone(struct s3c2410_dma_chan *channel,
-				void *dev_id, int size,
-				enum s3c2410_dma_buffresult result)
+static void audio_buffdone(void *data)
 {
-	struct snd_pcm_substream *substream = dev_id;
-	struct runtime_data *prtd;
+	struct snd_pcm_substream *substream = data;
+	struct runtime_data *prtd = substream->runtime->private_data;
 
 	pr_debug("Entered %s\n", __func__);
 
-	if (result == S3C2410_RES_ABORT || result == S3C2410_RES_ERR)
-		return;
+	if (prtd->state & ST_RUNNING) {
+		prtd->dma_pos += prtd->dma_period;
+		if (prtd->dma_pos >= prtd->dma_end)
+			prtd->dma_pos = prtd->dma_start;
 
-	prtd = substream->runtime->private_data;
+		if (substream)
+			snd_pcm_period_elapsed(substream);
 
-	if (substream)
-		snd_pcm_period_elapsed(substream);
-
-	spin_lock(&prtd->lock);
-	if (prtd->state & ST_RUNNING && !s3c_dma_has_circular()) {
-		prtd->dma_loaded--;
-		dma_enqueue(substream);
+		spin_lock(&prtd->lock);
+		if (!samsung_dma_has_circular()) {
+			prtd->dma_loaded--;
+			dma_enqueue(substream);
+		}
+		spin_unlock(&prtd->lock);
 	}
-
-	spin_unlock(&prtd->lock);
 }
 
 static int dma_hw_params(struct snd_pcm_substream *substream,
@@ -144,8 +145,7 @@
 	unsigned long totbytes = params_buffer_bytes(params);
 	struct s3c_dma_params *dma =
 		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-	int ret = 0;
-
+	struct samsung_dma_info dma_info;
 
 	pr_debug("Entered %s\n", __func__);
 
@@ -163,30 +163,26 @@
 		pr_debug("params %p, client %p, channel %d\n", prtd->params,
 			prtd->params->client, prtd->params->channel);
 
-		ret = s3c2410_dma_request(prtd->params->channel,
-					  prtd->params->client, NULL);
+		prtd->params->ops = samsung_dma_get_ops();
 
-		if (ret < 0) {
-			printk(KERN_ERR "failed to get dma channel\n");
-			return ret;
-		}
-
-		/* use the circular buffering if we have it available. */
-		if (s3c_dma_has_circular())
-			s3c2410_dma_setflags(prtd->params->channel,
-					     S3C2410_DMAF_CIRCULAR);
+		dma_info.cap = (samsung_dma_has_circular() ?
+			DMA_CYCLIC : DMA_SLAVE);
+		dma_info.client = prtd->params->client;
+		dma_info.direction =
+			(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
+			? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		dma_info.width = prtd->params->dma_size;
+		dma_info.fifo = prtd->params->dma_addr;
+		prtd->params->ch = prtd->params->ops->request(
+				prtd->params->channel, &dma_info);
 	}
 
-	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
-				    audio_buffdone);
-
 	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
 
 	runtime->dma_bytes = totbytes;
 
 	spin_lock_irq(&prtd->lock);
 	prtd->dma_loaded = 0;
-	prtd->dma_limit = runtime->hw.periods_min;
 	prtd->dma_period = params_period_bytes(params);
 	prtd->dma_start = runtime->dma_addr;
 	prtd->dma_pos = prtd->dma_start;
@@ -206,7 +202,8 @@
 	snd_pcm_set_runtime_buffer(substream, NULL);
 
 	if (prtd->params) {
-		s3c2410_dma_free(prtd->params->channel, prtd->params->client);
+		prtd->params->ops->release(prtd->params->ch,
+					prtd->params->client);
 		prtd->params = NULL;
 	}
 
@@ -225,23 +222,9 @@
 	if (!prtd->params)
 		return 0;
 
-	/* channel needs configuring for mem=>device, increment memory addr,
-	 * sync to pclk, half-word transfers to the IIS-FIFO. */
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		s3c2410_dma_devconfig(prtd->params->channel,
-				      S3C2410_DMASRC_MEM,
-				      prtd->params->dma_addr);
-	} else {
-		s3c2410_dma_devconfig(prtd->params->channel,
-				      S3C2410_DMASRC_HW,
-				      prtd->params->dma_addr);
-	}
-
-	s3c2410_dma_config(prtd->params->channel,
-			   prtd->params->dma_size);
-
 	/* flush the DMA channel */
-	s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_FLUSH);
+	prtd->params->ops->flush(prtd->params->ch);
+
 	prtd->dma_loaded = 0;
 	prtd->dma_pos = prtd->dma_start;
 
@@ -265,14 +248,14 @@
 	case SNDRV_PCM_TRIGGER_RESUME:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		prtd->state |= ST_RUNNING;
-		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START);
+		prtd->params->ops->trigger(prtd->params->ch);
 		break;
 
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		prtd->state &= ~ST_RUNNING;
-		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STOP);
+		prtd->params->ops->stop(prtd->params->ch);
 		break;
 
 	default:
@@ -291,21 +274,12 @@
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct runtime_data *prtd = runtime->private_data;
 	unsigned long res;
-	dma_addr_t src, dst;
 
 	pr_debug("Entered %s\n", __func__);
 
-	spin_lock(&prtd->lock);
-	s3c2410_dma_getposition(prtd->params->channel, &src, &dst);
+	res = prtd->dma_pos - prtd->dma_start;
 
-	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		res = dst - prtd->dma_start;
-	else
-		res = src - prtd->dma_start;
-
-	spin_unlock(&prtd->lock);
-
-	pr_debug("Pointer %x %x\n", src, dst);
+	pr_debug("Pointer offset: %lu\n", res);
 
 	/* we seem to be getting the odd error from the pcm library due
 	 * to out-of-bounds pointers. this is maybe due to the dma engine
diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
index c506592..7d1ead7 100644
--- a/sound/soc/samsung/dma.h
+++ b/sound/soc/samsung/dma.h
@@ -6,7 +6,7 @@
  *  Free Software Foundation;  either version 2 of the  License, or (at your
  *  option) any later version.
  *
- *  ALSA PCM interface for the Samsung S3C24xx CPU
+ *  ALSA PCM interface for the Samsung SoC
  */
 
 #ifndef _S3C_AUDIO_H
@@ -17,6 +17,8 @@
 	int channel;				/* Channel ID */
 	dma_addr_t dma_addr;
 	int dma_size;			/* Size of the DMA transfer */
+	unsigned ch;
+	struct samsung_dma_ops *ops;
 };
 
 #endif
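
For orientation, these are the samsung_dma_ops callbacks this conversion relies on, with usage inferred from the call sites above (a summary, not the header's definition):

/*
 * ops = samsung_dma_get_ops();
 * ch  = ops->request(channel, &dma_info);    dma_hw_params()
 * ops->prepare(ch, &dma_prep_info);          dma_enqueue()
 * ops->trigger(ch);                          trigger START/RESUME
 * ops->stop(ch);                             trigger STOP/SUSPEND
 * ops->started(ch);                          ac97 trigger paths
 * ops->flush(ch);                            dma_prepare()
 * ops->release(ch, client);                  dma_hw_free()
 */
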