Merge "mmc: block: Adding device attribute for minimum sectors to start BKOPS"
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt b/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
index b16d40f..7f2a21b 100644
--- a/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
@@ -8,13 +8,25 @@
 that need to be monitored for usage requirement to check if a given low power
 state can be entered. Each resource is identified by a combination of the name,
 id, type and key, which is also used by the RPM to identify a shared resource.
+The name and resource-type are required nodes; the type, id and key are
+optional nodes, which are needed only if the resource type is an RPM shared
+resource (MSM_LPM_RPM_RS_TYPE).
 
-The required nodes for lpm-resources are:
+The nodes for lpm-resources are:
+
+Required Nodes:
 
 - compatible: "qcom,lpm-resources"
 - reg: The numeric level id
 - qcom,name: The name of the low power resource represented
              as a string.
+- qcom,resource-type: The type of the LPM resource.
+   MSM_LPM_RPM_RS_TYPE    = 0
+   MSM_LPM_LOCAL_RS_TYPE  = 1
+
+
+Optional Nodes:
+
 - qcom,type: The type of resource used like smps or pxo
              represented as a hex value.
 - qcom,id: The id representing a device within a resource type.
@@ -25,6 +37,7 @@
             qcom,lpm-resources@0 {
                         reg = <0x0>;
                         qcom,name = "vdd-dig";
+                        qcom,resource-type = <0>;
                         qcom,type = <0x62706d73>;   /* "smpb" */
                         qcom,id = <0x02>;
                         qcom,key = <0x6e726f63>;   /* "corn" */
diff --git a/Documentation/devicetree/bindings/gpio/gpio_keys.txt b/Documentation/devicetree/bindings/gpio/gpio_keys.txt
index 5c2c021..4e810e1 100644
--- a/Documentation/devicetree/bindings/gpio/gpio_keys.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio_keys.txt
@@ -4,6 +4,7 @@
 	- compatible = "gpio-keys";
 
 Optional properties:
+	- input-name: name of the input device
 	- autorepeat: Boolean, Enable auto repeat feature of Linux input
 	  subsystem.
 
@@ -28,6 +29,7 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			autorepeat;
+			input-name = "gpio-keys";
 			button@21 {
 				label = "GPIO Key UP";
 				linux,code = <103>;
diff --git a/Documentation/devicetree/bindings/qseecom/qseecom.txt b/Documentation/devicetree/bindings/qseecom/qseecom.txt
index 8b17ba9..5e7c42a 100644
--- a/Documentation/devicetree/bindings/qseecom/qseecom.txt
+++ b/Documentation/devicetree/bindings/qseecom/qseecom.txt
@@ -2,9 +2,21 @@
 
 Required properties:
 - compatible : Should be "qcom,qseecom"
+- qcom,msm_bus,name: Should be "qseecom-noc"
+- qcom,msm_bus,num_cases: Total number of use cases for bus scaling
+- qcom,msm_bus,num_paths: Total number of master-slave port pairs (paths)
+- qcom,msm_bus,vectors: Vectors for bus topology and bandwidth requirements
 
 Example:
-
 	qcom,qseecom@fe806000 {
 		compatible = "qcom,qseecom";
+		qcom,msm_bus,name = "qseecom-noc";
+		qcom,msm_bus,num_cases = <4>;
+		qcom,msm_bus,active_only = <0>;
+		qcom,msm_bus,num_paths = <1>;
+		qcom,msm_bus,vectors =
+			<55 512 0 0>,
+			<55 512 3936000000 393600000>,
+			<55 512 3936000000 393600000>,
+			<55 512 3936000000 393600000>;
 	};
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 2864fd1..3c8cb1b 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -123,16 +123,18 @@
 
  - compatible :                            "qcom,msm-ocmem-audio"
 
- - qcom,msm-ocmem-audio-src-id:            Master port id
+ - qcom,msm_bus,name:                      Client name
 
- - qcom,msm-ocmem-audio-dst-id:            Slave port id
+ - qcom,msm_bus,num_cases:                 Total number of use cases
 
- - qcom,msm-ocmem-audio-ab:                arbitrated bandwidth
-                                           in Bytes/s
+ - qcom,msm_bus,active_only:               Context flag for requests in active or
+                                           dual (active & sleep) context
 
- - qcom,msm-ocmem-audio-ib:                instantaneous bandwidth
-                                           in Bytes/s
+ - qcom,msm_bus,num_paths:                 Total number of master-slave pairs
 
+ - qcom,msm_bus,vectors:                   Arrays of unsigned integers representing:
+                                           master-id, slave-id, arbitrated bandwidth,
+                                           instantaneous bandwidth, both in Bytes/s
 Example:
 
         qcom,msm-pcm {
@@ -234,10 +236,13 @@
 
 	qcom,msm-ocmem-audio {
 		compatible = "qcom,msm-ocmem-audio";
-		qcom,msm-ocmem-audio-src-id = <11>;
-		qcom,msm-ocmem-audio-dst-id = <604>;
-		qcom,msm-ocmem-audio-ab = <209715200>;
-		qcom,msm-ocmem-audio-ib = <471859200>;
+		qcom,msm_bus,name = "audio-ocmem";
+		qcom,msm_bus,num_cases = <2>;
+		qcom,msm_bus,active_only = <0>;
+		qcom,msm_bus,num_paths = <1>;
+		qcom,msm_bus,vectors =
+			<11 604 0 0>,
+			<11 604 32505856 325058560>;
 	};
 
 * MSM8974 ASoC Machine driver
diff --git a/arch/arm/boot/dts/msm8226-sim.dts b/arch/arm/boot/dts/msm8226-sim.dts
index 4330849..aeda1d8 100644
--- a/arch/arm/boot/dts/msm8226-sim.dts
+++ b/arch/arm/boot/dts/msm8226-sim.dts
@@ -58,4 +58,21 @@
 		reg = <0xf995e000 0x1000>;
 		interrupts = <0 114 0>;
 	};
+
+	usb@f9a55000 {
+		compatible = "qcom,hsusb-otg";
+		reg = <0xf9a55000 0x400>;
+		interrupts = <0 134 0>;
+		interrupt-names = "core_irq";
+
+		qcom,hsusb-otg-phy-type = <2>;
+		qcom,hsusb-otg-mode = <1>;
+		qcom,hsusb-otg-otg-control = <1>;
+		qcom,hsusb-otg-disable-reset;
+	};
+
+	android_usb {
+		compatible = "qcom,android-usb";
+	};
+
 };
diff --git a/arch/arm/boot/dts/msm8974-cdp.dts b/arch/arm/boot/dts/msm8974-cdp.dts
index aff0adc..05fcc4f 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dts
+++ b/arch/arm/boot/dts/msm8974-cdp.dts
@@ -112,6 +112,7 @@
 
 	gpio_keys {
 		compatible = "gpio-keys";
+		input-name = "gpio-keys";
 
 		camera_snapshot {
 			label = "camera_snapshot";
diff --git a/arch/arm/boot/dts/msm8974-liquid.dts b/arch/arm/boot/dts/msm8974-liquid.dts
index 2abc1d5..bf4adaf 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dts
+++ b/arch/arm/boot/dts/msm8974-liquid.dts
@@ -22,27 +22,59 @@
 	serial@f991e000 {
 		status = "ok";
 	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		input-name = "gpio-keys";
+
+		home {
+			label = "home";
+			gpios = <&pm8941_gpios 1 0x1>;
+			linux,input-type = <1>;
+			linux,code = <102>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+		};
+
+		vol_down {
+			label = "volume_down";
+			gpios = <&pm8941_gpios 2 0x1>;
+			linux,input-type = <1>;
+			linux,code = <114>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8941_gpios 5 0x1>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+		};
+	};
 };
 
 &pm8941_gpios {
 	gpio@c000 { /* GPIO 1 */
+		qcom,mode = <0>;
+		qcom,pull = <0>;
+		qcom,vin-sel = <2>;
+		qcom,select = <0>;
 	};
 
 	gpio@c100 { /* GPIO 2 */
+		qcom,mode = <0>;
+		qcom,pull = <0>;
+		qcom,vin-sel = <2>;
+		qcom,select = <0>;
 	};
 
 	gpio@c200 { /* GPIO 3 */
-		qcom,mode = <0>;
-		qcom,pull = <0>;
-		qcom,vin-sel = <2>;
-		qcom,select = <0>;
 	};
 
 	gpio@c300 { /* GPIO 4 */
-		qcom,mode = <0>;
-		qcom,pull = <0>;
-		qcom,vin-sel = <2>;
-		qcom,select = <0>;
 	};
 
 	gpio@c400 { /* GPIO 5 */
diff --git a/arch/arm/boot/dts/msm8974-mtp.dts b/arch/arm/boot/dts/msm8974-mtp.dts
index 00aec9f..e0d6ad3 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dts
+++ b/arch/arm/boot/dts/msm8974-mtp.dts
@@ -112,6 +112,7 @@
 
 	gpio_keys {
 		compatible = "gpio-keys";
+		input-name = "gpio-keys";
 
 		camera_snapshot {
 			label = "camera_snapshot";
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index b992e86..8e6f1e6 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -761,10 +761,13 @@
 
 	qcom,msm-ocmem-audio {
 		compatible = "qcom,msm-ocmem-audio";
-		qcom,msm-ocmem-audio-src-id = <11>;
-		qcom,msm-ocmem-audio-dst-id = <604>;
-		qcom,msm-ocmem-audio-ab = <32505856>;
-		qcom,msm-ocmem-audio-ib = <32505856>;
+		qcom,msm_bus,name = "audio-ocmem";
+		qcom,msm_bus,num_cases = <2>;
+		qcom,msm_bus,active_only = <0>;
+		qcom,msm_bus,num_paths = <1>;
+		qcom,msm_bus,vectors =
+			<11 604 0 0>,
+			<11 604 32505856 32505856>;
 	};
 
 	qcom,mss@fc880000 {
@@ -883,6 +886,15 @@
 
 	qcom,qseecom@fe806000 {
 		compatible = "qcom,qseecom";
+		qcom,msm_bus,name = "qseecom-noc";
+		qcom,msm_bus,num_cases = <4>;
+		qcom,msm_bus,active_only = <0>;
+		qcom,msm_bus,num_paths = <1>;
+		qcom,msm_bus,vectors =
+				<55 512 0 0>,
+				<55 512 3936000000 393600000>,
+				<55 512 3936000000 393600000>,
+				<55 512 3936000000 393600000>;
 	};
 
 	qcom,wdt@f9017000 {
diff --git a/arch/arm/boot/dts/msm8974_pm.dtsi b/arch/arm/boot/dts/msm8974_pm.dtsi
index e39a72a..f108d37 100644
--- a/arch/arm/boot/dts/msm8974_pm.dtsi
+++ b/arch/arm/boot/dts/msm8974_pm.dtsi
@@ -130,6 +130,7 @@
 		qcom,lpm-resources@0 {
 			reg = <0x0>;
 			qcom,name = "vdd-dig";
+			qcom,resource-type = <0>;
 			qcom,type = <0x62706d73>;	/* "smpb" */
 			qcom,id = <0x02>;
 			qcom,key = <0x6e726f63>;	/* "corn" */
@@ -138,6 +139,7 @@
 		qcom,lpm-resources@1 {
 			reg = <0x1>;
 			qcom,name = "vdd-mem";
+			qcom,resource-type = <0>;
 			qcom,type = <0x62706d73>;	/* "smpb" */
 			qcom,id = <0x01>;
 			qcom,key = <0x7675>;		/* "uv" */
@@ -146,10 +148,17 @@
 		qcom,lpm-resources@2 {
 			reg = <0x2>;
 			qcom,name = "pxo";
+			qcom,resource-type = <0>;
 			qcom,type = <0x306b6c63>;	/* "clk0" */
 			qcom,id = <0x00>;
 			qcom,key = <0x62616e45>;	/* "Enab" */
 		};
+
+		qcom,lpm-resources@3 {
+			reg = <0x3>;
+			qcom,name = "l2";
+			qcom,resource-type = <1>;
+		};
 	};
 
 	qcom,lpm-levels {
diff --git a/arch/arm/configs/msm8910_defconfig b/arch/arm/configs/msm8910_defconfig
new file mode 100644
index 0000000..ebcc6f5
--- /dev/null
+++ b/arch/arm/configs/msm8910_defconfig
@@ -0,0 +1,100 @@
+# CONFIG_ARM_PATCH_PHYS_VIRT is not set
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_MSM=y
+CONFIG_ARCH_MSM8910=y
+# CONFIG_MSM_STACKED_MEMORY is not set
+CONFIG_CPU_HAS_L2_PMU=y
+# CONFIG_MSM_FIQ_SUPPORT is not set
+# CONFIG_MSM_PROC_COMM is not set
+CONFIG_MSM_DIRECT_SCLK_ACCESS=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_VMALLOC_RESERVE=0x19000000
+CONFIG_USE_OF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SUSPEND is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_ANDROID_PMEM is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_HSL=y
+CONFIG_SERIAL_MSM_HSL_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_USER=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/msm9615_defconfig b/arch/arm/configs/msm9615_defconfig
index 2cc801e..7c3d5b0 100644
--- a/arch/arm/configs/msm9615_defconfig
+++ b/arch/arm/configs/msm9615_defconfig
@@ -49,6 +49,8 @@
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 # CONFIG_MSM_SYSMON_COMM is not set
 CONFIG_MSM_MODEM_8960=y
+CONFIG_MSM_PIL_LPASS_QDSP6V4=y
+CONFIG_MSM_PIL_MODEM_QDSP6V4=y
 CONFIG_MSM_LPASS_8960=y
 CONFIG_MSM_RPM_LOG=y
 CONFIG_MSM_RPM_STATS_LOG=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 60963c6..d7cdbc4 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -105,7 +105,11 @@
 # CONFIG_HWMON is not set
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_QPNP=y
-# CONFIG_HID_SUPPORT is not set
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_CI13XXX_MSM=y
 CONFIG_USB_G_ANDROID=y
@@ -150,3 +154,12 @@
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=m
+CONFIG_MMC_MSM=y
+CONFIG_MMC_MSM_SPS_SUPPORT=y
+CONFIG_MMC_UNSAFE_RESUME=y
diff --git a/arch/arm/kernel/perf_event_msm.c b/arch/arm/kernel/perf_event_msm.c
index 90c9c9e..8f58adf 100644
--- a/arch/arm/kernel/perf_event_msm.c
+++ b/arch/arm/kernel/perf_event_msm.c
@@ -709,8 +709,6 @@
 
 static struct arm_pmu scorpion_pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
-	.request_pmu_irq	= msm_request_irq,
-	.free_pmu_irq		= msm_free_irq,
 	.enable			= scorpion_pmu_enable_event,
 	.disable		= scorpion_pmu_disable_event,
 	.read_counter		= armv7pmu_read_counter,
@@ -731,6 +729,9 @@
 	scorpion_pmu.name	= "ARMv7 Scorpion";
 	scorpion_pmu.num_events	= armv7_read_num_pmnc_events();
 	scorpion_pmu.pmu.attr_groups	= msm_l1_pmu_attr_grps;
+	/* Unicore (single-core) parts can't use the percpu IRQ API. */
+	scorpion_pmu.request_pmu_irq	= armpmu_generic_request_irq;
+	scorpion_pmu.free_pmu_irq	= armpmu_generic_free_irq;
 	scorpion_clear_pmuregs();
 	return &scorpion_pmu;
 }
@@ -741,6 +742,8 @@
 	scorpion_pmu.name	= "ARMv7 Scorpion-MP";
 	scorpion_pmu.num_events	= armv7_read_num_pmnc_events();
 	scorpion_pmu.pmu.attr_groups	= msm_l1_pmu_attr_grps;
+	scorpion_pmu.request_pmu_irq	= msm_request_irq;
+	scorpion_pmu.free_pmu_irq	= msm_free_irq;
 	scorpion_clear_pmuregs();
 	return &scorpion_pmu;
 }
diff --git a/arch/arm/kernel/perf_event_msm_krait.c b/arch/arm/kernel/perf_event_msm_krait.c
index 8d8f47a..eec614b 100644
--- a/arch/arm/kernel/perf_event_msm_krait.c
+++ b/arch/arm/kernel/perf_event_msm_krait.c
@@ -540,8 +540,8 @@
         int err = 0;
         int cpu;
 
-        err = request_percpu_irq(irq, *handle_irq, "krait-l1-armpmu",
-                        &cpu_hw_events);
+	err = request_percpu_irq(irq, *handle_irq, "l1-armpmu",
+			&cpu_hw_events);
 
         if (!err) {
                 for_each_cpu(cpu, cpu_online_mask) {
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 6e841c7..9f0c30b 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -356,10 +356,13 @@
 	select CPU_V7
 	select MSM_GPIOMUX
 	select MSM_RPM_SMD
+	select MSM_NATIVE_RESTART
+	select MSM_RESTART_V2
 	select MULTI_IRQ_HANDLER
 	select GPIO_MSM_V3
 	select MAY_HAVE_SPARSE_IRQ
 	select SPARSE_IRQ
+	select MSM_MULTIMEDIA_USE_ION
 
 config ARCH_MSM8910
 	bool "MSM8910"
@@ -1932,7 +1935,7 @@
 
 config MSM_PIL_LPASS_QDSP6V4
 	tristate "LPASS QDSP6v4 (Hexagon) Boot Support"
-	depends on MSM_PIL
+	depends on MSM_SUBSYSTEM_RESTART
 	help
 	  Support for booting and shutting down QDSP6v4 processors (hexagon)
 	  in low power audio subsystems. If you would like to record or
@@ -1942,7 +1945,7 @@
 
 config MSM_PIL_MODEM_QDSP6V4
 	tristate "Modem QDSP6v4 (Hexagon) Boot Support"
-	depends on MSM_PIL
+	depends on MSM_SUBSYSTEM_RESTART
 	help
 	  Support for booting and shutting down QDSP6v4 processors (hexagon)
 	  in modem subsystems. If you would like to make or receive phone
@@ -2030,22 +2033,6 @@
 	bool "Secure Channel Manager (SCM) support"
 	default n
 
-config MSM_MODEM_8960
-	bool "MSM 8960 Modem driver"
-	depends on (ARCH_MSM8960 || ARCH_MSM9615)
-	help
-	 This option enables the modem driver for the MSM8960 and MSM9615, which monitors
-	 modem hardware watchdog interrupt lines and plugs into the subsystem
-	 restart and PIL drivers. For MSM9615, it only supports a full chip reset.
-
-config MSM_LPASS_8960
-	tristate "MSM 8960 Lpass driver"
-	depends on (ARCH_MSM8960 || ARCH_MSM9615)
-	help
-	 This option enables the lpass driver for the MSM8960 and MSM9615. This monitors
-	 lpass hardware watchdog interrupt lines and plugs into the subsystem
-	 restart and PIL drivers. For MSM9615, it only supports a full chip reset.
-
 config MSM_MODEM_SSR_8974
 	bool "MSM 8974 Modem restart driver"
 	depends on (ARCH_MSM8974)
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 862607f..2424b53 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -200,10 +200,9 @@
 	obj-y += ramdump.o
 endif
 obj-$(CONFIG_MSM_SYSMON_COMM) += sysmon.o
-obj-$(CONFIG_MSM_MODEM_8960) += modem-8960.o
 obj-$(CONFIG_MSM_MODEM_SSR_8974) += modem-ssr-8974.o
-obj-$(CONFIG_MSM_LPASS_8960) += lpass-8960.o
 obj-$(CONFIG_MSM_ADSP_SSR_8974) += adsp-8974.o
+obj-$(CONFIG_MSM_WCNSS_SSR_8974) += wcnss-ssr-8974.o
 
 ifdef CONFIG_CPU_IDLE
 	obj-$(CONFIG_ARCH_APQ8064) += cpuidle.o
diff --git a/arch/arm/mach-msm/board-8064-camera.c b/arch/arm/mach-msm/board-8064-camera.c
index c79f82f..0a95e51 100644
--- a/arch/arm/mach-msm/board-8064-camera.c
+++ b/arch/arm/mach-msm/board-8064-camera.c
@@ -503,6 +503,34 @@
 	.i2c_mux_mode = MODE_L,
 };
 
+static struct msm_camera_sensor_flash_data flash_imx135 = {
+	.flash_type = MSM_CAMERA_FLASH_NONE,
+};
+
+static struct msm_camera_csi_lane_params imx135_csi_lane_params = {
+	.csi_lane_assign = 0xE4,
+	.csi_lane_mask = 0xF,
+};
+
+static struct msm_camera_sensor_platform_info sensor_board_info_imx135 = {
+	.mount_angle    = 90,
+	.cam_vreg = apq_8064_cam_vreg,
+	.num_vreg = ARRAY_SIZE(apq_8064_cam_vreg),
+	.gpio_conf = &apq8064_back_cam_gpio_conf,
+	.i2c_conf = &apq8064_back_cam_i2c_conf,
+	.csi_lane_params = &imx135_csi_lane_params,
+};
+
+static struct msm_camera_sensor_info msm_camera_sensor_imx135_data = {
+	.sensor_name    = "imx135",
+	.pdata  = &msm_camera_csi_device_data[0],
+	.flash_data = &flash_imx135,
+	.sensor_platform_info = &sensor_board_info_imx135,
+	.csi_if = 1,
+	.camera_type = BACK_CAMERA_2D,
+	.sensor_type = BAYER_SENSOR,
+};
+
 static struct msm_camera_sensor_flash_data flash_imx074 = {
 	.flash_type	= MSM_CAMERA_FLASH_LED,
 	.flash_src	= &msm_flash_src
@@ -700,6 +728,10 @@
 	.platform_data = &msm_camera_sensor_imx074_data,
 	},
 	{
+	I2C_BOARD_INFO("imx135", 0x10),
+	.platform_data = &msm_camera_sensor_imx135_data,
+	},
+	{
 	I2C_BOARD_INFO("mt9m114", 0x48),
 	.platform_data = &msm_camera_sensor_mt9m114_data,
 	},
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index 2bef087..851f7d9 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -64,6 +64,7 @@
 VREG_CONSUMERS(L8) = {
 	REGULATOR_SUPPLY("8921_l8",		NULL),
 	REGULATOR_SUPPLY("cam_vana",		"4-001a"),
+	REGULATOR_SUPPLY("cam_vana",		"4-0010"),
 	REGULATOR_SUPPLY("cam_vana",		"4-0048"),
 	REGULATOR_SUPPLY("cam_vana",		"4-006c"),
 	REGULATOR_SUPPLY("cam_vana",		"4-0034"),
@@ -84,6 +85,7 @@
 };
 VREG_CONSUMERS(L12) = {
 	REGULATOR_SUPPLY("cam_vdig",		"4-001a"),
+	REGULATOR_SUPPLY("cam_vdig",		"4-0010"),
 	REGULATOR_SUPPLY("cam_vdig",		"4-0048"),
 	REGULATOR_SUPPLY("cam_vdig",		"4-006c"),
 	REGULATOR_SUPPLY("cam_vdig",		"4-0034"),
@@ -102,6 +104,7 @@
 VREG_CONSUMERS(L16) = {
 	REGULATOR_SUPPLY("8921_l16",		NULL),
 	REGULATOR_SUPPLY("cam_vaf",		"4-001a"),
+	REGULATOR_SUPPLY("cam_vaf",		"4-0010"),
 	REGULATOR_SUPPLY("cam_vaf",		"4-0048"),
 	REGULATOR_SUPPLY("cam_vaf",		"4-006c"),
 	REGULATOR_SUPPLY("cam_vaf",		"4-0034"),
@@ -214,6 +217,7 @@
 VREG_CONSUMERS(LVS5) = {
 	REGULATOR_SUPPLY("8921_lvs5",		NULL),
 	REGULATOR_SUPPLY("cam_vio",		"4-001a"),
+	REGULATOR_SUPPLY("cam_vio",		"4-0010"),
 	REGULATOR_SUPPLY("cam_vio",		"4-0048"),
 	REGULATOR_SUPPLY("cam_vio",		"4-006c"),
 	REGULATOR_SUPPLY("cam_vio",		"4-0034"),
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 9545c7a..767736f 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -46,6 +46,8 @@
 static struct clk_lookup msm_clocks_dummy[] = {
 	CLK_DUMMY("core_clk",   BLSP1_UART_CLK, "msm_serial_hsl.0", OFF),
 	CLK_DUMMY("iface_clk",  BLSP1_UART_CLK, "msm_serial_hsl.0", OFF),
+	CLK_DUMMY("iface_clk",  HSUSB_IFACE_CLK, "msm_otg", OFF),
+	CLK_DUMMY("core_clk",	HSUSB_CORE_CLK, "msm_otg", OFF),
 };
 
 struct clock_init_data msm_dummy_clock_init_data __initdata = {
@@ -62,6 +64,8 @@
 static struct of_dev_auxdata msm8226_auxdata_lookup[] __initdata = {
 	OF_DEV_AUXDATA("qcom,msm-lsuart-v14", 0xF991F000, \
 			"msm_serial_hsl.0", NULL),
+	OF_DEV_AUXDATA("qcom,hsusb-otg", 0xF9A55000, \
+			"msm_otg", NULL),
 	{}
 };
 
diff --git a/arch/arm/mach-msm/board-8960-camera.c b/arch/arm/mach-msm/board-8960-camera.c
index 9bb6e09..88fd527 100644
--- a/arch/arm/mach-msm/board-8960-camera.c
+++ b/arch/arm/mach-msm/board-8960-camera.c
@@ -752,6 +752,33 @@
 	.eeprom_info = &imx091_eeprom_info,
 };
 
+static struct msm_camera_sensor_flash_data flash_imx135 = {
+	.flash_type = MSM_CAMERA_FLASH_NONE,
+};
+
+static struct msm_camera_csi_lane_params imx135_csi_lane_params = {
+	.csi_lane_assign = 0xE4,
+	.csi_lane_mask = 0xF,
+};
+
+static struct msm_camera_sensor_platform_info sensor_board_info_imx135 = {
+	.mount_angle = 90,
+	.cam_vreg = msm_8960_cam_vreg,
+	.num_vreg = ARRAY_SIZE(msm_8960_cam_vreg),
+	.gpio_conf = &msm_8960_back_cam_gpio_conf,
+	.csi_lane_params = &imx135_csi_lane_params,
+};
+
+static struct msm_camera_sensor_info msm_camera_sensor_imx135_data = {
+	.sensor_name = "imx135",
+	.pdata = &msm_camera_csi_device_data[0],
+	.flash_data = &flash_imx135,
+	.sensor_platform_info = &sensor_board_info_imx135,
+	.csi_if = 1,
+	.camera_type = BACK_CAMERA_2D,
+	.sensor_type = BAYER_SENSOR,
+};
+
 static struct pm8xxx_mpp_config_data privacy_light_on_config = {
 	.type		= PM8XXX_MPP_TYPE_SINK,
 	.level		= PM8XXX_MPP_CS_OUT_5MA,
@@ -838,6 +865,10 @@
 	.platform_data = &msm_camera_sensor_ov2720_data,
 	},
 	{
+	I2C_BOARD_INFO("imx135", 0x10),
+	.platform_data = &msm_camera_sensor_imx135_data,
+	},
+	{
 	I2C_BOARD_INFO("mt9m114", 0x48),
 	.platform_data = &msm_camera_sensor_mt9m114_data,
 	},
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index a6c0bc7..f9e2c8e 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -78,6 +78,7 @@
 	REGULATOR_SUPPLY("cam_vana",		"4-0048"),
 	REGULATOR_SUPPLY("cam_vana",		"4-0020"),
 	REGULATOR_SUPPLY("cam_vana",		"4-0034"),
+	REGULATOR_SUPPLY("cam_vana",		"4-0010"),
 };
 VREG_CONSUMERS(L12) = {
 	REGULATOR_SUPPLY("8921_l12",		NULL),
@@ -86,6 +87,7 @@
 	REGULATOR_SUPPLY("cam_vdig",		"4-0048"),
 	REGULATOR_SUPPLY("cam_vdig",		"4-0020"),
 	REGULATOR_SUPPLY("cam_vdig",		"4-0034"),
+	REGULATOR_SUPPLY("cam_vdig",		"4-0010"),
 };
 VREG_CONSUMERS(L14) = {
 	REGULATOR_SUPPLY("8921_l14",		NULL),
@@ -101,6 +103,7 @@
 	REGULATOR_SUPPLY("cam_vaf",		"4-0048"),
 	REGULATOR_SUPPLY("cam_vaf",		"4-0020"),
 	REGULATOR_SUPPLY("cam_vaf",		"4-0034"),
+	REGULATOR_SUPPLY("cam_vaf",		"4-0010"),
 };
 VREG_CONSUMERS(L17) = {
 	REGULATOR_SUPPLY("8921_l17",		NULL),
@@ -220,6 +223,7 @@
 	REGULATOR_SUPPLY("cam_vio",		"4-0048"),
 	REGULATOR_SUPPLY("cam_vio",		"4-0020"),
 	REGULATOR_SUPPLY("cam_vio",		"4-0034"),
+	REGULATOR_SUPPLY("cam_vio",		"4-0010"),
 };
 /* This mapping is used for CDP only. */
 VREG_CONSUMERS(CDP_LVS6) = {
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 43f34fe..ed5e2b7 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -870,6 +870,8 @@
 #ifdef CONFIG_LTC4088_CHARGER
 	&msm_device_charger,
 #endif
+	&msm_9615_q6_lpass,
+	&msm_9615_q6_mss,
 	&msm_device_otg,
 	&msm_device_hsic_peripheral,
 	&msm_device_gadget_peripheral,
diff --git a/arch/arm/mach-msm/board-9625.c b/arch/arm/mach-msm/board-9625.c
index ca9bdaa..2c641ed 100644
--- a/arch/arm/mach-msm/board-9625.c
+++ b/arch/arm/mach-msm/board-9625.c
@@ -28,6 +28,7 @@
 #include <asm/mach/time.h>
 #include <mach/socinfo.h>
 #include <mach/board.h>
+#include <mach/restart.h>
 #include <mach/gpio.h>
 #include <mach/clk-provider.h>
 #include <mach/qpnp-int.h>
@@ -308,4 +309,5 @@
 	.timer = &msm_dt_timer,
 	.dt_compat = msm9625_dt_match,
 	.reserve = msm9625_reserve,
+	.restart = msm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 3a72be3..285b037 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -5351,6 +5351,7 @@
 	CLK_LOOKUP("core_clk",		pmic_ssbi2_clk.c,	""),
 	CLK_LOOKUP("mem_clk",		rpm_msg_ram_p_clk.c,	""),
 	CLK_LOOKUP("cam_clk",		cam0_clk.c,	"4-001a"),
+	CLK_LOOKUP("cam_clk",		cam0_clk.c,	"4-0010"),
 	CLK_LOOKUP("cam_clk",		cam0_clk.c,	"4-0034"),
 	CLK_LOOKUP("cam_clk",		cam0_clk.c,	"4-0020"),
 	CLK_LOOKUP("cam_clk",		cam1_clk.c,	"4-0048"),
@@ -5711,6 +5712,7 @@
 	CLK_LOOKUP("cam_clk",		cam2_clk.c,		NULL),
 	CLK_LOOKUP("cam_clk",		cam0_clk.c,	"4-0020"),
 	CLK_LOOKUP("cam_clk",		cam0_clk.c,	"4-0034"),
+	CLK_LOOKUP("cam_clk",		cam0_clk.c,	"4-0010"),
 	CLK_LOOKUP("csi_src_clk",	csi0_src_clk.c,		"msm_csid.0"),
 	CLK_LOOKUP("csi_src_clk",	csi1_src_clk.c,		"msm_csid.1"),
 	CLK_LOOKUP("csi_src_clk",	csi2_src_clk.c,		"msm_csid.2"),
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 10de231..0adb9a2 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -789,7 +789,6 @@
 static DEFINE_CLK_VOTER(pnoc_sdcc4_clk, &pnoc_clk.c, 0);
 
 static DEFINE_CLK_VOTER(pnoc_sps_clk, &pnoc_clk.c, 0);
-static DEFINE_CLK_VOTER(pnoc_qseecom_clk, &pnoc_clk.c, 0);
 
 static struct clk_freq_tbl ftbl_gcc_usb30_master_clk[] = {
 	F(125000000,  gpll0,   1,   5,  24),
@@ -5079,6 +5078,11 @@
 	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c, "qcrypto.0"),
 	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,     "qcrypto.0"),
 
+	CLK_LOOKUP("core_clk",     gcc_ce1_clk.c,         "qseecom"),
+	CLK_LOOKUP("iface_clk",    gcc_ce1_ahb_clk.c,     "qseecom"),
+	CLK_LOOKUP("bus_clk",      gcc_ce1_axi_clk.c,     "qseecom"),
+	CLK_LOOKUP("core_clk_src", ce1_clk_src.c,         "qseecom"),
+
 	CLK_LOOKUP("core_clk", gcc_gp1_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_gp2_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_gp3_clk.c, ""),
@@ -5361,7 +5365,6 @@
 	CLK_LOOKUP("core_clk", gcc_prng_ahb_clk.c, "msm_rng"),
 
 	CLK_LOOKUP("dfab_clk", pnoc_sps_clk.c, "msm_sps"),
-	CLK_LOOKUP("bus_clk",  pnoc_qseecom_clk.c, "qseecom"),
 
 	CLK_LOOKUP("bus_clk", snoc_clk.c, ""),
 	CLK_LOOKUP("bus_clk", pnoc_clk.c, ""),
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index db2c525..5b04fa7 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -1341,6 +1341,11 @@
 		.end    = 0x28800000 + SZ_256 - 1,
 		.flags  = IORESOURCE_MEM,
 	},
+	{
+		.start  = LPASS_Q6SS_WDOG_EXPIRED,
+		.end    = LPASS_Q6SS_WDOG_EXPIRED,
+		.flags  = IORESOURCE_IRQ,
+	},
 };
 
 static struct pil_q6v4_pdata msm_8960_q6_lpass_data = {
@@ -1373,10 +1378,30 @@
 		.flags  = IORESOURCE_MEM,
 	},
 	{
+		.start  = 0x08882000,
+		.end    = 0x08882000 + SZ_256 - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+	{
 		.start  = 0x08900000,
 		.end    = 0x08900000 + SZ_256 - 1,
 		.flags  = IORESOURCE_MEM,
 	},
+	{
+		.start  = 0x08982000,
+		.end    = 0x08982000 + SZ_256 - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+	{
+		.start  = Q6FW_WDOG_EXPIRED_IRQ,
+		.end    = Q6FW_WDOG_EXPIRED_IRQ,
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.start  = Q6SW_WDOG_EXPIRED_IRQ,
+		.end    = Q6SW_WDOG_EXPIRED_IRQ,
+		.flags  = IORESOURCE_IRQ,
+	},
 };
 
 static struct pil_q6v4_pdata msm_8960_q6_mss_data[2] = {
diff --git a/arch/arm/mach-msm/devices-9615.c b/arch/arm/mach-msm/devices-9615.c
index 6a43206..8e8fa83 100644
--- a/arch/arm/mach-msm/devices-9615.c
+++ b/arch/arm/mach-msm/devices-9615.c
@@ -705,6 +705,41 @@
 	.id		= -1,
 };
 
+static struct resource msm_9615_q6_lpass_resources[] = {
+	{
+		.start  = LPASS_Q6SS_WDOG_EXPIRED,
+		.end    = LPASS_Q6SS_WDOG_EXPIRED,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+struct platform_device msm_9615_q6_lpass = {
+	.name = "pil-q6v4-lpass",
+	.id = -1,
+	.num_resources  = ARRAY_SIZE(msm_9615_q6_lpass_resources),
+	.resource       = msm_9615_q6_lpass_resources,
+};
+
+static struct resource msm_9615_q6_mss_resources[] = {
+	{
+		.start  = Q6FW_WDOG_EXPIRED_IRQ,
+		.end    = Q6FW_WDOG_EXPIRED_IRQ,
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.start  = Q6SW_WDOG_EXPIRED_IRQ,
+		.end    = Q6SW_WDOG_EXPIRED_IRQ,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+struct platform_device msm_9615_q6_mss = {
+	.name = "pil-q6v4-modem",
+	.id = -1,
+	.num_resources  = ARRAY_SIZE(msm_9615_q6_mss_resources),
+	.resource       = msm_9615_q6_mss_resources,
+};
+
 #ifdef CONFIG_HW_RANDOM_MSM
 /* PRNG device */
 #define MSM_PRNG_PHYS		0x1A500000
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 50ab26f..c0ad39b 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -1606,6 +1606,26 @@
 	},
 };
 
+static struct resource pl310_resources[] = {
+	{
+		.start = 0xC0400000,
+		.end   = 0xC0400000 + SZ_4K - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.name   = "l2_irq",
+		.start  = MSM8625_INT_L2CC_INTR,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device pl310_erp_device = {
+	.name           = "pl310_erp",
+	.id             = -1,
+	.resource       = pl310_resources,
+	.num_resources  = ARRAY_SIZE(pl310_resources),
+};
+
 enum {
 	MSM8625,
 	MSM8625A,
@@ -1954,6 +1974,11 @@
 			(SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 2))
 		msm_cpr_init();
 
+	if (!cpu_is_msm8625())
+		pl310_resources[1].start = INT_L2CC_INTR;
+
+	platform_device_register(&pl310_erp_device);
+
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 84812b6..950a1be 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -251,7 +251,9 @@
 extern struct platform_device msm_pil_dsps;
 extern struct platform_device msm_pil_vidc;
 extern struct platform_device msm_8960_q6_lpass;
+extern struct platform_device msm_9615_q6_lpass;
 extern struct platform_device msm_8960_q6_mss;
+extern struct platform_device msm_9615_q6_mss;
 extern struct platform_device msm_8960_riva;
 extern struct platform_device msm_gss;
 
diff --git a/arch/arm/mach-msm/include/mach/msm_hsusb_hw.h b/arch/arm/mach-msm/include/mach/msm_hsusb_hw.h
index 82542b2..831b40e 100644
--- a/arch/arm/mach-msm/include/mach/msm_hsusb_hw.h
+++ b/arch/arm/mach-msm/include/mach/msm_hsusb_hw.h
@@ -100,6 +100,8 @@
 #define CONFIG_MAX_PKT(n)     ((n) << 16)
 #define CONFIG_ZLT            (1 << 29)    /* stop on zero-len xfer */
 #define CONFIG_IOS            (1 << 15)    /* IRQ on setup */
+#define CONFIG_MULT           (3 << 30)
+#define CONFIG_MULT_SHIFT     11
 
 struct ept_queue_item {
     unsigned next;
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-9625.h b/arch/arm/mach-msm/include/mach/msm_iomap-9625.h
index 765de13..652563c 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-9625.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-9625.h
@@ -43,6 +43,9 @@
 #define MSM9625_IMEM_PHYS	0xFC42B000
 #define MSM9625_IMEM_SIZE	SZ_2K
 
+#define MSM9625_MPM2_PSHOLD_PHYS	0xFC4AB000
+#define MSM9625_MPM2_PSHOLD_SIZE	SZ_4K
+
 #ifdef CONFIG_DEBUG_MSM9625_UART
 #define MSM_DEBUG_UART_BASE	IOMEM(0xFA71E000)
 #define MSM_DEBUG_UART_PHYS	0xF991E000
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 8ebead8..4b81ce7 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -461,6 +461,7 @@
 static struct map_desc msm9625_io_desc[] __initdata = {
 	MSM_CHIP_DEVICE(APCS_GCC, MSM9625),
 	MSM_CHIP_DEVICE(TLMM, MSM9625),
+	MSM_CHIP_DEVICE(MPM2_PSHOLD, MSM9625),
 	MSM_CHIP_DEVICE(TMR, MSM9625),
 	MSM_CHIP_DEVICE(IMEM, MSM9625),
 	{
diff --git a/arch/arm/mach-msm/lpass-8960.c b/arch/arm/mach-msm/lpass-8960.c
deleted file mode 100644
index b714a7f..0000000
--- a/arch/arm/mach-msm/lpass-8960.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/reboot.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/err.h>
-
-#include <mach/irqs.h>
-#include <mach/scm.h>
-#include <mach/peripheral-loader.h>
-#include <mach/subsystem_restart.h>
-#include <mach/subsystem_notif.h>
-
-#include "smd_private.h"
-#include "ramdump.h"
-#include "sysmon.h"
-
-#define SCM_Q6_NMI_CMD                  0x1
-#define MODULE_NAME			"lpass_8960"
-#define MAX_BUF_SIZE			0x51
-
-/* Subsystem restart: QDSP6 data, functions */
-static void lpass_fatal_fn(struct work_struct *);
-static DECLARE_WORK(lpass_fatal_work, lpass_fatal_fn);
-struct lpass_ssr {
-	void *lpass_ramdump_dev;
-} lpass_ssr;
-
-static struct lpass_ssr lpass_ssr_8960;
-static int q6_crash_shutdown;
-
-static int riva_notifier_cb(struct notifier_block *this, unsigned long code,
-								void *ss_handle)
-{
-	int ret;
-	switch (code) {
-	case SUBSYS_BEFORE_SHUTDOWN:
-		pr_debug("%s: R-Notify: Shutdown started\n", __func__);
-		ret = sysmon_send_event(SYSMON_SS_LPASS, "wcnss",
-				SUBSYS_BEFORE_SHUTDOWN);
-		if (ret < 0)
-			pr_err("%s: sysmon_send_event error %d", __func__,
-				ret);
-		break;
-	}
-	return NOTIFY_DONE;
-}
-
-static void *ssr_notif_hdle;
-static struct notifier_block rnb = {
-	.notifier_call = riva_notifier_cb,
-};
-
-static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
-								void *ss_handle)
-{
-	int ret;
-	switch (code) {
-	case SUBSYS_BEFORE_SHUTDOWN:
-		pr_debug("%s: M-Notify: Shutdown started\n", __func__);
-		ret = sysmon_send_event(SYSMON_SS_LPASS, "modem",
-				SUBSYS_BEFORE_SHUTDOWN);
-		if (ret < 0)
-			pr_err("%s: sysmon_send_event error %d", __func__,
-				ret);
-		break;
-	}
-	return NOTIFY_DONE;
-}
-
-static void *ssr_modem_notif_hdle;
-static struct notifier_block mnb = {
-	.notifier_call = modem_notifier_cb,
-};
-
-static void lpass_log_failure_reason(void)
-{
-	char *reason;
-	char buffer[MAX_BUF_SIZE];
-	unsigned size;
-
-	reason = smem_get_entry(SMEM_SSR_REASON_LPASS0, &size);
-
-	if (!reason) {
-		pr_err("%s: subsystem failure reason: (unknown, smem_get_entry failed).",
-			 MODULE_NAME);
-		return;
-	}
-
-	if (reason[0] == '\0') {
-		pr_err("%s: subsystem failure reason: (unknown, init value found)",
-			 MODULE_NAME);
-		return;
-	}
-
-	size = size < MAX_BUF_SIZE ? size : (MAX_BUF_SIZE-1);
-	memcpy(buffer, reason, size);
-	buffer[size] = '\0';
-	pr_err("%s: subsystem failure reason: %s", MODULE_NAME, buffer);
-	memset((void *)reason, 0x0, size);
-	wmb();
-}
-
-static void lpass_fatal_fn(struct work_struct *work)
-{
-	pr_err("%s %s: Watchdog bite received from Q6!\n", MODULE_NAME,
-		__func__);
-	lpass_log_failure_reason();
-	panic(MODULE_NAME ": Resetting the SoC");
-}
-
-static void lpass_smsm_state_cb(void *data, uint32_t old_state,
-				uint32_t new_state)
-{
-	/* Ignore if we're the one that set SMSM_RESET */
-	if (q6_crash_shutdown)
-		return;
-
-	if (new_state & SMSM_RESET) {
-		pr_err("%s: LPASS SMSM state changed to SMSM_RESET,"
-			" new_state = 0x%x, old_state = 0x%x\n", __func__,
-			new_state, old_state);
-		lpass_log_failure_reason();
-		panic(MODULE_NAME ": Resetting the SoC");
-	}
-}
-
-static void send_q6_nmi(void)
-{
-	/* Send NMI to QDSP6 via an SCM call. */
-	uint32_t cmd = 0x1;
-
-	scm_call(SCM_SVC_UTIL, SCM_Q6_NMI_CMD,
-	&cmd, sizeof(cmd), NULL, 0);
-
-	/* Q6 requires worstcase 100ms to dump caches etc.*/
-	mdelay(100);
-	pr_debug("%s: Q6 NMI was sent.\n", __func__);
-}
-
-static int lpass_shutdown(const struct subsys_desc *subsys)
-{
-	send_q6_nmi();
-	pil_force_shutdown("q6");
-	disable_irq_nosync(LPASS_Q6SS_WDOG_EXPIRED);
-
-	return 0;
-}
-
-static int lpass_powerup(const struct subsys_desc *subsys)
-{
-	int ret = pil_force_boot("q6");
-	enable_irq(LPASS_Q6SS_WDOG_EXPIRED);
-	return ret;
-}
-/* RAM segments - address and size for 8960 */
-static struct ramdump_segment q6_segments[] = { {0x8da00000, 0x8f200000 -
-					0x8da00000}, {0x28400000, 0x20000} };
-static int lpass_ramdump(int enable, const struct subsys_desc *subsys)
-{
-	pr_debug("%s: enable[%d]\n", __func__, enable);
-	if (enable)
-		return do_ramdump(lpass_ssr_8960.lpass_ramdump_dev,
-				q6_segments,
-				ARRAY_SIZE(q6_segments));
-	else
-		return 0;
-}
-
-static void lpass_crash_shutdown(const struct subsys_desc *subsys)
-{
-	q6_crash_shutdown = 1;
-	send_q6_nmi();
-}
-
-static irqreturn_t lpass_wdog_bite_irq(int irq, void *dev_id)
-{
-	int ret;
-
-	pr_debug("%s: rxed irq[0x%x]", __func__, irq);
-	disable_irq_nosync(LPASS_Q6SS_WDOG_EXPIRED);
-	ret = schedule_work(&lpass_fatal_work);
-
-	return IRQ_HANDLED;
-}
-
-static struct subsys_device *lpass_8960_dev;
-
-static struct subsys_desc lpass_8960 = {
-	.name = "lpass",
-	.shutdown = lpass_shutdown,
-	.powerup = lpass_powerup,
-	.ramdump = lpass_ramdump,
-	.crash_shutdown = lpass_crash_shutdown
-};
-
-static int __init lpass_restart_init(void)
-{
-	lpass_8960_dev = subsys_register(&lpass_8960);
-	if (IS_ERR(lpass_8960_dev))
-		return PTR_ERR(lpass_8960_dev);
-	return 0;
-}
-
-static int __init lpass_fatal_init(void)
-{
-	int ret;
-
-	ret = smsm_state_cb_register(SMSM_Q6_STATE, SMSM_RESET,
-		lpass_smsm_state_cb, 0);
-
-	if (ret < 0)
-		pr_err("%s: Unable to register SMSM callback! (%d)\n",
-				__func__, ret);
-
-	ret = request_irq(LPASS_Q6SS_WDOG_EXPIRED, lpass_wdog_bite_irq,
-			IRQF_TRIGGER_RISING, "q6_wdog", NULL);
-
-	if (ret < 0) {
-		pr_err("%s: Unable to request LPASS_Q6SS_WDOG_EXPIRED irq.",
-			__func__);
-		goto out;
-	}
-	ret = lpass_restart_init();
-	if (ret < 0) {
-		pr_err("%s: Unable to reg with lpass ssr. (%d)\n",
-				__func__, ret);
-		goto out;
-	}
-
-	lpass_ssr_8960.lpass_ramdump_dev = create_ramdump_device("lpass");
-
-	if (!lpass_ssr_8960.lpass_ramdump_dev) {
-		pr_err("%s: Unable to create ramdump device.\n",
-				__func__);
-		ret = -ENOMEM;
-		goto out;
-	}
-	ssr_notif_hdle = subsys_notif_register_notifier("riva",
-							&rnb);
-	if (IS_ERR(ssr_notif_hdle) < 0) {
-		ret = PTR_ERR(ssr_notif_hdle);
-		pr_err("%s: subsys_register_notifier for Riva: err = %d\n",
-			__func__, ret);
-		free_irq(LPASS_Q6SS_WDOG_EXPIRED, NULL);
-		goto out;
-	}
-
-	ssr_modem_notif_hdle = subsys_notif_register_notifier("modem",
-							&mnb);
-	if (IS_ERR(ssr_modem_notif_hdle) < 0) {
-		ret = PTR_ERR(ssr_modem_notif_hdle);
-		pr_err("%s: subsys_register_notifier for Modem: err = %d\n",
-			__func__, ret);
-		subsys_notif_unregister_notifier(ssr_notif_hdle, &rnb);
-		free_irq(LPASS_Q6SS_WDOG_EXPIRED, NULL);
-		goto out;
-	}
-
-	pr_info("%s: lpass SSR driver init'ed.\n", __func__);
-out:
-	return ret;
-}
-
-static void __exit lpass_fatal_exit(void)
-{
-	subsys_notif_unregister_notifier(ssr_notif_hdle, &rnb);
-	subsys_notif_unregister_notifier(ssr_modem_notif_hdle, &mnb);
-	subsys_unregister(lpass_8960_dev);
-	free_irq(LPASS_Q6SS_WDOG_EXPIRED, NULL);
-}
-
-module_init(lpass_fatal_init);
-module_exit(lpass_fatal_exit);
-
-MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index 8218a42..4854fc48 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -22,6 +22,18 @@
 #include "pm.h"
 #include "rpm-notifier.h"
 
+
+enum {
+	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
+	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
+};
+
+static int msm_lpm_lvl_dbg_msk;
+
+module_param_named(
+	debug_mask, msm_lpm_lvl_dbg_msk, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
 static struct msm_rpmrs_level *msm_lpm_levels;
 static int msm_lpm_level_count;
 
@@ -41,6 +53,7 @@
 		bool from_idle, bool notify_rpm)
 {
 	int ret = 0;
+	int debug_mask;
 	struct msm_rpmrs_limits *l = (struct msm_rpmrs_limits *)limits;
 
 	ret = msm_rpm_enter_sleep();
@@ -49,6 +62,21 @@
 				__func__, ret);
 		goto bail;
 	}
+	if (from_idle)
+		debug_mask = msm_lpm_lvl_dbg_msk &
+				MSM_LPM_LVL_DBG_IDLE_LIMITS;
+	else
+		debug_mask = msm_lpm_lvl_dbg_msk &
+				MSM_LPM_LVL_DBG_SUSPEND_LIMITS;
+
+	if (debug_mask)
+		pr_info("%s(): pxo:%d l2:%d mem:0x%x(0x%x) dig:0x%x(0x%x)\n",
+				__func__, l->pxo, l->l2_cache,
+				l->vdd_mem_lower_bound,
+				l->vdd_mem_upper_bound,
+				l->vdd_dig_lower_bound,
+				l->vdd_dig_upper_bound);
+
 	ret = msm_lpmrs_enter_sleep(sclk_count, l, from_idle, notify_rpm);
 bail:
 	return ret;
diff --git a/arch/arm/mach-msm/lpm_resources.c b/arch/arm/mach-msm/lpm_resources.c
index 48d31f3..364f297 100644
--- a/arch/arm/mach-msm/lpm_resources.c
+++ b/arch/arm/mach-msm/lpm_resources.c
@@ -104,6 +104,11 @@
 	struct msm_rpm_request *handle;
 };
 
+enum {
+	MSM_LPM_RPM_RS_TYPE = 0,
+	MSM_LPM_LOCAL_RS_TYPE = 1,
+};
+
 struct msm_lpm_resource {
 	struct msm_lpm_rs_data rs_data;
 	uint32_t sleep_value;
@@ -126,7 +131,7 @@
 	.aggregate = msm_lpm_aggregate_l2,
 	.flush = msm_lpm_flush_l2,
 	.notify = NULL,
-	.valid = true,
+	.valid = false,
 	.rs_data = {
 		.value = MSM_LPM_L2_CACHE_ACTIVE,
 		.default_value = MSM_LPM_L2_CACHE_ACTIVE,
@@ -787,6 +792,7 @@
 		struct msm_lpm_resource *rs = NULL;
 		const char *val;
 		int i;
+		uint32_t resource_type;
 
 		key = "qcom,name";
 		ret = of_property_read_string(node, key, &val);
@@ -810,49 +816,73 @@
 			continue;
 		}
 
-		key = "qcom,type";
-		ret = of_property_read_u32(node, key, &rs->rs_data.type);
+		key = "qcom,resource-type";
+		ret = of_property_read_u32(node, key, &resource_type);
 		if (ret) {
-			pr_err("Failed to read type\n");
+			pr_err("Failed to read resource-type\n");
 			goto fail;
 		}
 
-		key = "qcom,id";
-		ret = of_property_read_u32(node, key, &rs->rs_data.id);
-		if (ret) {
-			pr_err("Failed to read id\n");
+		switch (resource_type) {
+		case MSM_LPM_RPM_RS_TYPE:
+			key = "qcom,type";
+			ret = of_property_read_u32(node, key,
+						&rs->rs_data.type);
+			if (ret) {
+				pr_err("Failed to read type\n");
+				goto fail;
+			}
+
+			key = "qcom,id";
+			ret = of_property_read_u32(node, key, &rs->rs_data.id);
+			if (ret) {
+				pr_err("Failed to read id\n");
+				goto fail;
+			}
+
+			key = "qcom,key";
+			ret = of_property_read_u32(node, key, &rs->rs_data.key);
+			if (ret) {
+				pr_err("Failed to read key\n");
+				goto fail;
+			}
+
+			rs->rs_data.handle = msm_lpm_create_rpm_request(
+						rs->rs_data.type,
+						rs->rs_data.id);
+
+			if (!rs->rs_data.handle) {
+				pr_err("%s: Failed to allocate handle for %s\n",
+						__func__, rs->name);
+				ret = -1;
+				goto fail;
+			}
+			/* fall through */
+
+		case MSM_LPM_LOCAL_RS_TYPE:
+			rs->valid = true;
+			break;
+		default:
+			pr_err("%s: Invalid resource type %d", __func__,
+					resource_type);
 			goto fail;
 		}
-
-		key = "qcom,key";
-		ret = of_property_read_u32(node, key, &rs->rs_data.key);
-		if (ret) {
-			pr_err("Failed to read key\n");
-			goto fail;
-		}
-
-		rs->rs_data.handle = msm_lpm_create_rpm_request(
-					rs->rs_data.type, rs->rs_data.id);
-
-		if (!rs->rs_data.handle) {
-			pr_err("%s: Failed to allocate handle for %s\n",
-					__func__, rs->name);
-			ret = -1;
-			goto fail;
-		}
-
-		rs->valid = true;
 	}
 	msm_rpm_register_notifier(&msm_lpm_rpm_nblk);
 	msm_lpm_init_rpm_ctl();
-	register_hotcpu_notifier(&msm_lpm_cpu_nblk);
-	/* For UP mode, set the default to HSFS OPEN*/
-	if (num_possible_cpus() == 1) {
-		msm_lpm_l2.rs_data.default_value = MSM_LPM_L2_CACHE_HSFS_OPEN;
-		msm_lpm_l2.rs_data.value = MSM_LPM_L2_CACHE_HSFS_OPEN;
-	}
-	msm_pm_set_l2_flush_flag(0);
-	return 0;
+
+	if (msm_lpm_l2.valid) {
+		register_hotcpu_notifier(&msm_lpm_cpu_nblk);
+		/* For UP mode, set the default to HSFS OPEN*/
+		if (num_possible_cpus() == 1) {
+			msm_lpm_l2.rs_data.default_value =
+					MSM_LPM_L2_CACHE_HSFS_OPEN;
+			msm_lpm_l2.rs_data.value = MSM_LPM_L2_CACHE_HSFS_OPEN;
+		}
+		msm_pm_set_l2_flush_flag(0);
+	} else
+		msm_pm_set_l2_flush_flag(1);
+
 fail:
 	return ret;
 }
diff --git a/arch/arm/mach-msm/modem-8960.c b/arch/arm/mach-msm/modem-8960.c
deleted file mode 100644
index 83b3bc4..0000000
--- a/arch/arm/mach-msm/modem-8960.c
+++ /dev/null
@@ -1,332 +0,0 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/reboot.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/stringify.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/debugfs.h>
-
-#include <mach/irqs.h>
-#include <mach/scm.h>
-#include <mach/peripheral-loader.h>
-#include <mach/subsystem_restart.h>
-#include <mach/subsystem_notif.h>
-#include <mach/socinfo.h>
-#include <mach/msm_smsm.h>
-
-#include "smd_private.h"
-#include "modem_notifier.h"
-#include "ramdump.h"
-
-static int crash_shutdown;
-
-static struct subsys_device *modem_8960_dev;
-
-#define MAX_SSR_REASON_LEN 81U
-
-static void log_modem_sfr(void)
-{
-	u32 size;
-	char *smem_reason, reason[MAX_SSR_REASON_LEN];
-
-	smem_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size);
-	if (!smem_reason || !size) {
-		pr_err("modem subsystem failure reason: (unknown, smem_get_entry failed).\n");
-		return;
-	}
-	if (!smem_reason[0]) {
-		pr_err("modem subsystem failure reason: (unknown, init string found).\n");
-		return;
-	}
-
-	size = min(size, MAX_SSR_REASON_LEN-1);
-	memcpy(reason, smem_reason, size);
-	reason[size] = '\0';
-	pr_err("modem subsystem failure reason: %s.\n", reason);
-
-	smem_reason[0] = '\0';
-	wmb();
-}
-
-static void restart_modem(void)
-{
-	log_modem_sfr();
-	subsystem_restart_dev(modem_8960_dev);
-}
-
-static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state)
-{
-	/* Ignore if we're the one that set SMSM_RESET */
-	if (crash_shutdown)
-		return;
-
-	if (new_state & SMSM_RESET) {
-		pr_err("Probable fatal error on the modem.\n");
-		restart_modem();
-	}
-}
-
-#define Q6_FW_WDOG_ENABLE		0x08882024
-#define Q6_SW_WDOG_ENABLE		0x08982024
-
-static int modem_shutdown(const struct subsys_desc *subsys)
-{
-	void __iomem *q6_fw_wdog_addr;
-	void __iomem *q6_sw_wdog_addr;
-
-	/*
-	 * Disable the modem watchdog since it keeps running even after the
-	 * modem is shutdown.
-	 */
-	q6_fw_wdog_addr = ioremap_nocache(Q6_FW_WDOG_ENABLE, 4);
-	if (!q6_fw_wdog_addr)
-		return -ENOMEM;
-
-	q6_sw_wdog_addr = ioremap_nocache(Q6_SW_WDOG_ENABLE, 4);
-	if (!q6_sw_wdog_addr) {
-		iounmap(q6_fw_wdog_addr);
-		return -ENOMEM;
-	}
-
-	writel_relaxed(0x0, q6_fw_wdog_addr);
-	writel_relaxed(0x0, q6_sw_wdog_addr);
-	mb();
-	iounmap(q6_sw_wdog_addr);
-	iounmap(q6_fw_wdog_addr);
-
-	pil_force_shutdown("modem");
-	pil_force_shutdown("modem_fw");
-	disable_irq_nosync(Q6FW_WDOG_EXPIRED_IRQ);
-	disable_irq_nosync(Q6SW_WDOG_EXPIRED_IRQ);
-
-	return 0;
-}
-
-#define MODEM_WDOG_CHECK_TIMEOUT_MS 10000
-
-static int modem_powerup(const struct subsys_desc *subsys)
-{
-	pil_force_boot("modem_fw");
-	pil_force_boot("modem");
-	enable_irq(Q6FW_WDOG_EXPIRED_IRQ);
-	enable_irq(Q6SW_WDOG_EXPIRED_IRQ);
-	return 0;
-}
-
-void modem_crash_shutdown(const struct subsys_desc *subsys)
-{
-	crash_shutdown = 1;
-	smsm_reset_modem(SMSM_RESET);
-}
-
-/* FIXME: Get address, size from PIL */
-static struct ramdump_segment modemsw_segments[] = {
-	{0x89000000, 0x8D400000 - 0x89000000},
-};
-
-static struct ramdump_segment modemfw_segments[] = {
-	{0x8D400000, 0x8DA00000 - 0x8D400000},
-};
-
-static struct ramdump_segment smem_segments[] = {
-	{0x80000000, 0x00200000},
-};
-
-static void *modemfw_ramdump_dev;
-static void *modemsw_ramdump_dev;
-static void *smem_ramdump_dev;
-
-static int modem_ramdump(int enable, const struct subsys_desc *crashed_subsys)
-{
-	int ret = 0;
-
-	if (enable) {
-		ret = do_ramdump(modemsw_ramdump_dev, modemsw_segments,
-			ARRAY_SIZE(modemsw_segments));
-
-		if (ret < 0) {
-			pr_err("Unable to dump modem sw memory (rc = %d).\n",
-			       ret);
-			goto out;
-		}
-
-		ret = do_ramdump(modemfw_ramdump_dev, modemfw_segments,
-			ARRAY_SIZE(modemfw_segments));
-
-		if (ret < 0) {
-			pr_err("Unable to dump modem fw memory (rc = %d).\n",
-				ret);
-			goto out;
-		}
-
-		ret = do_ramdump(smem_ramdump_dev, smem_segments,
-			ARRAY_SIZE(smem_segments));
-
-		if (ret < 0) {
-			pr_err("Unable to dump smem memory (rc = %d).\n", ret);
-			goto out;
-		}
-	}
-
-out:
-	return ret;
-}
-
-static irqreturn_t modem_wdog_bite_irq(int irq, void *dev_id)
-{
-	switch (irq) {
-
-	case Q6SW_WDOG_EXPIRED_IRQ:
-		pr_err("Watchdog bite received from modem software!\n");
-		restart_modem();
-		break;
-	case Q6FW_WDOG_EXPIRED_IRQ:
-		pr_err("Watchdog bite received from modem firmware!\n");
-		restart_modem();
-		break;
-	break;
-
-	default:
-		pr_err("%s: Unknown IRQ!\n", __func__);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static struct subsys_desc modem_8960 = {
-	.name = "modem",
-	.shutdown = modem_shutdown,
-	.powerup = modem_powerup,
-	.ramdump = modem_ramdump,
-	.crash_shutdown = modem_crash_shutdown
-};
-
-static int modem_subsystem_restart_init(void)
-{
-	modem_8960_dev = subsys_register(&modem_8960);
-	if (IS_ERR(modem_8960_dev))
-		return PTR_ERR(modem_8960_dev);
-	return 0;
-}
-
-static int modem_debug_set(void *data, u64 val)
-{
-	if (val == 1)
-		subsystem_restart_dev(modem_8960_dev);
-
-	return 0;
-}
-
-static int modem_debug_get(void *data, u64 *val)
-{
-	*val = 0;
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(modem_debug_fops, modem_debug_get, modem_debug_set,
-				"%llu\n");
-
-static int modem_debugfs_init(void)
-{
-	struct dentry *dent;
-	dent = debugfs_create_dir("modem_debug", 0);
-
-	if (IS_ERR(dent))
-		return PTR_ERR(dent);
-
-	debugfs_create_file("reset_modem", 0644, dent, NULL,
-		&modem_debug_fops);
-	return 0;
-}
-
-static int __init modem_8960_init(void)
-{
-	int ret;
-
-	if (cpu_is_apq8064() || cpu_is_apq8064ab())
-		return -ENODEV;
-
-	ret = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
-		smsm_state_cb, 0);
-
-	if (ret < 0)
-		pr_err("%s: Unable to register SMSM callback! (%d)\n",
-				__func__, ret);
-
-	ret = request_irq(Q6FW_WDOG_EXPIRED_IRQ, modem_wdog_bite_irq,
-			IRQF_TRIGGER_RISING, "modem_wdog_fw", NULL);
-
-	if (ret < 0) {
-		pr_err("%s: Unable to request q6fw watchdog IRQ. (%d)\n",
-				__func__, ret);
-		goto out;
-	}
-
-	ret = request_irq(Q6SW_WDOG_EXPIRED_IRQ, modem_wdog_bite_irq,
-			IRQF_TRIGGER_RISING, "modem_wdog_sw", NULL);
-
-	if (ret < 0) {
-		pr_err("%s: Unable to request q6sw watchdog IRQ. (%d)\n",
-				__func__, ret);
-		disable_irq_nosync(Q6FW_WDOG_EXPIRED_IRQ);
-		goto out;
-	}
-
-	ret = modem_subsystem_restart_init();
-
-	if (ret < 0) {
-		pr_err("%s: Unable to reg with subsystem restart. (%d)\n",
-				__func__, ret);
-		goto out;
-	}
-
-	modemfw_ramdump_dev = create_ramdump_device("modem_fw");
-
-	if (!modemfw_ramdump_dev) {
-		pr_err("%s: Unable to create modem fw ramdump device. (%d)\n",
-				__func__, -ENOMEM);
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	modemsw_ramdump_dev = create_ramdump_device("modem_sw");
-
-	if (!modemsw_ramdump_dev) {
-		pr_err("%s: Unable to create modem sw ramdump device. (%d)\n",
-				__func__, -ENOMEM);
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	smem_ramdump_dev = create_ramdump_device("smem-modem");
-
-	if (!smem_ramdump_dev) {
-		pr_err("%s: Unable to create smem ramdump device. (%d)\n",
-				__func__, -ENOMEM);
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = modem_debugfs_init();
-
-	pr_info("%s: modem fatal driver init'ed.\n", __func__);
-out:
-	return ret;
-}
-
-module_init(modem_8960_init);
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index 2072cb1..97299a0 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -485,7 +485,7 @@
 };
 
 #define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
-	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000220)
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
 enum bimc_m_priolvl_override {
 	M_PRIOLVL_OVERRIDE_RMSK			= 0x301,
 	M_PRIOLVL_OVERRIDE_BMSK			= 0x300,
@@ -495,10 +495,10 @@
 };
 
 #define M_RD_CMD_OVERRIDE_ADDR(b, n) \
-	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
 enum bimc_m_read_command_override {
-	M_RD_CMD_OVERRIDE_RMSK			= 0x37f3f,
-	M_RD_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x300000,
+	M_RD_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_RD_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
 	M_RD_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
 	M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
 	M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
@@ -529,13 +529,15 @@
 };
 
 #define M_WR_CMD_OVERRIDE_ADDR(b, n) \
-	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
 enum bimc_m_write_command_override {
-	M_WR_CMD_OVERRIDE_RMSK			= 0x37f3f,
-	M_WR_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x30000,
-	M_WR_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x10,
-	M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x7000,
-	M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0xc,
+	M_WR_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_WR_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_WR_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK	= 0x1000,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT	= 0xc,
 	M_WR_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
 	M_WR_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
 	M_WR_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
@@ -544,8 +546,10 @@
 	M_WR_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
 	M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
 	M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
-	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x20,
-	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x5,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
 	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
 	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
 	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
@@ -1454,7 +1458,7 @@
 		 * boundary in future
 		 */
 		wmb();
-		set_qos_mode(binfo->base, mas_index, 1, 1, 1);
+		set_qos_mode(binfo->base, mas_index, 0, 1, 1);
 		break;
 
 	case BIMC_QOS_MODE_BYPASS:
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
index 1b8c07e..cfd84eb 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
@@ -641,6 +641,7 @@
 		.mode = NOC_QOS_MODE_FIXED,
 		.qport = qports_crypto_c0,
 		.mas_hw_id = MAS_CRYPTO_CORE0,
+		.hw_sel = MSM_BUS_NOC,
 	},
 	{
 		.id = MSM_BUS_MASTER_CRYPTO_CORE1,
@@ -651,6 +652,7 @@
 		.mode = NOC_QOS_MODE_FIXED,
 		.qport = qports_crypto_c1,
 		.mas_hw_id = MAS_CRYPTO_CORE1,
+		.hw_sel = MSM_BUS_NOC,
 	},
 	{
 		.id = MSM_BUS_MASTER_LPASS_PROC,
@@ -719,6 +721,7 @@
 		.mas_hw_id = MAS_USB3,
 		.prio_rd = 2,
 		.prio_wr = 2,
+		.hw_sel = MSM_BUS_NOC,
 	},
 	{
 		.id = MSM_BUS_SLAVE_AMPSS,
@@ -1046,9 +1049,8 @@
 		.qport = qports_kmpss,
 		.ws = 10000,
 		.mas_hw_id = MAS_APPSS_PROC,
-		.prio_lvl = 0,
-		.prio_rd = 2,
-		.prio_wr = 2,
+		.prio_rd = 1,
+		.prio_wr = 1,
 	},
 	{
 		.id = MSM_BUS_MASTER_AMPSS_M1,
@@ -1061,6 +1063,8 @@
 		.qport = qports_kmpss,
 		.ws = 10000,
 		.mas_hw_id = MAS_APPSS_PROC,
+		.prio_rd = 1,
+		.prio_wr = 1,
 	},
 	{
 		.id = MSM_BUS_MASTER_MSS_PROC,
diff --git a/arch/arm/mach-msm/pil-q6v4-lpass.c b/arch/arm/mach-msm/pil-q6v4-lpass.c
index 222bd10..a0432550 100644
--- a/arch/arm/mach-msm/pil-q6v4-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v4-lpass.c
@@ -17,11 +17,34 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
 
+#include <mach/scm.h>
+#include <mach/peripheral-loader.h>
+#include <mach/subsystem_restart.h>
+#include <mach/subsystem_notif.h>
+
+#include "smd_private.h"
+#include "ramdump.h"
+#include "sysmon.h"
 #include "peripheral-loader.h"
 #include "pil-q6v4.h"
 #include "scm-pas.h"
 
+struct lpass_q6v4 {
+	struct q6v4_data q6;
+	void *riva_notif_hdle;
+	void *modem_notif_hdle;
+	struct subsys_device *subsys;
+	struct subsys_desc subsys_desc;
+	int crash_shutdown;
+	void *ramdump_dev;
+	struct work_struct work;
+	int loadable;
+};
+
 static int pil_q6v4_lpass_boot(struct pil_desc *pil)
 {
 	struct q6v4_data *drv = pil_to_q6v4_data(pil);
@@ -62,59 +85,295 @@
 	.proxy_unvote = pil_q6v4_remove_proxy_votes,
 };
 
+static int riva_notifier_cb(struct notifier_block *this, unsigned long code,
+								void *ss_handle)
+{
+	int ret;
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		pr_debug("%s: R-Notify: Shutdown started\n", __func__);
+		ret = sysmon_send_event(SYSMON_SS_LPASS, "wcnss",
+				SUBSYS_BEFORE_SHUTDOWN);
+		if (ret < 0)
+			pr_err("%s: sysmon_send_event error %d\n", __func__, ret);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block rnb = {
+	.notifier_call = riva_notifier_cb,
+};
+
+static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
+								void *ss_handle)
+{
+	int ret;
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		pr_debug("%s: M-Notify: Shutdown started\n", __func__);
+		ret = sysmon_send_event(SYSMON_SS_LPASS, "modem",
+				SUBSYS_BEFORE_SHUTDOWN);
+		if (ret < 0)
+			pr_err("%s: sysmon_send_event error %d\n", __func__, ret);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block mnb = {
+	.notifier_call = modem_notifier_cb,
+};
+
+static void lpass_log_failure_reason(void)
+{
+	char *reason;
+	char buffer[81];
+	unsigned size;
+
+	reason = smem_get_entry(SMEM_SSR_REASON_LPASS0, &size);
+
+	if (!reason) {
+		pr_err("LPASS subsystem failure reason: (unknown, smem_get_entry failed).");
+		return;
+	}
+
+	if (reason[0] == '\0') {
+		pr_err("LPASS subsystem failure reason: (unknown, init value found)");
+		return;
+	}
+
+	size = min(size, sizeof(buffer) - 1);
+	memcpy(buffer, reason, size);
+	buffer[size] = '\0';
+	pr_err("LPASS subsystem failure reason: %s", buffer);
+	memset((void *)reason, 0x0, size);
+	wmb();
+}
+
+static void lpass_fatal_fn(struct work_struct *work)
+{
+	pr_err("Watchdog bite received from lpass Q6!\n");
+	lpass_log_failure_reason();
+	panic("Q6 Resetting the SoC");
+}
+
+static void lpass_smsm_state_cb(void *data, uint32_t old_state,
+				uint32_t new_state)
+{
+	struct lpass_q6v4 *drv = data;
+
+	/* Ignore if we're the one that set SMSM_RESET */
+	if (drv->crash_shutdown)
+		return;
+
+	if (new_state & SMSM_RESET) {
+		pr_err("%s: LPASS SMSM state changed to SMSM_RESET, new_state = %#x, old_state = %#x\n",
+				__func__, new_state, old_state);
+		lpass_log_failure_reason();
+		panic("Q6 Resetting the SoC");
+	}
+}
+
+#define SCM_Q6_NMI_CMD 0x1
+
+static void send_q6_nmi(void)
+{
+	/* Send NMI to QDSP6 via an SCM call. */
+	uint32_t cmd = 0x1;
+
+	scm_call(SCM_SVC_UTIL, SCM_Q6_NMI_CMD,
+		 &cmd, sizeof(cmd), NULL, 0);
+
+	/* Q6 requires a worst-case 100ms to dump caches etc. */
+	mdelay(100);
+	pr_debug("%s: Q6 NMI was sent.\n", __func__);
+}
+
+#define subsys_to_lpass(d) container_of(d, struct lpass_q6v4, subsys_desc)
+
+static int lpass_shutdown(const struct subsys_desc *subsys)
+{
+	struct lpass_q6v4 *drv = subsys_to_lpass(subsys);
+
+	send_q6_nmi();
+	if (drv->loadable)
+		pil_force_shutdown("q6");
+	disable_irq_nosync(drv->q6.wdog_irq);
+
+	return 0;
+}
+
+static int lpass_powerup(const struct subsys_desc *subsys)
+{
+	struct lpass_q6v4 *drv = subsys_to_lpass(subsys);
+	int ret = 0;
+
+	if (drv->loadable)
+		ret = pil_force_boot("q6");
+	enable_irq(drv->q6.wdog_irq);
+
+	return ret;
+}
+
+static struct ramdump_segment segments[] = {
+	{0x8da00000, 0x8f200000 - 0x8da00000},
+	{0x28400000, 0x20000}
+};
+
+static int lpass_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct lpass_q6v4 *drv = subsys_to_lpass(subsys);
+
+	if (!enable)
+		return 0;
+	return do_ramdump(drv->ramdump_dev, segments, ARRAY_SIZE(segments));
+}
+
+static void lpass_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct lpass_q6v4 *drv = subsys_to_lpass(subsys);
+
+	drv->crash_shutdown = 1;
+	send_q6_nmi();
+}
+
+static irqreturn_t lpass_wdog_bite_irq(int irq, void *dev_id)
+{
+	struct lpass_q6v4 *drv = dev_id;
+
+	disable_irq_nosync(drv->q6.wdog_irq);
+	schedule_work(&drv->work);
+
+	return IRQ_HANDLED;
+}
+
 static int __devinit pil_q6v4_lpass_driver_probe(struct platform_device *pdev)
 {
 	const struct pil_q6v4_pdata *pdata = pdev->dev.platform_data;
-	struct q6v4_data *drv;
+	struct lpass_q6v4 *drv;
+	struct q6v4_data *q6;
 	struct pil_desc *desc;
 	struct resource *res;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
+	int ret;
 
 	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
 	if (!drv)
 		return -ENOMEM;
 	platform_set_drvdata(pdev, drv);
+	q6 = &drv->q6;
+	desc = &q6->desc;
 
-	drv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (!drv->base)
-		return -ENOMEM;
+	q6->wdog_irq = platform_get_irq(pdev, 0);
+	if (q6->wdog_irq < 0)
+		return q6->wdog_irq;
 
-	drv->vreg = devm_regulator_get(&pdev->dev, "core_vdd");
-	if (IS_ERR(drv->vreg))
-		return PTR_ERR(drv->vreg);
+	drv->loadable = !!pdata; /* No pdata = don't use PIL */
+	if (drv->loadable) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res)
+			return -EINVAL;
+		q6->base = devm_ioremap(&pdev->dev, res->start,
+				resource_size(res));
+		if (!q6->base)
+			return -ENOMEM;
 
-	drv->xo = devm_clk_get(&pdev->dev, "xo");
-	if (IS_ERR(drv->xo))
-		return PTR_ERR(drv->xo);
+		q6->vreg = devm_regulator_get(&pdev->dev, "core_vdd");
+		if (IS_ERR(q6->vreg))
+			return PTR_ERR(q6->vreg);
 
-	desc = &drv->desc;
-	desc->name = pdata->name;
-	desc->dev = &pdev->dev;
-	desc->owner = THIS_MODULE;
-	desc->proxy_timeout = 10000;
-	pil_q6v4_init(drv, pdata);
+		q6->xo = devm_clk_get(&pdev->dev, "xo");
+		if (IS_ERR(q6->xo))
+			return PTR_ERR(q6->xo);
 
-	if (pas_supported(pdata->pas_id) > 0) {
-		desc->ops = &pil_q6v4_lpass_ops_trusted;
-		dev_info(&pdev->dev, "using secure boot\n");
-	} else {
-		desc->ops = &pil_q6v4_lpass_ops;
-		dev_info(&pdev->dev, "using non-secure boot\n");
+		desc->name = pdata->name;
+		desc->dev = &pdev->dev;
+		desc->owner = THIS_MODULE;
+		desc->proxy_timeout = 10000;
+		pil_q6v4_init(q6, pdata);
+
+		if (pas_supported(pdata->pas_id) > 0) {
+			desc->ops = &pil_q6v4_lpass_ops_trusted;
+			dev_info(&pdev->dev, "using secure boot\n");
+		} else {
+			desc->ops = &pil_q6v4_lpass_ops;
+			dev_info(&pdev->dev, "using non-secure boot\n");
+		}
+
+		q6->pil = msm_pil_register(desc);
+		if (IS_ERR(q6->pil))
+			return PTR_ERR(q6->pil);
 	}
 
-	drv->pil = msm_pil_register(desc);
-	if (IS_ERR(drv->pil))
-		return PTR_ERR(drv->pil);
+	drv->subsys_desc.name = "lpass";
+	drv->subsys_desc.shutdown = lpass_shutdown;
+	drv->subsys_desc.powerup = lpass_powerup;
+	drv->subsys_desc.ramdump = lpass_ramdump;
+	drv->subsys_desc.crash_shutdown = lpass_crash_shutdown;
+
+	INIT_WORK(&drv->work, lpass_fatal_fn);
+
+	drv->ramdump_dev = create_ramdump_device("lpass");
+	if (!drv->ramdump_dev) {
+		ret = -ENOMEM;
+		goto err_ramdump;
+	}
+
+	drv->subsys = subsys_register(&drv->subsys_desc);
+	if (IS_ERR(drv->subsys)) {
+		ret = PTR_ERR(drv->subsys);
+		goto err_subsys;
+	}
+
+	ret = devm_request_irq(&pdev->dev, q6->wdog_irq, lpass_wdog_bite_irq,
+			IRQF_TRIGGER_RISING, dev_name(&pdev->dev), drv);
+	if (ret)
+		goto err_irq;
+
+	ret = smsm_state_cb_register(SMSM_Q6_STATE, SMSM_RESET,
+			lpass_smsm_state_cb, drv);
+	if (ret < 0)
+		goto err_smsm;
+
+	drv->riva_notif_hdle = subsys_notif_register_notifier("riva", &rnb);
+	if (IS_ERR(drv->riva_notif_hdle)) {
+		ret = PTR_ERR(drv->riva_notif_hdle);
+		goto err_notif_riva;
+	}
+
+	drv->modem_notif_hdle = subsys_notif_register_notifier("modem", &mnb);
+	if (IS_ERR(drv->modem_notif_hdle)) {
+		ret = PTR_ERR(drv->modem_notif_hdle);
+		goto err_notif_modem;
+	}
 	return 0;
+err_notif_modem:
+	subsys_notif_unregister_notifier(drv->riva_notif_hdle, &rnb);
+err_notif_riva:
+	smsm_state_cb_deregister(SMSM_Q6_STATE, SMSM_RESET,
+			lpass_smsm_state_cb, drv);
+err_smsm:
+err_irq:
+	subsys_unregister(drv->subsys);
+err_subsys:
+	destroy_ramdump_device(drv->ramdump_dev);
+err_ramdump:
+	if (drv->loadable)
+		msm_pil_unregister(q6->pil);
+	return ret;
 }
 
 static int __devexit pil_q6v4_lpass_driver_exit(struct platform_device *pdev)
 {
-	struct q6v4_data *drv = platform_get_drvdata(pdev);
-	msm_pil_unregister(drv->pil);
+	struct lpass_q6v4 *drv = platform_get_drvdata(pdev);
+	subsys_notif_unregister_notifier(drv->riva_notif_hdle, &rnb);
+	subsys_notif_unregister_notifier(drv->modem_notif_hdle, &mnb);
+	smsm_state_cb_deregister(SMSM_Q6_STATE, SMSM_RESET,
+			lpass_smsm_state_cb, drv);
+	subsys_unregister(drv->subsys);
+	destroy_ramdump_device(drv->ramdump_dev);
+	if (drv->loadable)
+		msm_pil_unregister(drv->q6.pil);
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/pil-q6v4-mss.c b/arch/arm/mach-msm/pil-q6v4-mss.c
index 65b56d3..61b5536 100644
--- a/arch/arm/mach-msm/pil-q6v4-mss.c
+++ b/arch/arm/mach-msm/pil-q6v4-mss.c
@@ -18,9 +18,15 @@
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
+#include <linux/interrupt.h>
 
 #include <mach/msm_iomap.h>
+#include <mach/subsystem_restart.h>
+#include <mach/msm_smsm.h>
+#include <mach/peripheral-loader.h>
 
+#include "smd_private.h"
+#include "ramdump.h"
 #include "peripheral-loader.h"
 #include "pil-q6v4.h"
 #include "scm-pas.h"
@@ -35,6 +41,13 @@
 	struct q6v4_data q6_fw;
 	struct q6v4_data q6_sw;
 	void __iomem *modem_base;
+	void *fw_ramdump_dev;
+	void *sw_ramdump_dev;
+	void *smem_ramdump_dev;
+	struct subsys_device *subsys;
+	struct subsys_desc subsys_desc;
+	int crash_shutdown;
+	int loadable;
 };
 
 static DEFINE_MUTEX(pil_q6v4_modem_lock);
@@ -124,6 +137,138 @@
 	.proxy_unvote = pil_q6v4_remove_proxy_votes,
 };
 
+static void log_modem_sfr(void)
+{
+	u32 size;
+	char *smem_reason, reason[81];
+
+	smem_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size);
+	if (!smem_reason || !size) {
+		pr_err("modem subsystem failure reason: (unknown, smem_get_entry failed).\n");
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("modem subsystem failure reason: (unknown, init string found).\n");
+		return;
+	}
+
+	size = min(size, sizeof(reason)-1);
+	memcpy(reason, smem_reason, size);
+	reason[size] = '\0';
+	pr_err("modem subsystem failure reason: %s.\n", reason);
+
+	smem_reason[0] = '\0';
+	wmb();
+}
+
+static void restart_modem(struct q6v4_modem *drv)
+{
+	log_modem_sfr();
+	subsystem_restart_dev(drv->subsys);
+}
+
+#define desc_to_modem(d) container_of(d, struct q6v4_modem, subsys_desc)
+
+static int modem_shutdown(const struct subsys_desc *subsys)
+{
+	struct q6v4_modem *drv = desc_to_modem(subsys);
+
+	/* The watchdogs keep running even after the modem is shut down */
+	writel_relaxed(0x0, drv->q6_fw.wdog_base + 0x24);
+	writel_relaxed(0x0, drv->q6_sw.wdog_base + 0x24);
+	mb();
+
+	if (drv->loadable) {
+		pil_force_shutdown("modem");
+		pil_force_shutdown("modem_fw");
+	}
+
+	disable_irq_nosync(drv->q6_fw.wdog_irq);
+	disable_irq_nosync(drv->q6_sw.wdog_irq);
+
+	return 0;
+}
+
+static int modem_powerup(const struct subsys_desc *subsys)
+{
+	struct q6v4_modem *drv = desc_to_modem(subsys);
+
+	if (drv->loadable) {
+		pil_force_boot("modem_fw");
+		pil_force_boot("modem");
+	}
+	enable_irq(drv->q6_fw.wdog_irq);
+	enable_irq(drv->q6_sw.wdog_irq);
+	return 0;
+}
+
+static void modem_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct q6v4_modem *drv = desc_to_modem(subsys);
+
+	drv->crash_shutdown = 1;
+	smsm_reset_modem(SMSM_RESET);
+}
+
+static struct ramdump_segment sw_segments[] = {
+	{0x89000000, 0x8D400000 - 0x89000000},
+};
+
+static struct ramdump_segment fw_segments[] = {
+	{0x8D400000, 0x8DA00000 - 0x8D400000},
+};
+
+static struct ramdump_segment smem_segments[] = {
+	{0x80000000, 0x00200000},
+};
+
+static int modem_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct q6v4_modem *drv = desc_to_modem(subsys);
+	int ret;
+
+	if (!enable)
+		return 0;
+
+	ret = do_ramdump(drv->sw_ramdump_dev, sw_segments,
+		ARRAY_SIZE(sw_segments));
+	if (ret < 0)
+		return ret;
+
+	ret = do_ramdump(drv->fw_ramdump_dev, fw_segments,
+		ARRAY_SIZE(fw_segments));
+	if (ret < 0)
+		return ret;
+
+	ret = do_ramdump(drv->smem_ramdump_dev, smem_segments,
+		ARRAY_SIZE(smem_segments));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state)
+{
+	struct q6v4_modem *drv = data;
+
+	/* Ignore if we're the one that set SMSM_RESET */
+	if (drv->crash_shutdown)
+		return;
+
+	if (new_state & SMSM_RESET) {
+		pr_err("Probable fatal error on the modem.\n");
+		restart_modem(drv);
+	}
+}
+
+static irqreturn_t modem_wdog_bite_irq(int irq, void *dev_id)
+{
+	struct q6v4_modem *drv = dev_id;
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
 static int __devinit
 pil_q6v4_proc_init(struct q6v4_data *drv, struct platform_device *pdev, int i)
 {
@@ -134,7 +279,7 @@
 	struct pil_desc *desc;
 	struct resource *res;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + (i * 2));
 	if (!res)
 		return -EINVAL;
 
@@ -142,6 +287,15 @@
 	if (!drv->base)
 		return -ENOMEM;
 
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + (i * 2));
+	if (!res)
+		return -EINVAL;
+
+	drv->wdog_base = devm_ioremap(&pdev->dev, res->start,
+			resource_size(res));
+	if (!drv->wdog_base)
+		return -ENOMEM;
+
 	snprintf(reg_name, sizeof(reg_name), "%s_core_vdd", name[i]);
 	drv->vreg = devm_regulator_get(&pdev->dev, reg_name);
 	if (IS_ERR(drv->vreg))
@@ -176,6 +330,7 @@
 	struct resource *res;
 	struct regulator *pll_supply;
 	int ret;
+	const struct pil_q6v4_pdata *pdata = pdev->dev.platform_data;
 
 	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
 	if (!drv)
@@ -185,57 +340,139 @@
 	drv_fw = &drv->q6_fw;
 	drv_sw = &drv->q6_sw;
 
-	ret = pil_q6v4_proc_init(drv_fw, pdev, 0);
+	drv_fw->wdog_irq = platform_get_irq(pdev, 0);
+	if (drv_fw->wdog_irq < 0)
+		return drv_fw->wdog_irq;
+
+	drv_sw->wdog_irq = platform_get_irq(pdev, 1);
+	if (drv_sw->wdog_irq < 0)
+		return drv_sw->wdog_irq;
+
+	drv->loadable = !!pdata; /* No pdata = don't use PIL */
+	if (drv->loadable) {
+		ret = pil_q6v4_proc_init(drv_fw, pdev, 0);
+		if (ret)
+			return ret;
+
+		ret = pil_q6v4_proc_init(drv_sw, pdev, 1);
+		if (ret)
+			return ret;
+
+		pll_supply = devm_regulator_get(&pdev->dev, "pll_vdd");
+		drv_fw->pll_supply = drv_sw->pll_supply = pll_supply;
+		if (IS_ERR(pll_supply))
+			return PTR_ERR(pll_supply);
+
+		ret = regulator_set_voltage(pll_supply, 1800000, 1800000);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to set pll voltage\n");
+			return ret;
+		}
+
+		ret = regulator_set_optimum_mode(pll_supply, 100000);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "failed to set pll optimum mode\n");
+			return ret;
+		}
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res)
+			return -EINVAL;
+
+		drv->modem_base = devm_ioremap(&pdev->dev, res->start,
+				resource_size(res));
+		if (!drv->modem_base)
+			return -ENOMEM;
+
+		drv_fw->pil = msm_pil_register(&drv_fw->desc);
+		if (IS_ERR(drv_fw->pil))
+			return PTR_ERR(drv_fw->pil);
+
+		drv_sw->pil = msm_pil_register(&drv_sw->desc);
+		if (IS_ERR(drv_sw->pil)) {
+			ret = PTR_ERR(drv_sw->pil);
+			goto err_pil_sw;
+		}
+	}
+
+	drv->subsys_desc.name = "modem";
+	drv->subsys_desc.shutdown = modem_shutdown;
+	drv->subsys_desc.powerup = modem_powerup;
+	drv->subsys_desc.ramdump = modem_ramdump;
+	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
+
+	drv->fw_ramdump_dev = create_ramdump_device("modem_fw");
+	if (!drv->fw_ramdump_dev) {
+		ret = -ENOMEM;
+		goto err_fw_ramdump;
+	}
+
+	drv->sw_ramdump_dev = create_ramdump_device("modem_sw");
+	if (!drv->sw_ramdump_dev) {
+		ret = -ENOMEM;
+		goto err_sw_ramdump;
+	}
+
+	drv->smem_ramdump_dev = create_ramdump_device("smem-modem");
+	if (!drv->smem_ramdump_dev) {
+		ret = -ENOMEM;
+		goto err_smem_ramdump;
+	}
+
+	drv->subsys = subsys_register(&drv->subsys_desc);
+	if (IS_ERR(drv->subsys)) {
+		ret = PTR_ERR(drv->subsys);
+		goto err_subsys;
+	}
+
+	ret = devm_request_irq(&pdev->dev, drv_fw->wdog_irq,
+			modem_wdog_bite_irq, IRQF_TRIGGER_RISING,
+			dev_name(&pdev->dev), drv);
 	if (ret)
-		return ret;
+		goto err_irq;
 
-	ret = pil_q6v4_proc_init(drv_sw, pdev, 1);
+	ret = devm_request_irq(&pdev->dev, drv_sw->wdog_irq,
+			modem_wdog_bite_irq, IRQF_TRIGGER_RISING,
+			dev_name(&pdev->dev), drv);
 	if (ret)
-		return ret;
+		goto err_irq;
 
-	pll_supply = devm_regulator_get(&pdev->dev, "pll_vdd");
-	drv_fw->pll_supply = drv_sw->pll_supply = pll_supply;
-	if (IS_ERR(pll_supply))
-		return PTR_ERR(pll_supply);
-
-	ret = regulator_set_voltage(pll_supply, 1800000, 1800000);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to set pll voltage\n");
-		return ret;
-	}
-
-	ret = regulator_set_optimum_mode(pll_supply, 100000);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to set pll optimum mode\n");
-		return ret;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
-
-	drv->modem_base = devm_ioremap(&pdev->dev, res->start,
-			resource_size(res));
-	if (!drv->modem_base)
-		return -ENOMEM;
-
-	drv_fw->pil = msm_pil_register(&drv_fw->desc);
-	if (IS_ERR(drv_fw->pil))
-		return PTR_ERR(drv_fw->pil);
-
-	drv_sw->pil = msm_pil_register(&drv_sw->desc);
-	if (IS_ERR(drv_sw->pil)) {
-		msm_pil_unregister(drv_fw->pil);
-		return PTR_ERR(drv_sw->pil);
-	}
+	ret = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
+			smsm_state_cb, drv);
+	if (ret)
+		goto err_irq;
 	return 0;
+
+err_irq:
+	subsys_unregister(drv->subsys);
+err_subsys:
+	destroy_ramdump_device(drv->smem_ramdump_dev);
+err_smem_ramdump:
+	destroy_ramdump_device(drv->sw_ramdump_dev);
+err_sw_ramdump:
+	destroy_ramdump_device(drv->fw_ramdump_dev);
+err_fw_ramdump:
+	if (drv->loadable)
+		msm_pil_unregister(drv_sw->pil);
+err_pil_sw:
+	msm_pil_unregister(drv_fw->pil);
+	return ret;
 }
 
 static int __devexit pil_q6v4_modem_driver_exit(struct platform_device *pdev)
 {
 	struct q6v4_modem *drv = platform_get_drvdata(pdev);
-	msm_pil_unregister(drv->q6_sw.pil);
-	msm_pil_unregister(drv->q6_fw.pil);
+
+	smsm_state_cb_deregister(SMSM_MODEM_STATE, SMSM_RESET,
+			smsm_state_cb, drv);
+	subsys_unregister(drv->subsys);
+	destroy_ramdump_device(drv->smem_ramdump_dev);
+	destroy_ramdump_device(drv->sw_ramdump_dev);
+	destroy_ramdump_device(drv->fw_ramdump_dev);
+	if (drv->loadable) {
+		msm_pil_unregister(drv->q6_sw.pil);
+		msm_pil_unregister(drv->q6_fw.pil);
+	}
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/pil-q6v4.h b/arch/arm/mach-msm/pil-q6v4.h
index d280d84..0395bed 100644
--- a/arch/arm/mach-msm/pil-q6v4.h
+++ b/arch/arm/mach-msm/pil-q6v4.h
@@ -35,6 +35,7 @@
  */
 struct q6v4_data {
 	void __iomem *base;
+	void __iomem *wdog_base;
 	unsigned long start_addr;
 	unsigned long strap_tcm_base;
 	unsigned long strap_ahb_upper;
@@ -43,6 +44,7 @@
 	void __iomem *jtag_clk_reg;
 	unsigned pas_id;
 	int bus_port;
+	int wdog_irq;
 
 	struct regulator *vreg;
 	struct regulator *pll_supply;
diff --git a/arch/arm/mach-msm/pm-boot.c b/arch/arm/mach-msm/pm-boot.c
index ed15a0c..7bc4fe0 100644
--- a/arch/arm/mach-msm/pm-boot.c
+++ b/arch/arm/mach-msm/pm-boot.c
@@ -211,7 +211,10 @@
 	char *key = NULL;
 	uint32_t val = 0;
 	int ret = 0;
-	int flag = 0;
+	uint32_t vaddr_val;
+
+	pdata.p_addr = 0;
+	vaddr_val = 0;
 
 	key = "qcom,mode";
 	ret = of_property_read_u32(pdev->dev.of_node, key, &val);
@@ -223,24 +226,43 @@
 
 	key = "qcom,phy-addr";
 	ret = of_property_read_u32(pdev->dev.of_node, key, &val);
-	if (ret && pdata.mode == MSM_PM_BOOT_CONFIG_RESET_VECTOR_PHYS)
-		goto fail;
-	if (!ret) {
+	if (!ret)
 		pdata.p_addr = val;
-		flag++;
-	}
+
 
 	key = "qcom,virt-addr";
-	ret = of_property_read_u32(pdev->dev.of_node, key, &val);
-	if (ret && pdata.mode == MSM_PM_BOOT_CONFIG_RESET_VECTOR_VIRT)
-		goto fail;
-	if (!ret) {
-		pdata.v_addr = (void *)val;
-		flag++;
-	}
+	ret = of_property_read_u32(pdev->dev.of_node, key, &vaddr_val);
 
-	if (pdata.mode == MSM_PM_BOOT_CONFIG_REMAP_BOOT_ADDR && (flag != 2)) {
-		key = "addresses for boot remap";
+	switch (pdata.mode) {
+	case MSM_PM_BOOT_CONFIG_RESET_VECTOR_PHYS:
+		if (!pdata.p_addr) {
+			key = "qcom,phy-addr";
+			goto fail;
+		}
+		break;
+	case MSM_PM_BOOT_CONFIG_RESET_VECTOR_VIRT:
+		if (!vaddr_val)
+			goto fail;
+
+		pdata.v_addr = (void *)vaddr_val;
+		break;
+	case MSM_PM_BOOT_CONFIG_REMAP_BOOT_ADDR:
+		if (!vaddr_val)
+			goto fail;
+
+		pdata.v_addr = ioremap_nocache(vaddr_val, SZ_8);
+
+		pdata.p_addr = allocate_contiguous_ebi_nomap(SZ_8, SZ_64K);
+		if (!pdata.p_addr) {
+			key = "qcom,phy-addr";
+			goto fail;
+		}
+		break;
+	case MSM_PM_BOOT_CONFIG_TZ:
+		break;
+	default:
+		pr_err("%s: Unsupported boot mode %d\n",
+			__func__, pdata.mode);
 		goto fail;
 	}
 
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 969af98..0beb952 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -35,6 +35,8 @@
 	HW_PLATFORM_LIQUID  = 9,
 	/* Dragonboard platform id is assigned as 10 in CDT */
 	HW_PLATFORM_DRAGON	= 10,
+	HW_PLATFORM_HRD	= 13,
+	HW_PLATFORM_DTV	= 14,
 	HW_PLATFORM_INVALID
 };
 
@@ -47,7 +49,9 @@
 	[HW_PLATFORM_SVLTE_SURF] = "SLVTE_SURF",
 	[HW_PLATFORM_MTP] = "MTP",
 	[HW_PLATFORM_LIQUID] = "Liquid",
-	[HW_PLATFORM_DRAGON] = "Dragon"
+	[HW_PLATFORM_DRAGON] = "Dragon",
+	[HW_PLATFORM_HRD] = "HRD",
+	[HW_PLATFORM_DTV] = "DTV",
 };
 
 enum {
diff --git a/arch/arm/mach-msm/wcnss-ssr-8974.c b/arch/arm/mach-msm/wcnss-ssr-8974.c
index b837efc..8841b52 100644
--- a/arch/arm/mach-msm/wcnss-ssr-8974.c
+++ b/arch/arm/mach-msm/wcnss-ssr-8974.c
@@ -14,7 +14,11 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/err.h>
-
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/wcnss_wlan.h>
+#include <linux/delay.h>
+#include <mach/peripheral-loader.h>
 #include <mach/subsystem_restart.h>
 #include <mach/msm_smsm.h>
 
@@ -24,6 +28,7 @@
 static int ss_restart_inprogress;
 static int wcnss_crash;
 static struct subsys_device *wcnss_ssr_dev;
+static struct delayed_work cancel_vote_work;
 
 #define WCNSS_APSS_WDOG_BITE_RESET_RDY_IRQ		181
 
@@ -98,14 +103,44 @@
 	return IRQ_HANDLED;
 }
 
+static void wcnss_post_bootup(struct work_struct *work)
+{
+	struct platform_device *pdev = wcnss_get_platform_device();
+	struct wcnss_wlan_config *pwlanconfig = wcnss_get_wlan_config();
+
+	pr_debug(MODULE_NAME ": Cancel APPS vote for Iris & Pronto\n");
+
+	wcnss_wlan_power(&pdev->dev, pwlanconfig,
+		WCNSS_WLAN_SWITCH_OFF);
+}
 
 static int wcnss_shutdown(const struct subsys_desc *subsys)
 {
+	pil_force_shutdown("wcnss");
+	flush_delayed_work(&cancel_vote_work);
+	wcnss_flush_delayed_boot_votes();
+	disable_irq_nosync(WCNSS_APSS_WDOG_BITE_RESET_RDY_IRQ);
+
 	return 0;
 }
 
 static int wcnss_powerup(const struct subsys_desc *subsys)
 {
+	struct platform_device *pdev = wcnss_get_platform_device();
+	struct wcnss_wlan_config *pwlanconfig = wcnss_get_wlan_config();
+	int    ret = -1;
+
+	if (pdev && pwlanconfig)
+		ret = wcnss_wlan_power(&pdev->dev, pwlanconfig,
+					WCNSS_WLAN_SWITCH_ON);
+	if (!ret) {
+		msleep(1000);
+		pil_force_boot("wcnss");
+	}
+	ss_restart_inprogress = false;
+	enable_irq(WCNSS_APSS_WDOG_BITE_RESET_RDY_IRQ);
+	schedule_delayed_work(&cancel_vote_work, msecs_to_jiffies(5000));
+
 	return 0;
 }
 
@@ -155,6 +190,8 @@
 	if (IS_ERR(wcnss_ssr_dev))
 		return PTR_ERR(wcnss_ssr_dev);
 
+	INIT_DELAYED_WORK(&cancel_vote_work, wcnss_post_bootup);
+
 	pr_info("%s: module initialized\n", MODULE_NAME);
 out:
 	return ret;
diff --git a/arch/arm/mach-msm/wdog_debug.c b/arch/arm/mach-msm/wdog_debug.c
index 82800cf..08dd9ce 100644
--- a/arch/arm/mach-msm/wdog_debug.c
+++ b/arch/arm/mach-msm/wdog_debug.c
@@ -110,7 +110,6 @@
 		goto err;
 	wdog_data->dev = &pdev->dev;
 	platform_set_drvdata(pdev, wdog_data);
-	msm_enable_wdog_debug();
 	return 0;
 err:
 	kzfree(wdog_data);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index cb245ee..e351eb0 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -880,6 +880,16 @@
 	  This option enables optimisations for the PL310 cache
 	  controller.
 
+config CACHE_PL310_ERP
+	tristate "PL310 CACHE Error Reporting"
+	depends on CACHE_PL310
+	help
+	  Say 'Y' here to enable reporting of external L2 cache errors.
+	  This feature can be used as a system debugging aid if cache
+	  corruption is suspected.
+	  Cache error statistics are also reported via sysfs at
+	  /sys/devices/platform/pl310_erp/cache_erp.
+
 config CACHE_TAUROS2
 	bool "Enable the Tauros2 L2 cache controller"
 	depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 1c415af..6314e94 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -100,6 +100,7 @@
 
 obj-$(CONFIG_CACHE_FEROCEON_L2)	+= cache-feroceon-l2.o
 obj-$(CONFIG_CACHE_L2X0)	+= cache-l2x0.o
+obj-$(CONFIG_CACHE_PL310_ERP)   += cache-pl310-erp.o
 obj-$(CONFIG_CACHE_XSC3L2)	+= cache-xsc3l2.o
 obj-$(CONFIG_CACHE_TAUROS2)	+= cache-tauros2.o
 obj-$(CONFIG_VCM)		+= vcm.o vcm_alloc.o
diff --git a/arch/arm/mm/cache-pl310-erp.c b/arch/arm/mm/cache-pl310-erp.c
new file mode 100644
index 0000000..ad75143
--- /dev/null
+++ b/arch/arm/mm/cache-pl310-erp.c
@@ -0,0 +1,283 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
+#include <asm/cputype.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#define MODULE_NAME "pl310_erp"
+
+struct pl310_drv_data {
+	unsigned int irq;
+	unsigned int ecntr;
+	unsigned int parrt;
+	unsigned int parrd;
+	unsigned int errwd;
+	unsigned int errwt;
+	unsigned int errrt;
+	unsigned int errrd;
+	unsigned int slverr;
+	unsigned int decerr;
+	void __iomem *base;
+};
+
+#define ECNTR	BIT(0)
+#define PARRT	BIT(1)
+#define PARRD	BIT(2)
+#define ERRWT	BIT(3)
+#define ERRWD	BIT(4)
+#define ERRRT	BIT(5)
+#define ERRRD	BIT(6)
+#define SLVERR	BIT(7)
+#define DECERR	BIT(8)
+
+static irqreturn_t pl310_erp_irq(int irq, void *dev_id)
+{
+	struct pl310_drv_data *p = platform_get_drvdata(dev_id);
+	uint16_t mask_int_stat, int_clear = 0, error = 0;
+
+	mask_int_stat = readl_relaxed(p->base + L2X0_MASKED_INTR_STAT);
+
+	if (mask_int_stat & ECNTR) {
+		pr_alert("Event Counter1/0 Overflow Increment error\n");
+		p->ecntr++;
+		int_clear = mask_int_stat & ECNTR;
+	}
+
+	if (mask_int_stat & PARRT) {
+		pr_alert("Read parity error on L2 Tag RAM\n");
+		p->parrt++;
+		error = 1;
+		int_clear = mask_int_stat & PARRT;
+	}
+
+	if (mask_int_stat & PARRD) {
+		pr_alert("Read parity error on L2 Data RAM\n");
+		p->parrd++;
+		error = 1;
+		int_clear = mask_int_stat & PARRD;
+	}
+
+	if (mask_int_stat & ERRWT) {
+		pr_alert("Write error on L2 Tag RAM\n");
+		p->errwt++;
+		int_clear = mask_int_stat & ERRWT;
+	}
+
+	if (mask_int_stat & ERRWD) {
+		pr_alert("Write error on L2 Data RAM\n");
+		p->errwd++;
+		int_clear = mask_int_stat & ERRWD;
+	}
+
+	if (mask_int_stat & ERRRT) {
+		pr_alert("Read error on L2 Tag RAM\n");
+		p->errrt++;
+		int_clear = mask_int_stat & ERRRT;
+	}
+
+	if (mask_int_stat & ERRRD) {
+		pr_alert("Read error on L2 Data RAM\n");
+		p->errrd++;
+		int_clear = mask_int_stat & ERRRD;
+	}
+
+	if (mask_int_stat & DECERR) {
+		pr_alert("L2 master port decode error\n");
+		p->decerr++;
+		int_clear = mask_int_stat & DECERR;
+	}
+
+	if (mask_int_stat & SLVERR) {
+		pr_alert("L2 slave port error\n");
+		p->slverr++;
+		int_clear = mask_int_stat & SLVERR;
+	}
+
+	writel_relaxed(int_clear, p->base + L2X0_INTR_CLEAR);
+
+	/* Make sure the interrupts are cleared */
+	mb();
+
+	/* A warning is thrown for any L2 interrupt other than a parity
+	 * error on the tag/data RAM, irrespective of which bits are set;
+	 * parity errors hit the BUG_ON below instead.
+	 */
+	WARN_ON(!error);
+
+	/* Panic in case we encounter parity error in TAG/DATA Ram */
+	BUG_ON(error);
+
+	return IRQ_HANDLED;
+}
+
+static void pl310_mask_int(struct pl310_drv_data *p, bool enable)
+{
+	uint16_t mask;
+
+	if (enable)
+		mask = 0x1FF;
+	else
+		mask = 0x0;
+
+	writel_relaxed(mask, p->base + L2X0_INTR_MASK);
+
+	/* Make sure Mask is updated */
+	mb();
+
+	pr_debug("Mask interrupt %x\n",
+			readl_relaxed(p->base + L2X0_INTR_MASK));
+}
+
+static int pl310_erp_show(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	struct pl310_drv_data *p = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE,
+		"L2CC Interrupt Number:\t\t\t%d\n"\
+		"Event Counter1/0 Overflow Increment:\t%u\n"\
+		"Parity Error on L2 Tag RAM (Read):\t%u\n"\
+		"Parity Error on L2 Data RAM (Read):\t%u\n"\
+		"Error on L2 Tag RAM (Write):\t\t%u\n"\
+		"Error on L2 Data RAM (Write):\t\t%u\n"\
+		"Error on L2 Tag RAM (Read):\t\t%u\n"\
+		"Error on L2 Data RAM (Read):\t\t%u\n"\
+		"Slave Error from L3 Port:\t\t%u\n"\
+		"Decode Error from L3 Port:\t\t%u\n",
+		p->irq, p->ecntr, p->parrt, p->parrd, p->errwt, p->errwd,
+		p->errrt, p->errrd, p->slverr, p->decerr);
+}
+
+static DEVICE_ATTR(cache_erp, 0664, pl310_erp_show, NULL);
+
+static int __init pl310_create_sysfs(struct device *dev)
+{
+	/* create a sysfs entry at
+	 * /sys/devices/platform/pl310_erp/cache_erp
+	 */
+	return device_create_file(dev, &dev_attr_cache_erp);
+}
+
+static int __devinit pl310_cache_erp_probe(struct platform_device *pdev)
+{
+	struct resource *r;
+	struct pl310_drv_data *drv_data;
+	int ret;
+
+	drv_data = devm_kzalloc(&pdev->dev, sizeof(struct pl310_drv_data),
+						GFP_KERNEL);
+	if (drv_data == NULL) {
+		dev_err(&pdev->dev, "cannot allocate memory\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		dev_err(&pdev->dev, "No L2 base address\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
+					"erp")) {
+		ret = -EBUSY;
+		goto error;
+	}
+
+	drv_data->base = devm_ioremap_nocache(&pdev->dev, r->start,
+						resource_size(r));
+	if (!drv_data->base) {
+		dev_err(&pdev->dev, "failed to ioremap 0x%x\n", r->start);
+		ret = -ENOMEM;
+		goto error;
+	}
+	dev_dbg(&pdev->dev, "L2CC base 0x%p\n", drv_data->base);
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "l2_irq");
+	if (!r) {
+		dev_err(&pdev->dev, "No L2 IRQ resource\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	drv_data->irq = r->start;
+
+	ret = devm_request_irq(&pdev->dev, drv_data->irq, pl310_erp_irq,
+			IRQF_TRIGGER_RISING, "l2cc_intr", pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "request irq for L2 interrupt failed\n");
+		goto error;
+	}
+
+	platform_set_drvdata(pdev, drv_data);
+
+	pl310_mask_int(drv_data, true);
+
+	ret = pl310_create_sysfs(&pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to create sysfs entry\n");
+		goto sysfs_err;
+	}
+
+	return 0;
+
+sysfs_err:
+	platform_set_drvdata(pdev, NULL);
+	pl310_mask_int(drv_data, false);
+error:
+	return ret;
+}
+
+static int __devexit pl310_cache_erp_remove(struct platform_device *pdev)
+{
+	struct pl310_drv_data *p = platform_get_drvdata(pdev);
+
+	pl310_mask_int(p, false);
+
+	device_remove_file(&pdev->dev, &dev_attr_cache_erp);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver pl310_cache_erp_driver = {
+	.probe = pl310_cache_erp_probe,
+	.remove = __devexit_p(pl310_cache_erp_remove),
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pl310_cache_erp_init(void)
+{
+	return platform_driver_register(&pl310_cache_erp_driver);
+}
+module_init(pl310_cache_erp_init);
+
+static void __exit pl310_cache_erp_exit(void)
+{
+	platform_driver_unregister(&pl310_cache_erp_driver);
+}
+module_exit(pl310_cache_erp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PL310 cache error reporting driver");
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 785ba6c..78161b6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -869,7 +869,11 @@
 static void dbs_input_event(struct input_handle *handle, unsigned int type,
 		unsigned int code, int value)
 {
-	int i;
+	int i, j;
+	struct cpumask cpus_scheduled;
+	struct cpu_dbs_info_s *dbs_info;
+	cpumask_clear(&cpus_scheduled);
+
 
 	if ((dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) ||
 		(dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) {
@@ -878,7 +882,23 @@
 	}
 
 	for_each_online_cpu(i) {
+		dbs_info = &per_cpu(od_cpu_dbs_info, i);
+
+		if (!dbs_info->cur_policy) {
+			pr_err("Dbs policy is NULL\n");
+			continue;
+		}
+
+		for_each_cpu(j, &cpus_scheduled) {
+			if (cpumask_test_cpu(j, dbs_info->cur_policy->cpus))
+				goto skip_schedule;
+		}
+		cpumask_set_cpu(i, &cpus_scheduled);
 		queue_work_on(i, input_wq, &per_cpu(dbs_refresh_work, i));
+
+		/* This CPU is already running at new frequency */
+skip_schedule:
+		;
 	}
 }
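
The loop above queues the refresh work at most once per cpufreq policy: a CPU is skipped if any CPU already scheduled shares its policy mask. A stand-alone sketch of that de-duplication, with the CPU-to-policy grouping chosen purely as an example (plain bitmasks stand in for struct cpumask):

#include <stdio.h>

int main(void)
{
	/* Assumed topology: CPUs 0-3 share one policy, CPUs 4-7 another. */
	unsigned int policy_of[8] = { 0x0f, 0x0f, 0x0f, 0x0f,
				      0xf0, 0xf0, 0xf0, 0xf0 };
	unsigned int cpus_scheduled = 0;
	int cpu;

	for (cpu = 0; cpu < 8; cpu++) {
		/* Skip if a sibling in the same policy already got the work item. */
		if (cpus_scheduled & policy_of[cpu])
			continue;
		cpus_scheduled |= 1u << cpu;
		printf("queue refresh work on cpu %d\n", cpu);
	}
	return 0;
}

With these example masks, work is queued only on CPU 0 and CPU 4, one per policy.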
 
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 0c61d7f..afe384b 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1481,6 +1481,8 @@
 		return -ENOMEM;
 
 	memdesc->sglen = sglen;
+	memdesc->sglen_alloc = sglen;
+
 	sg_init_table(memdesc->sg, sglen);
 
 	spin_lock(&current->mm->page_table_lock);
@@ -1771,6 +1773,11 @@
 
 	entry->memdesc.priv |= param->flags & KGSL_MEMTYPE_MASK;
 
+	if (entry->memdesc.size >= SZ_1M)
+		entry->memdesc.priv |= ilog2(SZ_1M) << KGSL_MEMALIGN_SHIFT;
+	else if (entry->memdesc.size >= SZ_64K)
+		entry->memdesc.priv |= ilog2(SZ_64K) << KGSL_MEMALIGN_SHIFT;
+
 	result = kgsl_mmu_map(private->pagetable,
 			      &entry->memdesc,
 			      GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 472474b..2861117 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -135,7 +135,8 @@
 	unsigned int size;
 	unsigned int priv;
 	struct scatterlist *sg;
-	unsigned int sglen;
+	unsigned int sglen; /* Active entries in the sglist */
+	unsigned int sglen_alloc;  /* Allocated entries in the sglist */
 	struct kgsl_memdesc_ops *ops;
 	int flags;
 };
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 40ed7ca..b49c260 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -168,8 +168,9 @@
 	struct kgsl_mem_entry *entry;
 	struct rb_node *node;
 	struct kgsl_process_private *private = s->private;
-	char flags[3];
+	char flags[4];
 	char usage[16];
+	unsigned int align;
 
 	spin_lock(&private->mem_lock);
 	seq_printf(s, "%8s %8s %5s %10s %16s %5s\n",
@@ -182,7 +183,16 @@
 
 		flags[0] = m->priv & KGSL_MEMFLAGS_GLOBAL ?  'g' : '-';
 		flags[1] = m->priv & KGSL_MEMFLAGS_GPUREADONLY ? 'r' : '-';
-		flags[2] = '\0';
+
+		align = (m->priv & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
+		if (align >= ilog2(SZ_1M))
+			flags[2] = 'L';
+		else if (align >= ilog2(SZ_64K))
+			flags[2] = 'l';
+		else
+			flags[2] = '-';
+
+		flags[3] = '\0';
 
 		kgsl_get_memory_usage(usage, sizeof(usage), m->priv);
 
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 54ba5ad..dbb88ee 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -606,6 +606,7 @@
 	int ret;
 	struct gen_pool *pool;
 	int size;
+	int page_align = ilog2(PAGE_SIZE);
 
 	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
 		if (memdesc->sglen == 1) {
@@ -630,7 +631,17 @@
 	/* Allocate from kgsl pool if it exists for global mappings */
 	pool = _get_pool(pagetable, memdesc->priv);
 
-	memdesc->gpuaddr = gen_pool_alloc(pool, size);
+	/* Allocate aligned virtual addresses for iommu. This allows
+	 * more efficient pagetable entries if the physical memory
+	 * is also aligned. Don't do this for GPUMMU, because
+	 * the address space is so small.
+	 */
+	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype() &&
+	    (memdesc->priv & KGSL_MEMALIGN_MASK)) {
+		page_align = (memdesc->priv & KGSL_MEMALIGN_MASK)
+				>> KGSL_MEMALIGN_SHIFT;
+	}
+	memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size, page_align);
 	if (memdesc->gpuaddr == 0) {
 		KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
 			size,
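
The alignment hint spans several files: the kgsl.c hunk above stores an alignment order in memdesc->priv, the kgsl_debugfs.c hunk decodes it for the 'l'/'L' flags, and the hunk here passes it to gen_pool_alloc_aligned() for the GPU virtual address. A minimal user-space sketch of the encode/decode step follows; the KGSL_MEMALIGN_SHIFT/MASK values are assumptions for illustration and are not taken from this patch.

#include <stdio.h>

/* Assumed values, for illustration only. */
#define KGSL_MEMALIGN_SHIFT	16
#define KGSL_MEMALIGN_MASK	(0xff << KGSL_MEMALIGN_SHIFT)

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int size = 3u << 20;	/* example: 3 MB mapping request */
	unsigned int priv = 0, align;

	/* Encode: large mappings record a 1M or 64K alignment order. */
	if (size >= (1u << 20))
		priv |= ilog2_u(1u << 20) << KGSL_MEMALIGN_SHIFT;
	else if (size >= (64u << 10))
		priv |= ilog2_u(64u << 10) << KGSL_MEMALIGN_SHIFT;

	/* Decode: the order handed to the aligned pool allocation. */
	align = (priv & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
	printf("GPU VA alignment order %u (%u bytes)\n", align, 1u << align);
	return 0;
}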
diff --git a/drivers/gpu/msm/kgsl_pwrscale_msm.c b/drivers/gpu/msm/kgsl_pwrscale_msm.c
index 2e4ac3b..b302bee 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_msm.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_msm.c
@@ -126,13 +126,14 @@
 	return;
 }
 
-static void msm_remove_io_fraction(struct kgsl_device *device)
+static void msm_set_io_fraction(struct kgsl_device *device,
+				unsigned int value)
 {
 	int i;
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 
 	for (i = 0; i < pwr->num_pwrlevels; i++)
-		pwr->pwrlevels[i].io_fraction = 100;
+		pwr->pwrlevels[i].io_fraction = value;
 
 }
 
@@ -196,7 +197,7 @@
 		} else {
 			priv->gpu_busy = 1;
 		}
-		msm_remove_io_fraction(device);
+		msm_set_io_fraction(device, 0);
 		return 0;
 	}
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index d48337a..77617ba 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -317,21 +317,42 @@
 				struct vm_area_struct *vma,
 				struct vm_fault *vmf)
 {
-	unsigned long offset;
-	struct page *page;
-	int i;
+	int i, pgoff;
+	struct scatterlist *s = memdesc->sg;
+	unsigned int offset;
 
-	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
+	offset = ((unsigned long) vmf->virtual_address - vma->vm_start);
 
-	i = offset >> PAGE_SHIFT;
-	page = sg_page(&memdesc->sg[i]);
-	if (page == NULL)
+	if (offset >= memdesc->size)
 		return VM_FAULT_SIGBUS;
 
-	get_page(page);
+	pgoff = offset >> PAGE_SHIFT;
 
-	vmf->page = page;
-	return 0;
+	/*
+	 * The sglist might be comprised of mixed blocks of memory depending
+	 * on how many 64K pages were allocated.  This means we have to do math
+	 * to find the actual 4K page to map in user space
+	 */
+
+	for (i = 0; i < memdesc->sglen; i++) {
+		int npages = s->length >> PAGE_SHIFT;
+
+		if (pgoff < npages) {
+			struct page *page = sg_page(s);
+
+			page = nth_page(page, pgoff);
+
+			get_page(page);
+			vmf->page = page;
+
+			return 0;
+		}
+
+		pgoff -= npages;
+		s = sg_next(s);
+	}
+
+	return VM_FAULT_SIGBUS;
 }
 
 static int kgsl_page_alloc_vmflags(struct kgsl_memdesc *memdesc)
@@ -357,7 +378,7 @@
 	}
 	if (memdesc->sg)
 		for_each_sg(memdesc->sg, sg, sglen, i)
-			__free_page(sg_page(sg));
+			__free_pages(sg_page(sg), get_order(sg->length));
 }
 
 static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
@@ -379,23 +400,32 @@
 		pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
 		struct page **pages = NULL;
 		struct scatterlist *sg;
+		int npages = PAGE_ALIGN(memdesc->size) >> PAGE_SHIFT;
 		int sglen = memdesc->sglen;
-		int i;
+		int i, count = 0;
 
 		/* Don't map the guard page if it exists */
 		if (memdesc->flags & KGSL_MEMDESC_GUARD_PAGE)
 			sglen--;
 
 		/* create a list of pages to call vmap */
-		pages = vmalloc(sglen * sizeof(struct page *));
+		pages = vmalloc(npages * sizeof(struct page *));
 		if (!pages) {
 			KGSL_CORE_ERR("vmalloc(%d) failed\n",
-				sglen * sizeof(struct page *));
+				npages * sizeof(struct page *));
 			return -ENOMEM;
 		}
-		for_each_sg(memdesc->sg, sg, sglen, i)
-			pages[i] = sg_page(sg);
-		memdesc->hostptr = vmap(pages, sglen,
+
+		for_each_sg(memdesc->sg, sg, sglen, i) {
+			struct page *page = sg_page(sg);
+			int j;
+
+			for (j = 0; j < sg->length >> PAGE_SHIFT; j++)
+				pages[count++] = page++;
+		}
+
+
+		memdesc->hostptr = vmap(pages, count,
 					VM_IOREMAP, page_prot);
 		KGSL_STATS_ADD(memdesc->size, kgsl_driver.stats.vmalloc,
 				kgsl_driver.stats.vmalloc_max);
@@ -503,14 +533,15 @@
 static int
 _kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
 			struct kgsl_pagetable *pagetable,
-			size_t size, unsigned int protflags)
+			size_t size, unsigned int flags, unsigned int protflags)
 {
-	int i, order, ret = 0;
-	int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
+	int pcount = 0, order, ret = 0;
+	int j, len, page_size, sglen_alloc, sglen = 0;
 	struct page **pages = NULL;
 	pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
 	void *ptr;
 	struct sysinfo si;
+	unsigned int align;
 
 	/*
 	 * Get the current memory information to be used in deciding if we
@@ -530,23 +561,36 @@
 	if (size >= ((si.freeram << PAGE_SHIFT) - SZ_32M))
 		return -ENOMEM;
 
+	align = (flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
+
+	page_size = (align >= ilog2(SZ_64K) && size >= SZ_64K)
+			? SZ_64K : PAGE_SIZE;
+
+	/*
+	 * There needs to be enough room in the sg structure to be able to
+	 * service the allocation entirely with PAGE_SIZE sized chunks
+	 */
+
+	sglen_alloc = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
 	/*
 	 * Add guard page to the end of the allocation when the
 	 * IOMMU is in use.
 	 */
 
 	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
-		sglen++;
+		sglen_alloc++;
 
 	memdesc->size = size;
 	memdesc->pagetable = pagetable;
+	memdesc->priv |= (flags & KGSL_MEMALIGN_MASK);
 	memdesc->ops = &kgsl_page_alloc_ops;
 
-	memdesc->sg = kgsl_sg_alloc(sglen);
+	memdesc->sg = kgsl_sg_alloc(sglen_alloc);
 
 	if (memdesc->sg == NULL) {
 		KGSL_CORE_ERR("vmalloc(%d) failed\n",
-			sglen * sizeof(struct scatterlist));
+			sglen_alloc * sizeof(struct scatterlist));
 		ret = -ENOMEM;
 		goto done;
 	}
@@ -558,38 +602,52 @@
 	 * two pages; well within the acceptable limits for using kmalloc.
 	 */
 
-	pages = kmalloc(sglen * sizeof(struct page *), GFP_KERNEL);
+	pages = kmalloc(sglen_alloc * sizeof(struct page *), GFP_KERNEL);
 
 	if (pages == NULL) {
 		KGSL_CORE_ERR("kmalloc (%d) failed\n",
-			sglen * sizeof(struct page *));
+			sglen_alloc * sizeof(struct page *));
 		ret = -ENOMEM;
 		goto done;
 	}
 
 	kmemleak_not_leak(memdesc->sg);
 
-	memdesc->sglen = sglen;
-	sg_init_table(memdesc->sg, sglen);
+	memdesc->sglen_alloc = sglen_alloc;
+	sg_init_table(memdesc->sg, sglen_alloc);
 
-	for (i = 0; i < PAGE_ALIGN(size) / PAGE_SIZE; i++) {
+	len = size;
 
-		/*
-		 * Don't use GFP_ZERO here because it is faster to memset the
-		 * range ourselves (see below)
-		 */
+	while (len > 0) {
+		struct page *page;
+		unsigned int gfp_mask = GFP_KERNEL | __GFP_HIGHMEM |
+			__GFP_NOWARN;
+		int j;
 
-		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
-		if (pages[i] == NULL) {
-			ret = -ENOMEM;
-			memdesc->sglen = i;
-			goto done;
+		/* don't waste space at the end of the allocation */
+		if (len < page_size)
+			page_size = PAGE_SIZE;
+
+		if (page_size != PAGE_SIZE)
+			gfp_mask |= __GFP_COMP;
+
+		page = alloc_pages(gfp_mask, get_order(page_size));
+
+		if (page == NULL) {
+			if (page_size != PAGE_SIZE) {
+				page_size = PAGE_SIZE;
+				continue;
+			}
 		}
 
-		sg_set_page(&memdesc->sg[i], pages[i], PAGE_SIZE, 0);
+		for (j = 0; j < page_size >> PAGE_SHIFT; j++)
+			pages[pcount++] = nth_page(page, j);
+
+		sg_set_page(&memdesc->sg[sglen++], page, page_size, 0);
+		len -= page_size;
 	}
 
-	/* ADd the guard page to the end of the sglist */
+	/* Add the guard page to the end of the sglist */
 
 	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) {
 		/*
@@ -603,13 +661,14 @@
 				__GFP_HIGHMEM);
 
 		if (kgsl_guard_page != NULL) {
-			sg_set_page(&memdesc->sg[sglen - 1], kgsl_guard_page,
+			sg_set_page(&memdesc->sg[sglen++], kgsl_guard_page,
 				PAGE_SIZE, 0);
 			memdesc->flags |= KGSL_MEMDESC_GUARD_PAGE;
-		} else
-			memdesc->sglen--;
+		}
 	}
 
+	memdesc->sglen = sglen;
+
 	/*
 	 * All memory that goes to the user has to be zeroed out before it gets
 	 * exposed to userspace. This means that the memory has to be mapped in
@@ -629,18 +688,16 @@
 	 * path
 	 */
 
-	ptr = vmap(pages, i, VM_IOREMAP, page_prot);
+	ptr = vmap(pages, pcount, VM_IOREMAP, page_prot);
 
 	if (ptr != NULL) {
 		memset(ptr, 0, memdesc->size);
 		dmac_flush_range(ptr, ptr + memdesc->size);
 		vunmap(ptr);
 	} else {
-		int j;
-
 		/* Very, very, very slow path */
 
-		for (j = 0; j < i; j++) {
+		for (j = 0; j < pcount; j++) {
 			ptr = kmap_atomic(pages[j]);
 			memset(ptr, 0, PAGE_SIZE);
 			dmac_flush_range(ptr, ptr + PAGE_SIZE);
@@ -683,7 +740,7 @@
 	size = ALIGN(size, PAGE_SIZE * 2);
 
 	ret =  _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
-		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+		0, GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
 	if (!ret)
 		ret = kgsl_page_alloc_map_kernel(memdesc);
 	if (ret)
@@ -707,7 +764,7 @@
 		protflags |= GSL_PT_PAGE_WV;
 
 	return _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
-		protflags);
+		flags, protflags);
 }
 EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);
 
@@ -757,7 +814,7 @@
 	if (memdesc->ops && memdesc->ops->free)
 		memdesc->ops->free(memdesc);
 
-	kgsl_sg_free(memdesc->sg, memdesc->sglen);
+	kgsl_sg_free(memdesc->sg, memdesc->sglen_alloc);
 
 	memset(memdesc, 0, sizeof(*memdesc));
 }
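
The comment in the vmfault hunk above describes walking mixed 64K/4K chunks to locate the 4K page that backs a fault offset. A small stand-alone sketch of that walk, with plain arrays standing in for the scatterlist and the sizes chosen only as an example:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Example chunk layout: two 64K blocks followed by two 4K pages. */
	unsigned int chunk_len[] = { 0x10000, 0x10000, 0x1000, 0x1000 };
	unsigned int nchunks = sizeof(chunk_len) / sizeof(chunk_len[0]);
	unsigned int pgoff = 18;	/* faulting page offset into the mapping */
	unsigned int i;

	for (i = 0; i < nchunks; i++) {
		unsigned int npages = chunk_len[i] >> PAGE_SHIFT;

		if (pgoff < npages) {
			printf("fault served by chunk %u, page %u within it\n",
			       i, pgoff);
			return 0;
		}
		pgoff -= npages;	/* skip past this chunk's pages */
	}
	printf("offset beyond mapping: SIGBUS\n");
	return 0;
}

For page offset 18 this lands on page 2 of the second 64K chunk, which is the same arithmetic the fault handler performs before calling nth_page().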
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 62bfce4..ce1a18e 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -557,6 +557,7 @@
 	memset(pdata, 0, sizeof *pdata);
 
 	pdata->rep = !!of_get_property(node, "autorepeat", NULL);
+	pdata->name = of_get_property(node, "input-name", NULL);
 
 	/* First count the subnodes */
 	pdata->nbuttons = 0;
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index e17e1f8..df66a3a 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -467,6 +467,92 @@
 	return pgprot;
 }
 
+static unsigned long *make_second_level(struct msm_priv *priv,
+					unsigned long *fl_pte)
+{
+	unsigned long *sl;
+	sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
+			get_order(SZ_4K));
+
+	if (!sl) {
+		pr_debug("Could not allocate second level table\n");
+		goto fail;
+	}
+	memset(sl, 0, SZ_4K);
+	clean_pte(sl, sl + NUM_SL_PTE, priv->redirect);
+
+	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
+			FL_TYPE_TABLE);
+
+	clean_pte(fl_pte, fl_pte + 1, priv->redirect);
+fail:
+	return sl;
+}
+
+static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+	int ret = 0;
+
+	if (*sl_pte) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
+		| SL_TYPE_SMALL | pgprot;
+fail:
+	return ret;
+}
+
+static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+	int ret = 0;
+
+	int i;
+
+	for (i = 0; i < 16; i++)
+		if (*(sl_pte+i)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+
+	for (i = 0; i < 16; i++)
+		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
+				| SL_SHARED | SL_TYPE_LARGE | pgprot;
+
+fail:
+	return ret;
+}
+
+
+static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
+{
+	if (*fl_pte)
+		return -EBUSY;
+
+	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
+		| pgprot;
+
+	return 0;
+}
+
+
+static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
+{
+	int i;
+	int ret = 0;
+	for (i = 0; i < 16; i++)
+		if (*(fl_pte+i)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+	for (i = 0; i < 16; i++)
+		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
+			| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+fail:
+	return ret;
+}
+
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 			 phys_addr_t pa, size_t len, int prot)
 {
@@ -514,28 +600,16 @@
 	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
 
 	if (len == SZ_16M) {
-		int i = 0;
-
-		for (i = 0; i < 16; i++)
-			if (*(fl_pte+i)) {
-				ret = -EBUSY;
-				goto fail;
-			}
-
-		for (i = 0; i < 16; i++)
-			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
-				  | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+		ret = fl_16m(fl_pte, pa, pgprot);
+		if (ret)
+			goto fail;
 		clean_pte(fl_pte, fl_pte + 16, priv->redirect);
 	}
 
 	if (len == SZ_1M) {
-		if (*fl_pte) {
-			ret = -EBUSY;
+		ret = fl_1m(fl_pte, pa, pgprot);
+		if (ret)
 			goto fail;
-		}
-
-		*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
-					    | pgprot;
 		clean_pte(fl_pte, fl_pte + 1, priv->redirect);
 	}
 
@@ -543,22 +617,10 @@
 	if (len == SZ_4K || len == SZ_64K) {
 
 		if (*fl_pte == 0) {
-			unsigned long *sl;
-			sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
-							get_order(SZ_4K));
-
-			if (!sl) {
-				pr_debug("Could not allocate second level table\n");
+			if (make_second_level(priv, fl_pte) == NULL) {
 				ret = -ENOMEM;
 				goto fail;
 			}
-			memset(sl, 0, SZ_4K);
-			clean_pte(sl, sl + NUM_SL_PTE, priv->redirect);
-
-			*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
-						      FL_TYPE_TABLE);
-
-			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
 		}
 
 		if (!(*fl_pte & FL_TYPE_TABLE)) {
@@ -572,29 +634,17 @@
 	sl_pte = sl_table + sl_offset;
 
 	if (len == SZ_4K) {
-		if (*sl_pte) {
-			ret = -EBUSY;
+		ret = sl_4k(sl_pte, pa, pgprot);
+		if (ret)
 			goto fail;
-		}
 
-		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
-						    | SL_TYPE_SMALL | pgprot;
 		clean_pte(sl_pte, sl_pte + 1, priv->redirect);
 	}
 
 	if (len == SZ_64K) {
-		int i;
-
-		for (i = 0; i < 16; i++)
-			if (*(sl_pte+i)) {
-				ret = -EBUSY;
-				goto fail;
-			}
-
-		for (i = 0; i < 16; i++)
-			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
-					  | SL_SHARED | SL_TYPE_LARGE | pgprot;
-
+		ret = sl_64k(sl_pte, pa, pgprot);
+		if (ret)
+			goto fail;
 		clean_pte(sl_pte, sl_pte + 16, priv->redirect);
 	}
 
@@ -712,22 +762,28 @@
 	return pa;
 }
 
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+				   int align)
+{
+	return  IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+		&& (len >= align);
+}
+
 static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
 			       struct scatterlist *sg, unsigned int len,
 			       int prot)
 {
 	unsigned int pa;
 	unsigned int offset = 0;
-	unsigned int pgprot;
 	unsigned long *fl_table;
 	unsigned long *fl_pte;
 	unsigned long fl_offset;
-	unsigned long *sl_table;
+	unsigned long *sl_table = NULL;
 	unsigned long sl_offset, sl_start;
-	unsigned int chunk_offset = 0;
-	unsigned int chunk_pa;
+	unsigned int chunk_size, chunk_offset = 0;
 	int ret = 0;
 	struct msm_priv *priv;
+	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;
 
 	mutex_lock(&msm_iommu_lock);
 
@@ -736,49 +792,78 @@
 	priv = domain->priv;
 	fl_table = priv->pgtable;
 
-	pgprot = __get_pgprot(prot, SZ_4K);
+	pgprot4k = __get_pgprot(prot, SZ_4K);
+	pgprot64k = __get_pgprot(prot, SZ_64K);
+	pgprot1m = __get_pgprot(prot, SZ_1M);
+	pgprot16m = __get_pgprot(prot, SZ_16M);
 
-	if (!pgprot) {
+	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
 		ret = -EINVAL;
 		goto fail;
 	}
 
 	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
 	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
-
-	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
-	sl_offset = SL_OFFSET(va);
-
-	chunk_pa = get_phys_addr(sg);
-	if (chunk_pa == 0) {
-		pr_debug("No dma address for sg %p\n", sg);
-		ret = -EINVAL;
-		goto fail;
-	}
+	pa = get_phys_addr(sg);
 
 	while (offset < len) {
-		/* Set up a 2nd level page table if one doesn't exist */
-		if (*fl_pte == 0) {
-			sl_table = (unsigned long *)
-				 __get_free_pages(GFP_KERNEL, get_order(SZ_4K));
+		chunk_size = SZ_4K;
 
-			if (!sl_table) {
-				pr_debug("Could not allocate second level table\n");
+		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+				     SZ_16M))
+			chunk_size = SZ_16M;
+		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+					  SZ_1M))
+			chunk_size = SZ_1M;
+		/* 64k or 4k determined later */
+
+		/* for 1M and 16M, only first level entries are required */
+		if (chunk_size >= SZ_1M) {
+			if (chunk_size == SZ_16M) {
+				ret = fl_16m(fl_pte, pa, pgprot16m);
+				if (ret)
+					goto fail;
+				clean_pte(fl_pte, fl_pte + 16, priv->redirect);
+				fl_pte += 16;
+			} else if (chunk_size == SZ_1M) {
+				ret = fl_1m(fl_pte, pa, pgprot1m);
+				if (ret)
+					goto fail;
+				clean_pte(fl_pte, fl_pte + 1, priv->redirect);
+				fl_pte++;
+			}
+
+			offset += chunk_size;
+			chunk_offset += chunk_size;
+			va += chunk_size;
+			pa += chunk_size;
+
+			if (chunk_offset >= sg->length && offset < len) {
+				chunk_offset = 0;
+				sg = sg_next(sg);
+				pa = get_phys_addr(sg);
+				if (pa == 0) {
+					pr_debug("No dma address for sg %p\n",
+							sg);
+					ret = -EINVAL;
+					goto fail;
+				}
+			}
+			continue;
+		}
+		/* for 4K or 64K, make sure there is a second level table */
+		if (*fl_pte == 0) {
+			if (!make_second_level(priv, fl_pte)) {
 				ret = -ENOMEM;
 				goto fail;
 			}
-
-			memset(sl_table, 0, SZ_4K);
-			clean_pte(sl_table, sl_table + NUM_SL_PTE,
-				  priv->redirect);
-
-			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
-							    FL_TYPE_TABLE);
-			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
-		} else
-			sl_table = (unsigned long *)
-					       __va(((*fl_pte) & FL_BASE_MASK));
-
+		}
+		if (!(*fl_pte & FL_TYPE_TABLE)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+		sl_offset = SL_OFFSET(va);
 		/* Keep track of initial position so we
 		 * don't clean more than we have to
 		 */
@@ -786,21 +871,39 @@
 
 		/* Build the 2nd level page table */
 		while (offset < len && sl_offset < NUM_SL_PTE) {
-			pa = chunk_pa + chunk_offset;
-			sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
-				     pgprot | SL_NG | SL_SHARED | SL_TYPE_SMALL;
-			sl_offset++;
-			offset += SZ_4K;
 
-			chunk_offset += SZ_4K;
+			/* Map a large 64K page if the chunk is large enough and
+			 * the pa and va are aligned
+			 */
+
+			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+					     SZ_64K))
+				chunk_size = SZ_64K;
+			else
+				chunk_size = SZ_4K;
+
+			if (chunk_size == SZ_4K) {
+				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
+				sl_offset++;
+			} else {
+				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
+				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
+				sl_offset += 16;
+			}
+
+
+			offset += chunk_size;
+			chunk_offset += chunk_size;
+			va += chunk_size;
+			pa += chunk_size;
 
 			if (chunk_offset >= sg->length && offset < len) {
 				chunk_offset = 0;
 				sg = sg_next(sg);
-				chunk_pa = get_phys_addr(sg);
-				if (chunk_pa == 0) {
+				pa = get_phys_addr(sg);
+				if (pa == 0) {
 					pr_debug("No dma address for sg %p\n",
-						 sg);
+							sg);
 					ret = -EINVAL;
 					goto fail;
 				}
@@ -808,7 +911,7 @@
 		}
 
 		clean_pte(sl_table + sl_start, sl_table + sl_offset,
-			  priv->redirect);
+				priv->redirect);
 
 		fl_pte++;
 		sl_offset = 0;
@@ -842,45 +945,53 @@
 	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
 	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
 
-	sl_start = SL_OFFSET(va);
-
 	while (offset < len) {
-		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
-		sl_end = ((len - offset) / SZ_4K) + sl_start;
+		if (*fl_pte & FL_TYPE_TABLE) {
+			sl_start = SL_OFFSET(va);
+			sl_table =  __va(((*fl_pte) & FL_BASE_MASK));
+			sl_end = ((len - offset) / SZ_4K) + sl_start;
 
-		if (sl_end > NUM_SL_PTE)
-			sl_end = NUM_SL_PTE;
+			if (sl_end > NUM_SL_PTE)
+				sl_end = NUM_SL_PTE;
 
-		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
-		clean_pte(sl_table + sl_start, sl_table + sl_end,
-			  priv->redirect);
+			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
+			clean_pte(sl_table + sl_start, sl_table + sl_end,
+					priv->redirect);
 
-		offset += (sl_end - sl_start) * SZ_4K;
+			offset += (sl_end - sl_start) * SZ_4K;
+			va += (sl_end - sl_start) * SZ_4K;
 
-		/* Unmap and free the 2nd level table if all mappings in it
-		 * were removed. This saves memory, but the table will need
-		 * to be re-allocated the next time someone tries to map these
-		 * VAs.
-		 */
-		used = 0;
+			/* Unmap and free the 2nd level table if all mappings
+			 * in it were removed. This saves memory, but the table
+			 * will need to be re-allocated the next time someone
+			 * tries to map these VAs.
+			 */
+			used = 0;
 
-		/* If we just unmapped the whole table, don't bother
-		 * seeing if there are still used entries left.
-		 */
-		if (sl_end - sl_start != NUM_SL_PTE)
-			for (i = 0; i < NUM_SL_PTE; i++)
-				if (sl_table[i]) {
-					used = 1;
-					break;
-				}
-		if (!used) {
-			free_page((unsigned long)sl_table);
+			/* If we just unmapped the whole table, don't bother
+			 * seeing if there are still used entries left.
+			 */
+			if (sl_end - sl_start != NUM_SL_PTE)
+				for (i = 0; i < NUM_SL_PTE; i++)
+					if (sl_table[i]) {
+						used = 1;
+						break;
+					}
+			if (!used) {
+				free_page((unsigned long)sl_table);
+				*fl_pte = 0;
+
+				clean_pte(fl_pte, fl_pte + 1, priv->redirect);
+			}
+
+			sl_start = 0;
+		} else {
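+			/*
+			 * Not a second-level table: this is a first-level
+			 * section mapping, so clear the entry and advance by
+			 * 1MB.
+			 */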
 			*fl_pte = 0;
-
 			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
+			va += SZ_1M;
+			offset += SZ_1M;
+			sl_start = 0;
 		}
-
-		sl_start = 0;
 		fl_pte++;
 	}
 
diff --git a/drivers/media/video/msm/Kconfig b/drivers/media/video/msm/Kconfig
index e9b4e2b..be9c43c 100644
--- a/drivers/media/video/msm/Kconfig
+++ b/drivers/media/video/msm/Kconfig
@@ -208,6 +208,15 @@
 	  two mipi lanes, required for msm8625 platform.
 	  Say Y here if this is msm8625 variant platform.
 
+config IMX135
+	bool "Sensor imx135 (Sony 13MP)"
+	depends on MSM_CAMERA
+	---help---
+	  Support for IMX135 sensor driver.
+	  This is a Sony 13MP Bayer Sensor with autofocus and video HDR
+	  support.
+	  Say Y here if the platform uses the IMX135 sensor.
+
 config VB6801
 	bool "Sensor vb6801"
 	depends on MSM_CAMERA && !ARCH_MSM8X60 && !MSM_CAMERA_V4L2
diff --git a/drivers/media/video/msm/msm_mctl_pp.c b/drivers/media/video/msm/msm_mctl_pp.c
index 105426e..7155d4c 100644
--- a/drivers/media/video/msm/msm_mctl_pp.c
+++ b/drivers/media/video/msm/msm_mctl_pp.c
@@ -681,6 +681,7 @@
 	ret_frame.dirty = 0;
 	ret_frame.node_type = frame.node_type;
 	ret_frame.timestamp = frame.timestamp;
+	ret_frame.frame_id  = frame.frame_id;
 	D("%s Frame done id: %d\n", __func__, frame.frame_id);
 	rc = msm_mctl_buf_done_pp(p_mctl, &buf_handle, &buf, &ret_frame);
 	return rc;
diff --git a/drivers/media/video/msm/sensors/Makefile b/drivers/media/video/msm/sensors/Makefile
index cd228a1..a70a632 100644
--- a/drivers/media/video/msm/sensors/Makefile
+++ b/drivers/media/video/msm/sensors/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_OV8825) += ov8825_v4l2.o
 obj-$(CONFIG_IMX074) += imx074_v4l2.o
 obj-$(CONFIG_S5K3L1YX) += s5k3l1yx.o
+obj-$(CONFIG_IMX135) += imx135_v4l2.o
 obj-$(CONFIG_OV2720) += ov2720.o
 obj-$(CONFIG_MT9M114) += mt9m114_v4l2.o
 obj-$(CONFIG_S5K4E1) += s5k4e1_v4l2.o
diff --git a/drivers/media/video/msm/sensors/imx135_v4l2.c b/drivers/media/video/msm/sensors/imx135_v4l2.c
new file mode 100644
index 0000000..f480923
--- /dev/null
+++ b/drivers/media/video/msm/sensors/imx135_v4l2.c
@@ -0,0 +1,553 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_sensor.h"
+#define SENSOR_NAME "imx135"
+#define PLATFORM_DRIVER_NAME "msm_camera_imx135"
+#define imx135_obj imx135_##obj
+
+DEFINE_MUTEX(imx135_mut);
+static struct msm_sensor_ctrl_t imx135_s_ctrl;
+
+static struct msm_camera_i2c_reg_conf imx135_start_settings[] = {
+	{0x0100, 0x01},
+};
+
+static struct msm_camera_i2c_reg_conf imx135_stop_settings[] = {
+	{0x0100, 0x00},
+};
+
+static struct msm_camera_i2c_reg_conf imx135_groupon_settings[] = {
+	{0x104, 0x01},
+};
+
+static struct msm_camera_i2c_reg_conf imx135_groupoff_settings[] = {
+	{0x104, 0x00},
+};
+
+static struct msm_camera_i2c_reg_conf imx135_recommend_settings[] = {
+/* Recommended global settings */
+	{0x0220, 0x01},
+	{0x3008, 0xB0},
+	{0x320A, 0x01},
+	{0x320D, 0x10},
+	{0x3216, 0x2E},
+	{0x3230, 0x0A},
+	{0x3228, 0x05},
+	{0x3229, 0x02},
+	{0x322C, 0x02},
+	{0x3302, 0x10},
+	{0x3390, 0x45},
+	{0x3409, 0x0C},
+	{0x340B, 0xF5},
+	{0x340C, 0x2D},
+	{0x3412, 0x41},
+	{0x3413, 0xAD},
+	{0x3414, 0x1E},
+	{0x3427, 0x04},
+	{0x3480, 0x1E},
+	{0x3484, 0x1E},
+	{0x3488, 0x1E},
+	{0x348C, 0x1E},
+	{0x3490, 0x1E},
+	{0x3494, 0x1E},
+	{0x349C, 0x38},
+	{0x34A3, 0x38},
+	{0x3511, 0x8F},
+	{0x3518, 0x00},
+	{0x3519, 0x94},
+	{0x3833, 0x20},
+	{0x3893, 0x01},
+	{0x38C2, 0x08},
+	{0x3C09, 0x01},
+	{0x4300, 0x00},
+	{0x4316, 0x12},
+	{0x4317, 0x22},
+	{0x431A, 0x00},
+	{0x4324, 0x03},
+	{0x4325, 0x20},
+	{0x4326, 0x03},
+	{0x4327, 0x84},
+	{0x4328, 0x03},
+	{0x4329, 0x20},
+	{0x432A, 0x03},
+	{0x432B, 0x84},
+	{0x4401, 0x3F},
+	{0x4412, 0x3F},
+	{0x4413, 0xFF},
+	{0x4446, 0x3F},
+	{0x4447, 0xFF},
+	{0x4452, 0x00},
+	{0x4453, 0xA0},
+	{0x4454, 0x08},
+	{0x4455, 0x00},
+	{0x4458, 0x18},
+	{0x4459, 0x18},
+	{0x445A, 0x3F},
+	{0x445B, 0x3A},
+	{0x4463, 0x00},
+	{0x4465, 0x00},
+	{0x446E, 0x01},
+/* Image Quality Settings */
+/* Bypass Settings */
+	{0x4203, 0x48},
+/* Defect Correction Recommended Setting */
+	{0x4100, 0xE0},
+	{0x4102, 0x0B},
+/* RGB Filter Recommended Setting */
+	{0x4281, 0x22},
+	{0x4282, 0x82},
+	{0x4284, 0x00},
+	{0x4287, 0x18},
+	{0x4288, 0x00},
+	{0x428B, 0x1E},
+	{0x428C, 0x00},
+	{0x428F, 0x08},
+/* DLC/ADP Recommended Setting */
+	{0x4207, 0x00},
+	{0x4218, 0x02},
+	{0x421B, 0x00},
+	{0x4222, 0x04},
+	{0x4223, 0x44},
+	{0x4224, 0x46},
+	{0x4225, 0xFF},
+	{0x4226, 0x14},
+	{0x4227, 0xF2},
+	{0x4228, 0xFC},
+	{0x4229, 0x60},
+	{0x422A, 0xFA},
+	{0x422B, 0xFE},
+	{0x422C, 0xFE},
+/* Color Artifact Recommended Setting */
+	{0x4243, 0xAA}
+};
+
+/* IMX135 mode 1/2 HV at 24MHz */
+static struct msm_camera_i2c_reg_conf imx135_prev_settings[] = {
+/* Clock Setting */
+	{0x011E, 0x18},
+	{0x011F, 0x00},
+	{0x0301, 0x05},
+	{0x0303, 0x01},
+	{0x0304, 0x0B},
+	{0x0305, 0x03},
+	{0x0306, 0x01},
+	{0x0307, 0x5E},
+	{0x0309, 0x05},
+	{0x030B, 0x02},
+	{0x030C, 0x00},
+	{0x030D, 0x71},
+	{0x030E, 0x01},
+	{0x3A06, 0x12},
+/* Mode setting */
+	{0x0101, 0x00},
+	{0x0105, 0x00},
+	{0x0108, 0x03},
+	{0x0109, 0x30},
+	{0x010B, 0x32},
+	{0x0112, 0x0A},
+	{0x0113, 0x0A},
+	{0x0381, 0x01},
+	{0x0383, 0x01},
+	{0x0385, 0x01},
+	{0x0387, 0x01},
+	{0x0390, 0x01}, /* binning_en = 1 */
+	{0x0391, 0x22}, /* binning_type */
+	{0x0392, 0x00}, /* binning_mode = 0 (average) */
+	{0x0401, 0x00},
+	{0x0404, 0x00},
+	{0x0405, 0x10},
+	{0x4083, 0x01},
+/* Size setting*/
+	{0x0340, 0x0A}, /* frame_length_lines = 2680*/
+	{0x0341, 0x78},
+	{0x034C, 0x08},
+	{0x034D, 0x38},
+	{0x034E, 0x06},
+	{0x034F, 0x18},
+	{0x0354, 0x08},
+	{0x0355, 0x38},
+	{0x0356, 0x06},
+	{0x0357, 0x18},
+	{0x3310, 0x08},
+	{0x3311, 0x38},
+	{0x3312, 0x06},
+	{0x3313, 0x18},
+	{0x331C, 0x02},
+	{0x331D, 0xC0},
+	{0x33B0, 0x04},
+	{0x33B1, 0x00},
+	{0x33B3, 0x00},
+	{0x7006, 0x04},
+/* Global Timing Setting */
+	{0x0830, 0x67},
+	{0x0831, 0x27},
+	{0x0832, 0x47},
+	{0x0833, 0x27},
+	{0x0834, 0x27},
+	{0x0835, 0x1F},
+	{0x0836, 0x87},
+	{0x0837, 0x2F},
+	{0x0839, 0x1F},
+	{0x083A, 0x17},
+	{0x083B, 0x02},
+/* Integration Time Setting */
+	{0x0254, 0x00},
+/* Gain Setting */
+	{0x0205, 0x33}
+};
+
+/* IMX135 Mode Fullsize at 24MHz */
+static struct msm_camera_i2c_reg_conf imx135_snap_settings[] = {
+/* Clock Setting */
+	{0x011E, 0x18},
+	{0x011F, 0x00},
+	{0x0301, 0x05},
+	{0x0303, 0x01},
+	{0x0304, 0x0B},
+	{0x0305, 0x03},
+	{0x0306, 0x01},
+	{0x0307, 0x5E},
+	{0x0309, 0x05},
+	{0x030B, 0x01},
+	{0x030C, 0x00},
+	{0x030D, 0x60}, /* pll_multiplier = 96 */
+	{0x030E, 0x01},
+	{0x3A06, 0x11},
+/* Mode setting */
+	{0x0101, 0x00},
+	{0x0105, 0x00},
+	{0x0108, 0x03},
+	{0x0109, 0x30},
+	{0x010B, 0x32},
+	{0x0112, 0x0A},
+	{0x0113, 0x0A},
+	{0x0381, 0x01},
+	{0x0383, 0x01},
+	{0x0385, 0x01},
+	{0x0387, 0x01},
+	{0x0390, 0x00},
+	{0x0391, 0x11},
+	{0x0392, 0x00},
+	{0x0401, 0x00},
+	{0x0404, 0x00},
+	{0x0405, 0x10},
+	{0x4083, 0x01},
+/* Size setting */
+	{0x0340, 0x0C},
+	{0x0341, 0x46},
+	{0x034C, 0x10},
+	{0x034D, 0x70},
+	{0x034E, 0x0C},
+	{0x034F, 0x30},
+	{0x0354, 0x10},
+	{0x0355, 0x70},
+	{0x0356, 0x0C},
+	{0x0357, 0x30},
+	{0x3310, 0x10},
+	{0x3311, 0x70},
+	{0x3312, 0x0C},
+	{0x3313, 0x30},
+	{0x331C, 0x06},
+	{0x331D, 0x00},
+	{0x33B0, 0x04},
+	{0x33B1, 0x00},
+	{0x33B3, 0x00},
+	{0x7006, 0x04},
+/* Global Timing Setting */
+	{0x0830, 0x7F},
+	{0x0831, 0x37},
+	{0x0832, 0x5F},
+	{0x0833, 0x37},
+	{0x0834, 0x37},
+	{0x0835, 0x3F},
+	{0x0836, 0xC7},
+	{0x0837, 0x3F},
+	{0x0839, 0x1F},
+	{0x083A, 0x17},
+	{0x083B, 0x02},
+/* Integration Time Setting */
+	{0x0250, 0x0B},
+/* Gain Setting */
+	{0x0205, 0x33}
+};
+
+
+static struct msm_camera_i2c_reg_conf imx135_hdr_settings[] = {
+/* Clock Setting */
+	{0x011E, 0x18},
+	{0x011F, 0x00},
+	{0x0301, 0x05},
+	{0x0303, 0x01},
+	{0x0304, 0x0B},
+	{0x0305, 0x03},
+	{0x0306, 0x01},
+	{0x0307, 0x5E},
+	{0x0309, 0x05},
+	{0x030B, 0x02},
+	{0x030C, 0x00},
+	{0x030D, 0x71},
+	{0x030E, 0x01},
+	{0x3A06, 0x12},
+/* Mode setting */
+	{0x0101, 0x00},
+	{0x0105, 0x00},
+	{0x0108, 0x03},
+	{0x0109, 0x30},
+	{0x010B, 0x32},
+	{0x0112, 0x0E},
+	{0x0113, 0x0A},
+	{0x0381, 0x01},
+	{0x0383, 0x01},
+	{0x0385, 0x01},
+	{0x0387, 0x01},
+	{0x0390, 0x00},
+	{0x0391, 0x11},
+	{0x0392, 0x00},
+	{0x0401, 0x00},
+	{0x0404, 0x00},
+	{0x0405, 0x10},
+	{0x4083, 0x01},
+/* Size setting */
+	{0x0340, 0x0C},
+	{0x0341, 0x48},
+	{0x034C, 0x08},
+	{0x034D, 0x38},
+	{0x034E, 0x06},
+	{0x034F, 0x18},
+	{0x0354, 0x08},
+	{0x0355, 0x38},
+	{0x0356, 0x06},
+	{0x0357, 0x18},
+	{0x3310, 0x08},
+	{0x3311, 0x38},
+	{0x3312, 0x06},
+	{0x3313, 0x18},
+	{0x331C, 0x02},
+	{0x331D, 0xA0},
+	{0x33B0, 0x08},
+	{0x33B1, 0x38},
+	{0x33B3, 0x01},
+	{0x7006, 0x04},
+/* Global Timing Setting */
+	{0x0830, 0x67},
+	{0x0831, 0x27},
+	{0x0832, 0x47},
+	{0x0833, 0x27},
+	{0x0834, 0x27},
+	{0x0835, 0x1F},
+	{0x0836, 0x87},
+	{0x0837, 0x2F},
+	{0x0839, 0x1F},
+	{0x083A, 0x17},
+	{0x083B, 0x02},
+/* Integration Time Setting */
+	{0x0250, 0x0B},
+/* Gain Setting */
+	{0x0205, 0x33}
+};
+
+static struct v4l2_subdev_info imx135_subdev_info[] = {
+	{
+	.code		= V4L2_MBUS_FMT_SBGGR10_1X10,
+	.colorspace	= V4L2_COLORSPACE_JPEG,
+	.fmt		= 1,
+	.order		= 0,
+	},
+	/* more can be supported, to be added later */
+};
+
+static struct msm_camera_i2c_conf_array imx135_init_conf[] = {
+	{&imx135_recommend_settings[0],
+	ARRAY_SIZE(imx135_recommend_settings), 0, MSM_CAMERA_I2C_BYTE_DATA}
+};
+
+static struct msm_camera_i2c_conf_array imx135_confs[] = {
+	{&imx135_snap_settings[0],
+	ARRAY_SIZE(imx135_snap_settings), 0, MSM_CAMERA_I2C_BYTE_DATA},
+	{&imx135_prev_settings[0],
+	ARRAY_SIZE(imx135_prev_settings), 0, MSM_CAMERA_I2C_BYTE_DATA},
+	{&imx135_hdr_settings[0],
+	ARRAY_SIZE(imx135_hdr_settings), 0, MSM_CAMERA_I2C_BYTE_DATA},
+};
+
+static struct msm_sensor_output_info_t imx135_dimensions[] = {
+	/* RES0 snapshot(FULL SIZE) */
+	{
+		.x_output = 4208,
+		.y_output = 3120,
+		.line_length_pclk = 4572,
+		.frame_length_lines = 3142,
+		.vt_pixel_clk = 307200000,
+		.op_pixel_clk = 307200000,
+		.binning_factor = 1,
+	},
+	/* RES1 4:3 preview(1/2HV QTR SIZE) */
+	{
+		.x_output = 2104,
+		.y_output = 1560,
+		.line_length_pclk = 4572,
+		.frame_length_lines = 2680,
+		.vt_pixel_clk = 361600000,
+		.op_pixel_clk = 180800000,
+		.binning_factor = 1,
+	},
+	/* RES2 4:3 HDR movie mode */
+	{
+		.x_output = 2104,
+		.y_output = 1560,
+		.line_length_pclk = 4572,
+		.frame_length_lines = 3144,
+		.vt_pixel_clk = 361600000,
+		.op_pixel_clk = 180800000,
+		.binning_factor = 1,
+	},
+};
+
+static struct msm_sensor_output_reg_addr_t imx135_reg_addr = {
+	.x_output = 0x34C,
+	.y_output = 0x34E,
+	.line_length_pclk = 0x342,
+	.frame_length_lines = 0x340,
+};
+
+static struct msm_sensor_id_info_t imx135_id_info = {
+	.sensor_id_reg_addr = 0x0000,
+	.sensor_id = 0x0087,
+};
+
+static struct msm_sensor_exp_gain_info_t imx135_exp_gain_info = {
+	.coarse_int_time_addr = 0x202,
+	.global_gain_addr = 0x205,
+	.vert_offset = 4,
+};
+
+static const struct i2c_device_id imx135_i2c_id[] = {
+	{SENSOR_NAME, (kernel_ulong_t)&imx135_s_ctrl},
+	{ }
+};
+
+static struct i2c_driver imx135_i2c_driver = {
+	.id_table = imx135_i2c_id,
+	.probe  = msm_sensor_i2c_probe,
+	.driver = {
+		.name = SENSOR_NAME,
+	},
+};
+
+static struct msm_camera_i2c_client imx135_sensor_i2c_client = {
+	.addr_type = MSM_CAMERA_I2C_WORD_ADDR,
+};
+
+static int __init msm_sensor_init_module(void)
+{
+	return i2c_add_driver(&imx135_i2c_driver);
+}
+
+static struct v4l2_subdev_core_ops imx135_subdev_core_ops = {
+	.ioctl = msm_sensor_subdev_ioctl,
+	.s_power = msm_sensor_power,
+};
+
+static struct v4l2_subdev_video_ops imx135_subdev_video_ops = {
+	.enum_mbus_fmt = msm_sensor_v4l2_enum_fmt,
+};
+
+static struct v4l2_subdev_ops imx135_subdev_ops = {
+	.core = &imx135_subdev_core_ops,
+	.video  = &imx135_subdev_video_ops,
+};
+
+int32_t imx135_write_exp_gain(struct msm_sensor_ctrl_t *s_ctrl,
+		uint16_t gain, uint32_t line)
+{
+	uint32_t fl_lines;
+	uint8_t offset;
+	fl_lines = s_ctrl->curr_frame_length_lines;
+	fl_lines = (fl_lines * s_ctrl->fps_divider) / Q10;
+	offset = s_ctrl->sensor_exp_gain_info->vert_offset;
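+	/*
+	 * If the requested exposure (in lines) does not fit within the
+	 * current frame length minus the vertical offset, stretch the frame
+	 * length to accommodate it.
+	 */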
+	if (line > (fl_lines - offset))
+		fl_lines = line + offset;
+
+	s_ctrl->func_tbl->sensor_group_hold_on(s_ctrl);
+	msm_camera_i2c_write(s_ctrl->sensor_i2c_client,
+		s_ctrl->sensor_output_reg_addr->frame_length_lines, fl_lines,
+		MSM_CAMERA_I2C_WORD_DATA);
+	msm_camera_i2c_write(s_ctrl->sensor_i2c_client,
+		s_ctrl->sensor_exp_gain_info->coarse_int_time_addr, line,
+		MSM_CAMERA_I2C_WORD_DATA);
+	msm_camera_i2c_write(s_ctrl->sensor_i2c_client,
+		s_ctrl->sensor_exp_gain_info->global_gain_addr, gain,
+		MSM_CAMERA_I2C_BYTE_DATA);
+	s_ctrl->func_tbl->sensor_group_hold_off(s_ctrl);
+	return 0;
+}
+
+static struct msm_sensor_fn_t imx135_func_tbl = {
+	.sensor_start_stream = msm_sensor_start_stream,
+	.sensor_stop_stream = msm_sensor_stop_stream,
+	.sensor_group_hold_on = msm_sensor_group_hold_on,
+	.sensor_group_hold_off = msm_sensor_group_hold_off,
+	.sensor_set_fps = msm_sensor_set_fps,
+	.sensor_write_exp_gain = imx135_write_exp_gain,
+	.sensor_write_snapshot_exp_gain = imx135_write_exp_gain,
+	.sensor_setting = msm_sensor_setting,
+	.sensor_csi_setting = msm_sensor_setting1,
+	.sensor_set_sensor_mode = msm_sensor_set_sensor_mode,
+	.sensor_mode_init = msm_sensor_mode_init,
+	.sensor_get_output_info = msm_sensor_get_output_info,
+	.sensor_config = msm_sensor_config,
+	.sensor_power_up = msm_sensor_power_up,
+	.sensor_power_down = msm_sensor_power_down,
+	.sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines1,
+	.sensor_get_csi_params = msm_sensor_get_csi_params,
+};
+
+static struct msm_sensor_reg_t imx135_regs = {
+	.default_data_type = MSM_CAMERA_I2C_BYTE_DATA,
+	.start_stream_conf = imx135_start_settings,
+	.start_stream_conf_size = ARRAY_SIZE(imx135_start_settings),
+	.stop_stream_conf = imx135_stop_settings,
+	.stop_stream_conf_size = ARRAY_SIZE(imx135_stop_settings),
+	.group_hold_on_conf = imx135_groupon_settings,
+	.group_hold_on_conf_size = ARRAY_SIZE(imx135_groupon_settings),
+	.group_hold_off_conf = imx135_groupoff_settings,
+	.group_hold_off_conf_size =
+		ARRAY_SIZE(imx135_groupoff_settings),
+	.init_settings = &imx135_init_conf[0],
+	.init_size = ARRAY_SIZE(imx135_init_conf),
+	.mode_settings = &imx135_confs[0],
+	.output_settings = &imx135_dimensions[0],
+	.num_conf = ARRAY_SIZE(imx135_confs),
+};
+
+static struct msm_sensor_ctrl_t imx135_s_ctrl = {
+	.msm_sensor_reg = &imx135_regs,
+	.sensor_i2c_client = &imx135_sensor_i2c_client,
+	.sensor_i2c_addr = 0x20,
+	.sensor_output_reg_addr = &imx135_reg_addr,
+	.sensor_id_info = &imx135_id_info,
+	.sensor_exp_gain_info = &imx135_exp_gain_info,
+	.cam_mode = MSM_SENSOR_MODE_INVALID,
+	.msm_sensor_mutex = &imx135_mut,
+	.sensor_i2c_driver = &imx135_i2c_driver,
+	.sensor_v4l2_subdev_info = imx135_subdev_info,
+	.sensor_v4l2_subdev_info_size = ARRAY_SIZE(imx135_subdev_info),
+	.sensor_v4l2_subdev_ops = &imx135_subdev_ops,
+	.func_tbl = &imx135_func_tbl,
+	.clk_rate = MSM_SENSOR_MCLK_24HZ,
+};
+
+module_init(msm_sensor_init_module);
+MODULE_DESCRIPTION("Sony 13MP Bayer sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index f0d0e73..8ac35d9 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -1437,6 +1437,7 @@
 				prop->multi_slice);
 			break;
 		}
+		hfi->slice_size = prop->slice_size;
 		pkt->size += sizeof(u32) + sizeof(struct
 					hfi_multi_slice_control);
 		break;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index bc8eccf..12f896e 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -38,6 +38,7 @@
 #include <mach/msm_bus_board.h>
 #include <mach/scm.h>
 #include <mach/peripheral-loader.h>
+#include <mach/socinfo.h>
 #include "qseecom_legacy.h"
 
 #define QSEECOM_DEV			"qseecom"
@@ -179,6 +180,7 @@
 	int               send_resp_flag;
 
 	uint32_t          qseos_version;
+	struct device *pdev;
 };
 
 struct qseecom_client_handle {
@@ -208,9 +210,16 @@
 	atomic_t          ioctl_count;
 };
 
+static struct clk *ce_core_clk;
+static struct clk *ce_clk;
+static struct clk *ce_core_src_clk;
+static struct clk *ce_bus_clk;
+
 /* Function proto types */
 static int qsee_vote_for_clock(int32_t);
 static void qsee_disable_clock_vote(int32_t);
+static int __qseecom_init_clk(void);
+static void __qseecom_disable_clk(void);
 
 static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
 		struct qseecom_register_listener_req *svc)
@@ -1735,9 +1744,131 @@
 		.release = qseecom_release
 };
 
+static int __qseecom_init_clk(void)
+{
+	int rc = 0;
+	struct device *pdev;
+
+	pdev = qseecom.pdev;
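+	/*
+	 * Acquire the crypto engine clocks: the optional core source clock
+	 * (rate-set to 100MHz), then the core, interface and bus clocks,
+	 * which are prepared and enabled in that order below.
+	 */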
+	/* Get CE3 src core clk. */
+	ce_core_src_clk = clk_get(pdev, "core_clk_src");
+	if (!IS_ERR(ce_core_src_clk)) {
+		/* Set the core src clk @100Mhz */
+		rc = clk_set_rate(ce_core_src_clk, 100000000);
+		if (rc) {
+			clk_put(ce_core_src_clk);
+			pr_err("Unable to set the core src clk @100Mhz.\n");
+			goto err_clk;
+		}
+	} else {
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		ce_core_src_clk = NULL;
+	}
+
+	/* Get CE core clk */
+	ce_core_clk = clk_get(pdev, "core_clk");
+	if (IS_ERR(ce_core_clk)) {
+		rc = PTR_ERR(ce_core_clk);
+		pr_err("Unable to get CE core clk\n");
+		if (ce_core_src_clk != NULL)
+			clk_put(ce_core_src_clk);
+		goto err_clk;
+	}
+
+	/* Get CE Interface clk */
+	ce_clk = clk_get(pdev, "iface_clk");
+	if (IS_ERR(ce_clk)) {
+		rc = PTR_ERR(ce_clk);
+		pr_err("Unable to get CE interface clk\n");
+		if (ce_core_src_clk != NULL)
+			clk_put(ce_core_src_clk);
+		clk_put(ce_core_clk);
+		goto err_clk;
+	}
+
+	/* Get CE AXI clk */
+	ce_bus_clk = clk_get(pdev, "bus_clk");
+	if (IS_ERR(ce_bus_clk)) {
+		rc = PTR_ERR(ce_bus_clk);
+		pr_err("Unable to get CE BUS interface clk\n");
+		if (ce_core_src_clk != NULL)
+			clk_put(ce_core_src_clk);
+		clk_put(ce_core_clk);
+		clk_put(ce_clk);
+		goto err_clk;
+	}
+
+	/* Enable CE core clk */
+	rc = clk_prepare_enable(ce_core_clk);
+	if (rc) {
+		pr_err("Unable to enable/prepare CE core clk\n");
+		if (ce_core_src_clk != NULL)
+			clk_put(ce_core_src_clk);
+		clk_put(ce_core_clk);
+		clk_put(ce_clk);
+		goto err_clk;
+	} else {
+		/* Enable CE clk */
+		rc = clk_prepare_enable(ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			clk_disable_unprepare(ce_core_clk);
+			if (ce_core_src_clk != NULL)
+				clk_put(ce_core_src_clk);
+			clk_put(ce_core_clk);
+			clk_put(ce_clk);
+			goto err_clk;
+		} else {
+			/* Enable AXI clk */
+			rc = clk_prepare_enable(ce_bus_clk);
+			if (rc) {
+				pr_err("Unable to enable/prepare CE iface clk\n");
+				clk_disable_unprepare(ce_core_clk);
+				clk_disable_unprepare(ce_clk);
+				if (ce_core_src_clk != NULL)
+					clk_put(ce_core_src_clk);
+				clk_put(ce_core_clk);
+				clk_put(ce_clk);
+				goto err_clk;
+			}
+		}
+	}
+	return rc;
+
+err_clk:
+	/*
+	 * Every failure path above has already disabled and released the
+	 * clocks it managed to obtain, so only report the error here to
+	 * avoid a double clk_put().
+	 */
+	if (rc)
+		pr_err("Unable to init CE clks, rc = %d\n", rc);
+	return rc;
+}
+
+static void __qseecom_disable_clk(void)
+{
+	clk_disable_unprepare(ce_clk);
+	clk_disable_unprepare(ce_core_clk);
+	clk_disable_unprepare(ce_bus_clk);
+	if (ce_core_src_clk != NULL)
+		clk_put(ce_core_src_clk);
+	clk_put(ce_clk);
+	clk_put(ce_core_clk);
+	clk_put(ce_bus_clk);
+}
+
 static int __devinit qseecom_probe(struct platform_device *pdev)
 {
 	int rc;
+	int ret;
 	struct device *class_dev;
 	char qsee_not_legacy = 0;
 	struct msm_bus_scale_pdata *qseecom_platform_support;
@@ -1796,6 +1927,8 @@
 		pil = NULL;
 		pil_ref_cnt = 0;
 	}
+
+	qseecom.pdev = class_dev;
 	/* Create ION msm client */
 	qseecom.ion_clnt = msm_ion_client_create(0x03, "qseecom-kernel");
 	if (qseecom.ion_clnt == NULL) {
@@ -1805,17 +1938,23 @@
 	}
 
 	/* register client for bus scaling */
-	if (!pdev->dev.of_node) {
+	if (pdev->dev.of_node) {
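+		/*
+		 * Device tree targets: bring up the CE clocks and take the
+		 * bus scaling data from the device tree node.
+		 */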
+		ret = __qseecom_init_clk();
+		if (ret)
+			goto err;
+		qseecom_platform_support = (struct msm_bus_scale_pdata *)
+						msm_bus_cl_get_pdata(pdev);
+	} else {
 		qseecom_platform_support = (struct msm_bus_scale_pdata *)
 						pdev->dev.platform_data;
-		qsee_perf_client = msm_bus_scale_register_client(
-						qseecom_platform_support);
-
-		if (!qsee_perf_client)
-			pr_err("Unable to register bus client\n");
 	}
-	return 0;
 
+	qsee_perf_client = msm_bus_scale_register_client(
+					qseecom_platform_support);
+
+	if (!qsee_perf_client)
+		pr_err("Unable to register bus client\n");
+	return 0;
 err:
 	device_destroy(driver_class, qseecom_device_no);
 class_destroy:
@@ -1856,6 +1995,9 @@
 
 static void __devexit qseecom_exit(void)
 {
+
+	__qseecom_disable_clk();
+
 	device_destroy(driver_class, qseecom_device_no);
 	class_destroy(driver_class);
 	unregister_chrdev_region(qseecom_device_no, 1);
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index ebb4afe..33f0600 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -76,3 +76,13 @@
 
 	  This driver is only of interest to those developing or
 	  testing a host driver. Most people should say N here.
+
+config MMC_BLOCK_TEST
+	tristate "MMC block test"
+	depends on MMC_BLOCK && IOSCHED_TEST
+	help
+	  MMC block test can be used with test iosched to test the MMC block
+	  device.
+	  Currently used to test eMMC 4.5 features (packed commands, sanitize,
+	  BKOPs).
+
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406..d55107f 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -8,3 +8,4 @@
 
 obj-$(CONFIG_SDIO_UART)		+= sdio_uart.o
 
+obj-$(CONFIG_MMC_BLOCK_TEST)		+= mmc_block_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a39014c..7387d9a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -127,17 +127,6 @@
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-	MMC_BLK_SUCCESS = 0,
-	MMC_BLK_PARTIAL,
-	MMC_BLK_CMD_ERR,
-	MMC_BLK_RETRY,
-	MMC_BLK_ABORT,
-	MMC_BLK_DATA_ERR,
-	MMC_BLK_ECC_ERR,
-	MMC_BLK_NOMEDIUM,
-};
-
 enum {
 	MMC_PACKED_N_IDX = -1,
 	MMC_PACKED_N_ZERO,
@@ -1480,6 +1469,64 @@
 }
 EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
 
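+/*
+ * Dump the card's write packing statistics to the kernel log: how many
+ * requests were packed together in each packing event, and the reasons
+ * packing was stopped.
+ */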
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+	int i;
+	int max_num_of_packed_reqs = 0;
+
+	if ((!card) || (!card->wr_pack_stats.packing_events))
+		return;
+
+	max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+	spin_lock(&card->wr_pack_stats.lock);
+
+	pr_info("%s: write packing statistics:\n",
+		mmc_hostname(card->host));
+
+	for (i = 1; i <= max_num_of_packed_reqs; ++i) {
+		if (card->wr_pack_stats.packing_events[i] != 0)
+			pr_info("%s: Packed %d reqs - %d times\n",
+				mmc_hostname(card->host), i,
+				card->wr_pack_stats.packing_events[i]);
+	}
+
+	pr_info("%s: stopped packing due to the following reasons:\n",
+		mmc_hostname(card->host));
+
+	if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+		pr_info("%s: %d times: exceedmax num of segments\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+	if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+		pr_info("%s: %d times: exceeding the max num of sectors\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+	if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+		pr_info("%s: %d times: wrong data direction\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+	if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+		pr_info("%s: %d times: flush or discard\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+	if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+		pr_info("%s: %d times: empty queue\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+	if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
+		pr_info("%s: %d times: rel write\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
+	if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
+		pr_info("%s: %d times: Threshold\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+	spin_unlock(&card->wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(print_mmc_packing_stats);
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
 	struct request_queue *q = mq->queue;
@@ -1698,7 +1745,18 @@
 	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 
 	mqrq->mmc_active.mrq = &brq->mrq;
-	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+	/*
+	 * This is intended for packed commands tests usage - in case these
+	 * functions are not in use the respective pointers are NULL
+	 */
+	if (mq->err_check_fn)
+		mqrq->mmc_active.err_check = mq->err_check_fn;
+	else
+		mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+	if (mq->packed_test_fn)
+		mq->packed_test_fn(mq->queue, mqrq);
 
 	mmc_queue_bounce_pre(mqrq);
 }
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
new file mode 100644
index 0000000..bdda8d5
--- /dev/null
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -0,0 +1,1945 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* MMC block test */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/debugfs.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/delay.h>
+#include <linux/test-iosched.h>
+#include "queue.h"
+
+#define MODULE_NAME "mmc_block_test"
+#define TEST_MAX_SECTOR_RANGE		(600*1024*1024) /* 600 MB */
+#define TEST_MAX_BIOS_PER_REQ		120
+#define CMD23_PACKED_BIT	(1 << 30)
+#define LARGE_PRIME_1	1103515367
+#define LARGE_PRIME_2	35757
+#define PACKED_HDR_VER_MASK 0x000000FF
+#define PACKED_HDR_RW_MASK 0x0000FF00
+#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
+#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
+
+#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
+
+enum is_random {
+	NON_RANDOM_TEST,
+	RANDOM_TEST,
+};
+
+enum mmc_block_test_testcases {
+	/* Start of send write packing test group */
+	SEND_WRITE_PACKING_MIN_TESTCASE,
+	TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
+	TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
+	TEST_STOP_DUE_TO_FLUSH,
+	TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
+	TEST_STOP_DUE_TO_EMPTY_QUEUE,
+	TEST_STOP_DUE_TO_MAX_REQ_NUM,
+	TEST_STOP_DUE_TO_THRESHOLD,
+	SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,
+
+	/* Start of err check test group */
+	ERR_CHECK_MIN_TESTCASE,
+	TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
+	TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
+	TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
+	TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
+	TEST_RET_PARTIAL_MAX_FAIL_IDX,
+	TEST_RET_RETRY,
+	TEST_RET_CMD_ERR,
+	TEST_RET_DATA_ERR,
+	ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,
+
+	/* Start of send invalid test group */
+	INVALID_CMD_MIN_TESTCASE,
+	TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
+	TEST_HDR_WRONG_WRITE_CODE,
+	TEST_HDR_INVALID_RW_CODE,
+	TEST_HDR_DIFFERENT_ADDRESSES,
+	TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
+	TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
+	TEST_HDR_CMD23_PACKED_BIT_SET,
+	TEST_CMD23_MAX_PACKED_WRITES,
+	TEST_CMD23_ZERO_PACKED_WRITES,
+	TEST_CMD23_PACKED_BIT_UNSET,
+	TEST_CMD23_REL_WR_BIT_SET,
+	TEST_CMD23_BITS_16TO29_SET,
+	TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
+	INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
+
+	/*
+	 * Start of packing control test group.
+	 * In these testcases, the abbreviation FB means "followed by".
+	 */
+	PACKING_CONTROL_MIN_TESTCASE,
+	TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
+				PACKING_CONTROL_MIN_TESTCASE,
+	TEST_PACKING_EXP_N_OVER_TRIGGER,
+	TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
+	TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
+	TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
+	TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
+	TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
+	TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
+	TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
+	TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
+	TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
+	PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
+};
+
+enum mmc_block_test_group {
+	TEST_NO_GROUP,
+	TEST_GENERAL_GROUP,
+	TEST_SEND_WRITE_PACKING_GROUP,
+	TEST_ERR_CHECK_GROUP,
+	TEST_SEND_INVALID_GROUP,
+	TEST_PACKING_CONTROL_GROUP,
+};
+
+struct mmc_block_test_debug {
+	struct dentry *send_write_packing_test;
+	struct dentry *err_check_test;
+	struct dentry *send_invalid_packed_test;
+	struct dentry *random_test_seed;
+	struct dentry *packing_control_test;
+};
+
+struct mmc_block_test_data {
+	/* The number of write requests that the test will issue */
+	int num_requests;
+	/* The expected write packing statistics for the current test */
+	struct mmc_wr_pack_stats exp_packed_stats;
+	/*
+	 * A user-defined seed for random choices of number of bios written in
+	 * a request, and of number of requests issued in a test
+	 * This field is randomly updated after each use
+	 */
+	unsigned int random_test_seed;
+	/* A retry counter used in err_check tests */
+	int err_check_counter;
+	/* Can be one of the values of enum test_group */
+	enum mmc_block_test_group test_group;
+	/*
+	 * Indicates if the current testcase is running with random values of
+	 * num_requests and num_bios (in each request)
+	 */
+	int is_random;
+	/* Data structure for debugfs dentrys */
+	struct mmc_block_test_debug debug;
+	/*
+	 * Data structure containing individual test information, including
+	 * self-defined specific data
+	 */
+	struct test_info test_info;
+	/* mmc block device test */
+	struct blk_dev_test_type bdt;
+};
+
+static struct mmc_block_test_data *mbtd;
+
+/*
+ * A callback assigned to the packed_test_fn field.
+ * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
+ * Here we alter the packed header or CMD23 in order to send an invalid
+ * packed command to the card.
+ */
+static void test_invalid_packed_cmd(struct request_queue *q,
+				    struct mmc_queue_req *mqrq)
+{
+	struct mmc_queue *mq = q->queuedata;
+	u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
+	struct request *req = mqrq->req;
+	struct request *second_rq;
+	struct test_request *test_rq;
+	struct mmc_blk_request *brq = &mqrq->brq;
+	int num_requests;
+	int max_packed_reqs;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return;
+	}
+
+	test_rq = (struct test_request *)req->elv.priv[0];
+	if (!test_rq) {
+		test_pr_err("%s: NULL test_rq", __func__);
+		return;
+	}
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
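+	/*
+	 * Word 0 of the packed command header holds the header version
+	 * (byte 0), the R/W code (byte 1) and the number of entries
+	 * (byte 2); each testcase below corrupts one of these fields or the
+	 * per-request CMD23 arguments so that the card sees an invalid
+	 * packed command.
+	 */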
+	switch (mbtd->test_info.testcase) {
+	case TEST_HDR_INVALID_VERSION:
+		test_pr_info("%s: set invalid header version", __func__);
+		/* Put 0 in header version field (1 byte, offset 0 in header) */
+		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
+		break;
+	case TEST_HDR_WRONG_WRITE_CODE:
+		test_pr_info("%s: wrong write code", __func__);
+		/* Set R/W field with R value (1 byte, offset 1 in header) */
+		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
+		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
+		break;
+	case TEST_HDR_INVALID_RW_CODE:
+		test_pr_info("%s: invalid r/w code", __func__);
+		/* Set R/W field with invalid value */
+		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
+		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
+		break;
+	case TEST_HDR_DIFFERENT_ADDRESSES:
+		test_pr_info("%s: different addresses", __func__);
+		second_rq = list_entry(req->queuelist.next, struct request,
+				queuelist);
+		test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
+			      __func__, (long)req->__sector,
+			     (long)second_rq->__sector);
+		/*
+		 * Put start sector of second write request in the first write
+		 * request's cmd25 argument in the packed header
+		 */
+		packed_cmd_hdr[3] = second_rq->__sector;
+		break;
+	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+		test_pr_info("%s: request num smaller than actual" , __func__);
+		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
+									>> 16;
+		/* num of entries is decremented by 1 */
+		num_requests = (num_requests - 1) << 16;
+		/*
+		 * Set number of requests field in packed write header to be
+		 * smaller than the actual number (1 byte, offset 2 in header)
+		 */
+		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
+				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
+		break;
+	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+		test_pr_info("%s: request num larger than actual" , __func__);
+		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
+									>> 16;
+		/* num of entries is incremented by 1 */
+		num_requests = (num_requests + 1) << 16;
+		/*
+		 * Set number of requests field in packed write header to be
+		 * larger than the actual number (1 byte, offset 2 in header).
+		 */
+		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
+				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
+		break;
+	case TEST_HDR_CMD23_PACKED_BIT_SET:
+		test_pr_info("%s: header CMD23 packed bit set" , __func__);
+		/*
+		 * Set packed bit (bit 30) in cmd23 argument of first and second
+		 * write requests in packed write header.
+		 * These are located at bytes 2 and 4 in packed write header
+		 */
+		packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
+		packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
+		break;
+	case TEST_CMD23_MAX_PACKED_WRITES:
+		test_pr_info("%s: CMD23 request num > max_packed_reqs",
+			      __func__);
+		/*
+		 * Set the individual packed cmd23 request num to
+		 * max_packed_reqs + 1
+		 */
+		brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
+		break;
+	case TEST_CMD23_ZERO_PACKED_WRITES:
+		test_pr_info("%s: CMD23 request num = 0", __func__);
+		/* Set the individual packed cmd23 request num to zero */
+		brq->sbc.arg = MMC_CMD23_ARG_PACKED;
+		break;
+	case TEST_CMD23_PACKED_BIT_UNSET:
+		test_pr_info("%s: CMD23 packed bit unset", __func__);
+		/*
+		 * Set the individual packed cmd23 packed bit to 0,
+		 *  although there is a packed write request
+		 */
+		brq->sbc.arg &= ~CMD23_PACKED_BIT;
+		break;
+	case TEST_CMD23_REL_WR_BIT_SET:
+		test_pr_info("%s: CMD23 REL WR bit set", __func__);
+		/* Set the individual packed cmd23 reliable write bit */
+		brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
+		break;
+	case TEST_CMD23_BITS_16TO29_SET:
+		test_pr_info("%s: CMD23 bits [16-29] set", __func__);
+		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+			PACKED_HDR_BITS_16_TO_29_SET;
+		break;
+	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+		test_pr_info("%s: CMD23 hdr not in block count", __func__);
+		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+		((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
+		break;
+	default:
+		test_pr_err("%s: unexpected testcase %d",
+			__func__, mbtd->test_info.testcase);
+		break;
+	}
+}
+
+/*
+ * A callback assigned to the err_check_fn field of the mmc_request by the
+ * MMC/card/block layer.
+ * Called upon request completion by the MMC/core layer.
+ * Here we emulate an error return value from the card.
+ */
+static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
+{
+	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+			mmc_active);
+	struct request_queue *req_q = test_iosched_get_req_queue();
+	struct mmc_queue *mq;
+	int max_packed_reqs;
+	int ret = 0;
+
+	if (req_q) {
+		mq = req_q->queuedata;
+	} else {
+		test_pr_err("%s: NULL request_queue", __func__);
+		return 0;
+	}
+
+	if (!mq) {
+		test_pr_err("%s: %s: NULL mq", __func__,
+			mmc_hostname(card->host));
+		return 0;
+	}
+
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+	if (!mq_rq) {
+		test_pr_err("%s: %s: NULL mq_rq", __func__,
+			mmc_hostname(card->host));
+		return 0;
+	}
+
+	switch (mbtd->test_info.testcase) {
+	case TEST_RET_ABORT:
+		test_pr_info("%s: return abort", __func__);
+		ret = MMC_BLK_ABORT;
+		break;
+	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+		test_pr_info("%s: return partial followed by success",
+			      __func__);
+		/*
+		 * Since in this testcase num_requests is always >= 2,
+		 * we can be sure that packed_fail_idx is always >= 1
+		 */
+		mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
+		test_pr_info("%s: packed_fail_idx = %d"
+			, __func__, mq_rq->packed_fail_idx);
+		mq->err_check_fn = NULL;
+		ret = MMC_BLK_PARTIAL;
+		break;
+	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+		if (!mbtd->err_check_counter) {
+			test_pr_info("%s: return partial followed by abort",
+				      __func__);
+			mbtd->err_check_counter++;
+			/*
+			 * Since in this testcase num_requests is always >= 3,
+			 * we have that packed_fail_idx is always >= 1
+			 */
+			mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
+			test_pr_info("%s: packed_fail_idx = %d"
+				, __func__, mq_rq->packed_fail_idx);
+			ret = MMC_BLK_PARTIAL;
+			break;
+		}
+		mbtd->err_check_counter = 0;
+		mq->err_check_fn = NULL;
+		ret = MMC_BLK_ABORT;
+		break;
+	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+		test_pr_info("%s: return partial multiple until success",
+			     __func__);
+		if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
+			mq->err_check_fn = NULL;
+			mbtd->err_check_counter = 0;
+			ret = MMC_BLK_PARTIAL;
+			break;
+		}
+		mq_rq->packed_fail_idx = 1;
+		ret = MMC_BLK_PARTIAL;
+		break;
+	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+		test_pr_info("%s: return partial max fail_idx", __func__);
+		mq_rq->packed_fail_idx = max_packed_reqs - 1;
+		mq->err_check_fn = NULL;
+		ret = MMC_BLK_PARTIAL;
+		break;
+	case TEST_RET_RETRY:
+		test_pr_info("%s: return retry", __func__);
+		ret = MMC_BLK_RETRY;
+		break;
+	case TEST_RET_CMD_ERR:
+		test_pr_info("%s: return cmd err", __func__);
+		ret = MMC_BLK_CMD_ERR;
+		break;
+	case TEST_RET_DATA_ERR:
+		test_pr_info("%s: return data err", __func__);
+		ret = MMC_BLK_DATA_ERR;
+		break;
+	default:
+		test_pr_err("%s: unexpected testcase %d",
+			__func__, mbtd->test_info.testcase);
+	}
+
+	return ret;
+}
+
+/*
+ * This is a specific implementation for the get_test_case_str_fn function
+ * pointer in the test_info data structure. Given a valid test_data instance,
+ * the function returns a string describing the test, based on its testcase.
+ */
+static char *get_test_case_str(struct test_data *td)
+{
+	if (!td) {
+		test_pr_err("%s: NULL td", __func__);
+		return NULL;
+	}
+
+	switch (td->test_info.testcase) {
+	case TEST_STOP_DUE_TO_FLUSH:
+		return "Test stop due to flush";
+	case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+		return "Test stop due to flush after max-1 reqs";
+	case TEST_STOP_DUE_TO_READ:
+		return "Test stop due to read";
+	case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+		return "Test stop due to read after max-1 reqs";
+	case TEST_STOP_DUE_TO_EMPTY_QUEUE:
+		return "Test stop due to empty queue";
+	case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+		return "Test stop due to max req num";
+	case TEST_STOP_DUE_TO_THRESHOLD:
+		return "Test stop due to exceeding threshold";
+	case TEST_RET_ABORT:
+		return "Test err_check return abort";
+	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+		return "Test err_check return partial followed by success";
+	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+		return "Test err_check return partial followed by abort";
+	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+		return "Test err_check return partial multiple until success";
+	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+		return "Test err_check return partial max fail index";
+	case TEST_RET_RETRY:
+		return "Test err_check return retry";
+	case TEST_RET_CMD_ERR:
+		return "Test err_check return cmd error";
+	case TEST_RET_DATA_ERR:
+		return "Test err_check return data error";
+	case TEST_HDR_INVALID_VERSION:
+		return "Test invalid - wrong header version";
+	case TEST_HDR_WRONG_WRITE_CODE:
+		return "Test invalid - wrong write code";
+	case TEST_HDR_INVALID_RW_CODE:
+		return "Test invalid - wrong R/W code";
+	case TEST_HDR_DIFFERENT_ADDRESSES:
+		return "Test invalid - header different addresses";
+	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+		return "Test invalid - header req num smaller than actual";
+	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+		return "Test invalid - header req num larger than actual";
+	case TEST_HDR_CMD23_PACKED_BIT_SET:
+		return "Test invalid - header cmd23 packed bit set";
+	case TEST_CMD23_MAX_PACKED_WRITES:
+		return "Test invalid - cmd23 max packed writes";
+	case TEST_CMD23_ZERO_PACKED_WRITES:
+		return "Test invalid - cmd23 zero packed writes";
+	case TEST_CMD23_PACKED_BIT_UNSET:
+		return "Test invalid - cmd23 packed bit unset";
+	case TEST_CMD23_REL_WR_BIT_SET:
+		return "Test invalid - cmd23 rel wr bit set";
+	case TEST_CMD23_BITS_16TO29_SET:
+		return "Test invalid - cmd23 bits [16-29] set";
+	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+		return "Test invalid - cmd23 header block not in count";
+	case TEST_PACKING_EXP_N_OVER_TRIGGER:
+		return "\nTest packing control - pack n";
+	case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+		return "\nTest packing control - pack n followed by read";
+	case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+		return "\nTest packing control - pack n followed by flush";
+	case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+		return "\nTest packing control - pack one followed by read";
+	case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
+		return "\nTest packing control - pack threshold";
+	case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+		return "\nTest packing control - no packing";
+	case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+		return "\nTest packing control - no packing, trigger requests";
+	case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+		return "\nTest packing control - no pack, trigger-read-trigger";
+	case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+		return "\nTest packing control- no pack, trigger-flush-trigger";
+	case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+		return "\nTest packing control - mix: pack -> no pack -> pack";
+	case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+		return "\nTest packing control - mix: no pack->pack->no pack";
+	default:
+		 return "Unknown testcase";
+	}
+
+	return NULL;
+}
+
+/*
+ * Compare individual testcase's statistics to the expected statistics:
+ * Compare stop reason and number of packing events
+ */
+static int check_wr_packing_statistics(struct test_data *td)
+{
+	struct mmc_wr_pack_stats *mmc_packed_stats;
+	struct mmc_queue *mq = td->req_q->queuedata;
+	int max_packed_reqs;
+	int i;
+	struct mmc_card *card;
+	struct mmc_wr_pack_stats expected_stats;
+	int *stop_reason;
+	int ret = 0;
+
+	/* Check mq before dereferencing it for the card pointer below */
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+	card = mq->card;
+
+	expected_stats = mbtd->exp_packed_stats;
+
+	mmc_packed_stats = mmc_blk_get_packed_statistics(card);
+	if (!mmc_packed_stats) {
+		test_pr_err("%s: NULL mmc_packed_stats", __func__);
+		return -EINVAL;
+	}
+
+	if (!mmc_packed_stats->packing_events) {
+		test_pr_err("%s: NULL packing_events", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock(&mmc_packed_stats->lock);
+
+	if (!mmc_packed_stats->enabled) {
+		test_pr_err("%s write packing statistics are not enabled",
+			     __func__);
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	stop_reason = mmc_packed_stats->pack_stop_reason;
+
+	for (i = 1; i <= max_packed_reqs; ++i) {
+		if (mmc_packed_stats->packing_events[i] !=
+		    expected_stats.packing_events[i]) {
+			test_pr_err(
+			"%s: Wrong pack stats in index %d, got %d, expected %d",
+			__func__, i, mmc_packed_stats->packing_events[i],
+			       expected_stats.packing_events[i]);
+			if (td->fs_wr_reqs_during_test)
+				goto cancel_round;
+			ret = -EINVAL;
+			goto exit_err;
+		}
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
+	    expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
+		test_pr_err(
+		"%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
+			__func__, stop_reason[EXCEEDS_SEGMENTS],
+		       expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
+	    expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
+		test_pr_err(
+		"%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
+			__func__, stop_reason[EXCEEDS_SECTORS],
+		       expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
+	    expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
+		test_pr_err(
+		"%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
+		       __func__, stop_reason[WRONG_DATA_DIR],
+		       expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
+	    expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
+		test_pr_err(
+		"%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
+		       __func__, stop_reason[FLUSH_OR_DISCARD],
+		       expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
+	    expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
+		test_pr_err(
+		"%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
+		       __func__, stop_reason[EMPTY_QUEUE],
+		       expected_stats.pack_stop_reason[EMPTY_QUEUE]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
+	    expected_stats.pack_stop_reason[REL_WRITE]) {
+		test_pr_err(
+			"%s: Wrong pack stop reason REL_WRITE %d, expected %d",
+		       __func__, stop_reason[REL_WRITE],
+		       expected_stats.pack_stop_reason[REL_WRITE]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+exit_err:
+	spin_unlock(&mmc_packed_stats->lock);
+	if (ret && mmc_packed_stats->enabled)
+		print_mmc_packing_stats(card);
+	return ret;
+cancel_round:
+	spin_unlock(&mmc_packed_stats->lock);
+	test_iosched_set_ignore_round(true);
+	return 0;
+}
+
+/*
+ * Pseudo-randomly choose a seed based on the last seed, and update it in
+ * seed_number. Then return seed_number modulo max_val, or min_val if the
+ * result is smaller.
+ */
+static unsigned int pseudo_random_seed(unsigned int *seed_number,
+				       unsigned int min_val,
+				       unsigned int max_val)
+{
+	int ret = 0;
+
+	if (!seed_number)
+		return 0;
+
+	*seed_number = ((unsigned int)(((unsigned long)*seed_number *
+				(unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
+	ret = (unsigned int)((*seed_number) % max_val);
+
+	return (ret > min_val ? ret : min_val);
+}
+
+/*
+ * Given a pseudo-random seed, find a pseudo-random num_of_bios.
+ * Make sure that the total size of the request does not exceed
+ * TEST_MAX_SECTOR_RANGE.
+ */
+static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
+				   unsigned int *num_of_bios)
+{
+	do {
+		*num_of_bios = pseudo_random_seed(num_bios_seed, 1,
+						  TEST_MAX_BIOS_PER_REQ);
+		if (!(*num_of_bios))
+			*num_of_bios = 1;
+	} while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
+}
+
+/* Add a single read request to the given td's request queue */
+static int prepare_request_add_read(struct test_data *td)
+{
+	int ret;
+	int start_sec;
+
+	if (td) {
+		start_sec = td->start_sector;
+	} else {
+		test_pr_err("%s: NULL td", __func__);
+		return 0;
+	}
+
+	test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
+		     td->wr_rd_next_req_id);
+
+	ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
+					      TEST_PATTERN_5A, NULL);
+	if (ret) {
+		test_pr_err("%s: failed to add a read request", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Add a single flush request to the given td's request queue */
+static int prepare_request_add_flush(struct test_data *td)
+{
+	int ret;
+
+	if (!td) {
+		test_pr_err("%s: NULL td", __func__);
+		return 0;
+	}
+
+	test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
+		     td->unique_next_req_id);
+	ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
+				  0, 0, NULL);
+	if (ret) {
+		test_pr_err("%s: failed to add a flush request", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * Add num_requests write requests to the given td's request queue.
+ * In random test mode the number of bios in each write request is chosen
+ * pseudo-randomly; otherwise each request carries between 1 and 5 bios.
+ */
+static int prepare_request_add_write_reqs(struct test_data *td,
+					  int num_requests, int is_err_expected,
+					  int is_random)
+{
+	int i;
+	unsigned int start_sec;
+	unsigned int num_bios;
+	int ret = 0;
+	unsigned int *bio_seed = &mbtd->random_test_seed;
+
+	if (td) {
+		start_sec = td->start_sector;
+	} else {
+		test_pr_err("%s: NULL td", __func__);
+		return ret;
+	}
+
+	test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
+		     num_requests, td->wr_rd_next_req_id);
+
+	for (i = 1; i <= num_requests; i++) {
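+		/*
+		 * Space consecutive write requests apart according to the
+		 * number of write bios already issued in this test.
+		 */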
+		start_sec = td->start_sector + 4096 * td->num_of_write_bios;
+		if (is_random)
+			pseudo_rnd_num_of_bios(bio_seed, &num_bios);
+		else
+			/*
+			 * For the non-random case, give num_bios a value
+			 * between 1 and 5, to keep a small number of BIOs
+			 */
+			num_bios = (i%5)+1;
+
+		ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
+				start_sec, num_bios, TEST_PATTERN_5A, NULL);
+
+		if (ret) {
+			test_pr_err("%s: failed to add a write request",
+				    __func__);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Prepare the write, read and flush requests for a generic packed commands
+ * testcase
+ */
+static int prepare_packed_requests(struct test_data *td, int is_err_expected,
+				   int num_requests, int is_random)
+{
+	int ret = 0;
+	struct mmc_queue *mq;
+	int max_packed_reqs;
+	struct request_queue *req_q;
+
+	if (!td) {
+		pr_err("%s: NULL td", __func__);
+		return -EINVAL;
+	}
+
+	req_q = td->req_q;
+
+	if (!req_q) {
+		pr_err("%s: NULL request queue", __func__);
+		return -EINVAL;
+	}
+
+	mq = req_q->queuedata;
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+	if (mbtd->random_test_seed == 0) {
+		mbtd->random_test_seed =
+			(unsigned int)(get_jiffies_64() & 0xFFFF);
+		test_pr_info("%s: got seed from jiffies %d",
+			     __func__, mbtd->random_test_seed);
+	}
+
+	mmc_blk_init_packed_statistics(mq->card);
+
+	ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
+					     is_random);
+	if (ret)
+		return ret;
+
+	/* Avoid memory corruption in upcoming stats set */
+	if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
+		num_requests--;
+
+	memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
+		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+	memset(mbtd->exp_packed_stats.packing_events, 0,
+		(max_packed_reqs + 1) * sizeof(u32));
+	if (num_requests <= max_packed_reqs)
+		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+
+	switch (td->test_info.testcase) {
+	case TEST_STOP_DUE_TO_FLUSH:
+	case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+		ret = prepare_request_add_flush(td);
+		if (ret)
+			return ret;
+
+		mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
+		break;
+	case TEST_STOP_DUE_TO_READ:
+	case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+		ret = prepare_request_add_read(td);
+		if (ret)
+			return ret;
+
+		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+		break;
+	case TEST_STOP_DUE_TO_THRESHOLD:
+		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+		mbtd->exp_packed_stats.packing_events[1] = 1;
+		mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
+		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+		break;
+	case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+		mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
+		break;
+	default:
+		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+	}
+	mbtd->num_requests = num_requests;
+
+	return 0;
+}
+
+/*
+ * Prepare the write, read and flush requests for the packing control
+ * testcases
+ */
+static int prepare_packed_control_tests_requests(struct test_data *td,
+			int is_err_expected, int num_requests, int is_random)
+{
+	int ret = 0;
+	struct mmc_queue *mq;
+	int max_packed_reqs;
+	int temp_num_req = num_requests;
+	struct request_queue *req_q;
+	int test_packed_trigger;
+	int num_packed_reqs;
+
+	if (!td) {
+		test_pr_err("%s: NULL td\n", __func__);
+		return -EINVAL;
+	}
+
+	req_q = td->req_q;
+
+	if (!req_q) {
+		test_pr_err("%s: NULL request queue\n", __func__);
+		return -EINVAL;
+	}
+
+	mq = req_q->queuedata;
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
+	num_packed_reqs = num_requests - test_packed_trigger;
+
+	if (mbtd->random_test_seed == 0) {
+		mbtd->random_test_seed =
+			(unsigned int)(get_jiffies_64() & 0xFFFF);
+		test_pr_info("%s: got seed from jiffies %d",
+			     __func__, mbtd->random_test_seed);
+	}
+
+	mmc_blk_init_packed_statistics(mq->card);
+
+	if (td->test_info.testcase ==
+			TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
+		temp_num_req = num_requests;
+		num_requests = test_packed_trigger - 1;
+	}
+
+	/* Verify that the packing is disabled before starting the test */
+	mq->wr_packing_enabled = false;
+	mq->num_of_potential_packed_wr_reqs = 0;
+
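+	/*
+	 * For the pack -> no pack -> pack mix, start the test with packing
+	 * already enabled and the potential-packed counter above the
+	 * trigger, so the first batch of writes is issued packed.
+	 */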
+	if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
+		mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
+		mq->wr_packing_enabled = true;
+		num_requests = test_packed_trigger + 2;
+	}
+
+	ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
+					     is_random);
+	if (ret)
+		goto exit;
+
+	if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
+		num_requests = temp_num_req;
+
+	memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
+		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+	memset(mbtd->exp_packed_stats.packing_events, 0,
+		(max_packed_reqs + 1) * sizeof(u32));
+
+	switch (td->test_info.testcase) {
+	case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+	case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+		ret = prepare_request_add_read(td);
+		if (ret)
+			goto exit;
+
+		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+		break;
+	case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+		ret = prepare_request_add_flush(td);
+		if (ret)
+			goto exit;
+
+		ret = prepare_request_add_write_reqs(td, num_packed_reqs,
+					     is_err_expected, is_random);
+		if (ret)
+			goto exit;
+
+		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+		mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
+		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
+		break;
+	case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+		ret = prepare_request_add_read(td);
+		if (ret)
+			goto exit;
+
+		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
+						    is_err_expected, is_random);
+		if (ret)
+			goto exit;
+
+		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+		break;
+	case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+		ret = prepare_request_add_flush(td);
+		if (ret)
+			goto exit;
+
+		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
+						    is_err_expected, is_random);
+		if (ret)
+			goto exit;
+
+		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+		break;
+	case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+		ret = prepare_request_add_read(td);
+		if (ret)
+			goto exit;
+
+		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
+						    is_err_expected, is_random);
+		if (ret)
+			goto exit;
+
+		ret = prepare_request_add_write_reqs(td, num_requests,
+						    is_err_expected, is_random);
+		if (ret)
+			goto exit;
+
+		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+		mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
+		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+		break;
+	case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+		ret = prepare_request_add_read(td);
+		if (ret)
+			goto exit;
+
+		ret = prepare_request_add_write_reqs(td, num_requests,
+						    is_err_expected, is_random);
+		if (ret)
+			goto exit;
+
+		ret = prepare_request_add_read(td);
+		if (ret)
+			goto exit;
+
+		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
+						    is_err_expected, is_random);
+		if (ret)
+			goto exit;
+
+		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+		break;
+	case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+	case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+		break;
+	default:
+		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+	}
+	mbtd->num_requests = num_requests;
+
+exit:
+	return ret;
+}
+
+/*
+ * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
+ * In this testcase we have mixed error expectations from different
+ * write requests, hence the special prepare function.
+ */
+static int prepare_partial_followed_by_abort(struct test_data *td,
+					      int num_requests)
+{
+	int i, start_address;
+	int is_err_expected = 0;
+	int ret = 0;
+	struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
+	int max_packed_reqs;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+	mmc_blk_init_packed_statistics(mq->card);
+
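+	/* first half of the requests succeeds, the second half should fail */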
+	for (i = 1; i <= num_requests; i++) {
+		if (i > (num_requests / 2))
+			is_err_expected = 1;
+
+		start_address = td->start_sector + 4096 * td->num_of_write_bios;
+		ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
+				start_address, (i % 5) + 1, TEST_PATTERN_5A,
+				NULL);
+		if (ret) {
+			test_pr_err("%s: failed to add a write request",
+				    __func__);
+			return ret;
+		}
+	}
+
+	memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
+		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+	memset(mbtd->exp_packed_stats.packing_events, 0,
+		(max_packed_reqs + 1) * sizeof(u32));
+	mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+	mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+
+	mbtd->num_requests = num_requests;
+
+	return ret;
+}
+
+/*
+ * Get the number of write requests for the current testcase. If random test
+ * mode was chosen, pseudo-randomly choose the number of requests; otherwise
+ * set it to two less than the packing threshold.
+ */
+static int get_num_requests(struct test_data *td)
+{
+	int *seed = &mbtd->random_test_seed;
+	struct request_queue *req_q;
+	struct mmc_queue *mq;
+	int max_num_requests;
+	int num_requests;
+	int min_num_requests = 2;
+	int is_random = mbtd->is_random;
+	int max_for_double;
+	int test_packed_trigger;
+
+	req_q = test_iosched_get_req_queue();
+	if (!req_q) {
+		test_pr_err("%s: NULL request queue", __func__);
+		return 0;
+	}
+
+	mq = req_q->queuedata;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_num_requests = mq->card->ext_csd.max_packed_writes;
+	num_requests = max_num_requests - 2;
+	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
+
+	/*
+	 * Here max_for_double is intended for packed control testcases
+	 * in which we issue many write requests. Its purpose is to prevent
+	 * exceeding the maximum number of req_queue requests.
+	 */
+	max_for_double = max_num_requests - 10;
+
+	if (td->test_info.testcase ==
+				TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+		/* Don't expect packing, so issue up to trigger-1 reqs */
+		num_requests = test_packed_trigger - 1;
+
+	if (is_random) {
+		if (td->test_info.testcase ==
+		    TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
+			/*
+			 * Here we don't want num_requests to be less than 1
+			 * as a consequence of division by 2.
+			 */
+			min_num_requests = 3;
+
+		if (td->test_info.testcase ==
+				TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+			/* Don't expect packing, so issue up to trigger reqs */
+			max_num_requests = test_packed_trigger;
+
+		num_requests = pseudo_random_seed(seed, min_num_requests,
+						  max_num_requests - 1);
+	}
+
+	if (td->test_info.testcase ==
+				TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+		num_requests -= test_packed_trigger;
+
+	if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
+		num_requests =
+		num_requests > max_for_double ? max_for_double : num_requests;
+
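+	/* packing control tests add the trigger requests to the packed ones */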
+	if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
+		num_requests += test_packed_trigger;
+
+	if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
+		num_requests = test_packed_trigger;
+
+	return num_requests;
+}
+
+/*
+ * An implementation for the prepare_test_fn pointer in the test_info
+ * data structure. According to the testcase, we add the proper number of
+ * requests and decide whether an error is expected or not.
+ */
+static int prepare_test(struct test_data *td)
+{
+	struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
+	int max_num_requests;
+	int num_requests = 0;
+	int ret = 0;
+	int is_random = mbtd->is_random;
+	int test_packed_trigger;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_num_requests = mq->card->ext_csd.max_packed_writes;
+	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
+
+	if (is_random && mbtd->random_test_seed == 0) {
+		mbtd->random_test_seed =
+			(unsigned int)(get_jiffies_64() & 0xFFFF);
+		test_pr_info("%s: got seed from jiffies %d",
+			__func__, mbtd->random_test_seed);
+	}
+
+	num_requests = get_num_requests(td);
+
+	if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
+		mq->packed_test_fn =
+				test_invalid_packed_cmd;
+
+	if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
+		mq->err_check_fn = test_err_check;
+
+	switch (td->test_info.testcase) {
+	case TEST_STOP_DUE_TO_FLUSH:
+	case TEST_STOP_DUE_TO_READ:
+	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+	case TEST_STOP_DUE_TO_EMPTY_QUEUE:
+	case TEST_CMD23_PACKED_BIT_UNSET:
+		ret = prepare_packed_requests(td, 0, num_requests, is_random);
+		break;
+	case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+	case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+		ret = prepare_packed_requests(td, 0, max_num_requests - 1,
+					      is_random);
+		break;
+	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+		ret = prepare_partial_followed_by_abort(td, num_requests);
+		break;
+	case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+		ret = prepare_packed_requests(td, 0, max_num_requests,
+					      is_random);
+		break;
+	case TEST_STOP_DUE_TO_THRESHOLD:
+		ret = prepare_packed_requests(td, 0, max_num_requests + 1,
+					      is_random);
+		break;
+	case TEST_RET_ABORT:
+	case TEST_RET_RETRY:
+	case TEST_RET_CMD_ERR:
+	case TEST_RET_DATA_ERR:
+	case TEST_HDR_INVALID_VERSION:
+	case TEST_HDR_WRONG_WRITE_CODE:
+	case TEST_HDR_INVALID_RW_CODE:
+	case TEST_HDR_DIFFERENT_ADDRESSES:
+	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+	case TEST_CMD23_MAX_PACKED_WRITES:
+	case TEST_CMD23_ZERO_PACKED_WRITES:
+	case TEST_CMD23_REL_WR_BIT_SET:
+	case TEST_CMD23_BITS_16TO29_SET:
+	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+	case TEST_HDR_CMD23_PACKED_BIT_SET:
+		ret = prepare_packed_requests(td, 1, num_requests, is_random);
+		break;
+	case TEST_PACKING_EXP_N_OVER_TRIGGER:
+	case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+	case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+	case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+	case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+	case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+		ret = prepare_packed_control_tests_requests(td, 0, num_requests,
+			is_random);
+		break;
+	case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
+		ret = prepare_packed_control_tests_requests(td, 0,
+			max_num_requests, is_random);
+		break;
+	case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+		ret = prepare_packed_control_tests_requests(td, 0,
+			test_packed_trigger + 1,
+					is_random);
+		break;
+	case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+		ret = prepare_packed_control_tests_requests(td, 0, num_requests,
+			is_random);
+		break;
+	case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+	case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+		ret = prepare_packed_control_tests_requests(td, 0,
+			test_packed_trigger, is_random);
+		break;
+	default:
+		test_pr_err("%s: Invalid test case...", __func__);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * An implementation for the post_test_fn in the test_info data structure.
+ * In our case we just reset the function pointers in the mmc_queue so that
+ * the FS is able to dispatch its requests correctly after the test is
+ * finished.
+ */
+static int post_test(struct test_data *td)
+{
+	struct mmc_queue *mq;
+
+	if (!td)
+		return -EINVAL;
+
+	mq = td->req_q->queuedata;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	mq->packed_test_fn = NULL;
+	mq->err_check_fn = NULL;
+
+	return 0;
+}
+
+/*
+ * This function checks, based on the current test's test_group, that the
+ * packed commands capability and control are set right. In addition, we check
+ * if the card supports the packed command feature.
+ */
+static int validate_packed_commands_settings(void)
+{
+	struct request_queue *req_q;
+	struct mmc_queue *mq;
+	int max_num_requests;
+	struct mmc_host *host;
+
+	req_q = test_iosched_get_req_queue();
+	if (!req_q) {
+		test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
+		test_iosched_set_test_result(TEST_FAILED);
+		return -EINVAL;
+	}
+
+	mq = req_q->queuedata;
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_num_requests = mq->card->ext_csd.max_packed_writes;
+	host = mq->card->host;
+
+	if (!(host->caps2 & MMC_CAP2_PACKED_WR)) {
+		test_pr_err("%s: Packed Write capability disabled, exit test",
+			    __func__);
+		test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+		return -EINVAL;
+	}
+
+	if (max_num_requests == 0) {
+		test_pr_err(
+		"%s: no write packing support, ext_csd.max_packed_writes=%d",
+		__func__, mq->card->ext_csd.max_packed_writes);
+		test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+		return -EINVAL;
+	}
+
+	test_pr_info("%s: max number of packed requests supported is %d",
+		     __func__, max_num_requests);
+
+	switch (mbtd->test_group) {
+	case TEST_SEND_WRITE_PACKING_GROUP:
+	case TEST_ERR_CHECK_GROUP:
+	case TEST_SEND_INVALID_GROUP:
+		/* disable the packing control */
+		host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
+		break;
+	case TEST_PACKING_CONTROL_GROUP:
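+		/* packing control tests need the packing control enabled */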
+		host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static bool message_repeat;
+static int test_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	message_repeat = 1;
+	return 0;
+}
+
+/* send_packing TEST */
+static ssize_t send_write_packing_test_write(struct file *file,
+				const char __user *buf,
+				size_t count,
+				loff_t *ppos)
+{
+	int ret = 0;
+	int i = 0;
+	int number = -1;
+	int j = 0;
+
+	test_pr_info("%s: -- send_write_packing TEST --", __func__);
+
+	sscanf(buf, "%d", &number);
+
+	if (number <= 0)
+		number = 1;
+
+	mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;
+
+	if (validate_packed_commands_settings())
+		return count;
+
+	if (mbtd->random_test_seed > 0)
+		test_pr_info("%s: Test seed: %d", __func__,
+			      mbtd->random_test_seed);
+
+	memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+	mbtd->test_info.data = mbtd;
+	mbtd->test_info.prepare_test_fn = prepare_test;
+	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+	mbtd->test_info.post_test_fn = post_test;
+
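+	/*
+	 * Run every testcase in the group twice: once in random mode and
+	 * once in non-random mode.
+	 */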
+	for (i = 0; i < number; ++i) {
+		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+		test_pr_info("%s: ====================", __func__);
+
+		for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
+		      j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {
+
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				break;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = NON_RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				break;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+		}
+	}
+
+	test_pr_info("%s: Completed all the test cases.", __func__);
+
+	return count;
+}
+
+static ssize_t send_write_packing_test_read(struct file *file,
+			       char __user *buffer,
+			       size_t count,
+			       loff_t *offset)
+{
+	memset((void *)buffer, 0, count);
+
+	snprintf(buffer, count,
+		 "\nsend_write_packing_test\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test checks the following scenarios\n"
+		 "- Pack due to FLUSH message\n"
+		 "- Pack due to FLUSH after threshold writes\n"
+		 "- Pack due to READ message\n"
+		 "- Pack due to READ after threshold writes\n"
+		 "- Pack due to empty queue\n"
+		 "- Pack due to threshold writes\n"
+		 "- Pack due to one over threshold writes\n");
+
+	if (message_repeat == 1) {
+		message_repeat = 0;
+		return strnlen(buffer, count);
+	} else {
+		return 0;
+	}
+}
+
+const struct file_operations send_write_packing_test_ops = {
+	.open = test_open,
+	.write = send_write_packing_test_write,
+	.read = send_write_packing_test_read,
+};
+
+/* err_check TEST */
+static ssize_t err_check_test_write(struct file *file,
+				const char __user *buf,
+				size_t count,
+				loff_t *ppos)
+{
+	int ret = 0;
+	int i = 0;
+	int number = -1;
+	int j = 0;
+
+	test_pr_info("%s: -- err_check TEST --", __func__);
+
+	sscanf(buf, "%d", &number);
+
+	if (number <= 0)
+		number = 1;
+
+	mbtd->test_group = TEST_ERR_CHECK_GROUP;
+
+	if (validate_packed_commands_settings())
+		return count;
+
+	if (mbtd->random_test_seed > 0)
+		test_pr_info("%s: Test seed: %d", __func__,
+			      mbtd->random_test_seed);
+
+	memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+	mbtd->test_info.data = mbtd;
+	mbtd->test_info.prepare_test_fn = prepare_test;
+	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+	mbtd->test_info.post_test_fn = post_test;
+
+	for (i = 0; i < number; ++i) {
+		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+		test_pr_info("%s: ====================", __func__);
+
+		for (j = ERR_CHECK_MIN_TESTCASE;
+					j <= ERR_CHECK_MAX_TESTCASE ; j++) {
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				break;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = NON_RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				break;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+		}
+	}
+
+	test_pr_info("%s: Completed all the test cases.", __func__);
+
+	return count;
+}
+
+static ssize_t err_check_test_read(struct file *file,
+			       char __user *buffer,
+			       size_t count,
+			       loff_t *offset)
+{
+	memset((void *)buffer, 0, count);
+
+	snprintf(buffer, count,
+		 "\nerr_check_TEST\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test checks the following scenarios\n"
+		 "- Return ABORT\n"
+		 "- Return PARTIAL followed by success\n"
+		 "- Return PARTIAL followed by abort\n"
+		 "- Return PARTIAL multiple times until success\n"
+		 "- Return PARTIAL with fail index = threshold\n"
+		 "- Return RETRY\n"
+		 "- Return CMD_ERR\n"
+		 "- Return DATA_ERR\n");
+
+	if (message_repeat == 1) {
+		message_repeat = 0;
+		return strnlen(buffer, count);
+	} else {
+		return 0;
+	}
+}
+
+const struct file_operations err_check_test_ops = {
+	.open = test_open,
+	.write = err_check_test_write,
+	.read = err_check_test_read,
+};
+
+/* send_invalid_packed TEST */
+static ssize_t send_invalid_packed_test_write(struct file *file,
+				const char __user *buf,
+				size_t count,
+				loff_t *ppos)
+{
+	int ret = 0;
+	int i = 0;
+	int number = -1;
+	int j = 0;
+	int num_of_failures = 0;
+
+	test_pr_info("%s: -- send_invalid_packed TEST --", __func__);
+
+	sscanf(buf, "%d", &number);
+
+	if (number <= 0)
+		number = 1;
+
+	mbtd->test_group = TEST_SEND_INVALID_GROUP;
+
+	if (validate_packed_commands_settings())
+		return count;
+
+	if (mbtd->random_test_seed > 0)
+		test_pr_info("%s: Test seed: %d", __func__,
+			      mbtd->random_test_seed);
+
+	memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+	mbtd->test_info.data = mbtd;
+	mbtd->test_info.prepare_test_fn = prepare_test;
+	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+	mbtd->test_info.post_test_fn = post_test;
+
+	for (i = 0; i < number; ++i) {
+		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+		test_pr_info("%s: ====================", __func__);
+
+		for (j = INVALID_CMD_MIN_TESTCASE;
+				j <= INVALID_CMD_MAX_TESTCASE ; j++) {
+
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				num_of_failures++;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = NON_RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				num_of_failures++;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+		}
+	}
+
+	test_pr_info("%s: Completed all the test cases.", __func__);
+
+	if (num_of_failures > 0) {
+		test_iosched_set_test_result(TEST_FAILED);
+		test_pr_err(
+			"There were %d failures during the test, TEST FAILED",
+			num_of_failures);
+	}
+	return count;
+}
+
+static ssize_t send_invalid_packed_test_read(struct file *file,
+			       char __user *buffer,
+			       size_t count,
+			       loff_t *offset)
+{
+	memset((void *)buffer, 0, count);
+
+	snprintf(buffer, count,
+		 "\nsend_invalid_packed_TEST\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test checks the following scenarios\n"
+		 "- Send an invalid header version\n"
+		 "- Send the wrong write code\n"
+		 "- Send an invalid R/W code\n"
+		 "- Send wrong start address in header\n"
+		 "- Send header with block_count smaller than actual\n"
+		 "- Send header with block_count larger than actual\n"
+		 "- Send header CMD23 packed bit set\n"
+		 "- Send CMD23 with block count over threshold\n"
+		 "- Send CMD23 with block_count equals zero\n"
+		 "- Send CMD23 packed bit unset\n"
+		 "- Send CMD23 reliable write bit set\n"
+		 "- Send CMD23 bits [16-29] set\n"
+		 "- Send CMD23 header block not in block_count\n");
+
+	if (message_repeat == 1) {
+		message_repeat = 0;
+		return strnlen(buffer, count);
+	} else {
+		return 0;
+	}
+}
+
+const struct file_operations send_invalid_packed_test_ops = {
+	.open = test_open,
+	.write = send_invalid_packed_test_write,
+	.read = send_invalid_packed_test_read,
+};
+
+/* packing_control TEST */
+static ssize_t write_packing_control_test_write(struct file *file,
+				const char __user *buf,
+				size_t count,
+				loff_t *ppos)
+{
+	int ret = 0;
+	int i = 0;
+	int number = -1;
+	int j = 0;
+	struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
+	int max_num_requests = mq->card->ext_csd.max_packed_writes;
+	int test_successful = 1;
+
+	test_pr_info("%s: -- write_packing_control TEST --", __func__);
+
+	sscanf(buf, "%d", &number);
+
+	if (number <= 0)
+		number = 1;
+
+	test_pr_info("%s: max_num_requests = %d ", __func__,
+			max_num_requests);
+
+	memset(&mbtd->test_info, 0, sizeof(struct test_info));
+	mbtd->test_group = TEST_PACKING_CONTROL_GROUP;
+
+	if (validate_packed_commands_settings())
+		return count;
+
+	mbtd->test_info.data = mbtd;
+	mbtd->test_info.prepare_test_fn = prepare_test;
+	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+
+	for (i = 0; i < number; ++i) {
+		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+		test_pr_info("%s: ====================", __func__);
+
+		for (j = PACKING_CONTROL_MIN_TESTCASE;
+				j <= PACKING_CONTROL_MAX_TESTCASE; j++) {
+
+			test_successful = 1;
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret) {
+				test_successful = 0;
+				break;
+			}
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = NON_RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret) {
+				test_successful = 0;
+				break;
+			}
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+		}
+
+		if (!test_successful)
+			break;
+	}
+
+	test_pr_info("%s: Completed all the test cases.", __func__);
+
+	return count;
+}
+
+static ssize_t write_packing_control_test_read(struct file *file,
+			       char __user *buffer,
+			       size_t count,
+			       loff_t *offset)
+{
+	memset((void *)buffer, 0, count);
+
+	snprintf(buffer, count,
+		 "\nwrite_packing_control_test\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test checks the following scenarios\n"
+		 "- Packing expected - one over trigger\n"
+		 "- Packing expected - N over trigger\n"
+		 "- Packing expected - N over trigger followed by read\n"
+		 "- Packing expected - N over trigger followed by flush\n"
+		 "- Packing expected - threshold over trigger FB by flush\n"
+		 "- Packing not expected - less than trigger\n"
+		 "- Packing not expected - trigger requests\n"
+		 "- Packing not expected - trigger, read, trigger\n"
+		 "- Mixed state - packing -> no packing -> packing\n"
+		 "- Mixed state - no packing -> packing -> no packing\n");
+
+	if (message_repeat == 1) {
+		message_repeat = 0;
+		return strnlen(buffer, count);
+	} else {
+		return 0;
+	}
+}
+
+const struct file_operations write_packing_control_test_ops = {
+	.open = test_open,
+	.write = write_packing_control_test_write,
+	.read = write_packing_control_test_read,
+};
+
+static void mmc_block_test_debugfs_cleanup(void)
+{
+	debugfs_remove(mbtd->debug.random_test_seed);
+	debugfs_remove(mbtd->debug.send_write_packing_test);
+	debugfs_remove(mbtd->debug.err_check_test);
+	debugfs_remove(mbtd->debug.send_invalid_packed_test);
+	debugfs_remove(mbtd->debug.packing_control_test);
+}
+
+static int mmc_block_test_debugfs_init(void)
+{
+	struct dentry *utils_root, *tests_root;
+
+	utils_root = test_iosched_get_debugfs_utils_root();
+	tests_root = test_iosched_get_debugfs_tests_root();
+
+	if (!utils_root || !tests_root)
+		return -EINVAL;
+
+	mbtd->debug.random_test_seed = debugfs_create_u32(
+					"random_test_seed",
+					S_IRUGO | S_IWUGO,
+					utils_root,
+					&mbtd->random_test_seed);
+
+	if (!mbtd->debug.random_test_seed)
+		goto err_nomem;
+
+	mbtd->debug.send_write_packing_test =
+		debugfs_create_file("send_write_packing_test",
+				    S_IRUGO | S_IWUGO,
+				    tests_root,
+				    NULL,
+				    &send_write_packing_test_ops);
+
+	if (!mbtd->debug.send_write_packing_test)
+		goto err_nomem;
+
+	mbtd->debug.err_check_test =
+		debugfs_create_file("err_check_test",
+				    S_IRUGO | S_IWUGO,
+				    tests_root,
+				    NULL,
+				    &err_check_test_ops);
+
+	if (!mbtd->debug.err_check_test)
+		goto err_nomem;
+
+	mbtd->debug.send_invalid_packed_test =
+		debugfs_create_file("send_invalid_packed_test",
+				    S_IRUGO | S_IWUGO,
+				    tests_root,
+				    NULL,
+				    &send_invalid_packed_test_ops);
+
+	if (!mbtd->debug.send_invalid_packed_test)
+		goto err_nomem;
+
+	mbtd->debug.packing_control_test = debugfs_create_file(
+					"packing_control_test",
+					S_IRUGO | S_IWUGO,
+					tests_root,
+					NULL,
+					&write_packing_control_test_ops);
+
+	if (!mbtd->debug.packing_control_test)
+		goto err_nomem;
+
+	return 0;
+
+err_nomem:
+	mmc_block_test_debugfs_cleanup();
+	return -ENOMEM;
+}
+
+static void mmc_block_test_probe(void)
+{
+	struct request_queue *q = test_iosched_get_req_queue();
+	struct mmc_queue *mq;
+	int max_packed_reqs;
+
+	if (!q) {
+		test_pr_err("%s: NULL request queue", __func__);
+		return;
+	}
+
+	mq = q->queuedata;
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return;
+	}
+
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
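+	/* one entry per possible packed command size, 0..max_packed_reqs */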
+	mbtd->exp_packed_stats.packing_events =
+			kzalloc((max_packed_reqs + 1) *
+				sizeof(*mbtd->exp_packed_stats.packing_events),
+				GFP_KERNEL);
+
+	mmc_block_test_debugfs_init();
+}
+
+static void mmc_block_test_remove(void)
+{
+	mmc_block_test_debugfs_cleanup();
+}
+
+static int __init mmc_block_test_init(void)
+{
+	mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
+	if (!mbtd) {
+		test_pr_err("%s: failed to allocate mmc_block_test_data",
+			    __func__);
+		return -ENODEV;
+	}
+
+	mbtd->bdt.init_fn = mmc_block_test_probe;
+	mbtd->bdt.exit_fn = mmc_block_test_remove;
+	INIT_LIST_HEAD(&mbtd->bdt.list);
+	test_iosched_register(&mbtd->bdt);
+
+	return 0;
+}
+
+static void __exit mmc_block_test_exit(void)
+{
+	test_iosched_unregister(&mbtd->bdt);
+	kfree(mbtd);
+}
+
+module_init(mmc_block_test_init);
+module_exit(mmc_block_test_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MMC block test");
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 93e4b59..a8c104e 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,17 @@
 	struct mmc_data		data;
 };
 
+enum mmc_blk_status {
+	MMC_BLK_SUCCESS = 0,
+	MMC_BLK_PARTIAL,
+	MMC_BLK_CMD_ERR,
+	MMC_BLK_RETRY,
+	MMC_BLK_ABORT,
+	MMC_BLK_DATA_ERR,
+	MMC_BLK_ECC_ERR,
+	MMC_BLK_NOMEDIUM,
+};
+
 enum mmc_packed_cmd {
 	MMC_PACKED_NONE = 0,
 	MMC_PACKED_WRITE,
@@ -48,6 +59,8 @@
 	bool			wr_packing_enabled;
 	int			num_of_potential_packed_wr_reqs;
 	int			num_wr_reqs_to_start_packing;
+	int (*err_check_fn) (struct mmc_card *, struct mmc_async_req *);
+	void (*packed_test_fn) (struct request_queue *, struct mmc_queue_req *);
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -61,4 +74,6 @@
 extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
 extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
+extern void print_mmc_packing_stats(struct mmc_card *card);
+
 #endif
diff --git a/drivers/platform/msm/qpnp-pwm.c b/drivers/platform/msm/qpnp-pwm.c
index 8809abe..ebe3ad06 100644
--- a/drivers/platform/msm/qpnp-pwm.c
+++ b/drivers/platform/msm/qpnp-pwm.c
@@ -439,7 +439,7 @@
 	int			i, pwm_size, rc = 0;
 	int			burst_size = SPMI_MAX_BUF_LEN;
 	int			list_len = lut->list_len << 1;
-	int			offset = lut->lo_index << 2;
+	int			offset = lut->lo_index << 1;
 
 	pwm_size = QPNP_GET_PWM_SIZE(
 			chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) &
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index a9b0dbb..23903df 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -640,6 +640,26 @@
 		raw->last_good_ocv_raw -= MBG_TRANSIENT_ERROR_RAW;
 }
 
+#define SEL_ALT_OREG_BIT  BIT(2)
+static int ocv_ir_compensation(struct pm8921_bms_chip *chip, int ocv)
+{
+	int compensated_ocv;
+	int ibatt_ua;
+	int rbatt_mohm = chip->default_rbatt_mohm + chip->rconn_mohm;
+
+	pm_bms_masked_write(chip, BMS_TEST1,
+			SEL_ALT_OREG_BIT, SEL_ALT_OREG_BIT);
+
+	/* since the SEL_ALT_OREG_BIT is set this will give us VSENSE_OCV */
+	pm8921_bms_get_battery_current(&ibatt_ua);
+	compensated_ocv = ocv + div_s64((s64)ibatt_ua * rbatt_mohm, 1000);
+	pr_debug("comp ocv = %d, ocv = %d, ibatt_ua = %d, rbatt_mohm = %d\n",
+			compensated_ocv, ocv, ibatt_ua, rbatt_mohm);
+
+	pm_bms_masked_write(chip, BMS_TEST1, SEL_ALT_OREG_BIT, 0);
+	return compensated_ocv;
+}
+
 static int read_soc_params_raw(struct pm8921_bms_chip *chip,
 				struct pm8921_soc_params *raw)
 {
@@ -662,6 +682,8 @@
 		adjust_pon_ocv_raw(chip, raw);
 		convert_vbatt_raw_to_uv(chip, usb_chg,
 			raw->last_good_ocv_raw, &raw->last_good_ocv_uv);
+		raw->last_good_ocv_uv = ocv_ir_compensation(chip,
+						raw->last_good_ocv_uv);
 		chip->last_ocv_uv = raw->last_good_ocv_uv;
 		pr_debug("PON_OCV_UV = %d\n", chip->last_ocv_uv);
 	} else if (chip->prev_last_good_ocv_raw != raw->last_good_ocv_raw) {
@@ -1012,7 +1034,7 @@
 	 * samples with the the shutdown_iavg_ua
 	 */
 	if (firsttime && chip->shutdown_iavg_ua != 0) {
-		pr_emerg("Using shutdown_iavg_ua = %d in all samples\n",
+		pr_debug("Using shutdown_iavg_ua = %d in all samples\n",
 				chip->shutdown_iavg_ua);
 		for (i = 0; i < IAVG_SAMPLES; i++)
 			iavg_samples[i] = chip->shutdown_iavg_ua;
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 0ace679..0b0ebe7 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -2574,6 +2574,7 @@
 	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
 	int retval = 0;
 	unsigned long flags;
+	unsigned mult = 0;
 
 	trace("ep = %p, desc = %p", ep, desc);
 
@@ -2599,13 +2600,15 @@
 
 	mEp->qh.ptr->cap = 0;
 
-	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
 		mEp->qh.ptr->cap |=  QH_IOS;
-	else if (mEp->type == USB_ENDPOINT_XFER_ISOC) {
+	} else if (mEp->type == USB_ENDPOINT_XFER_ISOC) {
 		mEp->qh.ptr->cap &= ~QH_MULT;
-		mEp->qh.ptr->cap |= BIT(30);
-	} else
+		mult = ((mEp->ep.maxpacket >> QH_MULT_SHIFT) + 1) & 0x03;
+		mEp->qh.ptr->cap |= (mult << ffs_nr(QH_MULT));
+	} else {
 		mEp->qh.ptr->cap |= QH_ZLT;
+	}
 
 	mEp->qh.ptr->cap |=
 		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 3162e15..d67ad7c 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -67,6 +67,7 @@
 #define QH_MAX_PKT            (0x07FFUL << 16)
 #define QH_ZLT                BIT(29)
 #define QH_MULT               (0x0003UL << 30)
+#define QH_MULT_SHIFT         11
 	/* 1 */
 	u32 curr;
 	/* 2 - 8 */
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
index 108caf9..0353848 100644
--- a/drivers/usb/gadget/f_accessory.c
+++ b/drivers/usb/gadget/f_accessory.c
@@ -40,7 +40,7 @@
 #define BULK_BUFFER_SIZE    16384
 #define ACC_STRING_SIZE     256
 
-#define PROTOCOL_VERSION    1
+#define PROTOCOL_VERSION    2
 
 /* String IDs */
 #define INTERFACE_STRING_INDEX	0
@@ -78,6 +78,8 @@
 	/* set to 1 if we have a pending start request */
 	int start_requested;
 
+	int audio_mode;
+
 	/* synchronize access to our device file */
 	atomic_t open_excl;
 
@@ -510,6 +512,8 @@
 		break;
 	case ACCESSORY_IS_START_REQUESTED:
 		return dev->start_requested;
+	case ACCESSORY_GET_AUDIO_MODE:
+		return dev->audio_mode;
 	}
 	if (!src)
 		return -EINVAL;
@@ -586,6 +590,10 @@
 			cdev->gadget->ep0->driver_data = dev;
 			cdev->req->complete = acc_complete_set_string;
 			value = w_length;
+		} else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+				w_index == 0 && w_length == 0) {
+			dev->audio_mode = w_value;
+			value = 0;
 		}
 	} else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
 		if (b_request == ACCESSORY_GET_PROTOCOL) {
@@ -600,6 +608,7 @@
 			memset(dev->uri, 0, sizeof(dev->uri));
 			memset(dev->serial, 0, sizeof(dev->serial));
 			dev->start_requested = 0;
+			dev->audio_mode = 0;
 		}
 	}
 
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
index 96790c5..ccbc330 100644
--- a/drivers/usb/gadget/f_mtp.c
+++ b/drivers/usb/gadget/f_mtp.c
@@ -463,6 +463,10 @@
 	if (count > MTP_BULK_BUFFER_SIZE)
 		return -EINVAL;
 
+	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
+		DBG(cdev, "%s - count(%d) not multiple of mtu(%d)\n", __func__,
+						count, dev->ep_out->maxpacket);
+
 	/* we will block until we're online */
 	DBG(cdev, "mtp_read: waiting for online state\n");
 	ret = wait_event_interruptible(dev->read_wq,
@@ -484,7 +488,7 @@
 requeue_req:
 	/* queue a request */
 	req = dev->rx_req[0];
-	req->length = count;
+	req->length = MTP_BULK_BUFFER_SIZE;
 	dev->rx_done = 0;
 	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
 	if (ret < 0) {
@@ -751,6 +755,9 @@
 	count = dev->xfer_file_length;
 
 	DBG(cdev, "receive_file_work(%lld)\n", count);
+	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
+		DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
+						count, dev->ep_out->maxpacket);
 
 	while (count > 0 || write_req) {
 		if (count > 0) {
@@ -758,8 +765,9 @@
 			read_req = dev->rx_req[cur_buf];
 			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
 
-			read_req->length = (count > MTP_BULK_BUFFER_SIZE
-					? MTP_BULK_BUFFER_SIZE : count);
+			/* some h/w expects size to be aligned to ep's MTU */
+			read_req->length = MTP_BULK_BUFFER_SIZE;
+
 			dev->rx_done = 0;
 			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
 			if (ret < 0) {
@@ -795,6 +803,10 @@
 					usb_ep_dequeue(dev->ep_out, read_req);
 				break;
 			}
+			/* Check if we aligned the size due to MTU constraint */
+			if (count < read_req->length)
+				read_req->actual = (read_req->actual > count ?
+						count : read_req->actual);
 			/* if xfer_file_length is 0xFFFFFFFF, then we read until
 			 * we get a zero length packet
 			 */
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
index 3f4e428..b408bfd 100644
--- a/drivers/usb/gadget/msm72k_udc.c
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -516,6 +516,15 @@
 {
 	struct usb_info *ui = ept->ui;
 	unsigned cfg = CONFIG_MAX_PKT(ept->ep.maxpacket) | CONFIG_ZLT;
+	const struct usb_endpoint_descriptor *desc = ept->ep.desc;
+	unsigned mult = 0;
+
+	if (desc && ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+				== USB_ENDPOINT_XFER_ISOC)) {
+		cfg &= ~(CONFIG_MULT);
+		mult = ((ept->ep.maxpacket >> CONFIG_MULT_SHIFT) + 1) & 0x03;
+		cfg |= (mult << (ffs(CONFIG_MULT) - 1));
+	}
 
 	/* ep0 out needs interrupt-on-setup */
 	if (ept->bit == 0)
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index f8ccee9..946a9d7 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -2956,6 +2956,7 @@
 				mdp_clk_ctrl(0);
 				return -ENODEV;
 			}
+			mdp4_wfd_init(0);
 			pdata->on = mdp4_overlay_writeback_on;
 			pdata->off = mdp4_overlay_writeback_off;
 			mfd->dma_fnc = mdp4_writeback_overlay;
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index 67a0429..a73b3ad 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -393,6 +393,7 @@
 	ulong intr_dsi_err;
 	ulong kickoff_ov0;
 	ulong kickoff_ov1;
+	ulong kickoff_ov2;
 	ulong kickoff_dmap;
 	ulong kickoff_dmae;
 	ulong kickoff_dmas;
@@ -920,7 +921,7 @@
 int mdp4_overlay_writeback_on(struct platform_device *pdev);
 int mdp4_overlay_writeback_off(struct platform_device *pdev);
 void mdp4_writeback_overlay(struct msm_fb_data_type *mfd);
-void mdp4_overlay1_done_writeback(struct mdp_dma_data *dma);
+void mdp4_overlay2_done_wfd(struct mdp_dma_data *dma);
 
 int mdp4_writeback_start(struct fb_info *info);
 int mdp4_writeback_stop(struct fb_info *info);
@@ -970,19 +971,17 @@
 u32 mdp4_get_mixer_num(u32 panel_type);
 
 #ifndef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
-static inline void mdp4_writeback_dma_busy_wait(struct msm_fb_data_type *mfd)
+static inline void mdp4_wfd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
 {
 	/* empty */
 }
-static inline void mdp4_writeback_kickoff_video(struct msm_fb_data_type *mfd,
-		struct mdp4_overlay_pipe *pipe)
+static inline void mdp4_wfd_init(int cndx)
 {
 	/* empty */
 }
 #else
-void mdp4_writeback_dma_busy_wait(struct msm_fb_data_type *mfd);
-void mdp4_writeback_kickoff_video(struct msm_fb_data_type *mfd,
-		struct mdp4_overlay_pipe *pipe);
+void mdp4_wfd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_wfd_init(int cndx);
 #endif
 
 #endif /* MDP_H */
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 6faf3e7..ab27267 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -3463,7 +3463,6 @@
 
 	mdp4_overlay_mdp_perf_req(mfd, ctrl->plist);
 
-
 	if (pipe->mixer_num == MDP4_MIXER0) {
 		if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
 			/* cndx = 0 */
@@ -3483,23 +3482,9 @@
 		if (ctrl->panel_mode & MDP4_PANEL_DTV)
 			mdp4_dtv_pipe_queue(0, pipe);/* cndx = 0 */
 	} else if (pipe->mixer_num == MDP4_MIXER2) {
-
-		if (pipe->pipe_type == OVERLAY_TYPE_VIDEO)
-			mdp4_overlay_vg_setup(pipe);  /* video/graphic pipe */
-		else
-			mdp4_overlay_rgb_setup(pipe);	/* rgb pipe */
-
-		mdp4_mixer_stage_up(pipe, 0);
-
 		ctrl->mixer2_played++;
-		if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK) {
-			mdp4_writeback_dma_busy_wait(mfd);
-			mdp4_writeback_kickoff_video(mfd, pipe);
-		}
-
-		if (!(pipe->flags & MDP_OV_PLAY_NOWAIT))
-			mdp4_iommu_unmap(pipe);
-		mdp4_stat.overlay_play[pipe->mixer_num]++;
+		if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK)
+			mdp4_wfd_pipe_queue(0, pipe);/* cndx = 0 */
 	}
 
 	mutex_unlock(&mfd->dma->ov_mutex);
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index ee7e9ce..18c6635 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -45,9 +45,49 @@
 	WITH_CLIENT
 };
 
-static struct mdp4_overlay_pipe *writeback_pipe;
-static struct msm_fb_data_type *writeback_mfd;
-static int busy_wait_cnt;
+#define MAX_CONTROLLER	1
+#define VSYNC_EXPIRE_TICK 0
+
+static struct vsycn_ctrl {
+	struct device *dev;
+	int inited;
+	int update_ndx;
+	u32 ov_koff;
+	u32 ov_done;
+	atomic_t suspend;
+	struct mutex update_lock;
+	struct completion ov_comp;
+	spinlock_t spin_lock;
+	struct msm_fb_data_type *mfd;
+	struct mdp4_overlay_pipe *base_pipe;
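+	/* double buffered list of pipes queued for the next kickoff */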
+	struct vsync_update vlist[2];
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+static void vsync_irq_enable(int intr, int term)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	/* no need to clear other interrupts for command mode */
+	mdp_intr_mask |= intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_enable_irq(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+static void vsync_irq_disable(int intr, int term)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	/* no need to clear other interrupts for command mode */
+	mdp_intr_mask &= ~intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_disable_irq_nosync(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+static int mdp4_overlay_writeback_update(struct msm_fb_data_type *mfd);
 
 int mdp4_overlay_writeback_on(struct platform_device *pdev)
 {
@@ -58,6 +98,8 @@
 	int bpp;
 	int ret;
 	uint32 data;
+	struct vsycn_ctrl *vctrl;
+	int cndx = 0;
 
 	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
@@ -67,7 +109,9 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	writeback_mfd = mfd;		  /* keep it */
+	vctrl = &vsync_ctrl_db[cndx];
+	vctrl->mfd = mfd;
+	vctrl->dev = mfd->fbi->dev;
 
 	fbi = mfd->fbi;
 
@@ -77,12 +121,14 @@
 		fbi->var.yoffset * fbi->fix.line_length;
 
 	/* MDP cmd block enable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	mdp_clk_ctrl(1);
 
-	if (writeback_pipe == NULL) {
+	if (vctrl->base_pipe == NULL) {
 		pipe = mdp4_overlay_pipe_alloc(OVERLAY_TYPE_BF, MDP4_MIXER2);
-		if (pipe == NULL)
+		if (pipe == NULL) {
 			pr_info("%s: pipe_alloc failed\n", __func__);
+			return -EIO;
+		}
 		pipe->pipe_used++;
 		pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
 		pipe->mixer_num  = MDP4_MIXER2;
@@ -92,11 +138,12 @@
 		if (ret < 0)
 			pr_info("%s: format2type failed\n", __func__);
 
-		writeback_pipe = pipe; /* keep it */
+		vctrl->base_pipe = pipe; /* keep it */
 
 	} else {
-		pipe = writeback_pipe;
+		pipe = vctrl->base_pipe;
 	}
+
 	ret = panel_next_on(pdev);
 
 	/* MDP_LAYERMIXER_WB_MUX_SEL to use mixer1 axi for mixer2 writeback */
@@ -113,46 +160,68 @@
 	MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5008,
 		(0x0 & 0xFFF));         /* 12-bit R */
 
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	mdp_clk_ctrl(0);
 	return ret;
 }
 
 int mdp4_overlay_writeback_off(struct platform_device *pdev)
 {
-	int ret;
-	struct msm_fb_data_type *mfd =
-			(struct msm_fb_data_type *)platform_get_drvdata(pdev);
-	if (mfd && writeback_pipe) {
-		mdp4_writeback_dma_busy_wait(mfd);
-		mdp4_overlay_pipe_free(writeback_pipe);
-		mdp4_overlay_panel_mode_unset(writeback_pipe->mixer_num,
-						MDP4_PANEL_WRITEBACK);
-		writeback_pipe = NULL;
+	int cndx = 0;
+	struct msm_fb_data_type *mfd;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	int ret = 0;
+
+	pr_debug("%s+:\n", __func__);
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+	if (pipe == NULL) {
+		pr_err("%s: NO base pipe\n", __func__);
+		return ret;
 	}
+
+	/* sanity check, free pipes besides base layer */
+	mdp4_overlay_unset_mixer(pipe->mixer_num);
+	mdp4_mixer_stage_down(pipe, 1);
+	mdp4_overlay_pipe_free(pipe);
+	vctrl->base_pipe = NULL;
+
 	ret = panel_next_off(pdev);
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	mdp_clk_ctrl(1);
 	/* MDP_LAYERMIXER_WB_MUX_SEL to restore to default cfg*/
 	outpdw(MDP_BASE + 0x100F4, 0x0);
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	mdp_clk_ctrl(0);
+	pr_debug("%s-:\n", __func__);
 	return ret;
 }
-int mdp4_overlay_writeback_update(struct msm_fb_data_type *mfd)
+
+static int mdp4_overlay_writeback_update(struct msm_fb_data_type *mfd)
 {
 	struct fb_info *fbi;
 	uint8 *buf;
 	unsigned int buf_offset;
 	struct mdp4_overlay_pipe *pipe;
 	int bpp;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
 
 	if (mfd->key != MFD_KEY)
 		return -ENODEV;
 
-	if (!writeback_pipe)
-		return -EINVAL;
 
 	fbi = mfd->fbi;
 
-	pipe = writeback_pipe;
+	vctrl = &vsync_ctrl_db[cndx];
+
+	pipe = vctrl->base_pipe;
+	if (!pipe) {
+		pr_err("%s: no base layer pipe\n", __func__);
+		return -EINVAL;
+	}
 
 	bpp = fbi->var.bits_per_pixel / 8;
 	buf = (uint8 *) fbi->fix.smem_start;
@@ -160,7 +229,7 @@
 		fbi->var.yoffset * fbi->fix.line_length;
 
 	/* MDP cmd block enable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	mdp_clk_ctrl(1);
 
 	pipe->src_height = fbi->var.yres;
 	pipe->src_width = fbi->var.xres;
@@ -184,142 +253,190 @@
 	mdp4_mixer_stage_up(pipe, 0);
 
 	mdp4_overlayproc_cfg(pipe);
-	mdp4_mixer_stage_commit(pipe->mixer_num);
 	/* MDP cmd block disable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	mdp_clk_ctrl(0);
 
 	wmb();
 	return 0;
 }
-void mdp4_writeback_dma_busy_wait(struct msm_fb_data_type *mfd)
+
+/*
+ * mdp4_wfd_pipe_queue:
+ * called from thread context
+ */
+void mdp4_wfd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
 {
-	unsigned long flag;
-	int need_wait = 0;
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pp;
+	int undx;
 
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (mfd->dma->busy == TRUE) {
-		if (busy_wait_cnt == 0)
-			INIT_COMPLETION(mfd->dma->comp);
-		busy_wait_cnt = 1;
-		need_wait++;
-	}
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
-	if (need_wait) {
-		/* wait until DMA finishes the current job */
-		pr_debug("%s: pending pid=%d\n",
-				__func__, current->pid);
-		wait_for_completion(&mfd->dma->comp);
-	}
-}
-
-void mdp4_overlay1_done_writeback(struct mdp_dma_data *dma)
-{
-	spin_lock(&mdp_spin_lock);
-	dma->busy = FALSE;
-	if (busy_wait_cnt)
-		busy_wait_cnt = 0;
-	mdp_disable_irq_nosync(MDP_OVERLAY2_TERM);
-	spin_unlock(&mdp_spin_lock);
-	complete_all(&dma->comp);
-	pr_debug("%s ovdone interrupt\n", __func__);
-
-}
-void mdp4_writeback_overlay_kickoff(struct msm_fb_data_type *mfd,
-				    struct mdp4_overlay_pipe *pipe)
-{
-	unsigned long flag;
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	mdp_enable_irq(MDP_OVERLAY2_TERM);
-
-	mfd->dma->busy = TRUE;
-	outp32(MDP_INTR_CLEAR, INTR_OVERLAY2_DONE);
-	mdp_intr_mask |= INTR_OVERLAY2_DONE;
-	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-
-	wmb();	/* make sure all registers updated */
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-	/* start OVERLAY pipe */
-	mdp_pipe_kickoff(MDP_OVERLAY2_TERM, mfd);
-	wmb();
-	pr_debug("%s: before ov done interrupt\n", __func__);
-}
-void mdp4_writeback_dma_stop(struct msm_fb_data_type *mfd)
-{
-	/* mutex holded by caller */
-	if (mfd && writeback_pipe) {
-		mdp4_writeback_dma_busy_wait(mfd);
-		mdp4_overlay_writeback_update(mfd);
-
-		mdp4_writeback_overlay_kickoff(mfd, writeback_pipe);
-	}
-}
-
-void mdp4_writeback_kickoff_video(struct msm_fb_data_type *mfd,
-		struct mdp4_overlay_pipe *pipe)
-{
-	struct msmfb_writeback_data_list *node = NULL;
-	mutex_lock(&mfd->unregister_mutex);
-	mutex_lock(&mfd->writeback_mutex);
-	if (!list_empty(&mfd->writeback_free_queue)
-		&& mfd->writeback_state != WB_STOPING
-		&& mfd->writeback_state != WB_STOP) {
-		node = list_first_entry(&mfd->writeback_free_queue,
-				struct msmfb_writeback_data_list, active_entry);
-	}
-	if (node) {
-		list_del(&(node->active_entry));
-		node->state = IN_BUSY_QUEUE;
-		mfd->writeback_active_cnt++;
-	}
-	mutex_unlock(&mfd->writeback_mutex);
-
-	writeback_pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
-
-	/* free previous iommu at freelist back to pool */
-	mdp4_overlay_iommu_unmap_freelist(writeback_pipe->mixer_num);
-
-	if (!writeback_pipe->ov_blt_addr) {
-		pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
-			(unsigned int)writeback_pipe->ov_blt_addr, node);
-		mutex_unlock(&mfd->unregister_mutex);
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
 		return;
 	}
 
-	if (writeback_pipe->blt_cnt == 0)
-		mdp4_overlay_writeback_update(mfd);
+	vctrl = &vsync_ctrl_db[cndx];
 
-	pr_debug("%s: pid=%d\n", __func__, current->pid);
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
 
-	mdp4_mixer_stage_commit(pipe->mixer_num);
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
 
-	mdp4_writeback_overlay_kickoff(mfd, pipe);
-	mdp4_writeback_dma_busy_wait(mfd);
+	pp = &vp->plist[pipe->pipe_ndx - 1];	/* ndx starts from 1 */
 
-	/* move current committed iommu to freelist */
-	mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+	pr_debug("%s: vndx=%d pipe_ndx=%d pid=%d\n", __func__,
+		undx, pipe->pipe_ndx, current->pid);
 
-	mutex_lock(&mfd->writeback_mutex);
-	list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
-	mutex_unlock(&mfd->writeback_mutex);
-	mfd->writeback_active_cnt--;
-	mutex_unlock(&mfd->unregister_mutex);
-	wake_up(&mfd->wait_q);
+	*pp = *pipe;	/* clone it */
+	vp->update_cnt++;
+
+	mutex_unlock(&vctrl->update_lock);
+	mdp4_stat.overlay_play[pipe->mixer_num]++;
 }
 
-void mdp4_writeback_kickoff_ui(struct msm_fb_data_type *mfd,
-		struct mdp4_overlay_pipe *pipe)
-{
-	mdp4_mixer_stage_commit(pipe->mixer_num);
+static void mdp4_wfd_wait4ov(int cndx);
 
-	pr_debug("%s: pid=%d\n", __func__, current->pid);
-	mdp4_writeback_overlay_kickoff(mfd, pipe);
+int mdp4_wfd_pipe_commit(void)
+{
+	int  i, undx;
+	int mixer = 0;
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pipe;
+	struct mdp4_overlay_pipe *real_pipe;
+	unsigned long flags;
+	int cnt = 0;
+
+	vctrl = &vsync_ctrl_db[0];
+
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+	pipe = vctrl->base_pipe;
+	mixer = pipe->mixer_num;
+
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return cnt;
+	}
+
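+	/* switch to the other update list so new queues go to the free slot */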
+	vctrl->update_ndx++;
+	vctrl->update_ndx &= 0x01;
+	vp->update_cnt = 0;     /* reset */
+	mutex_unlock(&vctrl->update_lock);
+
+	/* free previous committed iommu back to pool */
+	mdp4_overlay_iommu_unmap_freelist(mixer);
+
+	pipe = vp->plist;
+	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+		if (pipe->pipe_used) {
+			cnt++;
+			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
+			if (real_pipe && real_pipe->pipe_used) {
+				/* pipe not unset */
+				mdp4_overlay_vsync_commit(pipe);
+			}
+			/*
+			 * free previous iommu to freelist,
+			 * which will be freed at next
+			 * pipe_commit
+			 */
+			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+			pipe->pipe_used = 0; /* clear */
+		}
+	}
+
+	mdp4_mixer_stage_commit(mixer);
+
+	pipe = vctrl->base_pipe;
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	vctrl->ov_koff++;
+	INIT_COMPLETION(vctrl->ov_comp);
+	vsync_irq_enable(INTR_OVERLAY2_DONE, MDP_OVERLAY2_TERM);
+	pr_debug("%s: kickoff\n", __func__);
+	/* kickoff overlay engine */
+	mdp4_stat.kickoff_ov2++;
+	outpdw(MDP_BASE + 0x00D0, 0);
+	mb(); /* make sure kickoff executed */
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	mdp4_stat.overlay_commit[pipe->mixer_num]++;
+
+	return cnt;
+}
+
+void mdp4_wfd_init(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->inited)
+		return;
+
+	vctrl->inited = 1;
+	vctrl->update_ndx = 0;
+	mutex_init(&vctrl->update_lock);
+	init_completion(&vctrl->ov_comp);
+	spin_lock_init(&vctrl->spin_lock);
+}
+
+static void mdp4_wfd_wait4ov(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->ov_comp);
+}
+
+
+void mdp4_overlay2_done_wfd(struct mdp_dma_data *dma)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	int cndx = 0;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_OVERLAY2_DONE, MDP_OVERLAY2_TERM);
+	vctrl->ov_done++;
+	complete(&vctrl->ov_comp);
+
+	pr_debug("%s ovdone interrupt\n", __func__);
+	spin_unlock(&vctrl->spin_lock);
 }
 
 void mdp4_writeback_overlay(struct msm_fb_data_type *mfd)
 {
-	int ret = 0;
 	struct msmfb_writeback_data_list *node = NULL;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+
+	if (mfd && !mfd->panel_power_on)
+		return;
+
+	pr_debug("%s:+ mfd=%x\n", __func__, (int)mfd);
+
+	vctrl = &vsync_ctrl_db[0];
+	pipe = vctrl->base_pipe;
 
 	mutex_lock(&mfd->unregister_mutex);
 	mutex_lock(&mfd->writeback_mutex);
@@ -336,44 +453,36 @@
 	}
 	mutex_unlock(&mfd->writeback_mutex);
 
-	writeback_pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
+	pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
+
+	if (!pipe->ov_blt_addr) {
+		pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
+			(unsigned int)pipe->ov_blt_addr, node);
+		mutex_unlock(&mfd->unregister_mutex);
+		return;
+	}
 
 	mutex_lock(&mfd->dma->ov_mutex);
-	pr_debug("%s in writeback\n", __func__);
-	if (writeback_pipe && !writeback_pipe->ov_blt_addr) {
+	if (pipe && !pipe->ov_blt_addr) {
 		pr_err("%s: no writeback buffer 0x%x\n", __func__,
-				(unsigned int)writeback_pipe->ov_blt_addr);
-		ret = mdp4_overlay_writeback_update(mfd);
-		if (ret)
-			pr_err("%s: update failed writeback pipe NULL\n",
-					__func__);
+				(unsigned int)pipe->ov_blt_addr);
 		goto fail_no_blt_addr;
 	}
 
-	if (mfd && mfd->panel_power_on) {
-		pr_debug("%s in before busy wait\n", __func__);
-		mdp4_writeback_dma_busy_wait(mfd);
+	if (pipe->pipe_type == OVERLAY_TYPE_RGB)
+		mdp4_wfd_pipe_queue(0, pipe);
 
-		pr_debug("%s in before update\n", __func__);
-		ret = mdp4_overlay_writeback_update(mfd);
-		if (ret) {
-			pr_err("%s: update failed writeback pipe NULL\n",
-					__func__);
-			goto fail_no_blt_addr;
-		}
+	mdp4_overlay_mdp_perf_upd(mfd, 1);
 
-		pr_debug("%s: in writeback pan display 0x%x\n", __func__,
-				(unsigned int)writeback_pipe->ov_blt_addr);
-		mdp4_writeback_kickoff_ui(mfd, writeback_pipe);
-		mdp4_iommu_unmap(writeback_pipe);
+	mdp_clk_ctrl(1);
+	mdp4_overlay_writeback_update(mfd);
 
-		/* signal if pan function is waiting for the
-		 * update completion */
-		if (mfd->pan_waiting) {
-			mfd->pan_waiting = FALSE;
-			complete(&mfd->pan_comp);
-		}
-	}
+	mdp4_wfd_pipe_commit();
+
+	mdp4_overlay_mdp_perf_upd(mfd, 0);
+
+	mdp4_wfd_wait4ov(0);
+	mdp_clk_ctrl(0);
 
 	mutex_lock(&mfd->writeback_mutex);
 	list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
@@ -385,7 +494,9 @@
 	  mdp4_overlay_resource_release();*/
 	mutex_unlock(&mfd->dma->ov_mutex);
 	mutex_unlock(&mfd->unregister_mutex);
+	pr_debug("%s:-\n", __func__);
 }
+
 static int mdp4_overlay_writeback_register_buffer(
 	struct msm_fb_data_type *mfd, struct msmfb_writeback_data_list *node)
 {
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
index 82b4e80..516722e 100644
--- a/drivers/video/msm/mdp4_util.c
+++ b/drivers/video/msm/mdp4_util.c
@@ -635,14 +635,8 @@
 	if (isr & INTR_OVERLAY2_DONE) {
 		mdp4_stat.intr_overlay2++;
 		/* disable DTV interrupt */
-		dma = &dma_wb_data;
-		spin_lock(&mdp_spin_lock);
-		mdp_intr_mask &= ~INTR_OVERLAY2_DONE;
-		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-		dma->waiting = FALSE;
-		spin_unlock(&mdp_spin_lock);
 		if (panel & MDP4_PANEL_WRITEBACK)
-			mdp4_overlay1_done_writeback(dma);
+			mdp4_overlay2_done_wfd(&dma_wb_data);
 	}
 #endif
 #endif	/* OVERLAY */
diff --git a/include/linux/mfd/wcd9xxx/wcd9304_registers.h b/include/linux/mfd/wcd9xxx/wcd9304_registers.h
index f7c483c..73919e0 100644
--- a/include/linux/mfd/wcd9xxx/wcd9304_registers.h
+++ b/include/linux/mfd/wcd9xxx/wcd9304_registers.h
@@ -409,114 +409,163 @@
 #define SITAR_A_CDC_ANC1_SMLPF_CTL__POR			(0x00000000)
 #define SITAR_A_CDC_ANC1_DCFLT_CTL			(0x20B)
 #define SITAR_A_CDC_ANC1_DCFLT_CTL__POR			(0x00000000)
-#define SITAR_A_CDC_TX1_VOL_CTL_TIMER			(0x220)
-#define SITAR_A_CDC_TX1_VOL_CTL_TIMER__POR			(0x00000000)
-#define SITAR_A_CDC_TX1_VOL_CTL_GAIN			(0x221)
-#define SITAR_A_CDC_TX1_VOL_CTL_GAIN__POR			(0x00000000)
-#define SITAR_A_CDC_TX2_VOL_CTL_GAIN			(0x229)
-#define SITAR_A_CDC_TX2_VOL_CTL_GAIN__POR			   (0x00000000)
-#define SITAR_A_CDC_TX3_VOL_CTL_GAIN			(0x231)
-#define SITAR_A_CDC_TX3_VOL_CTL_GAIN__POR			   (0x00000000)
-#define SITAR_A_CDC_TX4_VOL_CTL_GAIN			(0x239)
-#define SITAR_A_CDC_TX4_VOL_CTL_GAIN__POR			   (0x00000000)
-#define SITAR_A_CDC_TX5_VOL_CTL_GAIN			(0x241)
-#define SITAR_A_CDC_TX5_VOL_CTL_GAIN__POR			   (0x00000000)
-#define SITAR_A_CDC_TX1_VOL_CTL_CFG			 (0x222)
-#define SITAR_A_CDC_TX1_VOL_CTL_CFG__POR			    (0x00000000)
-#define SITAR_A_CDC_TX2_VOL_CTL_CFG			 (0x22A)
-#define SITAR_A_CDC_TX2_VOL_CTL_CFG__POR			    (0x00000000)
-#define SITAR_A_CDC_TX3_VOL_CTL_CFG			 (0x232)
-#define SITAR_A_CDC_TX3_VOL_CTL_CFG__POR			    (0x00000000)
-#define SITAR_A_CDC_TX4_VOL_CTL_CFG			 (0x23A)
-#define SITAR_A_CDC_TX4_VOL_CTL_CFG__POR			    (0x00000000)
+#define SITAR_A_CDC_ANC2_CTL			(0x280)
+#define SITAR_A_CDC_ANC2_CTL__POR			(0x00000000)
+#define SITAR_A_CDC_ANC2_SHIFT			(0x281)
+#define SITAR_A_CDC_ANC2_SHIFT__POR			(0x00000000)
+#define SITAR_A_CDC_ANC2_IIR_B1_CTL		(0x282)
+#define SITAR_A_CDC_ANC2_IIR_B1_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_ANC2_IIR_B2_CTL		(0x283)
+#define SITAR_A_CDC_ANC2_IIR_B2_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_ANC2_IIR_B3_CTL		(0x284)
+#define SITAR_A_CDC_ANC2_IIR_B3_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_ANC2_IIR_B4_CTL		(0x285)
+#define SITAR_A_CDC_ANC2_IIR_B4_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_ANC2_LPF_B1_CTL		(0x286)
+#define SITAR_A_CDC_ANC2_LPF_B1_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_ANC2_LPF_B2_CTL		(0x287)
+#define SITAR_A_CDC_ANC2_LPF_B2_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_ANC2_LPF_B3_CTL		(0x288)
+#define SITAR_A_CDC_ANC2_LPF_B3_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_ANC2_SPARE			(0x289)
+#define SITAR_A_CDC_ANC2_SPARE__POR			(0x00000000)
+#define SITAR_A_CDC_ANC2_SMLPF_CTL		(0x28A)
+#define SITAR_A_CDC_ANC2_SMLPF_CTL__POR			(0x00000000)
+#define SITAR_A_CDC_ANC2_DCFLT_CTL		(0x28B)
+#define SITAR_A_CDC_ANC2_DCFLT_CTL__POR			(0x00000000)
 
+#define SITAR_A_CDC_TX1_VOL_CTL_TIMER		(0x220)
+#define SITAR_A_CDC_TX1_VOL_CTL_TIMER__POR		(0x00000000)
+#define SITAR_A_CDC_TX1_VOL_CTL_GAIN		(0x221)
+#define SITAR_A_CDC_TX1_VOL_CTL_GAIN__POR		(0x00000000)
+#define SITAR_A_CDC_TX1_VOL_CTL_CFG		(0x222)
+#define SITAR_A_CDC_TX1_VOL_CTL_CFG__POR		(0x00000000)
 #define SITAR_A_CDC_TX1_MUX_CTL			(0x223)
 #define SITAR_A_CDC_TX1_MUX_CTL__POR			(0x00000008)
-#define SITAR_A_CDC_TX1_CLK_FS_CTL                      (0x00000224)
-#define SITAR_A_CDC_TX1_CLK_FS_CTL__POR                 (0x00000003)
-#define SITAR_A_CDC_TX2_CLK_FS_CTL                      (0x0000022C)
-#define SITAR_A_CDC_TX2_CLK_FS_CTL__POR                 (0x00000003)
-#define SITAR_A_CDC_TX3_CLK_FS_CTL                      (0x00000234)
-#define SITAR_A_CDC_TX3_CLK_FS_CTL__POR                 (0x00000003)
-#define SITAR_A_CDC_TX4_CLK_FS_CTL                      (0x0000023C)
-#define SITAR_A_CDC_TX4_CLK_FS_CTL__POR                 (0x00000003)
-#define SITAR_A_CDC_TX1_DMIC_CTL			(0x225)
+#define SITAR_A_CDC_TX1_CLK_FS_CTL		(0x224)
+#define SITAR_A_CDC_TX1_CLK_FS_CTL__POR			(0x00000003)
+#define SITAR_A_CDC_TX1_DMIC_CTL		(0x225)
 #define SITAR_A_CDC_TX1_DMIC_CTL__POR			(0x00000000)
-#define SITAR_A_CDC_TX2_MUX_CTL                 (0x22B)
+
+#define SITAR_A_CDC_TX2_VOL_CTL_TIMER		(0x228)
+#define SITAR_A_CDC_TX2_VOL_CTL_TIMER__POR		(0x00000000)
+#define SITAR_A_CDC_TX2_VOL_CTL_GAIN		(0x229)
+#define SITAR_A_CDC_TX2_VOL_CTL_GAIN__POR		(0x00000000)
+#define SITAR_A_CDC_TX2_VOL_CTL_CFG		(0x22A)
+#define SITAR_A_CDC_TX2_VOL_CTL_CFG__POR		(0x00000000)
+#define SITAR_A_CDC_TX2_MUX_CTL			(0x22B)
 #define SITAR_A_CDC_TX2_MUX_CTL__POR			(0x00000008)
-#define SITAR_A_CDC_TX3_MUX_CTL                 (0x233)
+#define SITAR_A_CDC_TX2_CLK_FS_CTL		(0x22C)
+#define SITAR_A_CDC_TX2_CLK_FS_CTL__POR			(0x00000003)
+#define SITAR_A_CDC_TX2_DMIC_CTL		(0x22D)
+#define SITAR_A_CDC_TX2_DMIC_CTL__POR			(0x00000000)
+
+#define SITAR_A_CDC_TX3_VOL_CTL_TIMER		(0x230)
+#define SITAR_A_CDC_TX3_VOL_CTL_TIMER__POR		(0x00000000)
+#define SITAR_A_CDC_TX3_VOL_CTL_GAIN		(0x231)
+#define SITAR_A_CDC_TX3_VOL_CTL_GAIN__POR		(0x00000000)
+#define SITAR_A_CDC_TX3_VOL_CTL_CFG		(0x232)
+#define SITAR_A_CDC_TX3_VOL_CTL_CFG__POR		(0x00000000)
+#define SITAR_A_CDC_TX3_MUX_CTL			(0x233)
 #define SITAR_A_CDC_TX3_MUX_CTL__POR			(0x00000008)
-#define SITAR_A_CDC_TX4_MUX_CTL                 (0x23B)
+#define SITAR_A_CDC_TX3_CLK_FS_CTL		(0x234)
+#define SITAR_A_CDC_TX3_CLK_FS_CTL__POR			(0x00000003)
+#define SITAR_A_CDC_TX3_DMIC_CTL		(0x235)
+#define SITAR_A_CDC_TX3_DMIC_CTL__POR			(0x00000000)
+
+#define SITAR_A_CDC_TX4_VOL_CTL_TIMER		(0x239)
+#define SITAR_A_CDC_TX4_VOL_CTL_TIMER__POR		(0x00000000)
+#define SITAR_A_CDC_TX4_VOL_CTL_GAIN		(0x23A)
+#define SITAR_A_CDC_TX4_VOL_CTL_GAIN__POR		(0x00000000)
+#define SITAR_A_CDC_TX4_VOL_CTL_CFG		(0x23B)
+#define SITAR_A_CDC_TX4_VOL_CTL_CFG__POR		(0x00000000)
+#define SITAR_A_CDC_TX4_MUX_CTL			(0x23C)
 #define SITAR_A_CDC_TX4_MUX_CTL__POR			(0x00000008)
-#define SITAR_A_CDC_TX5_MUX_CTL                 (0x243)
+#define SITAR_A_CDC_TX4_CLK_FS_CTL		(0x23D)
+#define SITAR_A_CDC_TX4_CLK_FS_CTL__POR			(0x00000003)
+#define SITAR_A_CDC_TX4_DMIC_CTL		(0x23E)
+#define SITAR_A_CDC_TX4_DMIC_CTL__POR			(0x00000000)
+
+#define SITAR_A_CDC_TX5_VOL_CTL_TIMER		(0x240)
+#define SITAR_A_CDC_TX5_VOL_CTL_TIMER__POR		(0x00000000)
+#define SITAR_A_CDC_TX5_VOL_CTL_GAIN		(0x241)
+#define SITAR_A_CDC_TX5_VOL_CTL_GAIN__POR		(0x00000000)
+#define SITAR_A_CDC_TX5_VOL_CTL_CFG		(0x242)
+#define SITAR_A_CDC_TX5_VOL_CTL_CFG__POR		(0x00000000)
+#define SITAR_A_CDC_TX5_MUX_CTL			(0x243)
 #define SITAR_A_CDC_TX5_MUX_CTL__POR			(0x00000008)
+#define SITAR_A_CDC_TX5_CLK_FS_CTL		(0x244)
+#define SITAR_A_CDC_TX5_CLK_FS_CTL__POR			(0x00000003)
+#define SITAR_A_CDC_TX5_DMIC_CTL		(0x245)
+#define SITAR_A_CDC_TX5_DMIC_CTL__POR			(0x00000000)
 
 #define SITAR_A_CDC_SRC1_PDA_CFG			(0x2A0)
 #define SITAR_A_CDC_SRC1_PDA_CFG__POR			(0x00000000)
 #define SITAR_A_CDC_SRC1_FS_CTL			(0x2A1)
 #define SITAR_A_CDC_SRC1_FS_CTL__POR			(0x0000001b)
+#define SITAR_A_CDC_SRC2_PDA_CFG		(0x2A8)
+#define SITAR_A_CDC_SRC2_PDA_CFG__POR			(0x00000000)
+#define SITAR_A_CDC_SRC2_FS_CTL			(0x2A9)
+#define SITAR_A_CDC_SRC2_FS_CTL__POR			(0x0000001b)
 
-#define SITAR_A_CDC_RX1_B1_CTL                  (0x000002B0)
+#define SITAR_A_CDC_RX1_B1_CTL                  (0x2B0)
 #define SITAR_A_CDC_RX1_B1_CTL__POR			 (0x00000000)
-#define SITAR_A_CDC_RX2_B1_CTL                  (0x000002B8)
-#define SITAR_A_CDC_RX2_B1_CTL__POR			 (0x00000000)
-#define SITAR_A_CDC_RX3_B1_CTL                  (0x000002C0)
-#define SITAR_A_CDC_RX3_B1_CTL__POR			 (0x00000000)
-
-#define SITAR_A_CDC_RX1_B2_CTL                  (0x000002B1)
+#define SITAR_A_CDC_RX1_B2_CTL                  (0x2B1)
 #define SITAR_A_CDC_RX1_B2_CTL__POR			 (0x00000000)
-#define SITAR_A_CDC_RX2_B2_CTL                  (0x000002B9)
-#define SITAR_A_CDC_RX2_B2_CTL__POR			 (0x00000000)
-#define SITAR_A_CDC_RX3_B2_CTL                  (0x000002C1)
-#define SITAR_A_CDC_RX3_B2_CTL__POR			 (0x00000000)
-
-#define SITAR_A_CDC_RX1_B3_CTL                  (0x000002B2)
+#define SITAR_A_CDC_RX1_B3_CTL                  (0x2B2)
 #define SITAR_A_CDC_RX1_B3_CTL__POR			 (0x00000000)
-#define SITAR_A_CDC_RX2_B3_CTL                  (0x000002BA)
-#define SITAR_A_CDC_RX2_B3_CTL__POR			 (0x00000000)
-#define SITAR_A_CDC_RX3_B3_CTL                  (0x000002C2)
-#define SITAR_A_CDC_RX3_B3_CTL__POR			 (0x00000000)
-
-#define SITAR_A_CDC_RX1_B4_CTL                  (0x000002B3)
+#define SITAR_A_CDC_RX1_B4_CTL                  (0x2B3)
 #define SITAR_A_CDC_RX1_B4_CTL__POR			 (0x00000000)
-#define SITAR_A_CDC_RX2_B4_CTL                  (0x000002BB)
-#define SITAR_A_CDC_RX2_B4_CTL__POR			 (0x00000000)
-#define SITAR_A_CDC_RX3_B4_CTL                  (0x000002C3)
-#define SITAR_A_CDC_RX3_B4_CTL__POR			 (0x00000000)
-
-#define SITAR_A_CDC_RX1_B5_CTL                  (0x000002B4)
+#define SITAR_A_CDC_RX1_B5_CTL                  (0x2B4)
 #define SITAR_A_CDC_RX1_B5_CTL__POR			 (0x00000078)
-#define SITAR_A_CDC_RX2_B5_CTL                  (0x000002BC)
-#define SITAR_A_CDC_RX2_B5_CTL__POR			 (0x00000078)
-#define SITAR_A_CDC_RX3_B5_CTL                  (0x000002C4)
-#define SITAR_A_CDC_RX3_B5_CTL__POR			 (0x00000078)
-
-#define SITAR_A_CDC_RX1_B6_CTL                  (0x000002B5)
+#define SITAR_A_CDC_RX1_B6_CTL                  (0x2B5)
 #define SITAR_A_CDC_RX1_B6_CTL__POR			 (0x00000080)
-#define SITAR_A_CDC_RX2_B6_CTL                  (0x000002BD)
+#define SITAR_A_CDC_RX1_VOL_CTL_B1_CTL		(0x2B6)
+#define SITAR_A_CDC_RX1_VOL_CTL_B1_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_RX1_VOL_CTL_B2_CTL		(0x2B7)
+#define SITAR_A_CDC_RX1_VOL_CTL_B2_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_RX2_B1_CTL                  (0x2B8)
+#define SITAR_A_CDC_RX2_B1_CTL__POR			 (0x00000000)
+#define SITAR_A_CDC_RX2_B2_CTL                  (0x2B9)
+#define SITAR_A_CDC_RX2_B2_CTL__POR			 (0x00000000)
+#define SITAR_A_CDC_RX2_B3_CTL                  (0x2BA)
+#define SITAR_A_CDC_RX2_B3_CTL__POR			 (0x00000000)
+#define SITAR_A_CDC_RX2_B4_CTL                  (0x2BB)
+#define SITAR_A_CDC_RX2_B4_CTL__POR			 (0x00000000)
+#define SITAR_A_CDC_RX2_B5_CTL                  (0x2BC)
+#define SITAR_A_CDC_RX2_B5_CTL__POR			 (0x00000078)
+#define SITAR_A_CDC_RX2_B6_CTL                  (0x2BD)
 #define SITAR_A_CDC_RX2_B6_CTL__POR			 (0x00000080)
-#define SITAR_A_CDC_RX3_B6_CTL                  (0x000002C5)
+#define SITAR_A_CDC_RX2_VOL_CTL_B1_CTL		(0x2BE)
+#define SITAR_A_CDC_RX2_VOL_CTL_B1_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_RX2_VOL_CTL_B2_CTL		(0x2BF)
+#define SITAR_A_CDC_RX2_VOL_CTL_B2_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_RX3_B1_CTL                  (0x2C0)
+#define SITAR_A_CDC_RX3_B1_CTL__POR			 (0x00000000)
+#define SITAR_A_CDC_RX3_B2_CTL                  (0x2C1)
+#define SITAR_A_CDC_RX3_B2_CTL__POR			 (0x00000000)
+#define SITAR_A_CDC_RX3_B3_CTL                  (0x2C2)
+#define SITAR_A_CDC_RX3_B3_CTL__POR			 (0x00000000)
+#define SITAR_A_CDC_RX3_B4_CTL                  (0x2C3)
+#define SITAR_A_CDC_RX3_B4_CTL__POR			 (0x00000000)
+#define SITAR_A_CDC_RX3_B5_CTL                  (0x2C4)
+#define SITAR_A_CDC_RX3_B5_CTL__POR			 (0x00000078)
+#define SITAR_A_CDC_RX3_B6_CTL                  (0x2C5)
 #define SITAR_A_CDC_RX3_B6_CTL__POR			 (0x00000080)
+#define SITAR_A_CDC_RX3_VOL_CTL_B1_CTL		(0x2C6)
+#define SITAR_A_CDC_RX3_VOL_CTL_B1_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_RX3_VOL_CTL_B2_CTL		(0x2C7)
+#define SITAR_A_CDC_RX3_VOL_CTL_B2_CTL__POR		(0x00000000)
 
-
-#define SITAR_A_CDC_RX1_VOL_CTL_B1_CTL			(0x2B6)
-#define SITAR_A_CDC_RX1_VOL_CTL_B1_CTL__POR			(0x00000000)
-#define SITAR_A_CDC_RX1_VOL_CTL_B2_CTL			(0x2B7)
-#define SITAR_A_CDC_RX1_VOL_CTL_B2_CTL__POR			(0x00000000)
-#define SITAR_A_CDC_RX2_VOL_CTL_B2_CTL                  (0x2BF)
-#define SITAR_A_CDC_RX2_VOL_CTL_B2_CTL__POR                     (0x00000000)
-#define SITAR_A_CDC_RX3_VOL_CTL_B2_CTL			(0x2C7)
-#define SITAR_A_CDC_RX3_VOL_CTL_B2_CTL__POR                     (0x00000000)
-
-#define SITAR_A_CDC_CLK_ANC_RESET_CTL			(0x300)
-#define SITAR_A_CDC_CLK_ANC_RESET_CTL__POR			(0x00000000)
-#define SITAR_A_CDC_CLK_RX_RESET_CTL			(0x301)
-#define SITAR_A_CDC_CLK_RX_RESET_CTL__POR			(0x00000000)
-#define SITAR_A_CDC_CLK_TX_RESET_B1_CTL			(0x302)
-#define SITAR_A_CDC_CLK_TX_RESET_B1_CTL__POR			(0x00000000)
-#define SITAR_A_CDC_CLK_TX_RESET_B2_CTL			(0x303)
-#define SITAR_A_CDC_CLK_TX_RESET_B2_CTL__POR			(0x00000000)
-#define SITAR_A_CDC_CLK_DMIC_CTL			(0x304)
+#define SITAR_A_CDC_CLK_ANC_RESET_CTL		(0x300)
+#define SITAR_A_CDC_CLK_ANC_RESET_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_CLK_RX_RESET_CTL		(0x301)
+#define SITAR_A_CDC_CLK_RX_RESET_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_CLK_TX_RESET_B1_CTL		(0x302)
+#define SITAR_A_CDC_CLK_TX_RESET_B1_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_CLK_TX_RESET_B2_CTL		(0x303)
+#define SITAR_A_CDC_CLK_TX_RESET_B2_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_CLK_DMIC_CTL		(0x304)
 #define SITAR_A_CDC_CLK_DMIC_CTL__POR			(0x00000000)
 #define SITAR_A_CDC_CLK_RX_I2S_CTL			(0x305)
 #define SITAR_A_CDC_CLK_RX_I2S_CTL__POR			(0x00000003)
@@ -654,7 +703,23 @@
 #define SITAR_A_CDC_COMP1_SHUT_DOWN_STATUS__POR		(0x00000003)
 #define SITAR_A_CDC_COMP1_FS_CFG			(0x377)
 #define SITAR_A_CDC_COMP1_FS_CFG__POR			(0x0000001b)
-#define SITAR_A_CDC_CONN_RX1_B1_CTL			(0x380)
+#define SITAR_A_CDC_COMP2_B1_CTL		(0x378)
+#define SITAR_A_CDC_COMP2_B1_CTL__POR			(0x00000030)
+#define SITAR_A_CDC_COMP2_B2_CTL		(0x379)
+#define SITAR_A_CDC_COMP2_B2_CTL__POR			(0x000000b5)
+#define SITAR_A_CDC_COMP2_B3_CTL		(0x37A)
+#define SITAR_A_CDC_COMP2_B3_CTL__POR			(0x00000028)
+#define SITAR_A_CDC_COMP2_B4_CTL		(0x37B)
+#define SITAR_A_CDC_COMP2_B4_CTL__POR			(0x0000003c)
+#define SITAR_A_CDC_COMP2_B5_CTL		(0x37C)
+#define SITAR_A_CDC_COMP2_B5_CTL__POR			(0x0000001f)
+#define SITAR_A_CDC_COMP2_B6_CTL		(0x37D)
+#define SITAR_A_CDC_COMP2_B6_CTL__POR			(0x00000000)
+#define SITAR_A_CDC_COMP2_SHUT_DOWN_STATUS	(0x37E)
+#define SITAR_A_CDC_COMP2_SHUT_DOWN_STATUS__POR		(0x00000003)
+#define SITAR_A_CDC_COMP2_FS_CFG		(0x37F)
+#define SITAR_A_CDC_COMP2_FS_CFG__POR			(0x0000001b)
+#define SITAR_A_CDC_CONN_RX1_B1_CTL		(0x380)
 #define SITAR_A_CDC_CONN_RX1_B1_CTL__POR		(0x00000000)
 #define SITAR_A_CDC_CONN_RX1_B2_CTL			(0x381)
 #define SITAR_A_CDC_CONN_RX1_B2_CTL__POR		(0x00000000)
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index 62b8a73..6912087 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -46,6 +46,13 @@
 #define KGSL_MEMTYPE_MULTISAMPLE		20
 #define KGSL_MEMTYPE_KERNEL			255
 
+/*
+ * Alignment hint, passed as the power of 2 exponent.
+ * i.e. 4k (2^12) would be 12, 64k (2^16) would be 16.
+ */
+#define KGSL_MEMALIGN_MASK		0x00FF0000
+#define KGSL_MEMALIGN_SHIFT		16
+
 /* generic flag values */
 #define KGSL_FLAGS_NORMALMODE  0x00000000
 #define KGSL_FLAGS_SAFEMODE    0x00000001
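
For reference, the KGSL_MEMALIGN_* macros added above carry the alignment hint as a power-of-2 exponent in bits 16-23 of the allocation flags. The helper below is an illustrative sketch only, not part of this patch (the function name is invented); it shows how a caller might pack a 64 KB (2^16) hint using the new mask and shift.

#include <stdint.h>

#define KGSL_MEMALIGN_MASK	0x00FF0000
#define KGSL_MEMALIGN_SHIFT	16

/* Pack an alignment exponent (e.g. 16 for 64 KB) into an allocation flags word. */
static uint32_t kgsl_pack_memalign(uint32_t flags, unsigned int align_exp)
{
	flags &= ~KGSL_MEMALIGN_MASK;	/* drop any previous hint */
	flags |= (align_exp << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;
	return flags;
}

/* Example: 64 KB alignment -> exponent 16, stored as 0x00100000 in flags. */
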
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
index 5b2dcf9..ddb2fd0 100644
--- a/include/linux/usb/f_accessory.h
+++ b/include/linux/usb/f_accessory.h
@@ -36,13 +36,15 @@
 #define ACCESSORY_STRING_URI            4
 #define ACCESSORY_STRING_SERIAL         5
 
-/* Control request for retrieving device's protocol version (currently 1)
+/* Control request for retrieving device's protocol version
  *
  *	requestType:    USB_DIR_IN | USB_TYPE_VENDOR
  *	request:        ACCESSORY_GET_PROTOCOL
  *	value:          0
  *	index:          0
  *	data            version number (16 bits little endian)
+ *                     1 for original accessory support
+ *                     2 adds device to host audio support
  */
 #define ACCESSORY_GET_PROTOCOL  51
 
@@ -70,6 +72,17 @@
  */
 #define ACCESSORY_START         53
 
+/* Control request for setting the audio mode.
+ *
+ *	requestType:	USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SET_AUDIO_MODE
+ *	value:          0 - no audio
+ *                     1 - device to host, 44100 16-bit stereo PCM
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_SET_AUDIO_MODE         58
+
 /* ioctls for retrieving strings set by the host */
 #define ACCESSORY_GET_STRING_MANUFACTURER   _IOW('M', 1, char[256])
 #define ACCESSORY_GET_STRING_MODEL          _IOW('M', 2, char[256])
@@ -79,5 +92,7 @@
 #define ACCESSORY_GET_STRING_SERIAL         _IOW('M', 6, char[256])
 /* returns 1 if there is a start request pending */
 #define ACCESSORY_IS_START_REQUESTED        _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE            _IO('M', 8)
 
 #endif /* __LINUX_USB_F_ACCESSORY_H */
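
The ACCESSORY_SET_AUDIO_MODE request documented above is issued by the USB host as a vendor control transfer. The snippet below is an illustrative sketch and not part of this patch; it assumes a userspace host built on libusb-1.0 and simply follows the request layout given in the comment (OUT | vendor type, value 1 for device-to-host 44100 Hz 16-bit stereo PCM, index 0, no data stage).

#include <libusb-1.0/libusb.h>

#define ACCESSORY_SET_AUDIO_MODE	58

/* Ask an accessory-mode device to start streaming audio to the host. */
static int accessory_set_audio_mode(libusb_device_handle *handle)
{
	/* bmRequestType 0x40 = USB_DIR_OUT | USB_TYPE_VENDOR,
	 * wValue 1 selects 44100 Hz 16-bit stereo PCM, wIndex 0,
	 * no data stage, 1000 ms timeout.
	 */
	return libusb_control_transfer(handle, 0x40,
				       ACCESSORY_SET_AUDIO_MODE,
				       1, 0, NULL, 0, 1000);
}
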
diff --git a/include/net/scm.h b/include/net/scm.h
index d456f4c..0c0017c 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -71,9 +71,11 @@
 }
 
 static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
-			       struct scm_cookie *scm)
+			       struct scm_cookie *scm, bool forcecreds)
 {
 	memset(scm, 0, sizeof(*scm));
+	if (forcecreds)
+		scm_set_cred(scm, task_tgid(current), current_cred());
 	unix_get_peersec_dgram(sock, scm);
 	if (msg->msg_controllen <= 0)
 		return 0;
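
The new forcecreds argument lets a caller demand that the sending task's pid and credentials be attached to the message even when no SCM_CREDENTIALS ancillary data was supplied. A minimal caller sketch, shown only for illustration (the real call sites are the netlink and af_unix hunks below, which pass true and false respectively):

	struct scm_cookie scm;
	int err;

	/* always stamp sender credentials on this path */
	err = scm_send(sock, msg, &scm, true);
	if (err < 0)
		return err;
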
diff --git a/kernel/printk.c b/kernel/printk.c
index 4cf4670..7f51b03 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1224,13 +1224,13 @@
 	unsigned long action, void *hcpu)
 {
 	switch (action) {
-	case CPU_ONLINE:
 	case CPU_DEAD:
 	case CPU_DOWN_FAILED:
 	case CPU_UP_CANCELED:
 		console_lock();
 		console_unlock();
 		break;
+	case CPU_ONLINE:
 	case CPU_DYING:
 		/* invoked with preemption disabled, so defer */
 		if (!console_trylock())
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index faa48f7..59debb7 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1329,7 +1329,7 @@
 	if (NULL == siocb->scm)
 		siocb->scm = &scm;
 
-	err = scm_send(sock, msg, siocb->scm);
+	err = scm_send(sock, msg, siocb->scm, true);
 	if (err < 0)
 		return err;
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d510353..109e30b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1446,7 +1446,7 @@
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
 	wait_for_unix_gc();
-	err = scm_send(sock, msg, siocb->scm);
+	err = scm_send(sock, msg, siocb->scm, false);
 	if (err < 0)
 		return err;
 
@@ -1607,7 +1607,7 @@
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
 	wait_for_unix_gc();
-	err = scm_send(sock, msg, siocb->scm);
+	err = scm_send(sock, msg, siocb->scm, false);
 	if (err < 0)
 		return err;
 
diff --git a/sound/soc/codecs/wcd9304-tables.c b/sound/soc/codecs/wcd9304-tables.c
index f0d76e8..ba0b6b6 100644
--- a/sound/soc/codecs/wcd9304-tables.c
+++ b/sound/soc/codecs/wcd9304-tables.c
@@ -419,6 +419,10 @@
 	[SITAR_A_QFUSE_DATA_OUT1] = 1,
 	[SITAR_A_QFUSE_DATA_OUT2] = 1,
 	[SITAR_A_QFUSE_DATA_OUT3] = 1,
+	[SITAR_A_QFUSE_DATA_OUT4] = 1,
+	[SITAR_A_QFUSE_DATA_OUT5] = 1,
+	[SITAR_A_QFUSE_DATA_OUT6] = 1,
+	[SITAR_A_QFUSE_DATA_OUT7] = 1,
 	[SITAR_A_CDC_CTL] = 1,
 	[SITAR_A_LEAKAGE_CTL] = 1,
 	[SITAR_A_INTR_MODE] = 1,
@@ -428,6 +432,9 @@
 	[SITAR_A_INTR_STATUS0] = 1,
 	[SITAR_A_INTR_STATUS1] = 1,
 	[SITAR_A_INTR_STATUS2] = 1,
+	[SITAR_A_INTR_CLEAR0] = 1,
+	[SITAR_A_INTR_CLEAR1] = 1,
+	[SITAR_A_INTR_CLEAR2] = 1,
 	[SITAR_A_INTR_LEVEL0] = 1,
 	[SITAR_A_INTR_LEVEL1] = 1,
 	[SITAR_A_INTR_LEVEL2] = 1,
@@ -593,24 +600,76 @@
 	[SITAR_A_CDC_ANC1_SPARE] = 1,
 	[SITAR_A_CDC_ANC1_SMLPF_CTL] = 1,
 	[SITAR_A_CDC_ANC1_DCFLT_CTL] = 1,
+	[SITAR_A_CDC_ANC2_CTL] = 1,
+	[SITAR_A_CDC_ANC2_SHIFT] = 1,
+	[SITAR_A_CDC_ANC2_IIR_B1_CTL] = 1,
+	[SITAR_A_CDC_ANC2_IIR_B2_CTL] = 1,
+	[SITAR_A_CDC_ANC2_IIR_B3_CTL] = 1,
+	[SITAR_A_CDC_ANC2_IIR_B4_CTL] = 1,
+	[SITAR_A_CDC_ANC2_LPF_B1_CTL] = 1,
+	[SITAR_A_CDC_ANC2_LPF_B2_CTL] = 1,
+	[SITAR_A_CDC_ANC2_LPF_B3_CTL] = 1,
+	[SITAR_A_CDC_ANC2_SPARE] = 1,
+	[SITAR_A_CDC_ANC2_SMLPF_CTL] = 1,
+	[SITAR_A_CDC_ANC2_DCFLT_CTL] = 1,
 	[SITAR_A_CDC_TX1_VOL_CTL_TIMER] = 1,
 	[SITAR_A_CDC_TX1_VOL_CTL_GAIN] = 1,
 	[SITAR_A_CDC_TX1_VOL_CTL_CFG] = 1,
 	[SITAR_A_CDC_TX1_MUX_CTL] = 1,
 	[SITAR_A_CDC_TX1_CLK_FS_CTL] = 1,
 	[SITAR_A_CDC_TX1_DMIC_CTL] = 1,
+	[SITAR_A_CDC_TX2_VOL_CTL_TIMER] = 1,
+	[SITAR_A_CDC_TX2_VOL_CTL_GAIN] = 1,
+	[SITAR_A_CDC_TX2_VOL_CTL_CFG] = 1,
+	[SITAR_A_CDC_TX2_MUX_CTL] = 1,
+	[SITAR_A_CDC_TX2_CLK_FS_CTL] = 1,
+	[SITAR_A_CDC_TX2_DMIC_CTL] = 1,
+	[SITAR_A_CDC_TX3_VOL_CTL_TIMER] = 1,
+	[SITAR_A_CDC_TX3_VOL_CTL_GAIN] = 1,
+	[SITAR_A_CDC_TX3_VOL_CTL_CFG] = 1,
+	[SITAR_A_CDC_TX3_MUX_CTL] = 1,
+	[SITAR_A_CDC_TX3_CLK_FS_CTL] = 1,
+	[SITAR_A_CDC_TX3_DMIC_CTL] = 1,
+	[SITAR_A_CDC_TX4_VOL_CTL_TIMER] = 1,
+	[SITAR_A_CDC_TX4_VOL_CTL_GAIN] = 1,
+	[SITAR_A_CDC_TX4_VOL_CTL_CFG] = 1,
+	[SITAR_A_CDC_TX4_MUX_CTL] = 1,
+	[SITAR_A_CDC_TX4_CLK_FS_CTL] = 1,
+	[SITAR_A_CDC_TX4_DMIC_CTL] = 1,
+	[SITAR_A_CDC_TX5_VOL_CTL_TIMER] = 1,
+	[SITAR_A_CDC_TX5_VOL_CTL_GAIN] = 1,
+	[SITAR_A_CDC_TX5_VOL_CTL_CFG] = 1,
+	[SITAR_A_CDC_TX5_MUX_CTL] = 1,
+	[SITAR_A_CDC_TX5_CLK_FS_CTL] = 1,
+	[SITAR_A_CDC_TX5_DMIC_CTL] = 1,
 	[SITAR_A_CDC_SRC1_PDA_CFG] = 1,
 	[SITAR_A_CDC_SRC1_FS_CTL] = 1,
+	[SITAR_A_CDC_SRC2_PDA_CFG] = 1,
+	[SITAR_A_CDC_SRC2_FS_CTL] = 1,
 	[SITAR_A_CDC_RX1_B1_CTL] = 1,
 	[SITAR_A_CDC_RX1_B2_CTL] = 1,
 	[SITAR_A_CDC_RX1_B3_CTL] = 1,
 	[SITAR_A_CDC_RX1_B4_CTL] = 1,
 	[SITAR_A_CDC_RX1_B5_CTL] = 1,
-	[SITAR_A_CDC_RX2_B5_CTL] = 1,
-	[SITAR_A_CDC_RX3_B5_CTL] = 1,
 	[SITAR_A_CDC_RX1_B6_CTL] = 1,
 	[SITAR_A_CDC_RX1_VOL_CTL_B1_CTL] = 1,
 	[SITAR_A_CDC_RX1_VOL_CTL_B2_CTL] = 1,
+	[SITAR_A_CDC_RX2_B1_CTL] = 1,
+	[SITAR_A_CDC_RX2_B2_CTL] = 1,
+	[SITAR_A_CDC_RX2_B3_CTL] = 1,
+	[SITAR_A_CDC_RX2_B4_CTL] = 1,
+	[SITAR_A_CDC_RX2_B5_CTL] = 1,
+	[SITAR_A_CDC_RX2_B6_CTL] = 1,
+	[SITAR_A_CDC_RX2_VOL_CTL_B1_CTL] = 1,
+	[SITAR_A_CDC_RX2_VOL_CTL_B2_CTL] = 1,
+	[SITAR_A_CDC_RX3_B1_CTL] = 1,
+	[SITAR_A_CDC_RX3_B2_CTL] = 1,
+	[SITAR_A_CDC_RX3_B3_CTL] = 1,
+	[SITAR_A_CDC_RX3_B4_CTL] = 1,
+	[SITAR_A_CDC_RX3_B5_CTL] = 1,
+	[SITAR_A_CDC_RX3_B6_CTL] = 1,
+	[SITAR_A_CDC_RX3_VOL_CTL_B1_CTL] = 1,
+	[SITAR_A_CDC_RX3_VOL_CTL_B2_CTL] = 1,
 	[SITAR_A_CDC_CLK_ANC_RESET_CTL] = 1,
 	[SITAR_A_CDC_CLK_RX_RESET_CTL] = 1,
 	[SITAR_A_CDC_CLK_TX_RESET_B1_CTL] = 1,
@@ -652,6 +711,21 @@
 	[SITAR_A_CDC_IIR1_COEF_B3_CTL] = 1,
 	[SITAR_A_CDC_IIR1_COEF_B4_CTL] = 1,
 	[SITAR_A_CDC_IIR1_COEF_B5_CTL] = 1,
+	[SITAR_A_CDC_IIR2_GAIN_B1_CTL] = 1,
+	[SITAR_A_CDC_IIR2_GAIN_B2_CTL] = 1,
+	[SITAR_A_CDC_IIR2_GAIN_B3_CTL] = 1,
+	[SITAR_A_CDC_IIR2_GAIN_B4_CTL] = 1,
+	[SITAR_A_CDC_IIR2_GAIN_B5_CTL] = 1,
+	[SITAR_A_CDC_IIR2_GAIN_B6_CTL] = 1,
+	[SITAR_A_CDC_IIR2_GAIN_B7_CTL] = 1,
+	[SITAR_A_CDC_IIR2_GAIN_B8_CTL] = 1,
+	[SITAR_A_CDC_IIR2_CTL] = 1,
+	[SITAR_A_CDC_IIR2_GAIN_TIMER_CTL] = 1,
+	[SITAR_A_CDC_IIR2_COEF_B1_CTL] = 1,
+	[SITAR_A_CDC_IIR2_COEF_B2_CTL] = 1,
+	[SITAR_A_CDC_IIR2_COEF_B3_CTL] = 1,
+	[SITAR_A_CDC_IIR2_COEF_B4_CTL] = 1,
+	[SITAR_A_CDC_IIR2_COEF_B5_CTL] = 1,
 	[SITAR_A_CDC_TOP_GAIN_UPDATE] = 1,
 	[SITAR_A_CDC_TOP_RDAC_DOUT_CTL] = 1,
 	[SITAR_A_CDC_DEBUG_B1_CTL] = 1,
@@ -669,6 +743,14 @@
 	[SITAR_A_CDC_COMP1_B6_CTL] = 1,
 	[SITAR_A_CDC_COMP1_SHUT_DOWN_STATUS] = 1,
 	[SITAR_A_CDC_COMP1_FS_CFG] = 1,
+	[SITAR_A_CDC_COMP2_B1_CTL] = 1,
+	[SITAR_A_CDC_COMP2_B2_CTL] = 1,
+	[SITAR_A_CDC_COMP2_B3_CTL] = 1,
+	[SITAR_A_CDC_COMP2_B4_CTL] = 1,
+	[SITAR_A_CDC_COMP2_B5_CTL] = 1,
+	[SITAR_A_CDC_COMP2_B6_CTL] = 1,
+	[SITAR_A_CDC_COMP2_SHUT_DOWN_STATUS] = 1,
+	[SITAR_A_CDC_COMP2_FS_CFG] = 1,
 	[SITAR_A_CDC_CONN_RX1_B1_CTL] = 1,
 	[SITAR_A_CDC_CONN_RX1_B2_CTL] = 1,
 	[SITAR_A_CDC_CONN_RX1_B3_CTL] = 1,
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index b303878..e9e950f 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -41,6 +41,7 @@
 #define ADC_DMIC_SEL_ADC	0
 #define	ADC_DMIC_SEL_DMIC	1
 
+#define NUM_AMIC 3
 #define NUM_DECIMATORS 4
 #define NUM_INTERPOLATORS 3
 #define BITS_PER_REG 8
@@ -4860,6 +4861,7 @@
 	u8 flag = pdata->amic_settings.use_pdata;
 	u8 i = 0, j = 0;
 	u8 val_txfe = 0, value = 0;
+	int amic_reg_count = 0;
 
 	if (!pdata) {
 		rc = -ENODEV;
@@ -4905,7 +4907,8 @@
 	snd_soc_update_bits(codec, SITAR_A_MICB_2_CTL, 0x10,
 		(pdata->micbias.bias2_cap_mode << 4));
 
-	for (i = 0; i < 6; j++, i += 2) {
+	amic_reg_count = (NUM_AMIC % 2) ? NUM_AMIC + 1 : NUM_AMIC;
+	for (i = 0; i < amic_reg_count; j++, i += 2) {
 		if (flag & (0x01 << i)) {
 			value = (leg_mode & (0x01 << i)) ? 0x10 : 0x00;
 			val_txfe = (txfe_bypass & (0x01 << i)) ? 0x20 : 0x00;
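
A quick worked check of the loop-bound change above, for illustration only: each iteration programs a pair of analog-mic TX registers (i advances by two), so NUM_AMIC is rounded up to the next even value.

/* NUM_AMIC = 3: (3 % 2) != 0, so amic_reg_count = 3 + 1 = 4 and the
 * loop runs for i = 0 and i = 2 (two register pairs). An even count,
 * e.g. NUM_AMIC = 4, would be used unchanged.
 */
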
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index 86a82e2..c046b63 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -106,9 +106,13 @@
 		break;
 	case OCMEM_ALLOC_GROW:
 		audio_ocmem_lcl.buf = data;
+		pr_debug("%s: Alloc grow request received buf->addr: 0x%ld\n",
+						__func__,
+						(audio_ocmem_lcl.buf)->addr);
 		atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_GROW);
 		break;
 	case OCMEM_ALLOC_SHRINK:
+		pr_debug("%s: Alloc shrink request received\n", __func__);
 		atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_SHRINK);
 		break;
 	default:
@@ -150,11 +154,14 @@
 
 	audio_ocmem_lcl.buf = buf;
 	atomic_set(&audio_ocmem_lcl.audio_exit, 0);
+	atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+	pr_debug("%s: buf->len: %ld\n", __func__, buf->len);
 	if (!buf->len) {
+		pr_debug("%s: buf.len is 0, waiting for ocmem region\n",
+								__func__);
 		wait_event_interruptible(audio_ocmem_lcl.audio_wait,
 			(atomic_read(&audio_ocmem_lcl.audio_cond) == 0)	||
 			(atomic_read(&audio_ocmem_lcl.audio_exit) == 1));
-
 		if (atomic_read(&audio_ocmem_lcl.audio_exit)) {
 			pr_err("%s: audio playback ended while waiting for ocmem\n",
 					__func__);
@@ -162,6 +169,7 @@
 			goto fail_cmd;
 		}
 	}
+	pr_debug("%s: buf->len: %ld\n", __func__, (audio_ocmem_lcl.buf)->len);
 	if (audio_ocmem_lcl.lp_memseg_ptr == NULL) {
 		/* Retrieve low power segments */
 		ret = core_get_low_power_segments(
@@ -190,19 +198,28 @@
 	/* vote for ocmem bus bandwidth */
 	ret = msm_bus_scale_client_update_request(
 				audio_ocmem_lcl.audio_ocmem_bus_client,
-				0);
+				1);
 	if (ret)
 		pr_err("%s: failed to vote for bus bandwidth\n", __func__);
 
 	atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_TRANSITION);
 
+	pr_debug("%s: buf->addr: 0x%ld, len: %ld, audio_state[0x%x]\n",
+				__func__,
+				audio_ocmem_lcl.buf->addr,
+				audio_ocmem_lcl.buf->len,
+				atomic_read(&audio_ocmem_lcl.audio_state));
+
+	atomic_set(&audio_ocmem_lcl.audio_cond, 1);
 	ret = ocmem_map(cid, audio_ocmem_lcl.buf, &audio_ocmem_lcl.mlist);
 	if (ret) {
 		pr_err("%s: ocmem_map failed\n", __func__);
 		goto fail_cmd;
 	}
 
-
+	pr_debug("%s: audio_cond[%d] audio_state[0x%x]\n", __func__,
+				atomic_read(&audio_ocmem_lcl.audio_cond),
+				atomic_read(&audio_ocmem_lcl.audio_state));
 	while ((atomic_read(&audio_ocmem_lcl.audio_state) !=
 						OCMEM_STATE_EXIT)) {
 
@@ -219,6 +236,8 @@
 			atomic_set(&audio_ocmem_lcl.audio_cond, 1);
 			break;
 		case OCMEM_STATE_SHRINK:
+			pr_debug("%s: ocmem shrink request process\n",
+							__func__);
 			atomic_set(&audio_ocmem_lcl.audio_cond, 1);
 			ret = ocmem_unmap(cid, audio_ocmem_lcl.buf,
 					&audio_ocmem_lcl.mlist);
@@ -242,9 +261,11 @@
 				atomic_read(&audio_ocmem_lcl.audio_state));
 				goto fail_cmd;
 			}
-
+			atomic_set(&audio_ocmem_lcl.audio_cond, 1);
 			break;
 		case OCMEM_STATE_GROW:
+			pr_debug("%s: ocmem grow request process\n",
+							__func__);
 			atomic_set(&audio_ocmem_lcl.audio_cond, 1);
 			ret = ocmem_map(cid, audio_ocmem_lcl.buf,
 						&audio_ocmem_lcl.mlist);
@@ -260,6 +281,7 @@
 				atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
 			atomic_set(&audio_ocmem_lcl.audio_state,
 				OCMEM_STATE_MAP_COMPL);
+			atomic_set(&audio_ocmem_lcl.audio_cond, 1);
 			break;
 		}
 	}
@@ -280,8 +302,10 @@
 {
 	int ret;
 
+	pr_debug("%s: disable\n", __func__);
 	if (atomic_read(&audio_ocmem_lcl.audio_cond))
 		atomic_set(&audio_ocmem_lcl.audio_cond, 0);
+
 	pr_debug("%s: audio_cond[0x%x], audio_state[0x%x]\n", __func__,
 			 atomic_read(&audio_ocmem_lcl.audio_cond),
 			 atomic_read(&audio_ocmem_lcl.audio_state));
@@ -309,6 +333,7 @@
 				atomic_read(&audio_ocmem_lcl.audio_state));
 			goto fail_cmd;
 		}
+		atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_EXIT);
 		pr_debug("%s: ocmem_free success\n", __func__);
 	default:
 		pr_debug("%s: state=%d", __func__,
@@ -316,6 +341,9 @@
 		break;
 
 	}
+	msm_bus_scale_client_update_request(
+				audio_ocmem_lcl.audio_ocmem_bus_client,
+				0);
 	return 0;
 fail_cmd:
 	return ret;
@@ -345,6 +373,7 @@
 		rc = -EINVAL;
 	}
 
+	return;
 }
 /**
  * voice_ocmem_process_req() - disable/enable OCMEM during voice call
@@ -441,6 +470,7 @@
 		rc = -EINVAL;
 	}
 
+	return;
 }
 
 /**
@@ -484,86 +514,21 @@
 
 static int audio_ocmem_platform_data_populate(struct platform_device *pdev)
 {
-	int ret;
-	struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
-	struct msm_bus_vectors *audio_ocmem_bus_vectors = NULL;
-	struct msm_bus_paths *ocmem_audio_bus_paths = NULL;
-	u32 val;
+	struct msm_bus_scale_pdata *audio_ocmem_adata = NULL;
 
 	if (!pdev->dev.of_node) {
 		pr_err("%s: device tree information missing\n", __func__);
 		return -ENODEV;
 	}
-
-	audio_ocmem_bus_vectors = kzalloc(sizeof(struct msm_bus_vectors),
-								GFP_KERNEL);
-	if (!audio_ocmem_bus_vectors) {
-		dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
-		return -ENOMEM;
+	audio_ocmem_adata = msm_bus_cl_get_pdata(pdev);
+	if (!audio_ocmem_adata) {
+		pr_err("%s: bus device tree allocation failed\n", __func__);
+		return -EINVAL;
 	}
 
-	ret = of_property_read_u32(pdev->dev.of_node,
-				"qcom,msm-ocmem-audio-src-id", &val);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-src-id missing in DT node\n",
-				__func__);
-		goto fail1;
-	}
-	audio_ocmem_bus_vectors->src = val;
-	ret = of_property_read_u32(pdev->dev.of_node,
-				"qcom,msm-ocmem-audio-dst-id", &val);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-dst-id missing in DT node\n",
-				__func__);
-		goto fail1;
-	}
-	audio_ocmem_bus_vectors->dst = val;
-	ret = of_property_read_u32(pdev->dev.of_node,
-				"qcom,msm-ocmem-audio-ab", &val);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-ab missing in DT node\n",
-					__func__);
-		goto fail1;
-	}
-	audio_ocmem_bus_vectors->ab = val;
-	ret = of_property_read_u32(pdev->dev.of_node,
-				"qcom,msm-ocmem-audio-ib", &val);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-ib missing in DT node\n",
-					__func__);
-		goto fail1;
-	}
-	audio_ocmem_bus_vectors->ib = val;
+	dev_set_drvdata(&pdev->dev, audio_ocmem_adata);
 
-	ocmem_audio_bus_paths = kzalloc(sizeof(struct msm_bus_paths),
-								GFP_KERNEL);
-	if (!ocmem_audio_bus_paths) {
-		dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
-		goto fail1;
-	}
-	ocmem_audio_bus_paths->num_paths = 1;
-	ocmem_audio_bus_paths->vectors = audio_ocmem_bus_vectors;
-
-	audio_ocmem_bus_scale_pdata =
-		kzalloc(sizeof(struct msm_bus_scale_pdata), GFP_KERNEL);
-
-	if (!audio_ocmem_bus_scale_pdata) {
-		dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
-		goto fail2;
-	}
-
-	audio_ocmem_bus_scale_pdata->usecase = ocmem_audio_bus_paths;
-	audio_ocmem_bus_scale_pdata->num_usecases = 1;
-	audio_ocmem_bus_scale_pdata->name = "audio-ocmem";
-
-	dev_set_drvdata(&pdev->dev, audio_ocmem_bus_scale_pdata);
-	return ret;
-
-fail2:
-	kfree(ocmem_audio_bus_paths);
-fail1:
-	kfree(audio_ocmem_bus_vectors);
-	return ret;
+	return 0;
 }
 static int ocmem_audio_client_probe(struct platform_device *pdev)
 {
@@ -628,9 +593,7 @@
 	audio_ocmem_bus_scale_pdata = (struct msm_bus_scale_pdata *)
 					dev_get_drvdata(&pdev->dev);
 
-	kfree(audio_ocmem_bus_scale_pdata->usecase->vectors);
-	kfree(audio_ocmem_bus_scale_pdata->usecase);
-	kfree(audio_ocmem_bus_scale_pdata);
+	msm_bus_cl_clear_pdata(audio_ocmem_bus_scale_pdata);
 	ocmem_notifier_unregister(audio_ocmem_lcl.audio_hdl,
 					&audio_ocmem_client_nb);
 	return 0;