Merge "msm: krait-l2-accessors: Remove workaround for 8064 Krait errata"
diff --git a/Documentation/devicetree/bindings/iommu/msm_iommu.txt b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
index 7a90cc0..dcf023d 100644
--- a/Documentation/devicetree/bindings/iommu/msm_iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
@@ -13,7 +13,6 @@
   - qcom,iommu-ctx-sids : List of stream identifiers associated with this
     translation context.
   - label : Name of the context bank
-  - qcom,iommu-smt-size : Number of SMR entries in the SMT of this HW block
   - vdd-supply : vdd-supply: phandle to GDSC regulator controlling this IOMMU.
 
 Optional properties:
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index 22ae844..08f7312 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -8,6 +8,23 @@
 
 	force_ro		Enforce read-only access even if write protect switch is off.
 
+	num_wr_reqs_to_start_packing	This attribute sets the trigger
+	for activating write packing when the write packing control
+	feature is enabled.
+
+	When the MMC reaches a point where num_wr_reqs_to_start_packing
+	write requests could be packed, it enables the write packing feature.
+	This allows write packing to start only when it is beneficial
+	and has minimal effect on the read latency.
+
+	The number of potential packed requests that will trigger the packing
+	can be configured via sysfs by writing the required value to:
+	/sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
+
+	The default value of num_wr_reqs_to_start_packing was determined by
+	running parallel lmdd write and lmdd read operations and calculating
+	the max number of packed write requests.
+
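	For illustration only (this example is not part of the patch), the
	attribute described above could be set from user space roughly as in
	the sketch below; the device name mmcblk0 and the value 17 are
	placeholders, not defaults taken from this change:

	/* Minimal sketch: write a packing trigger value to the sysfs attribute.
	 * "mmcblk0" and 17 are placeholder examples.
	 */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/block/mmcblk0/num_wr_reqs_to_start_packing", "w");

		if (!f)
			return 1;
		fprintf(f, "%d\n", 17);
		return fclose(f) ? 1 : 0;
	}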
 SD and MMC Device Attributes
 ============================
 
diff --git a/arch/arm/boot/dts/msm-iommu.dtsi b/arch/arm/boot/dts/msm-iommu.dtsi
index e907de8..2b2a3c0 100755
--- a/arch/arm/boot/dts/msm-iommu.dtsi
+++ b/arch/arm/boot/dts/msm-iommu.dtsi
@@ -18,7 +18,6 @@
 		ranges;
 		reg = <0xfda64000 0x10000>;
 		vdd-supply = <&gdsc_jpeg>;
-		qcom,iommu-smt-size = <16>;
 		status = "disabled";
 
 		qcom,iommu-ctx@fda6c000 {
@@ -50,7 +49,6 @@
 		ranges;
 		reg = <0xfd928000 0x10000>;
 		vdd-supply = <&gdsc_mdss>;
-		qcom,iommu-smt-size = <16>;
 		status = "disabled";
 
 		qcom,iommu-ctx@fd930000 {
@@ -75,7 +73,6 @@
 		ranges;
 		reg = <0xfdc84000 0x10000>;
 		vdd-supply = <&gdsc_venus>;
-		qcom,iommu-smt-size = <16>;
 		qcom,needs-alt-core-clk;
 		status = "disabled";
 
@@ -108,7 +105,6 @@
 		ranges;
 		reg = <0xfdb10000 0x10000>;
 		vdd-supply = <&gdsc_oxili_cx>;
-		qcom,iommu-smt-size = <32>;
 		qcom,needs-alt-core-clk;
 		status = "disabled";
 
@@ -134,7 +130,6 @@
 		ranges;
 		reg = <0xfda44000 0x10000>;
 		vdd-supply = <&gdsc_vfe>;
-		qcom,iommu-smt-size = <32>;
 		status = "disabled";
 
 		qcom,iommu-ctx@fda4c000 {
diff --git a/arch/arm/boot/dts/msm9625-regulator.dtsi b/arch/arm/boot/dts/msm9625-regulator.dtsi
index c42af2c..9184dfe 100644
--- a/arch/arm/boot/dts/msm9625-regulator.dtsi
+++ b/arch/arm/boot/dts/msm9625-regulator.dtsi
@@ -10,154 +10,241 @@
  * GNU General Public License for more details.
  */
 
-&spmi_bus {
-	qcom,pm8019@1 {
-		pm8019_s1: regulator@1400 {
+&rpm_bus {
+	rpm-regulator-smpa1 {
+		status = "okay";
+		pm8019_s1: regulator-s1 {
 			regulator-min-microvolt = <800000>;
 			regulator-max-microvolt = <1050000>;
-			qcom,enable-time = <500>;
+			qcom,init-voltage = <1050000>;
 			status = "okay";
 		};
+	};
 
-		pm8019_s2: regulator@1700 {
+	rpm-regulator-smpa2 {
+		status = "okay";
+		qcom,allow-atomic = <1>;
+		pm8019_s2: regulator-s2 {
 			regulator-min-microvolt = <1250000>;
 			regulator-max-microvolt = <1250000>;
+			qcom,init-voltage = <1250000>;
+			qcom,init-current = <100>;
 			qcom,system-load = <100000>;
-			qcom,enable-time = <500>;
 			regulator-always-on;
 			status = "okay";
 		};
+	};
 
-		pm8019_s3: regulator@1a00 {
-			regulator-min-microvolt = <1100000>;
+	rpm-regulator-smpa3 {
+		status = "okay";
+		pm8019_s3: regulator-s3 {
+			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1100000>;
+			qcom,init-voltage = <1100000>;
+			qcom,init-current = <100>;
 			qcom,system-load = <100000>;
-			qcom,enable-time = <500>;
 			regulator-always-on;
 			status = "okay";
 		};
+		pm8019_s3_ao: regulator-s3-ao {
+			compatible = "qcom,rpm-regulator-smd";
+			regulator-name = "8019_s3_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1100000>;
+			status = "okay";
+		};
+	};
 
-		pm8019_s4: regulator@1d00 {
+	rpm-regulator-smpa4 {
+		status = "okay";
+		pm8019_s4: regulator-s4 {
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2075000>;
+			qcom,init-voltage = <2075000>;
+			qcom,init-current = <100>;
 			qcom,system-load = <100000>;
-			qcom,enable-time = <500>;
 			regulator-always-on;
 			status = "okay";
 		};
+	};
 
-		pm8019_l1: regulator@4000 {
+	rpm-regulator-ldoa1 {
+		status = "okay";
+		pm8019_l1: regulator-l1 {
 			parent-supply = <&pm8019_s2>;
 			regulator-min-microvolt = <1225000>;
 			regulator-max-microvolt = <1225000>;
-			qcom,enable-time = <200>;
+			qcom,init-voltage = <1225000>;
 			status = "okay";
 		};
+	};
 
-		pm8019_l2: regulator@4100 {
+	rpm-regulator-ldoa2 {
+		status = "okay";
+		pm8019_l2: regulator-l2 {
 			parent-supply = <&pm8019_s4>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
-			qcom,enable-time = <200>;
+			qcom,init-voltage = <1800000>;
 			status = "okay";
 		};
+	};
 
-		pm8019_l3: regulator@4200 {
+	rpm-regulator-ldoa3 {
+		status = "okay";
+		pm8019_l3: regulator-l3 {
 			parent-supply = <&pm8019_s4>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
-			qcom,enable-time = <200>;
+			qcom,init-voltage = <1800000>;
 			status = "okay";
 		};
+	};
 
-		pm8019_l4: regulator@4300 {
+	rpm-regulator-ldoa4 {
+		status = "okay";
+		pm8019_l4: regulator-l4 {
 			regulator-min-microvolt = <3075000>;
 			regulator-max-microvolt = <3075000>;
-			qcom,enable-time = <200>;
+			qcom,init-voltage = <3075000>;
 			status = "okay";
 		};
+	};
 
-		pm8019_l5: regulator@4400 {
+	rpm-regulator-ldoa5 {
+		status = "okay";
+		pm8019_l5: regulator-l5 {
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2850000>;
-			qcom,enable-time = <200>;
+			qcom,init-voltage = <1800000>;
 			status = "okay";
 		};
+	};
 
-		pm8019_l6: regulator@4500 {
+	rpm-regulator-ldoa6 {
+		status = "okay";
+		pm8019_l6: regulator-l6 {
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2850000>;
-			qcom,enable-time = <200>;
+			qcom,init-voltage = <1800000>;
 			status = "okay";
 		};
+	};
 
-		pm8019_l7: regulator@4600 {
+	rpm-regulator-ldoa7 {
+		status = "okay";
+		pm8019_l7: regulator-l7 {
 			parent-supply = <&pm8019_s4>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
-			qcom,enable-time = <200>;
+			qcom,init-voltage = <1800000>;
 			status = "okay";
 		};
+	};
 
-		pm8019_l8: regulator@4700 {
+	rpm-regulator-ldoa8 {
+		status = "okay";
+		pm8019_l8: regulator-l8 {
 			parent-supply = <&pm8019_s4>;
 			regulator-min-microvolt = <2050000>;
 			regulator-max-microvolt = <2050000>;
-			qcom,enable-time = <200>;
+			qcom,init-voltage = <2050000>;
 			status = "okay";
 		};
+	};
 
-		pm8019_l9: regulator@4800 {
+	rpm-regulator-ldoa9 {
+		status = "okay";
+		pm8019_l9: regulator-l9 {
 			parent-supply = <&pm8019_s2>;
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
+			qcom,init-voltage = <1200000>;
+			qcom,init-current = <10>;
 			qcom,system-load = <10000>;
-			qcom,enable-time = <200>;
 			regulator-always-on;
 			status = "okay";
 		};
+	};
 
-		pm8019_l10: regulator@4900 {
+	rpm-regulator-ldoa10 {
+		status = "okay";
+		pm8019_l10: regulator-l10 {
 			parent-supply = <&pm8019_s3>;
-			regulator-min-microvolt = <1050000>;
+			regulator-min-microvolt = <500000>;
 			regulator-max-microvolt = <1050000>;
-			qcom,system-load = <10000>;
-			qcom,enable-time = <200>;
-			regulator-always-on;
 			status = "okay";
 		};
+		pm8019_l10_corner: regulator-l10-corner {
+			compatible = "qcom,rpm-regulator-smd";
+			regulator-name = "8019_l10_corner";
+			qcom,set = <3>;
+			regulator-min-microvolt = <1>;
+			regulator-max-microvolt = <7>;
+			qcom,use-voltage-corner;
+			status = "okay";
+		};
+		pm8019_l10_corner_ao: regulator-l10-corner-ao {
+			compatible = "qcom,rpm-regulator-smd";
+			regulator-name = "8019_l10_corner_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt = <1>;
+			regulator-max-microvolt = <7>;
+			qcom,use-voltage-corner;
+			status = "okay";
+		};
+	};
 
-		pm8019_l11: regulator@4a00 {
+	rpm-regulator-ldoa11 {
+		status = "okay";
+		pm8019_l11: regulator-l11 {
 			parent-supply = <&pm8019_s4>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
+			qcom,init-voltage = <1800000>;
+			qcom,init-current = <10>;
 			qcom,system-load = <10000>;
-			qcom,enable-time = <200>;
 			regulator-always-on;
 			status = "okay";
 		};
+	};
 
-		pm8019_l12: regulator@4b00 {
+	rpm-regulator-ldoa12 {
+		status = "okay";
+		pm8019_l12: regulator-l12 {
 			parent-supply = <&pm8019_s3>;
-			regulator-min-microvolt = <1050000>;
+			regulator-min-microvolt = <750000>;
 			regulator-max-microvolt = <1050000>;
-			qcom,system-load = <10000>;
-			qcom,enable-time = <200>;
-			regulator-always-on;
 			status = "okay";
 		};
+		pm8019_l12_ao: regulator-l12-ao {
+			compatible = "qcom,rpm-regulator-smd";
+			regulator-name = "8019_l12_ao";
+			qcom,set = <1>;
+			parent-supply = <&pm8019_s3_ao>;
+			regulator-min-microvolt = <750000>;
+			regulator-max-microvolt = <1050000>;
+			status = "okay";
+		};
+	};
 
-		pm8019_l13: regulator@4c00 {
+	rpm-regulator-ldoa13 {
+		status = "okay";
+		pm8019_l13: regulator-l13 {
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2950000>;
-			qcom,enable-time = <200>;
+			qcom,init-voltage = <2950000>;
 			status = "okay";
 		};
+	};
 
-		pm8019_l14: regulator@4d00 {
+	rpm-regulator-ldoa14 {
+		status = "okay";
+		pm8019_l14: regulator-l14 {
 			regulator-min-microvolt = <2700000>;
 			regulator-max-microvolt = <2700000>;
-			qcom,enable-time = <200>;
+			qcom,init-voltage = <2700000>;
 			status = "okay";
 		};
 	};
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 45d52e4..6a6bfda 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -71,7 +71,8 @@
 CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
 CONFIG_MSM_AVS_HW=y
 # CONFIG_MSM_HW3D is not set
-CONFIG_MSM_PIL_QDSP6V4=y
+CONFIG_MSM_PIL_LPASS_QDSP6V4=y
+CONFIG_MSM_PIL_MODEM_QDSP6V4=y
 CONFIG_MSM_PIL_RIVA=y
 CONFIG_MSM_PIL_TZAPPS=y
 CONFIG_MSM_PIL_DSPS=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 465598f..cf2dd23 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -70,7 +70,8 @@
 CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
 CONFIG_MSM_AVS_HW=y
 # CONFIG_MSM_HW3D is not set
-CONFIG_MSM_PIL_QDSP6V4=y
+CONFIG_MSM_PIL_LPASS_QDSP6V4=y
+CONFIG_MSM_PIL_MODEM_QDSP6V4=y
 CONFIG_MSM_PIL_RIVA=y
 CONFIG_MSM_PIL_TZAPPS=y
 CONFIG_MSM_PIL_DSPS=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 284f5fc..60963c6 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -35,6 +35,9 @@
 CONFIG_CPU_HAS_L2_PMU=y
 # CONFIG_MSM_FIQ_SUPPORT is not set
 # CONFIG_MSM_PROC_COMM is not set
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_PKG4=y
+CONFIG_MSM_RPM_REGULATOR_SMD=y
 CONFIG_MSM_DIRECT_SCLK_ACCESS=y
 CONFIG_MSM_WATCHDOG_V2=y
 CONFIG_NO_HZ=y
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 2020422..6e841c7 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -355,10 +355,25 @@
 	select MSM_SMP
 	select CPU_V7
 	select MSM_GPIOMUX
+	select MSM_RPM_SMD
 	select MULTI_IRQ_HANDLER
 	select GPIO_MSM_V3
 	select MAY_HAVE_SPARSE_IRQ
 	select SPARSE_IRQ
+
+config ARCH_MSM8910
+	bool "MSM8910"
+	select ARM_GIC
+	select GIC_SECURE
+	select SMP
+	select ARCH_MSM_CORTEXMP
+	select CPU_V7
+	select MSM_SCM if SMP
+	select MAY_HAVE_SPARSE_IRQ
+	select SPARSE_IRQ
+	select MULTI_IRQ_HANDLER
+	select GPIO_MSM_V3
+	select MSM_GPIOMUX
 endmenu
 
 choice
@@ -926,6 +941,7 @@
 	default "0x00000000" if ARCH_MSM8974
 	default "0x00000000" if ARCH_MPQ8092
 	default "0x00000000" if ARCH_MSM8226
+	default "0x00000000" if ARCH_MSM8910
 	default "0x10000000" if ARCH_FSM9XXX
 	default "0x00200000" if ARCH_MSM9625
 	default "0x00200000" if !MSM_STACKED_MEMORY
@@ -1914,13 +1930,25 @@
 	  Support for booting and shutting down QDSP6v3 processors (hexagon).
 	  The QDSP6 is a low power DSP used in audio software applications.
 
-config MSM_PIL_QDSP6V4
-	tristate "QDSP6v4 (Hexagon) Boot Support"
+config MSM_PIL_LPASS_QDSP6V4
+	tristate "LPASS QDSP6v4 (Hexagon) Boot Support"
 	depends on MSM_PIL
 	help
-	  Support for booting and shutting down QDSP6v4 processors (hexagon).
-	  The QDSP6 is a low power DSP used in audio, modem firmware, and modem
-	  software applications.
+	  Support for booting and shutting down QDSP6v4 processors (hexagon)
+	  in low power audio subsystems. If you would like to record or
+	  play audio, then say Y here.
+
+	  If unsure, say N.
+
+config MSM_PIL_MODEM_QDSP6V4
+	tristate "Modem QDSP6v4 (Hexagon) Boot Support"
+	depends on MSM_PIL
+	help
+	  Support for booting and shutting down QDSP6v4 processors (hexagon)
+	  in modem subsystems. If you would like to make or receive phone
+	  calls, then say Y here.
+
+	  If unsure, say N.
 
 config MSM_PIL_LPASS_QDSP6V5
        tristate "LPASS QDSP6v5 (Hexagon) Boot Support"
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 7dece76..0329896 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -72,7 +72,8 @@
 obj-$(CONFIG_MSM_PIL) += peripheral-loader.o
 obj-$(CONFIG_MSM_PIL) += scm-pas.o
 obj-$(CONFIG_MSM_PIL_QDSP6V3) += pil-q6v3.o
-obj-$(CONFIG_MSM_PIL_QDSP6V4) += pil-q6v4.o
+obj-$(CONFIG_MSM_PIL_LPASS_QDSP6V4) += pil-q6v4.o pil-q6v4-lpass.o
+obj-$(CONFIG_MSM_PIL_MODEM_QDSP6V4) += pil-q6v4.o pil-q6v4-mss.o
 obj-$(CONFIG_MSM_PIL_LPASS_QDSP6V5) += pil-q6v5.o pil-q6v5-lpass.o
 obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-q6v5-mss.o
 obj-$(CONFIG_MSM_PIL_MBA) += pil-mba.o
@@ -348,7 +349,7 @@
 obj-$(CONFIG_ARCH_MSM8226) += gpiomux-v2.o gpiomux.o
 
 obj-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += idle_stats_device.o
-obj-$(CONFIG_MSM_DCVS) += msm_dcvs_scm.o msm_dcvs.o msm_dcvs_idle.o msm_mpdecision.o
+obj-$(CONFIG_MSM_DCVS) += msm_dcvs_scm.o msm_dcvs.o msm_mpdecision.o
 obj-$(CONFIG_MSM_RUN_QUEUE_STATS) += msm_rq_stats.o
 obj-$(CONFIG_MSM_SHOW_RESUME_IRQ) += msm_show_resume_irq.o
 obj-$(CONFIG_BT_MSM_PINTEST)  += btpintest.o
diff --git a/arch/arm/mach-msm/acpuclock-8064.c b/arch/arm/mach-msm/acpuclock-8064.c
index d10211bc..d1613d9 100644
--- a/arch/arm/mach-msm/acpuclock-8064.c
+++ b/arch/arm/mach-msm/acpuclock-8064.c
@@ -116,7 +116,7 @@
 };
 
 static struct l2_level l2_freq_tbl[] __initdata = {
-	[0]  = { {  384000, PLL_8, 0, 0x00 }, 1050000, 1050000, 1 },
+	[0]  = { {  384000, PLL_8, 0, 0x00 },  950000, 1050000, 1 },
 	[1]  = { {  432000, HFPLL, 2, 0x20 }, 1050000, 1050000, 2 },
 	[2]  = { {  486000, HFPLL, 2, 0x24 }, 1050000, 1050000, 2 },
 	[3]  = { {  540000, HFPLL, 2, 0x28 }, 1050000, 1050000, 2 },
diff --git a/arch/arm/mach-msm/board-8064-gpu.c b/arch/arm/mach-msm/board-8064-gpu.c
index c7075bc..fad7092 100644
--- a/arch/arm/mach-msm/board-8064-gpu.c
+++ b/arch/arm/mach-msm/board-8064-gpu.c
@@ -31,13 +31,14 @@
 };
 
 static struct msm_dcvs_core_info grp3d_core_info = {
-	.freq_tbl	= &grp3d_freq[0],
-	.num_cores	= 1,
-	.sensors	= (int[]){0},
-	.core_param	= {
+	.freq_tbl		= &grp3d_freq[0],
+	.num_cores		= 1,
+	.sensors		= (int[]){0},
+	.thermal_poll_ms	= 60000,
+	.core_param		= {
 		.core_type	= MSM_DCVS_CORE_TYPE_GPU,
 	},
-	.algo_param	= {
+	.algo_param		= {
 		.disable_pc_threshold	= 0,
 		.em_win_size_min_us	= 100000,
 		.em_win_size_max_us	= 300000,
@@ -53,8 +54,7 @@
 		.ss_iobusy_conv		= 100,
 	},
 
-
-	.energy_coeffs	= {
+	.energy_coeffs		= {
 		.leakage_coeff_a	= -17720,
 		.leakage_coeff_b	= 37,
 		.leakage_coeff_c	= 3329,
@@ -65,7 +65,7 @@
 		.active_coeff_c		= 0
 	},
 
-	.power_param	= {
+	.power_param		= {
 		.current_temp	= 25,
 		.num_freq	= ARRAY_SIZE(grp3d_freq),
 	}
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index ef3c81d..2bef087 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -121,8 +121,6 @@
 };
 VREG_CONSUMERS(L23) = {
 	REGULATOR_SUPPLY("8921_l23",		NULL),
-	REGULATOR_SUPPLY("pll_vdd",		"pil_qdsp6v4.1"),
-	REGULATOR_SUPPLY("pll_vdd",		"pil_qdsp6v4.2"),
 	REGULATOR_SUPPLY("HSUSB_1p8",		"msm_ehci_host.0"),
 	REGULATOR_SUPPLY("HSUSB_1p8",		"msm_ehci_host.1"),
 };
@@ -141,15 +139,13 @@
 };
 VREG_CONSUMERS(L26) = {
 	REGULATOR_SUPPLY("8921_l26",		NULL),
-	REGULATOR_SUPPLY("core_vdd",		"pil_qdsp6v4.0"),
+	REGULATOR_SUPPLY("core_vdd",		"pil-q6v4-lpass"),
 };
 VREG_CONSUMERS(L27) = {
 	REGULATOR_SUPPLY("8921_l27",		NULL),
-	REGULATOR_SUPPLY("core_vdd",		"pil_qdsp6v4.2"),
 };
 VREG_CONSUMERS(L28) = {
 	REGULATOR_SUPPLY("8921_l28",		NULL),
-	REGULATOR_SUPPLY("core_vdd",		"pil_qdsp6v4.1"),
 };
 VREG_CONSUMERS(L29) = {
 	REGULATOR_SUPPLY("8921_l29",		NULL),
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index cc9dcbb..e2dc98e 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -605,6 +605,11 @@
 #endif
 }
 
+static void __init reserve_mpdcvs_memory(void)
+{
+	apq8064_reserve_table[MEMTYPE_EBI1].size += SZ_32K;
+}
+
 static void __init apq8064_calculate_reserve_sizes(void)
 {
 	size_pmem_devices();
@@ -613,6 +618,7 @@
 	reserve_mdp_memory();
 	reserve_rtb_memory();
 	reserve_cache_dump_memory();
+	reserve_mpdcvs_memory();
 }
 
 static struct reserve_info apq8064_reserve_info __initdata = {
@@ -2568,7 +2574,6 @@
 	&msm_pil_vidc,
 	&msm_gss,
 	&apq8064_rtb_device,
-	&apq8064_cpu_idle_device,
 	&apq8064_msm_gov_device,
 	&apq8064_device_cache_erp,
 	&msm8960_device_ebi1_ch0_erp,
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8038.c b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
index 208f15b..16a82b4 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8038.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
@@ -117,7 +117,7 @@
 };
 VREG_CONSUMERS(L16) = {
 	REGULATOR_SUPPLY("8038_l16",		NULL),
-	REGULATOR_SUPPLY("core_vdd",		"pil_qdsp6v4.2"),
+	REGULATOR_SUPPLY("sw_core_vdd",		"pil-q6v4-modem"),
 };
 VREG_CONSUMERS(L17) = {
 	REGULATOR_SUPPLY("8038_l17",		NULL),
@@ -127,7 +127,7 @@
 };
 VREG_CONSUMERS(L19) = {
 	REGULATOR_SUPPLY("8038_l19",		NULL),
-	REGULATOR_SUPPLY("core_vdd",		"pil_qdsp6v4.1"),
+	REGULATOR_SUPPLY("fw_core_vdd",		"pil-q6v4-modem"),
 };
 VREG_CONSUMERS(L20) = {
 	REGULATOR_SUPPLY("8038_l20",		NULL),
@@ -151,8 +151,7 @@
 	REGULATOR_SUPPLY("hdmi_avdd",		"hdmi_msm.0"),
 	REGULATOR_SUPPLY("hdmi_vcc",		"hdmi_msm.0"),
 	REGULATOR_SUPPLY("pll_vdd",		"pil_riva"),
-	REGULATOR_SUPPLY("pll_vdd",		"pil_qdsp6v4.1"),
-	REGULATOR_SUPPLY("pll_vdd",		"pil_qdsp6v4.2"),
+	REGULATOR_SUPPLY("pll_vdd",		"pil-q6v4-modem"),
 };
 VREG_CONSUMERS(L24) = {
 	REGULATOR_SUPPLY("8038_l24",		NULL),
@@ -166,7 +165,7 @@
 };
 VREG_CONSUMERS(L27) = {
 	REGULATOR_SUPPLY("8038_l27",		NULL),
-	REGULATOR_SUPPLY("core_vdd",		"pil_qdsp6v4.0"),
+	REGULATOR_SUPPLY("core_vdd",		"pil-q6v4-lpass"),
 };
 VREG_CONSUMERS(S1) = {
 	REGULATOR_SUPPLY("8038_s1",		NULL),
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8917.c b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
index db40e5d..b7f554c 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8917.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
@@ -547,7 +547,7 @@
 	RPM_SMPS(S2, 0, 1, 0, 1300000, 1300000, NULL,      0, 1p60, NONE, NONE),
 	RPM_SMPS(S3, 0, 1, 1,  500000, 1150000, NULL, 100000, 4p80, AUTO, LPM),
 	RPM_SMPS(S4, 1, 1, 0, 1800000, 1800000, NULL, 100000, 1p60, AUTO, LPM),
-	RPM_SMPS(S7, 0, 1, 0, 1150000, 1150000, NULL, 100000, 3p20, NONE, NONE),
+	RPM_SMPS(S7, 0, 1, 0, 1150000, 1150000, NULL, 100000, 3p20, AUTO, AUTO),
 	RPM_SMPS(S8, 1, 1, 1, 2050000, 2050000, NULL, 100000, 1p60, NONE, NONE),
 
 	/*	ID     a_on pd ss min_uV   max_uV  supply  sys_uA init_ip */
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index a6a90a7..fc4b819 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -2347,8 +2347,7 @@
 
 static struct platform_device *common_devices[] __initdata = {
 	&msm_8960_q6_lpass,
-	&msm_8960_q6_mss_fw,
-	&msm_8960_q6_mss_sw,
+	&msm_8960_q6_mss,
 	&msm_8960_riva,
 	&msm_pil_tzapps,
 	&msm_pil_vidc,
@@ -2413,7 +2412,6 @@
 	&gpio_keys_8930,
 #endif
 	&msm8930_rtb_device,
-	&msm8930_cpu_idle_device,
 	&msm_bus_8930_apps_fabric,
 	&msm_bus_8930_sys_fabric,
 	&msm_bus_8930_mm_fabric,
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index 2fa98b6..a6c0bc7 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -120,8 +120,7 @@
 	REGULATOR_SUPPLY("dsi_pll_vddio",	"mdp.0"),
 	REGULATOR_SUPPLY("hdmi_avdd",		"hdmi_msm.0"),
 	REGULATOR_SUPPLY("pll_vdd",		"pil_riva"),
-	REGULATOR_SUPPLY("pll_vdd",		"pil_qdsp6v4.1"),
-	REGULATOR_SUPPLY("pll_vdd",		"pil_qdsp6v4.2"),
+	REGULATOR_SUPPLY("pll_vdd",		"pil-q6v4-modem"),
 };
 VREG_CONSUMERS(L24) = {
 	REGULATOR_SUPPLY("8921_l24",		NULL),
@@ -136,15 +135,15 @@
 };
 VREG_CONSUMERS(L26) = {
 	REGULATOR_SUPPLY("8921_l26",		NULL),
-	REGULATOR_SUPPLY("core_vdd",		"pil_qdsp6v4.0"),
+	REGULATOR_SUPPLY("core_vdd",		"pil-q6v4-lpass"),
 };
 VREG_CONSUMERS(L27) = {
 	REGULATOR_SUPPLY("8921_l27",		NULL),
-	REGULATOR_SUPPLY("core_vdd",		"pil_qdsp6v4.2"),
+	REGULATOR_SUPPLY("sw_core_vdd",		"pil-q6v4-modem"),
 };
 VREG_CONSUMERS(L28) = {
 	REGULATOR_SUPPLY("8921_l28",		NULL),
-	REGULATOR_SUPPLY("core_vdd",		"pil_qdsp6v4.1"),
+	REGULATOR_SUPPLY("fw_core_vdd",		"pil-q6v4-modem"),
 };
 VREG_CONSUMERS(L29) = {
 	REGULATOR_SUPPLY("8921_l29",		NULL),
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 7115e40..7833225 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -2762,7 +2762,6 @@
 	&msm_device_dspcrashd_8960,
 	&msm8960_device_watchdog,
 	&msm8960_rtb_device,
-	&msm8960_cpu_idle_device,
 	&msm8960_device_cache_erp,
 	&msm8960_device_ebi1_ch0_erp,
 	&msm8960_device_ebi1_ch1_erp,
@@ -3281,10 +3280,8 @@
 
 	msm8960_pm8921_gpio_mpp_init();
 	/* Don't add modem devices on APQ targets */
-	if (socinfo_get_id() != 124) {
-		platform_device_register(&msm_8960_q6_mss_fw);
-		platform_device_register(&msm_8960_q6_mss_sw);
-	}
+	if (socinfo_get_id() != 124)
+		platform_device_register(&msm_8960_q6_mss);
 	platform_add_devices(cdp_devices, ARRAY_SIZE(cdp_devices));
 	msm8960_init_smsc_hub();
 	msm8960_init_hsic();
diff --git a/arch/arm/mach-msm/board-9625.c b/arch/arm/mach-msm/board-9625.c
index 37e93b6..ca9bdaa 100644
--- a/arch/arm/mach-msm/board-9625.c
+++ b/arch/arm/mach-msm/board-9625.c
@@ -32,7 +32,12 @@
 #include <mach/clk-provider.h>
 #include <mach/qpnp-int.h>
 #include <mach/msm_memtypes.h>
+#include <mach/msm_iomap.h>
+#include <mach/msm_smd.h>
+#include <mach/rpm-smd.h>
+#include <mach/rpm-regulator-smd.h>
 #include "clock.h"
+#include "modem_notifier.h"
 
 #define MSM_KERNEL_EBI_SIZE	0x51000
 
@@ -134,6 +139,153 @@
 	msm_reserve();
 }
 
+static struct resource smd_resource[] = {
+	{
+		.name   = "modem_smd_in",
+		.start  = 32 + 25,              /* mss_sw_to_kpss_ipc_irq0  */
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.name   = "modem_smsm_in",
+		.start  = 32 + 26,              /* mss_sw_to_kpss_ipc_irq1  */
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.name   = "adsp_smd_in",
+		.start  = 32 + 156,             /* lpass_to_kpss_ipc_irq0  */
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.name   = "adsp_smsm_in",
+		.start  = 32 + 157,             /* lpass_to_kpss_ipc_irq1  */
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.name   = "rpm_smd_in",
+		.start  = 32 + 168,             /* rpm_to_kpss_ipc_irq4  */
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct smd_subsystem_config smd_config_list[] = {
+	{
+		.irq_config_id = SMD_MODEM,
+		.subsys_name = "modem",
+		.edge = SMD_APPS_MODEM,
+
+		.smd_int.irq_name = "modem_smd_in",
+		.smd_int.flags = IRQF_TRIGGER_RISING,
+		.smd_int.irq_id = -1,
+		.smd_int.device_name = "smd_dev",
+		.smd_int.dev_id = 0,
+		.smd_int.out_bit_pos = 1 << 12,
+		.smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
+		.smd_int.out_offset = 0x8,
+
+		.smsm_int.irq_name = "modem_smsm_in",
+		.smsm_int.flags = IRQF_TRIGGER_RISING,
+		.smsm_int.irq_id = -1,
+		.smsm_int.device_name = "smsm_dev",
+		.smsm_int.dev_id = 0,
+		.smsm_int.out_bit_pos = 1 << 13,
+		.smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
+		.smsm_int.out_offset = 0x8,
+	},
+	{
+		.irq_config_id = SMD_Q6,
+		.subsys_name = "adsp",
+		.edge = SMD_APPS_QDSP,
+
+		.smd_int.irq_name = "adsp_smd_in",
+		.smd_int.flags = IRQF_TRIGGER_RISING,
+		.smd_int.irq_id = -1,
+		.smd_int.device_name = "smd_dev",
+		.smd_int.dev_id = 0,
+		.smd_int.out_bit_pos = 1 << 8,
+		.smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
+		.smd_int.out_offset = 0x8,
+
+		.smsm_int.irq_name = "adsp_smsm_in",
+		.smsm_int.flags = IRQF_TRIGGER_RISING,
+		.smsm_int.irq_id = -1,
+		.smsm_int.device_name = "smsm_dev",
+		.smsm_int.dev_id = 0,
+		.smsm_int.out_bit_pos = 1 << 9,
+		.smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
+		.smsm_int.out_offset = 0x8,
+	},
+	{
+		.irq_config_id = SMD_RPM,
+		.subsys_name = NULL, /* do not use PIL to load RPM */
+		.edge = SMD_APPS_RPM,
+
+		.smd_int.irq_name = "rpm_smd_in",
+		.smd_int.flags = IRQF_TRIGGER_RISING,
+		.smd_int.irq_id = -1,
+		.smd_int.device_name = "smd_dev",
+		.smd_int.dev_id = 0,
+		.smd_int.out_bit_pos = 1 << 0,
+		.smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
+		.smd_int.out_offset = 0x8,
+
+		.smsm_int.irq_name = NULL, /* RPM does not support SMSM */
+		.smsm_int.flags = 0,
+		.smsm_int.irq_id = 0,
+		.smsm_int.device_name = NULL,
+		.smsm_int.dev_id = 0,
+		.smsm_int.out_bit_pos = 0,
+		.smsm_int.out_base = NULL,
+		.smsm_int.out_offset = 0,
+	},
+};
+
+static struct smd_smem_regions aux_smem_areas[] = {
+	{
+		.phys_addr = (void *)(0xfc428000),
+		.size = 0x4000,
+	},
+};
+
+static struct smd_subsystem_restart_config smd_ssr_cfg = {
+	.disable_smsm_reset_handshake = 1,
+};
+
+static struct smd_platform smd_platform_data = {
+	.num_ss_configs = ARRAY_SIZE(smd_config_list),
+	.smd_ss_configs = smd_config_list,
+	.smd_ssr_config = &smd_ssr_cfg,
+	.num_smem_areas = ARRAY_SIZE(aux_smem_areas),
+	.smd_smem_areas = aux_smem_areas,
+};
+
+struct platform_device msm_device_smd_9625 = {
+	.name   = "msm_smd",
+	.id     = -1,
+	.resource = smd_resource,
+	.num_resources = ARRAY_SIZE(smd_resource),
+	.dev = {
+		.platform_data = &smd_platform_data,
+	}
+};
+
+void __init msm9625_add_devices(void)
+{
+	platform_device_register(&msm_device_smd_9625);
+}
+
+/*
+ * Used to satisfy dependencies for devices that need to be
+ * run early or in a particular order. Most likely your device doesn't fall
+ * into this category, and thus the driver should not be added here.
+ * EPROBE_DEFER can satisfy most dependency problems.
+ */
+void __init msm9625_add_drivers(void)
+{
+	msm_init_modem_notifier_list();
+	msm_smd_init();
+	msm_rpm_driver_init();
+	rpm_regulator_smd_driver_init();
+}
 
 void __init msm9625_init(void)
 {
@@ -144,6 +296,8 @@
 	msm_clock_init(&msm_dummy_clock_init_data);
 	of_platform_populate(NULL, of_default_bus_match_table,
 			msm9625_auxdata_lookup, NULL);
+	msm9625_add_devices();
+	msm9625_add_drivers();
 }
 
 DT_MACHINE_START(MSM_DT, "Qualcomm MSM (Flattened Device Tree)")
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 2cd2cd4..3a72be3 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -5216,9 +5216,7 @@
 	CLK_LOOKUP("pwm_clk",		cxo_clk.c,	"0-0048"),
 	CLK_LOOKUP("cxo",		cxo_clk.c,	"wcnss_wlan.0"),
 	CLK_LOOKUP("cxo",		cxo_clk.c,	"pil_riva"),
-	CLK_LOOKUP("xo",		pxo_clk.c,	"pil_qdsp6v4.0"),
-	CLK_LOOKUP("xo",		cxo_clk.c,	"pil_qdsp6v4.1"),
-	CLK_LOOKUP("xo",		cxo_clk.c,	"pil_qdsp6v4.2"),
+	CLK_LOOKUP("xo",		pxo_clk.c,	"pil-q6v4-lpass"),
 	CLK_LOOKUP("xo",		cxo_clk.c,	"pil_gss"),
 	CLK_LOOKUP("xo",		cxo_clk.c,	"BAM_RMNT"),
 	CLK_LOOKUP("xo",		cxo_clk.c,	"msm_xo"),
@@ -5562,9 +5560,8 @@
 	CLK_LOOKUP("xo",		pxo_a_clk.c,	""),
 	CLK_LOOKUP("cxo",		cxo_clk.c,	"wcnss_wlan.0"),
 	CLK_LOOKUP("cxo",		cxo_clk.c,	"pil_riva"),
-	CLK_LOOKUP("xo",		pxo_clk.c,	"pil_qdsp6v4.0"),
-	CLK_LOOKUP("xo",		cxo_clk.c,	"pil_qdsp6v4.1"),
-	CLK_LOOKUP("xo",		cxo_clk.c,	"pil_qdsp6v4.2"),
+	CLK_LOOKUP("xo",		pxo_clk.c,	"pil-q6v4-lpass"),
+	CLK_LOOKUP("xo",		cxo_clk.c,	"pil-q6v4-modem"),
 	CLK_LOOKUP("xo",		cxo_clk.c,	"BAM_RMNT"),
 	CLK_LOOKUP("xo",		cxo_clk.c,	"msm_xo"),
 	CLK_LOOKUP("vref_buff",		cxo_clk.c,	"rpm-regulator"),
@@ -5915,9 +5912,8 @@
 	CLK_LOOKUP("xo",		cxo_clk.c,	"msm_xo"),
 	CLK_LOOKUP("cxo",		cxo_clk.c,	"wcnss_wlan.0"),
 	CLK_LOOKUP("cxo",		cxo_clk.c,	"pil_riva"),
-	CLK_LOOKUP("xo",		pxo_clk.c,	"pil_qdsp6v4.0"),
-	CLK_LOOKUP("xo",		cxo_clk.c,	"pil_qdsp6v4.1"),
-	CLK_LOOKUP("xo",		cxo_clk.c,	"pil_qdsp6v4.2"),
+	CLK_LOOKUP("xo",		pxo_clk.c,	"pil-q6v4-lpass"),
+	CLK_LOOKUP("xo",		cxo_clk.c,	"pil-q6v4-modem"),
 	CLK_LOOKUP("xo",		cxo_clk.c,	"BAM_RMNT"),
 	CLK_LOOKUP("vref_buff",		cxo_clk.c,	"rpm-regulator"),
 	CLK_LOOKUP("pll2",		pll2_clk.c,	NULL),
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 3cd4b2f..6e7283b 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -2649,15 +2649,6 @@
 	.num_resources	= ARRAY_SIZE(i2s_mdm_resources),
 	.resource	= i2s_mdm_resources,
 };
-static int apq8064_LPM_latency = 1000; /* >100 usec for WFI */
-
-struct platform_device apq8064_cpu_idle_device = {
-	.name   = "msm_cpu_idle",
-	.id     = -1,
-	.dev = {
-		.platform_data = &apq8064_LPM_latency,
-	},
-};
 
 static struct msm_dcvs_freq_entry apq8064_freq[] = {
 	{ 384000, 900,  0, 0, 0},
@@ -2670,13 +2661,14 @@
 };
 
 static struct msm_dcvs_core_info apq8064_core_info = {
-	.freq_tbl	= &apq8064_freq[0],
-	.num_cores	= 4,
-	.sensors	= (int[]){7, 8, 9, 10},
-	.core_param	= {
+	.freq_tbl		= &apq8064_freq[0],
+	.num_cores		= 4,
+	.sensors		= (int[]){7, 8, 9, 10},
+	.thermal_poll_ms	= 60000,
+	.core_param		= {
 		.core_type	= MSM_DCVS_CORE_TYPE_CPU,
 	},
-	.algo_param	= {
+	.algo_param		= {
 		.disable_pc_threshold		= 1458000,
 		.em_win_size_min_us		= 100000,
 		.em_win_size_max_us		= 300000,
@@ -2692,7 +2684,7 @@
 		.ss_win_size_max_us		= 1000000,
 		.ss_util_pct			= 95,
 	},
-	.energy_coeffs	= {
+	.energy_coeffs		= {
 		.active_coeff_a		= 336,
 		.active_coeff_b		= 0,
 		.active_coeff_c		= 0,
@@ -2702,17 +2694,24 @@
 		.leakage_coeff_c	= 3329,
 		.leakage_coeff_d	= -277,
 	},
-	.power_param	= {
+	.power_param		= {
 		.current_temp	= 25,
 		.num_freq	= ARRAY_SIZE(apq8064_freq),
 	}
 };
 
+#define APQ8064_LPM_LATENCY  1000 /* >100 usec for WFI */
+
+static struct msm_gov_platform_data gov_platform_data = {
+	.info = &apq8064_core_info,
+	.latency = APQ8064_LPM_LATENCY,
+};
+
 struct platform_device apq8064_msm_gov_device = {
 	.name = "msm_dcvs_gov",
 	.id = -1,
 	.dev = {
-		.platform_data = &apq8064_core_info,
+		.platform_data = &gov_platform_data,
 	},
 };
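A note on the change just above (illustrative, not part of the patch):
apq8064_msm_gov_device now passes a struct msm_gov_platform_data that bundles
the DCVS core info with the LPM latency, rather than a bare
msm_dcvs_core_info. A hypothetical consumer could unpack it as in this sketch;
the probe function name is an assumption, and the real msm_dcvs_gov driver may
differ:

	/* Hypothetical consumer sketch of struct msm_gov_platform_data. */
	#include <linux/platform_device.h>
	#include <mach/msm_dcvs.h>

	static int example_gov_probe(struct platform_device *pdev)
	{
		struct msm_gov_platform_data *pdata = pdev->dev.platform_data;
		struct msm_dcvs_core_info *info = pdata->info;
		int lpm_latency = pdata->latency;	/* e.g. 1000 (>100 usec for WFI) */

		/* ... register each CPU with DCVS using info and lpm_latency ... */
		return 0;
	}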
 
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index d062ff4..ef3b950 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -583,16 +583,6 @@
 	.resource = &msm_rpm_rbcpr_resource,
 };
 
-static int msm8930_LPM_latency = 1000; /* >100 usec for WFI */
-
-struct platform_device msm8930_cpu_idle_device = {
-	.name   = "msm_cpu_idle",
-	.id     = -1,
-	.dev = {
-		.platform_data = &msm8930_LPM_latency,
-	},
-};
-
 struct platform_device msm_bus_8930_sys_fabric = {
 	.name  = "msm_bus_fabric",
 	.id    =  MSM_BUS_FAB_SYSTEM,
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 72da3d8..0b167e5 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -1334,13 +1334,10 @@
 	},
 };
 
-#define MSM_LPASS_QDSP6SS_PHYS	0x28800000
-#define SFAB_LPASS_Q6_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x23A0)
-
 static struct resource msm_8960_q6_lpass_resources[] = {
 	{
-		.start  = MSM_LPASS_QDSP6SS_PHYS,
-		.end    = MSM_LPASS_QDSP6SS_PHYS + SZ_256 - 1,
+		.start  = 0x28800000,
+		.end    = 0x28800000 + SZ_256 - 1,
 		.flags  = IORESOURCE_MEM,
 	},
 };
@@ -1349,93 +1346,69 @@
 	.strap_tcm_base  = 0x01460000,
 	.strap_ahb_upper = 0x00290000,
 	.strap_ahb_lower = 0x00000280,
-	.aclk_reg = SFAB_LPASS_Q6_ACLK_CTL,
+	.aclk_reg = MSM_CLK_CTL_BASE + 0x23A0,
 	.name = "q6",
 	.pas_id = PAS_Q6,
 	.bus_port = MSM_BUS_MASTER_LPASS_PROC,
 };
 
 struct platform_device msm_8960_q6_lpass = {
-	.name = "pil_qdsp6v4",
-	.id = 0,
+	.name = "pil-q6v4-lpass",
+	.id = -1,
 	.num_resources  = ARRAY_SIZE(msm_8960_q6_lpass_resources),
 	.resource       = msm_8960_q6_lpass_resources,
 	.dev.platform_data = &msm_8960_q6_lpass_data,
 };
 
-#define MSM_MSS_ENABLE_PHYS	0x08B00000
-#define MSM_FW_QDSP6SS_PHYS	0x08800000
-#define MSS_Q6FW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C6C)
-#define SFAB_MSS_Q6_FW_ACLK_CTL (MSM_CLK_CTL_BASE + 0x2044)
-
-static struct resource msm_8960_q6_mss_fw_resources[] = {
+static struct resource msm_8960_q6_mss_resources[] = {
 	{
-		.start  = MSM_FW_QDSP6SS_PHYS,
-		.end    = MSM_FW_QDSP6SS_PHYS + SZ_256 - 1,
+		.start  = 0x08800000,
+		.end    = 0x08800000 + SZ_256 - 1,
 		.flags  = IORESOURCE_MEM,
 	},
 	{
-		.start  = MSM_MSS_ENABLE_PHYS,
-		.end    = MSM_MSS_ENABLE_PHYS + 4 - 1,
+		.start  = 0x08B00000,
+		.end    = 0x08B00000 + SZ_256 - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+	{
+		.start  = 0x08900000,
+		.end    = 0x08900000 + SZ_256 - 1,
 		.flags  = IORESOURCE_MEM,
 	},
 };
 
-static struct pil_q6v4_pdata msm_8960_q6_mss_fw_data = {
-	.strap_tcm_base  = 0x00400000,
-	.strap_ahb_upper = 0x00090000,
-	.strap_ahb_lower = 0x00000080,
-	.aclk_reg = SFAB_MSS_Q6_FW_ACLK_CTL,
-	.jtag_clk_reg = MSS_Q6FW_JTAG_CLK_CTL,
-	.name = "modem_fw",
-	.depends = "q6",
-	.pas_id = PAS_MODEM_FW,
-	.bus_port = MSM_BUS_MASTER_MSS_FW_PROC,
-};
-
-struct platform_device msm_8960_q6_mss_fw = {
-	.name = "pil_qdsp6v4",
-	.id = 1,
-	.num_resources  = ARRAY_SIZE(msm_8960_q6_mss_fw_resources),
-	.resource       = msm_8960_q6_mss_fw_resources,
-	.dev.platform_data = &msm_8960_q6_mss_fw_data,
-};
-
-#define MSM_SW_QDSP6SS_PHYS	0x08900000
-#define SFAB_MSS_Q6_SW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2040)
-#define MSS_Q6SW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C68)
-
-static struct resource msm_8960_q6_mss_sw_resources[] = {
+static struct pil_q6v4_pdata msm_8960_q6_mss_data[2] = {
 	{
-		.start  = MSM_SW_QDSP6SS_PHYS,
-		.end    = MSM_SW_QDSP6SS_PHYS + SZ_256 - 1,
-		.flags  = IORESOURCE_MEM,
+		.strap_tcm_base  = 0x00400000,
+		.strap_ahb_upper = 0x00090000,
+		.strap_ahb_lower = 0x00000080,
+		.aclk_reg = MSM_CLK_CTL_BASE + 0x2C6C,
+		.jtag_clk_reg = MSM_CLK_CTL_BASE + 0x2044,
+		.name = "modem_fw",
+		.depends = "q6",
+		.pas_id = PAS_MODEM_FW,
+		.bus_port = MSM_BUS_MASTER_MSS_FW_PROC,
 	},
 	{
-		.start  = MSM_MSS_ENABLE_PHYS,
-		.end    = MSM_MSS_ENABLE_PHYS + 4 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+		.strap_tcm_base  = 0x00420000,
+		.strap_ahb_upper = 0x00090000,
+		.strap_ahb_lower = 0x00000080,
+		.aclk_reg = MSM_CLK_CTL_BASE + 0x2040,
+		.jtag_clk_reg = MSM_CLK_CTL_BASE + 0x2C68,
+		.name = "modem",
+		.depends = "modem_fw",
+		.pas_id = PAS_MODEM_SW,
+		.bus_port = MSM_BUS_MASTER_MSS_SW_PROC,
+	}
 };
 
-static struct pil_q6v4_pdata msm_8960_q6_mss_sw_data = {
-	.strap_tcm_base  = 0x00420000,
-	.strap_ahb_upper = 0x00090000,
-	.strap_ahb_lower = 0x00000080,
-	.aclk_reg = SFAB_MSS_Q6_SW_ACLK_CTL,
-	.jtag_clk_reg = MSS_Q6SW_JTAG_CLK_CTL,
-	.name = "modem",
-	.depends = "modem_fw",
-	.pas_id = PAS_MODEM_SW,
-	.bus_port = MSM_BUS_MASTER_MSS_SW_PROC,
-};
-
-struct platform_device msm_8960_q6_mss_sw = {
-	.name = "pil_qdsp6v4",
-	.id = 2,
-	.num_resources  = ARRAY_SIZE(msm_8960_q6_mss_sw_resources),
-	.resource       = msm_8960_q6_mss_sw_resources,
-	.dev.platform_data = &msm_8960_q6_mss_sw_data,
+struct platform_device msm_8960_q6_mss = {
+	.name = "pil-q6v4-modem",
+	.id = -1,
+	.num_resources  = ARRAY_SIZE(msm_8960_q6_mss_resources),
+	.resource       = msm_8960_q6_mss_resources,
+	.dev.platform_data = msm_8960_q6_mss_data,
 };
 
 static struct resource msm_8960_riva_resources[] = {
@@ -4071,16 +4044,6 @@
 	.resource	= msm_ebi1_ch1_erp_resources,
 };
 
-static int msm8960_LPM_latency = 1000; /* >100 usec for WFI */
-
-struct platform_device msm8960_cpu_idle_device = {
-	.name   = "msm_cpu_idle",
-	.id     = -1,
-	.dev = {
-		.platform_data = &msm8960_LPM_latency,
-	},
-};
-
 static struct resource msm_cache_erp_resources[] = {
 	{
 		.name = "l1_irq",
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 6f3dda3..5b291a7 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -251,8 +251,7 @@
 extern struct platform_device msm_pil_dsps;
 extern struct platform_device msm_pil_vidc;
 extern struct platform_device msm_8960_q6_lpass;
-extern struct platform_device msm_8960_q6_mss_fw;
-extern struct platform_device msm_8960_q6_mss_sw;
+extern struct platform_device msm_8960_q6_mss;
 extern struct platform_device msm_8960_riva;
 extern struct platform_device msm_gss;
 
@@ -394,10 +393,6 @@
 extern struct platform_device *msm_8974_stub_regulator_devices[];
 extern int msm_8974_stub_regulator_devices_len;
 
-extern struct platform_device msm8960_cpu_idle_device;
-extern struct platform_device msm8930_cpu_idle_device;
-extern struct platform_device apq8064_cpu_idle_device;
-
 extern struct platform_device apq8064_msm_gov_device;
 
 extern struct platform_device msm_bus_8930_apps_fabric;
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index f63af64..41a59af6 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -88,7 +88,6 @@
  * @aclk:	Alternate clock for this IOMMU core, if any
  * @name:	Human-readable name of this IOMMU device
  * @gdsc:	Regulator needed to power this HW block (v2 only)
- * @nsmr:	Size of the SMT on this HW block (v2 only)
  * @bfb_settings: Optional BFB performance tuning parameters
  *
  * A msm_iommu_drvdata holds the global driver data about a single piece
@@ -103,7 +102,6 @@
 	struct clk *aclk;
 	const char *name;
 	struct regulator *gdsc;
-	unsigned int nsmr;
 	struct msm_iommu_bfb_settings *bfb_settings;
 };
 
diff --git a/arch/arm/mach-msm/include/mach/irqs-8910.h b/arch/arm/mach-msm/include/mach/irqs-8910.h
new file mode 100644
index 0000000..22fdc16
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/irqs-8910.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_IRQS_8910_H
+#define __ASM_ARCH_MSM_IRQS_8910_H
+
+/* MSM ACPU Interrupt Numbers */
+
+/*
+ * 0-15:  STI/SGI (software triggered/generated interrupts)
+ * 16-31: PPI (private peripheral interrupts)
+ * 32+:   SPI (shared peripheral interrupts)
+ */
+
+#define GIC_PPI_START 16
+#define GIC_SPI_START 32
+
+#define INT_ARMQC_PERFMON			(GIC_PPI_START + 10)
+
+#define APCC_QGICL2PERFMONIRPTREQ	(GIC_SPI_START + 1)
+#define SC_SICL2PERFMONIRPTREQ		APCC_QGICL2PERFMONIRPTREQ
+#define TLMM_MSM_SUMMARY_IRQ		(GIC_SPI_START + 208)
+
+#define NR_MSM_IRQS 256
+#define NR_GPIO_IRQS 146
+#define NR_QPNP_IRQS 32768 /* SPARSE_IRQ is required to support this */
+#define NR_BOARD_IRQS NR_QPNP_IRQS
+#define NR_TLMM_MSM_DIR_CONN_IRQ 8
+#define NR_MSM_GPIOS NR_GPIO_IRQS
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs.h b/arch/arm/mach-msm/include/mach/irqs.h
index f562c40..7aff770 100644
--- a/arch/arm/mach-msm/include/mach/irqs.h
+++ b/arch/arm/mach-msm/include/mach/irqs.h
@@ -60,6 +60,8 @@
 
 #if defined(CONFIG_ARCH_MSM8974)
 #include "irqs-8974.h"
+#elif defined(CONFIG_ARCH_MSM8910)
+#include "irqs-8910.h"
 #elif defined(CONFIG_ARCH_MPQ8092)
 #include "irqs-8092.h"
 #elif defined(CONFIG_ARCH_MSM9615)
diff --git a/arch/arm/mach-msm/include/mach/msm_dcvs.h b/arch/arm/mach-msm/include/mach/msm_dcvs.h
index e8908e8..e81cee4 100644
--- a/arch/arm/mach-msm/include/mach/msm_dcvs.h
+++ b/arch/arm/mach-msm/include/mach/msm_dcvs.h
@@ -19,6 +19,10 @@
 #define CORES_MAX (10)
 
 #define CPU_OFFSET	1  /* used to notify TZ the core number */
+#define GPU_OFFSET (CORES_MAX * 2/3)  /* There will be more CPUs than GPUs,
+				       * so assign the GPUs fewer core
+				       * elements and start them later.
+				       */
 
 enum msm_core_idle_state {
 	MSM_DCVS_IDLE_ENTER,
@@ -32,43 +36,14 @@
 	MSM_DCVS_DISABLE_HIGH_LATENCY_MODES,
 };
 
-/**
- * struct msm_dcvs_idle
- *
- * API for idle code to register and send idle enter/exit
- * notifications to msm_dcvs driver.
- */
-struct msm_dcvs_idle {
-	const char *core_name;
-	/* Enable/Disable idle state/notifications */
-	int (*enable)(struct msm_dcvs_idle *self,
-			enum msm_core_control_event event);
+struct msm_gov_platform_data {
+	struct msm_dcvs_core_info *info;
+	int latency;
 };
 
 /**
- * msm_dcvs_idle_source_register
- * @drv: Pointer to the source driver
- * @return: Handle to be used for sending idle state notifications.
- *
- * Register the idle driver with the msm_dcvs driver to send idle
- * state notifications for the core.
- */
-extern int msm_dcvs_idle_source_register(struct msm_dcvs_idle *drv);
-
-/**
- * msm_dcvs_idle_source_unregister
- * @drv: Pointer to the source driver
- * @return:
- *	0 on success
- *	-EINVAL
- *
- * Description: Unregister the idle driver with the msm_dcvs driver
- */
-extern int msm_dcvs_idle_source_unregister(struct msm_dcvs_idle *drv);
-
-/**
  * msm_dcvs_idle
- * @handle: Handle provided back at registration
+ * @dcvs_core_id: The id returned by msm_dcvs_register_core
  * @state: The enter/exit idle state the core is in
  * @iowaited: iowait in us
  * on iMSM_DCVS_IDLE_EXIT.
@@ -80,7 +55,7 @@
  *
  * Send idle state notifications to the msm_dcvs driver
  */
-int msm_dcvs_idle(int handle, enum msm_core_idle_state state,
+int msm_dcvs_idle(int dcvs_core_id, enum msm_core_idle_state state,
 		uint32_t iowaited);
 
 /**
@@ -92,6 +67,7 @@
 struct msm_dcvs_core_info {
 	int					num_cores;
 	int					*sensors;
+	int					thermal_poll_ms;
 	struct msm_dcvs_freq_entry		*freq_tbl;
 	struct msm_dcvs_core_param		core_param;
 	struct msm_dcvs_algo_param		algo_param;
@@ -101,7 +77,8 @@
 
 /**
  * msm_dcvs_register_core
- * @core_name: Unique name identifier for the core.
+ * @type: whether this is a CPU or a GPU
+ * @type_core_num: The number of the core within its type
  * @info: The core specific algorithm parameters.
  * @sensor: The thermal sensor number of the core in question
  * @return :
@@ -113,35 +90,28 @@
  * msm_dcvs_freq_sink_register
  * Cores that need to run synchronously must share the same group id.
  */
-extern int msm_dcvs_register_core(const char *core_name,
-		struct msm_dcvs_core_info *info, int sensor);
+extern int msm_dcvs_register_core(
+	enum msm_dcvs_core_type type,
+	int type_core_num,
+	struct msm_dcvs_core_info *info,
+	int (*set_frequency)(int type_core_num, unsigned int freq),
+	unsigned int (*get_frequency)(int type_core_num),
+	int (*idle_enable)(int type_core_num,
+				enum msm_core_control_event event),
+	int sensor);
 
 /**
- * struct msm_dcvs_freq
- *
- * API for clock driver code to register and receive frequency change
- * request for the core from the msm_dcvs driver.
- */
-struct msm_dcvs_freq {
-	const char *core_name;
-	/* Callback from msm_dcvs to set the core frequency */
-	int (*set_frequency)(struct msm_dcvs_freq *self,
-			unsigned int freq);
-	unsigned int (*get_frequency)(struct msm_dcvs_freq *self);
-};
-
-/**
- * msm_dcvs_freq_sink_register
+ * msm_dcvs_freq_sink_start
  * @drv: The sink driver
  * @return: Handle unique to the core.
  *
  * Register the clock driver code with the msm_dvs driver to get notified about
  * frequency change requests.
  */
-extern int msm_dcvs_freq_sink_register(struct msm_dcvs_freq *drv);
+extern int msm_dcvs_freq_sink_start(int dcvs_core_id);
 
 /**
- * msm_dcvs_freq_sink_unregister
+ * msm_dcvs_freq_sink_stop
  * @drv: The sink driver
  * @return:
  *	0 on success,
@@ -150,6 +120,13 @@
  * Unregister the sink driver for the core. This will cause the source driver
  * for the core to stop sending idle pulses.
  */
-extern int msm_dcvs_freq_sink_unregister(struct msm_dcvs_freq *drv);
+extern int msm_dcvs_freq_sink_stop(int dcvs_core_id);
 
+/**
+ * msm_dcvs_update_limits
+ * @dcvs_core_id: The id returned by msm_dcvs_register_core
+ *
+ * Update the frequency known to dcvs when the limits are changed.
+ */
+extern void msm_dcvs_update_limits(int dcvs_core_id);
 #endif
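To make the reworked registration flow above concrete, the sketch below is a
hedged client example built only from the declarations in this header; the
example_* callbacks, the 384000 KHz value, and the assumption that a negative
return indicates an error are placeholders, not code from this patch:

	#include <mach/msm_dcvs.h>

	/* Placeholder callbacks: a real client would program/query the core clock. */
	static int example_set_freq(int type_core_num, unsigned int freq)
	{
		return freq;			/* rate actually set, in KHz */
	}

	static unsigned int example_get_freq(int type_core_num)
	{
		return 384000;			/* current rate in KHz (placeholder) */
	}

	static int example_idle_enable(int type_core_num,
			enum msm_core_control_event event)
	{
		return 0;			/* enable/disable idle notifications */
	}

	static int example_register(struct msm_dcvs_core_info *info)
	{
		int dcvs_core_id;

		dcvs_core_id = msm_dcvs_register_core(MSM_DCVS_CORE_TYPE_CPU,
				0 /* type_core_num */, info,
				example_set_freq, example_get_freq,
				example_idle_enable, info->sensors[0]);
		if (dcvs_core_id < 0)		/* assumed error convention */
			return dcvs_core_id;

		/* Start receiving frequency change requests for this core. */
		return msm_dcvs_freq_sink_start(dcvs_core_id);
	}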
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8910.h b/arch/arm/mach-msm/include/mach/msm_iomap-8910.h
new file mode 100644
index 0000000..e4cd312
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8910.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_IOMAP_8910_H
+#define __ASM_ARCH_MSM_IOMAP_8910_H
+
+/* Physical base address and size of peripherals.
+ * Ordered by the virtual base addresses they will be mapped at.
+ *
+ * If you add or remove entries here, you'll want to edit the
+ * io desc array in arch/arm/mach-msm/io.c to reflect your
+ * changes.
+ *
+ */
+
+#define MSM8910_MSM_SHARED_RAM_PHYS	0x0FA00000
+
+#define MSM8910_APCS_GCC_PHYS	0xF9011000
+#define MSM8910_APCS_GCC_SIZE	SZ_4K
+
+#define MSM8910_TLMM_PHYS	0xFD510000
+#define MSM8910_TLMM_SIZE	SZ_16K
+
+#define MSM8910_IMEM_PHYS	0xFC42B000
+#define MSM8910_IMEM_SIZE	SZ_4K
+
+#ifdef CONFIG_DEBUG_MSM8910_UART
+#define MSM_DEBUG_UART_BASE	IOMEM(0xFA71E000)
+#define MSM_DEBUG_UART_PHYS	0xF991E000
+#endif
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 8dbd29c..f372b1e 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -54,7 +54,7 @@
 	defined(CONFIG_ARCH_MSM7X25) || defined(CONFIG_ARCH_MSM7X01A) || \
 	defined(CONFIG_ARCH_MSM8625) || defined(CONFIG_ARCH_MSM7X30) || \
 	defined(CONFIG_ARCH_MSM9625) || defined(CONFIG_ARCH_MPQ8092) || \
-	defined(CONFIG_ARCH_MSM8226)
+	defined(CONFIG_ARCH_MSM8226) || defined(CONFIG_ARCH_MSM8910)
 
 /* Unified iomap */
 
@@ -122,6 +122,7 @@
 #include "msm_iomap-9625.h"
 #include "msm_iomap-8092.h"
 #include "msm_iomap-8226.h"
+#include "msm_iomap-8910.h"
 
 #else
 /* Legacy single-target iomap */
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 225440c..86045b9 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -54,6 +54,12 @@
 	of_machine_is_compatible("qcom,msm8226")
 #define machine_is_msm8226_sim()		\
 	of_machine_is_compatible("qcom,msm8226-sim")
+#define early_machine_is_msm8910()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8910")
+#define machine_is_msm8910()		\
+	of_machine_is_compatible("qcom,msm8910")
+#define machine_is_msm8910_sim()		\
+	of_machine_is_compatible("qcom,msm8910-sim")
 #else
 #define early_machine_is_msm8974()	0
 #define machine_is_msm8974()		0
@@ -66,6 +72,9 @@
 #define early_machine_is_msm8226()	0
 #define machine_is_msm8226()		0
 #define machine_is_msm8226_sim()	0
+#define early_machine_is_msm8910()	0
+#define machine_is_msm8910()		0
+#define machine_is_msm8910_sim()	0
 
 #endif
 
@@ -99,7 +108,8 @@
 	MSM_CPU_8625,
 	MSM_CPU_9625,
 	MSM_CPU_8092,
-	MSM_CPU_8226
+	MSM_CPU_8226,
+	MSM_CPU_8910,
 };
 
 enum pmic_model {
@@ -415,4 +425,15 @@
 #endif
 }
 
+static inline int cpu_is_msm8910(void)
+{
+#ifdef CONFIG_ARCH_MSM8910
+	enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+	BUG_ON(cpu == MSM_CPU_UNKNOWN);
+	return cpu == MSM_CPU_8910;
+#else
+	return 0;
+#endif
+}
 #endif
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 39ac253..8ebead8 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -529,3 +529,22 @@
 	msm_map_io(msm_8226_io_desc, ARRAY_SIZE(msm_8226_io_desc));
 }
 #endif /* CONFIG_ARCH_MSM8226 */
+
+#ifdef CONFIG_ARCH_MSM8910
+static struct map_desc msm8910_io_desc[] __initdata = {
+	MSM_CHIP_DEVICE(APCS_GCC, MSM8910),
+	MSM_CHIP_DEVICE(TLMM, MSM8910),
+	MSM_CHIP_DEVICE(IMEM, MSM8910),
+	{
+		.virtual =  (unsigned long) MSM_SHARED_RAM_BASE,
+		.length =   MSM_SHARED_RAM_SIZE,
+		.type =     MT_DEVICE,
+	},
+};
+
+void __init msm_map_msm8910_io(void)
+{
+	msm_shared_ram_phys = MSM8910_MSM_SHARED_RAM_PHYS;
+	msm_map_io(msm8910_io_desc, ARRAY_SIZE(msm8910_io_desc));
+}
+#endif /* CONFIG_ARCH_MSM8910 */
diff --git a/arch/arm/mach-msm/msm_dcvs.c b/arch/arm/mach-msm/msm_dcvs.c
index f761bf9..1c5377f 100644
--- a/arch/arm/mach-msm/msm_dcvs.c
+++ b/arch/arm/mach-msm/msm_dcvs.c
@@ -14,7 +14,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/kthread.h>
 #include <linux/kobject.h>
 #include <linux/ktime.h>
@@ -27,23 +26,14 @@
 #include <asm/atomic.h>
 #include <asm/page.h>
 #include <mach/msm_dcvs.h>
+#include <trace/events/mpdcvs_trace.h>
 
 #define CORE_HANDLE_OFFSET (0xA0)
 #define __err(f, ...) pr_err("MSM_DCVS: %s: " f, __func__, __VA_ARGS__)
 #define __info(f, ...) pr_info("MSM_DCVS: %s: " f, __func__, __VA_ARGS__)
 #define MAX_PENDING	(5)
 
-enum {
-	MSM_DCVS_DEBUG_NOTIFIER    = BIT(0),
-	MSM_DCVS_DEBUG_IDLE_PULSE  = BIT(1),
-	MSM_DCVS_DEBUG_FREQ_CHANGE = BIT(2),
-};
-
 struct core_attribs {
-	struct kobj_attribute core_id;
-	struct kobj_attribute idle_enabled;
-	struct kobj_attribute freq_change_enabled;
-	struct kobj_attribute actual_freq;
 	struct kobj_attribute freq_change_us;
 
 	struct kobj_attribute disable_pc_threshold;
@@ -69,10 +59,46 @@
 	struct kobj_attribute leakage_coeff_c;
 	struct kobj_attribute leakage_coeff_d;
 
+	struct kobj_attribute thermal_poll_ms;
+
 	struct attribute_group attrib_group;
 };
 
+enum pending_freq_state {
+	/*
+	 * used by the thread to check if pending_freq was updated while it was
+	 * setting previous frequency - this is written to and used by the
+	 * freq updating thread
+	 */
+	NO_OUTSTANDING_FREQ_CHANGE = 0,
+
+	/*
+	 * This request is set to indicate that the governor is stopped and no
+	 * more frequency change requests are accepted until it starts again.
+	 * This is checked/used by the threads that want to change the freq.
+	 */
+	STOP_FREQ_CHANGE = -1,
+
+	/*
+	 * Any other +ve value means that a freq change was requested and the
+	 * thread has not gotten around to update it
+	 *
+	 * Any other -ve value means that this is the last freq change i.e. a
+	 * freq change was requested but the thread has not run yet and
+	 * meanwhile the governor was stopped.
+	 */
+};
+
 struct dcvs_core {
+	spinlock_t	idle_state_change_lock;
+	/* 0 when not idle (busy), 1 when idle, and -1 when the governor starts
+	 * and we don't know whether the next call will be idle enter or exit.
+	 */
+	int		idle_entered;
+
+	enum msm_dcvs_core_type type;
+	/* the core number within its type, e.g. CPU 0,1,2 or GPU 0,1 */
+	int type_core_num;
 	char core_name[CORE_NAME_MAX];
 	uint32_t actual_freq;
 	uint32_t freq_change_us;
@@ -81,111 +107,200 @@
 
 	struct msm_dcvs_algo_param algo_param;
 	struct msm_dcvs_energy_curve_coeffs coeffs;
-	struct msm_dcvs_idle *idle_driver;
-	struct msm_dcvs_freq *freq_driver;
 
 	/* private */
-	int64_t time_start;
-	struct mutex lock;
-	spinlock_t cpu_lock;
+	ktime_t time_start;
 	struct task_struct *task;
 	struct core_attribs attrib;
-	uint32_t handle;
-	struct hrtimer timer;
-	int32_t timer_disabled;
+	uint32_t dcvs_core_id;
 	struct msm_dcvs_core_info *info;
 	int sensor;
-	int pending_freq;
 	wait_queue_head_t wait_q;
+
+	int (*set_frequency)(int type_core_num, unsigned int freq);
+	unsigned int (*get_frequency)(int type_core_num);
+	int (*idle_enable)(int type_core_num,
+			enum msm_core_control_event event);
+
+	spinlock_t	pending_freq_lock;
+	int pending_freq;
+
+	struct hrtimer	slack_timer;
+	struct delayed_work	temperature_work;
 };
 
-static int msm_dcvs_debug;
 static int msm_dcvs_enabled = 1;
 module_param_named(enable, msm_dcvs_enabled, int, S_IRUGO | S_IWUSR | S_IWGRP);
 
 static struct dentry		*debugfs_base;
 
 static struct dcvs_core core_list[CORES_MAX];
-static DEFINE_MUTEX(core_list_lock);
 
 static struct kobject *cores_kobj;
-static struct dcvs_core *core_handles[CORES_MAX];
 
-/* Change core frequency, called with core mutex locked */
+static void force_stop_slack_timer(struct dcvs_core *core)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags);
+	hrtimer_cancel(&core->slack_timer);
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
+}
+
+static void force_start_slack_timer(struct dcvs_core *core, int slack_us)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags);
+
+	/*
+	 * only start the timer if governor is not stopped
+	 */
+	if (slack_us != 0) {
+		ret = hrtimer_start(&core->slack_timer,
+				ktime_set(0, slack_us * 1000),
+				HRTIMER_MODE_REL_PINNED);
+		if (ret) {
+			pr_err("%s Failed to start timer ret = %d\n",
+					core->core_name, ret);
+		}
+	}
+
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
+}
+
+static void stop_slack_timer(struct dcvs_core *core)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags);
+	/* err only for CPU-type cores; GPUs can do idle exit consecutively */
+	if (core->idle_entered == 1 && !(core->dcvs_core_id >= GPU_OFFSET))
+		__err("%s trying to reenter idle", core->core_name);
+	core->idle_entered = 1;
+	hrtimer_cancel(&core->slack_timer);
+	core->idle_entered = 1;
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
+}
+
+static void start_slack_timer(struct dcvs_core *core, int slack_us)
+{
+	unsigned long flags1, flags2;
+	int ret;
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags2);
+
+	spin_lock_irqsave(&core->pending_freq_lock, flags1);
+
+	/* err only for CPU-type cores; GPUs can do idle enter consecutively */
+	if (core->idle_entered == 0 && !(core->dcvs_core_id >= GPU_OFFSET))
+		__err("%s trying to reexit idle", core->core_name);
+	core->idle_entered = 0;
+	/*
+	 * only start the timer if governor is not stopped
+	 */
+	if (slack_us != 0
+		&& !(core->pending_freq < NO_OUTSTANDING_FREQ_CHANGE)) {
+		ret = hrtimer_start(&core->slack_timer,
+				ktime_set(0, slack_us * 1000),
+				HRTIMER_MODE_REL_PINNED);
+		if (ret) {
+			pr_err("%s Failed to start timer ret = %d\n",
+					core->core_name, ret);
+		}
+	}
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags1);
+
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags2);
+}
+
+static void restart_slack_timer(struct dcvs_core *core, int slack_us)
+{
+	unsigned long flags1, flags2;
+	int ret;
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags2);
+
+	hrtimer_cancel(&core->slack_timer);
+
+	spin_lock_irqsave(&core->pending_freq_lock, flags1);
+
+	/*
+	 * only start the timer if idle is not entered
+	 * and governor is not stopped
+	 */
+	if (slack_us != 0 && (core->idle_entered != 1)
+		&& !(core->pending_freq < NO_OUTSTANDING_FREQ_CHANGE)) {
+		ret = hrtimer_start(&core->slack_timer,
+				ktime_set(0, slack_us * 1000),
+				HRTIMER_MODE_REL_PINNED);
+		if (ret) {
+			pr_err("%s Failed to start timer ret = %d\n",
+					core->core_name, ret);
+		}
+	}
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags1);
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags2);
+}
+
 static int __msm_dcvs_change_freq(struct dcvs_core *core)
 {
 	int ret = 0;
 	unsigned long flags = 0;
-	unsigned int requested_freq = 0;
-	unsigned int prev_freq = 0;
-	int64_t time_start = 0;
-	int64_t time_end = 0;
+	int requested_freq = 0;
+	ktime_t time_start;
 	uint32_t slack_us = 0;
 	uint32_t ret1 = 0;
 
-	if (!core->freq_driver || !core->freq_driver->set_frequency) {
-		/* Core may have unregistered or hotplugged */
-		return -ENODEV;
-	}
-	spin_lock_irqsave(&core->cpu_lock, flags);
+	spin_lock_irqsave(&core->pending_freq_lock, flags);
 repeat:
+	BUG_ON(!core->pending_freq);
+	BUG_ON(core->pending_freq == STOP_FREQ_CHANGE);
 
 	requested_freq = core->pending_freq;
 	time_start = core->time_start;
-	core->time_start = 0;
-	/**
-	 * Cancel the timers, we dont want the timer firing as we are
-	 * changing the clock rate. Dont let idle_exit and others setup
-	 * timers as well.
-	 */
-	hrtimer_cancel(&core->timer);
-	core->timer_disabled = 1;
+	core->time_start = ns_to_ktime(0);
+
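+	/*
+	 * a negative value means the governor was stopped while this
+	 * frequency change was still queued
+	 */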
+	if (requested_freq < 0) {
+		requested_freq = -1 * requested_freq;
+		core->pending_freq = STOP_FREQ_CHANGE;
+	} else {
+		core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE;
+	}
+
 	if (requested_freq == core->actual_freq)
 		goto out;
 
-	spin_unlock_irqrestore(&core->cpu_lock, flags);
-
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
 
 	/**
 	 * Call the frequency sink driver to change the frequency
 	 * We will need to get back the actual frequency in KHz and
 	 * record the time taken to change it.
 	 */
-	ret = core->freq_driver->set_frequency(core->freq_driver,
-				requested_freq);
+	ret = core->set_frequency(core->type_core_num, requested_freq);
 	if (ret <= 0) {
 		__err("Core %s failed to set freq %u\n",
 				core->core_name, requested_freq);
 		/* continue to call TZ to get updated slack timer */
 	} else {
-		prev_freq = core->actual_freq;
 		core->actual_freq = ret;
 	}
 
-	time_end = ktime_to_ns(ktime_get());
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_FREQ_CHANGE)
-		__info("Core %s Time end %llu Time start: %llu\n",
-			core->core_name, time_end, time_start);
-	time_end -= time_start;
-	do_div(time_end, NSEC_PER_USEC);
-	core->freq_change_us = (uint32_t)time_end;
+	core->freq_change_us = (uint32_t)ktime_to_us(
+					ktime_sub(ktime_get(), time_start));
 
 	/**
 	 * Disable low power modes if the actual frequency is >
 	 * disable_pc_threshold.
 	 */
-	if (core->actual_freq >
-			core->algo_param.disable_pc_threshold) {
-		core->idle_driver->enable(core->idle_driver,
+	if (core->actual_freq > core->algo_param.disable_pc_threshold) {
+		core->idle_enable(core->type_core_num,
 				MSM_DCVS_DISABLE_HIGH_LATENCY_MODES);
-		if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-			__info("Disabling LPM for %s\n", core->core_name);
-	} else if (core->actual_freq <=
-			core->algo_param.disable_pc_threshold) {
-		core->idle_driver->enable(core->idle_driver,
+	} else if (core->actual_freq <= core->algo_param.disable_pc_threshold) {
+		core->idle_enable(core->type_core_num,
 				MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
-		if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-			__info("Enabling LPM for %s\n", core->core_name);
 	}
 
 	/**
@@ -193,66 +308,75 @@
 	 * to this frequency and that will get us the new slack
 	 * timer
 	 */
-	ret = msm_dcvs_scm_event(core->handle, MSM_DCVS_SCM_CLOCK_FREQ_UPDATE,
-		core->actual_freq, (uint32_t)time_end, &slack_us, &ret1);
-	if (!ret) {
-		/* Reset the slack timer */
-		if (slack_us) {
-			core->timer_disabled = 0;
-			ret = hrtimer_start(&core->timer,
-				ktime_set(0, slack_us * 1000),
-				HRTIMER_MODE_REL_PINNED);
-			if (ret)
-				__err("Failed to register timer for core %s\n",
-						core->core_name);
-		}
-	} else {
-		__err("Error sending core (%s) freq change (%u)\n",
-				core->core_name, core->actual_freq);
+	ret = msm_dcvs_scm_event(core->dcvs_core_id,
+			MSM_DCVS_SCM_CLOCK_FREQ_UPDATE,
+			core->actual_freq, core->freq_change_us,
+			&slack_us, &ret1);
+	if (ret) {
+		__err("Error sending core (%s) dcvs_core_id = %d freq change (%u) reqfreq = %d slack_us=%d ret = %d\n",
+				core->core_name, core->dcvs_core_id,
+				core->actual_freq, requested_freq,
+				slack_us, ret);
 	}
 
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_FREQ_CHANGE)
-		__info("Freq %u requested for core %s (actual %u prev %u) "
-			"change time %u us slack time %u us\n",
-			requested_freq, core->core_name,
-			core->actual_freq, prev_freq,
-			core->freq_change_us, slack_us);
+	/*
+	 * TODO: confirm that we get a valid freq from SM even when the
+	 * above FREQ_UPDATE fails
+	 */
+	restart_slack_timer(core, slack_us);
+	spin_lock_irqsave(&core->pending_freq_lock, flags);
 
-	spin_lock_irqsave(&core->cpu_lock, flags);
 	/**
 	 * By the time we are done with freq changes, we could be asked to
 	 * change again. Check before exiting.
 	 */
-	if (core->pending_freq)
+	if (core->pending_freq != NO_OUTSTANDING_FREQ_CHANGE
+		&& core->pending_freq != STOP_FREQ_CHANGE) {
 		goto repeat;
+	}
 
-
-out: /* should always be jumped to with the spin_lock held */
-	core->pending_freq = 0;
-	spin_unlock_irqrestore(&core->cpu_lock, flags);
+out:   /* should always be jumped to with the spin_lock held */
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
 
 	return ret;
 }
 
-static int __msm_dcvs_report_temp(struct dcvs_core *core)
+static void msm_dcvs_report_temp_work(struct work_struct *work)
 {
+	struct dcvs_core *core = container_of(work,
+					struct dcvs_core,
+					temperature_work.work);
 	struct msm_dcvs_core_info *info = core->info;
 	struct tsens_device tsens_dev;
 	int ret;
 	unsigned long temp = 0;
+	int interval_ms;
 
 	tsens_dev.sensor_num = core->sensor;
 	ret = tsens_get_temp(&tsens_dev, &temp);
-	if (!ret) {
+	if (!temp) {
 		tsens_dev.sensor_num = 0;
 		ret = tsens_get_temp(&tsens_dev, &temp);
-		if (!ret)
-			return -ENODEV;
+		if (!temp)
+			goto out;
 	}
 
-	ret = msm_dcvs_scm_set_power_params(core->handle, &info->power_param,
+	if (temp == info->power_param.current_temp)
+		goto out;
+	info->power_param.current_temp = temp;
+
+	ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id,
+			&info->power_param,
 			&info->freq_tbl[0], &core->coeffs);
-	return ret;
+out:
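+	/* clamp the polling interval: 60s default when unset, 1s minimum */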
+	if (info->thermal_poll_ms == 0)
+		interval_ms = 60000;
+	else if (info->thermal_poll_ms < 1000)
+		interval_ms = 1000;
+	else
+		interval_ms = info->thermal_poll_ms;
+
+	schedule_delayed_work(&core->temperature_work,
+			msecs_to_jiffies(interval_ms));
 }
 
 static int msm_dcvs_do_freq(void *data)
@@ -270,25 +394,57 @@
 		if (kthread_should_stop())
 			break;
 
-		mutex_lock(&core->lock);
 		__msm_dcvs_change_freq(core);
-		__msm_dcvs_report_temp(core);
-		mutex_unlock(&core->lock);
 	}
 
 	return 0;
 }
 
+/* pending_freq_lock should be held */
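+/*
+ * pending_freq states:
+ *   NO_OUTSTANDING_FREQ_CHANGE - governor running, nothing queued
+ *   a positive frequency       - a change to that frequency is queued
+ *   STOP_FREQ_CHANGE           - governor stopped, new requests are ignored
+ *   a negated frequency        - governor stopped with that change queued
+ */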
+static void request_freq_change(struct dcvs_core *core, int new_freq)
+{
+	if (new_freq == NO_OUTSTANDING_FREQ_CHANGE) {
+		if (core->pending_freq != STOP_FREQ_CHANGE) {
+			__err("%s gov started with earlier pending freq %d\n",
+					core->core_name, core->pending_freq);
+		}
+		core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE;
+		return;
+	}
+
+	if (new_freq == STOP_FREQ_CHANGE) {
+		if (core->pending_freq == NO_OUTSTANDING_FREQ_CHANGE)
+			core->pending_freq = STOP_FREQ_CHANGE;
+		else if (core->pending_freq > 0)
+			core->pending_freq = -1 * core->pending_freq;
+		return;
+	}
+
+	if (core->pending_freq < 0) {
+		/* a value less than 0 means that the governor has stopped
+		 * and no more freq changes should be requested
+		 */
+		return;
+	}
+
+	if (core->actual_freq != new_freq && core->pending_freq != new_freq) {
+		core->pending_freq = new_freq;
+		core->time_start = ktime_get();
+		wake_up(&core->wait_q);
+	}
+}
+
 static int msm_dcvs_update_freq(struct dcvs_core *core,
 		enum msm_dcvs_scm_event event, uint32_t param0,
-		uint32_t *ret1, int *freq_changed)
+		uint32_t *ret1)
 {
 	int ret = 0;
 	unsigned long flags = 0;
-	uint32_t new_freq = 0;
+	uint32_t new_freq = -EINVAL;
 
-	spin_lock_irqsave(&core->cpu_lock, flags);
-	ret = msm_dcvs_scm_event(core->handle, event, param0,
+	spin_lock_irqsave(&core->pending_freq_lock, flags);
+
+	ret = msm_dcvs_scm_event(core->dcvs_core_id, event, param0,
 				core->actual_freq, &new_freq, ret1);
 	if (ret) {
 		if (ret == -13)
@@ -299,18 +455,18 @@
 		goto out;
 	}
 
-	if (core->actual_freq != new_freq && core->pending_freq != new_freq) {
-		core->pending_freq = new_freq;
-		core->time_start = ktime_to_ns(ktime_get());
-
-		if (core->task)
-			wake_up(&core->wait_q);
-	} else {
-		if (freq_changed)
-			*freq_changed = 0;
+	if (new_freq == 0) {
+		/*
+		 * sometimes TZ gives us a 0 freq back,
+		 * do not queue up a request
+		 */
+		goto out;
 	}
+
+	request_freq_change(core, new_freq);
+
 out:
-	spin_unlock_irqrestore(&core->cpu_lock, flags);
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
 
 	return ret;
 }
@@ -318,19 +474,17 @@
 static enum hrtimer_restart msm_dcvs_core_slack_timer(struct hrtimer *timer)
 {
 	int ret = 0;
-	struct dcvs_core *core = container_of(timer, struct dcvs_core, timer);
+	struct dcvs_core *core = container_of(timer,
+					struct dcvs_core, slack_timer);
 	uint32_t ret1;
-	uint32_t ret2;
 
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_FREQ_CHANGE)
-		__info("Slack timer fired for core %s\n", core->core_name);
-
+	trace_printk("dcvs: Slack timer fired for core=%s\n", core->core_name);
 	/**
 	 * Timer expired, notify TZ
 	 * Dont care about the third arg.
 	 */
 	ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_QOS_TIMER_EXPIRED, 0,
-				   &ret1, &ret2);
+				   &ret1);
 	if (ret)
 		__err("Timer expired for core %s but failed to notify.\n",
 				core->core_name);
@@ -351,6 +505,28 @@
 	return snprintf(buf, PAGE_SIZE, "%d\n", v); \
 }
 
+#define DCVS_PARAM_STORE(_name) \
+static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj,\
+		struct kobj_attribute *attr, char *buf) \
+{ \
+	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
+	return snprintf(buf, PAGE_SIZE, "%d\n", core->info->_name); \
+} \
+static ssize_t msm_dcvs_attr_##_name##_store(struct kobject *kobj, \
+		struct kobj_attribute *attr, const char *buf, size_t count) \
+{ \
+	int ret = 0; \
+	uint32_t val = 0; \
+	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
+	ret = kstrtouint(buf, 10, &val); \
+	if (ret) { \
+		__err("Invalid input %s for %s\n", buf, __stringify(_name));\
+	} else { \
+		core->info->_name = val; \
+	} \
+	return count; \
+}
+
 #define DCVS_ALGO_PARAM(_name) \
 static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj,\
 		struct kobj_attribute *attr, char *buf) \
@@ -364,14 +540,13 @@
 	int ret = 0; \
 	uint32_t val = 0; \
 	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
-	mutex_lock(&core->lock); \
 	ret = kstrtouint(buf, 10, &val); \
 	if (ret) { \
 		__err("Invalid input %s for %s\n", buf, __stringify(_name));\
 	} else { \
 		uint32_t old_val = core->algo_param._name; \
 		core->algo_param._name = val; \
-		ret = msm_dcvs_scm_set_algo_params(core->handle, \
+		ret = msm_dcvs_scm_set_algo_params(core->dcvs_core_id, \
 				&core->algo_param); \
 		if (ret) { \
 			core->algo_param._name = old_val; \
@@ -379,7 +554,6 @@
 					ret, val, __stringify(_name)); \
 		} \
 	} \
-	mutex_unlock(&core->lock); \
 	return count; \
 }
 
@@ -396,14 +570,13 @@
 	int ret = 0; \
 	int32_t val = 0; \
 	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
-	mutex_lock(&core->lock); \
 	ret = kstrtoint(buf, 10, &val); \
 	if (ret) { \
 		__err("Invalid input %s for %s\n", buf, __stringify(_name));\
 	} else { \
 		int32_t old_val = core->coeffs._name; \
 		core->coeffs._name = val; \
-		ret = msm_dcvs_scm_set_power_params(core->handle, \
+		ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id, \
 			&core->info->power_param, &core->info->freq_tbl[0], \
 				&core->coeffs); \
 		if (ret) { \
@@ -412,7 +585,6 @@
 					ret, val, __stringify(_name)); \
 		} \
 	} \
-	mutex_unlock(&core->lock); \
 	return count; \
 }
 
@@ -434,10 +606,6 @@
  * Function declarations for different attributes.
  * Gets used when setting the attribute show and store parameters.
  */
-DCVS_PARAM_SHOW(core_id, core->handle)
-DCVS_PARAM_SHOW(idle_enabled, (core->idle_driver != NULL))
-DCVS_PARAM_SHOW(freq_change_enabled, (core->freq_driver != NULL))
-DCVS_PARAM_SHOW(actual_freq, (core->actual_freq))
 DCVS_PARAM_SHOW(freq_change_us, (core->freq_change_us))
 
 DCVS_ALGO_PARAM(disable_pc_threshold)
@@ -463,11 +631,13 @@
 DCVS_ENERGY_PARAM(leakage_coeff_c)
 DCVS_ENERGY_PARAM(leakage_coeff_d)
 
+DCVS_PARAM_STORE(thermal_poll_ms)
+
 static int msm_dcvs_setup_core_sysfs(struct dcvs_core *core)
 {
 	int ret = 0;
 	struct kobject *core_kobj = NULL;
-	const int attr_count = 27;
+	const int attr_count = 24;
 
 	BUG_ON(!cores_kobj);
 
@@ -479,37 +649,33 @@
 		goto done;
 	}
 
+	DCVS_RO_ATTRIB(0, freq_change_us);
 
-	DCVS_RO_ATTRIB(0, core_id);
-	DCVS_RO_ATTRIB(1, idle_enabled);
-	DCVS_RO_ATTRIB(2, freq_change_enabled);
-	DCVS_RO_ATTRIB(3, actual_freq);
-	DCVS_RO_ATTRIB(4, freq_change_us);
+	DCVS_RW_ATTRIB(1, disable_pc_threshold);
+	DCVS_RW_ATTRIB(2, em_win_size_min_us);
+	DCVS_RW_ATTRIB(3, em_win_size_max_us);
+	DCVS_RW_ATTRIB(4, em_max_util_pct);
+	DCVS_RW_ATTRIB(5, group_id);
+	DCVS_RW_ATTRIB(6, max_freq_chg_time_us);
+	DCVS_RW_ATTRIB(7, slack_mode_dynamic);
+	DCVS_RW_ATTRIB(8, slack_weight_thresh_pct);
+	DCVS_RW_ATTRIB(9, slack_time_min_us);
+	DCVS_RW_ATTRIB(10, slack_time_max_us);
+	DCVS_RW_ATTRIB(11, ss_iobusy_conv);
+	DCVS_RW_ATTRIB(12, ss_win_size_min_us);
+	DCVS_RW_ATTRIB(13, ss_win_size_max_us);
+	DCVS_RW_ATTRIB(14, ss_util_pct);
 
-	DCVS_RW_ATTRIB(5, disable_pc_threshold);
-	DCVS_RW_ATTRIB(6, em_win_size_min_us);
-	DCVS_RW_ATTRIB(7, em_win_size_max_us);
-	DCVS_RW_ATTRIB(8, em_max_util_pct);
-	DCVS_RW_ATTRIB(9, group_id);
-	DCVS_RW_ATTRIB(10, max_freq_chg_time_us);
-	DCVS_RW_ATTRIB(11, slack_mode_dynamic);
-	DCVS_RW_ATTRIB(12, slack_time_min_us);
-	DCVS_RW_ATTRIB(13, slack_time_max_us);
-	DCVS_RW_ATTRIB(14, slack_weight_thresh_pct);
-	DCVS_RW_ATTRIB(15, ss_iobusy_conv);
-	DCVS_RW_ATTRIB(16, ss_win_size_min_us);
-	DCVS_RW_ATTRIB(17, ss_win_size_max_us);
-	DCVS_RW_ATTRIB(18, ss_util_pct);
+	DCVS_RW_ATTRIB(15, active_coeff_a);
+	DCVS_RW_ATTRIB(16, active_coeff_b);
+	DCVS_RW_ATTRIB(17, active_coeff_c);
+	DCVS_RW_ATTRIB(18, leakage_coeff_a);
+	DCVS_RW_ATTRIB(19, leakage_coeff_b);
+	DCVS_RW_ATTRIB(20, leakage_coeff_c);
+	DCVS_RW_ATTRIB(21, leakage_coeff_d);
+	DCVS_RW_ATTRIB(22, thermal_poll_ms);
 
-	DCVS_RW_ATTRIB(19, active_coeff_a);
-	DCVS_RW_ATTRIB(20, active_coeff_b);
-	DCVS_RW_ATTRIB(21, active_coeff_c);
-	DCVS_RW_ATTRIB(22, leakage_coeff_a);
-	DCVS_RW_ATTRIB(23, leakage_coeff_b);
-	DCVS_RW_ATTRIB(24, leakage_coeff_c);
-	DCVS_RW_ATTRIB(25, leakage_coeff_d);
-
-	core->attrib.attrib_group.attrs[26] = NULL;
+	core->attrib.attrib_group.attrs[23] = NULL;
 
 	core_kobj = kobject_create_and_add(core->core_name, cores_kobj);
 	if (!core_kobj) {
@@ -520,8 +686,6 @@
 	ret = sysfs_create_group(core_kobj, &core->attrib.attrib_group);
 	if (ret)
 		__err("Cannot create core %s attr group\n", core->core_name);
-	else if (msm_dcvs_debug & MSM_DCVS_DEBUG_NOTIFIER)
-		__info("Setting up attributes for core %s\n", core->core_name);
 
 done:
 	if (ret) {
@@ -532,65 +696,74 @@
 	return ret;
 }
 
-/* Return the core if found or add to list if @add_to_list is true */
-static struct dcvs_core *msm_dcvs_get_core(const char *name, int add_to_list)
+/* Return the core and initialize its non-platform-data fields */
+static struct dcvs_core *msm_dcvs_add_core(enum msm_dcvs_core_type type,
+								int num)
 {
 	struct dcvs_core *core = NULL;
 	int i;
-	int empty = -1;
+	char name[CORE_NAME_MAX];
 
-	if (!name[0] ||
-		(strnlen(name, CORE_NAME_MAX - 1) == CORE_NAME_MAX - 1))
-		return core;
-
-	mutex_lock(&core_list_lock);
-	for (i = 0; i < CORES_MAX; i++) {
-		core = &core_list[i];
-		if ((empty < 0) && !core->core_name[0]) {
-			empty = i;
-			continue;
-		}
-		if (!strncmp(name, core->core_name, CORE_NAME_MAX))
-			break;
-	}
-
-	/* Check for core_list full */
-	if ((i == CORES_MAX) && (empty < 0)) {
-		mutex_unlock(&core_list_lock);
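+	/*
+	 * map (type, core number) to a fixed slot in core_list:
+	 * CPUs start at CPU_OFFSET, GPUs start at GPU_OFFSET
+	 */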
+	switch (type) {
+	case MSM_DCVS_CORE_TYPE_CPU:
+		i = CPU_OFFSET + num;
+		BUG_ON(i >= GPU_OFFSET);
+		snprintf(name, CORE_NAME_MAX, "cpu%d", num);
+		break;
+	case MSM_DCVS_CORE_TYPE_GPU:
+		i = GPU_OFFSET + num;
+		BUG_ON(i >= CORES_MAX);
+		snprintf(name, CORE_NAME_MAX, "gpu%d", num);
+		break;
+	default:
 		return NULL;
 	}
 
-	if (i == CORES_MAX && add_to_list) {
-		core = &core_list[empty];
-		strlcpy(core->core_name, name, CORE_NAME_MAX);
-		mutex_init(&core->lock);
-		spin_lock_init(&core->cpu_lock);
-		core->handle = empty + CORE_HANDLE_OFFSET;
-		hrtimer_init(&core->timer,
-				CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
-		core->timer.function = msm_dcvs_core_slack_timer;
-	}
-	mutex_unlock(&core_list_lock);
-
+	core = &core_list[i];
+	core->dcvs_core_id = i;
+	strlcpy(core->core_name, name, CORE_NAME_MAX);
+	spin_lock_init(&core->pending_freq_lock);
+	spin_lock_init(&core->idle_state_change_lock);
+	hrtimer_init(&core->slack_timer,
+			CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+	core->slack_timer.function = msm_dcvs_core_slack_timer;
 	return core;
 }
 
-int msm_dcvs_register_core(const char *core_name,
-		struct msm_dcvs_core_info *info, int sensor)
+/* Return the core at the given offset into core_list */
+static struct dcvs_core *msm_dcvs_get_core(int offset)
+{
+	/* BUG if this slot was never registered (dcvs_core_id still -1) */
+	BUG_ON(core_list[offset].dcvs_core_id == -1);
+	return &core_list[offset];
+}
+
+int msm_dcvs_register_core(
+	enum msm_dcvs_core_type type,
+	int type_core_num,
+	struct msm_dcvs_core_info *info,
+	int (*set_frequency)(int type_core_num, unsigned int freq),
+	unsigned int (*get_frequency)(int type_core_num),
+	int (*idle_enable)(int type_core_num,
+					enum msm_core_control_event event),
+	int sensor)
 {
 	int ret = -EINVAL;
 	struct dcvs_core *core = NULL;
 	uint32_t ret1;
 	uint32_t ret2;
 
-	if (!core_name || !core_name[0])
-		return ret;
-
-	core = msm_dcvs_get_core(core_name, true);
+	core = msm_dcvs_add_core(type, type_core_num);
 	if (!core)
 		return ret;
 
-	mutex_lock(&core->lock);
+	core->type = type;
+	core->type_core_num = type_core_num;
+	core->set_frequency = set_frequency;
+	core->get_frequency = get_frequency;
+	core->idle_enable = idle_enable;
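+	/* frequency changes stay disabled until msm_dcvs_freq_sink_start() */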
+	core->pending_freq = STOP_FREQ_CHANGE;
 
 	core->info = info;
 	memcpy(&core->algo_param, &info->algo_param,
@@ -599,23 +772,39 @@
 	memcpy(&core->coeffs, &info->energy_coeffs,
 			sizeof(struct msm_dcvs_energy_curve_coeffs));
 
-	pr_debug("registering core with sensor %d\n", sensor);
+	/*
+	 * The tz expects cpu0 to represent bit 0 in the mask, however the
+	 * dcvs_core_id needs to start from 1; dcvs_core_id = 0 is used to
+	 * indicate that a request (such as one from mpdecision) is not
+	 * associated with any core.
+	 */
+	info->core_param.core_bitmask_id
+				= 1 << (core->dcvs_core_id - CPU_OFFSET);
 	core->sensor = sensor;
-	ret = msm_dcvs_scm_register_core(core->handle,
-			&info->core_param);
-	if (ret)
-		goto bail;
 
-	ret = msm_dcvs_scm_set_algo_params(core->handle, &info->algo_param);
-	if (ret)
+	ret = msm_dcvs_scm_register_core(core->dcvs_core_id, &info->core_param);
+	if (ret) {
+		__err("%s: scm register core fail handle = %d ret = %d\n",
+					__func__, core->dcvs_core_id, ret);
 		goto bail;
+	}
 
-	ret = msm_dcvs_scm_set_power_params(core->handle, &info->power_param,
+	ret = msm_dcvs_scm_set_algo_params(core->dcvs_core_id,
+							&info->algo_param);
+	if (ret) {
+		__err("%s: scm algo params failed ret = %d\n", __func__, ret);
+		goto bail;
+	}
+
+	ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id,
+				&info->power_param,
 				&info->freq_tbl[0], &core->coeffs);
-	if (ret)
+	if (ret) {
+		__err("%s: scm power params failed ret = %d\n", __func__, ret);
 		goto bail;
+	}
 
-	ret = msm_dcvs_scm_event(core->handle, MSM_DCVS_SCM_CORE_ONLINE,
+	ret = msm_dcvs_scm_event(core->dcvs_core_id, MSM_DCVS_SCM_CORE_ONLINE,
 				core->actual_freq, 0, &ret1, &ret2);
 	if (ret)
 		goto bail;
@@ -623,185 +812,164 @@
 	ret = msm_dcvs_setup_core_sysfs(core);
 	if (ret) {
 		__err("Unable to setup core %s sysfs\n", core->core_name);
-		core_handles[core->handle - CORE_HANDLE_OFFSET] = NULL;
 		goto bail;
 	}
+	core->idle_entered = -1;
 	init_waitqueue_head(&core->wait_q);
 	core->task = kthread_run(msm_dcvs_do_freq, (void *)core,
-			"msm_dcvs/%d", core->handle);
-bail:
-	mutex_unlock(&core->lock);
+			"msm_dcvs/%d", core->dcvs_core_id);
+	ret = core->dcvs_core_id;
+
+	INIT_DELAYED_WORK(&core->temperature_work, msm_dcvs_report_temp_work);
+	schedule_delayed_work(&core->temperature_work,
+			msecs_to_jiffies(info->thermal_poll_ms));
 	return ret;
+bail:
+	core->dcvs_core_id = -1;
+	return -EINVAL;
 }
 EXPORT_SYMBOL(msm_dcvs_register_core);
 
-int msm_dcvs_freq_sink_register(struct msm_dcvs_freq *drv)
+void msm_dcvs_update_limits(int dcvs_core_id)
+{
+	struct dcvs_core *core;
+
+	if (dcvs_core_id < CPU_OFFSET || dcvs_core_id >= CORES_MAX) {
+		__err("%s invalid dcvs_core_id = %d returning -EINVAL\n",
+				__func__, dcvs_core_id);
+		return;
+	}
+
+	core = msm_dcvs_get_core(dcvs_core_id);
+	core->actual_freq = core->get_frequency(core->type_core_num);
+}
+
+int msm_dcvs_freq_sink_start(int dcvs_core_id)
 {
 	int ret = -EINVAL;
 	struct dcvs_core *core = NULL;
 	uint32_t ret1;
-	uint32_t ret2;
+	unsigned long flags;
+	uint32_t new_freq;
+	uint32_t timer_interval_us;
 
-	if (!drv || !drv->core_name)
-		return ret;
+	if (dcvs_core_id < CPU_OFFSET || dcvs_core_id >= CORES_MAX) {
+		__err("%s invalid dcvs_core_id = %d returning -EINVAL\n",
+				__func__, dcvs_core_id);
+		return -EINVAL;
+	}
 
-	core = msm_dcvs_get_core(drv->core_name, true);
+	core = msm_dcvs_get_core(dcvs_core_id);
 	if (!core)
 		return ret;
 
-	mutex_lock(&core->lock);
-	if (core->freq_driver && (msm_dcvs_debug & MSM_DCVS_DEBUG_NOTIFIER))
-		__info("Frequency notifier for %s being replaced\n",
-				core->core_name);
-	core->freq_driver = drv;
-	if (IS_ERR(core->task)) {
-		mutex_unlock(&core->lock);
-		return -EFAULT;
+	core->actual_freq = core->get_frequency(core->type_core_num);
+
+	spin_lock_irqsave(&core->pending_freq_lock, flags);
+	/* mark that we are ready to accept new frequencies */
+	request_freq_change(core, NO_OUTSTANDING_FREQ_CHANGE);
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags);
+	core->idle_entered = -1;
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
+
+	/* Notify TZ to start receiving idle info for the core */
+	ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_DCVS_ENABLE, 1, &ret1);
+
+	ret = msm_dcvs_scm_event(
+		core->dcvs_core_id, MSM_DCVS_SCM_CORE_ONLINE, core->actual_freq,
+		0, &new_freq, &timer_interval_us);
+	if (ret)
+		__err("Error (%d) DCVS sending online for %s\n",
+				ret, core->core_name);
+
+	if (new_freq != 0) {
+		spin_lock_irqsave(&core->pending_freq_lock, flags);
+		request_freq_change(core, new_freq);
+		spin_unlock_irqrestore(&core->pending_freq_lock, flags);
 	}
+	force_start_slack_timer(core, timer_interval_us);
 
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-		__info("Enabling idle pulse for %s\n", core->core_name);
 
-	if (core->idle_driver) {
-		core->actual_freq = core->freq_driver->get_frequency(drv);
-		/* Notify TZ to start receiving idle info for the core */
-		ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_DCVS_ENABLE, 1,
-					   &ret1, &ret2);
-		core->idle_driver->enable(core->idle_driver,
-				MSM_DCVS_ENABLE_IDLE_PULSE);
-	}
-
-	mutex_unlock(&core->lock);
-
-	return core->handle;
+	core->idle_enable(core->type_core_num, MSM_DCVS_ENABLE_IDLE_PULSE);
+	return 0;
 }
-EXPORT_SYMBOL(msm_dcvs_freq_sink_register);
+EXPORT_SYMBOL(msm_dcvs_freq_sink_start);
 
-int msm_dcvs_freq_sink_unregister(struct msm_dcvs_freq *drv)
+int msm_dcvs_freq_sink_stop(int dcvs_core_id)
 {
 	int ret = -EINVAL;
 	struct dcvs_core *core = NULL;
 	uint32_t ret1;
-	uint32_t ret2;
+	uint32_t freq;
+	unsigned long flags;
 
-	if (!drv || !drv->core_name)
-		return ret;
-
-	core = msm_dcvs_get_core(drv->core_name, false);
-	if (!core)
-		return ret;
-
-	mutex_lock(&core->lock);
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-		__info("Disabling idle pulse for %s\n", core->core_name);
-	if (core->idle_driver) {
-		core->idle_driver->enable(core->idle_driver,
-				MSM_DCVS_DISABLE_IDLE_PULSE);
-		/* Notify TZ to stop receiving idle info for the core */
-		ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_DCVS_ENABLE, 0,
-					   &ret1, &ret2);
-		hrtimer_cancel(&core->timer);
-		core->idle_driver->enable(core->idle_driver,
-				MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
-		if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-			__info("Enabling LPM for %s\n", core->core_name);
+	if (dcvs_core_id < 0 || dcvs_core_id >= CORES_MAX) {
+		pr_err("%s invalid dcvs_core_id = %d returning -EINVAL\n",
+				__func__, dcvs_core_id);
+		return -EINVAL;
 	}
-	core->freq_driver = NULL;
-	mutex_unlock(&core->lock);
+
+	core = msm_dcvs_get_core(dcvs_core_id);
+	if (!core) {
+		__err("couldn't find core for coreid = %d\n", dcvs_core_id);
+		return ret;
+	}
+
+	core->idle_enable(core->type_core_num, MSM_DCVS_DISABLE_IDLE_PULSE);
+	/* Notify TZ to stop receiving idle info for the core */
+	ret = msm_dcvs_scm_event(core->dcvs_core_id, MSM_DCVS_SCM_DCVS_ENABLE,
+				0, core->actual_freq, &freq, &ret1);
+	core->idle_enable(core->type_core_num,
+			MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
+	spin_lock_irqsave(&core->pending_freq_lock, flags);
+	/* flush out all the pending freq changes */
+	request_freq_change(core, STOP_FREQ_CHANGE);
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
+	force_stop_slack_timer(core);
 
 	return 0;
 }
-EXPORT_SYMBOL(msm_dcvs_freq_sink_unregister);
+EXPORT_SYMBOL(msm_dcvs_freq_sink_stop);
 
-int msm_dcvs_idle_source_register(struct msm_dcvs_idle *drv)
-{
-	int ret = -EINVAL;
-	struct dcvs_core *core = NULL;
-
-	if (!drv || !drv->core_name)
-		return ret;
-
-	core = msm_dcvs_get_core(drv->core_name, true);
-	if (!core)
-		return ret;
-
-	mutex_lock(&core->lock);
-	if (core->idle_driver && (msm_dcvs_debug & MSM_DCVS_DEBUG_NOTIFIER))
-		__info("Idle notifier for %s being replaced\n",
-				core->core_name);
-	core->idle_driver = drv;
-	mutex_unlock(&core->lock);
-
-	return core->handle;
-}
-EXPORT_SYMBOL(msm_dcvs_idle_source_register);
-
-int msm_dcvs_idle_source_unregister(struct msm_dcvs_idle *drv)
-{
-	int ret = -EINVAL;
-	struct dcvs_core *core = NULL;
-
-	if (!drv || !drv->core_name)
-		return ret;
-
-	core = msm_dcvs_get_core(drv->core_name, false);
-	if (!core)
-		return ret;
-
-	mutex_lock(&core->lock);
-	core->idle_driver = NULL;
-	mutex_unlock(&core->lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(msm_dcvs_idle_source_unregister);
-
-int msm_dcvs_idle(int handle, enum msm_core_idle_state state, uint32_t iowaited)
+int msm_dcvs_idle(int dcvs_core_id, enum msm_core_idle_state state,
+						uint32_t iowaited)
 {
 	int ret = 0;
 	struct dcvs_core *core = NULL;
 	uint32_t timer_interval_us = 0;
 	uint32_t r0, r1;
-	uint32_t freq_changed = 0;
 
-	if (handle >= CORE_HANDLE_OFFSET &&
-			(handle - CORE_HANDLE_OFFSET) < CORES_MAX)
-		core = &core_list[handle - CORE_HANDLE_OFFSET];
+	if (dcvs_core_id < CPU_OFFSET || dcvs_core_id >= CORES_MAX) {
+		pr_err("invalid dcvs_core_id = %d ret -EINVAL\n", dcvs_core_id);
+		return -EINVAL;
+	}
 
-	BUG_ON(!core);
-
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-		__info("Core %s idle state %d\n", core->core_name, state);
+	core = msm_dcvs_get_core(dcvs_core_id);
 
 	switch (state) {
 	case MSM_DCVS_IDLE_ENTER:
-		hrtimer_cancel(&core->timer);
-		ret = msm_dcvs_scm_event(core->handle,
+		stop_slack_timer(core);
+		ret = msm_dcvs_scm_event(core->dcvs_core_id,
 				MSM_DCVS_SCM_IDLE_ENTER, 0, 0, &r0, &r1);
-		if (ret)
+		if (ret < 0 && ret != -13)
 			__err("Error (%d) sending idle enter for %s\n",
 					ret, core->core_name);
+		trace_msm_dcvs_idle("idle_enter_exit", core->core_name, 1);
 		break;
 
 	case MSM_DCVS_IDLE_EXIT:
-		hrtimer_cancel(&core->timer);
 		ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_IDLE_EXIT,
-				iowaited, &timer_interval_us, &freq_changed);
+						iowaited, &timer_interval_us);
 		if (ret)
 			__err("Error (%d) sending idle exit for %s\n",
 					ret, core->core_name);
-		/* only start slack timer if change_freq won't */
-		if (freq_changed)
-			break;
-		if (timer_interval_us && !core->timer_disabled) {
-			ret = hrtimer_start(&core->timer,
-				ktime_set(0, timer_interval_us * 1000),
-				HRTIMER_MODE_REL_PINNED);
-
-			if (ret)
-				__err("Failed to register timer for core %s\n",
-				      core->core_name);
-		}
+		start_slack_timer(core, timer_interval_us);
+		trace_msm_dcvs_idle("idle_enter_exit", core->core_name, 0);
+		trace_msm_dcvs_iowait("iowait", core->core_name, iowaited);
+		trace_msm_dcvs_slack_time("slack_timer_dcvs", core->core_name,
+							timer_interval_us);
 		break;
 	}
 
@@ -836,13 +1004,6 @@
 		goto err;
 	}
 
-	if (!debugfs_create_u32("debug_mask", S_IRUGO | S_IWUSR,
-				debugfs_base, &msm_dcvs_debug)) {
-		__err("Cannot create debugfs entry %s\n", "debug_mask");
-		ret = -ENOMEM;
-		goto err;
-	}
-
 err:
 	if (ret) {
 		kobject_del(cores_kobj);
@@ -857,16 +1018,24 @@
 static int __init msm_dcvs_early_init(void)
 {
 	int ret = 0;
+	int i;
 
 	if (!msm_dcvs_enabled) {
 		__info("Not enabled (%d)\n", msm_dcvs_enabled);
 		return 0;
 	}
 
-	ret = msm_dcvs_scm_init(10 * 1024);
-	if (ret)
-		__err("Unable to initialize DCVS err=%d\n", ret);
 
+	/* Only need about 32kBytes for normal operation */
+	ret = msm_dcvs_scm_init(SZ_32K);
+	if (ret) {
+		__err("Unable to initialize DCVS err=%d\n", ret);
+		goto done;
+	}
+
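+	/* mark all slots as unregistered until msm_dcvs_register_core() */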
+	for (i = 0; i < CORES_MAX; i++)
+		core_list[i].dcvs_core_id = -1;
+done:
 	return ret;
 }
 postcore_initcall(msm_dcvs_early_init);
diff --git a/arch/arm/mach-msm/msm_dcvs_idle.c b/arch/arm/mach-msm/msm_dcvs_idle.c
deleted file mode 100644
index 179e170..0000000
--- a/arch/arm/mach-msm/msm_dcvs_idle.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/cpu_pm.h>
-#include <linux/platform_device.h>
-#include <linux/pm_qos.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
-#include <mach/msm_dcvs.h>
-
-struct cpu_idle_info {
-	int cpu;
-	int enabled;
-	int handle;
-	struct msm_dcvs_idle dcvs_notifier;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_idle_info, cpu_idle_info);
-static DEFINE_PER_CPU_SHARED_ALIGNED(u64, iowait_on_cpu);
-static char core_name[NR_CPUS][10];
-static struct pm_qos_request qos_req;
-static uint32_t latency;
-
-static int msm_dcvs_idle_notifier(struct msm_dcvs_idle *self,
-		enum msm_core_control_event event)
-{
-	struct cpu_idle_info *info = container_of(self,
-				struct cpu_idle_info, dcvs_notifier);
-
-	switch (event) {
-	case MSM_DCVS_ENABLE_IDLE_PULSE:
-		info->enabled = true;
-		break;
-
-	case MSM_DCVS_DISABLE_IDLE_PULSE:
-		info->enabled = false;
-		break;
-
-	case MSM_DCVS_ENABLE_HIGH_LATENCY_MODES:
-		pm_qos_update_request(&qos_req, PM_QOS_DEFAULT_VALUE);
-		break;
-
-	case MSM_DCVS_DISABLE_HIGH_LATENCY_MODES:
-		pm_qos_update_request(&qos_req, latency);
-		break;
-	}
-
-	return 0;
-}
-
-static int msm_cpuidle_notifier(struct notifier_block *self, unsigned long cmd,
-		void *v)
-{
-	struct cpu_idle_info *info =
-		&per_cpu(cpu_idle_info, smp_processor_id());
-	u64 io_wait_us = 0;
-	u64 prev_io_wait_us = 0;
-	u64 last_update_time = 0;
-	u64 val = 0;
-	uint32_t iowaited = 0;
-
-	if (!info->enabled)
-		return NOTIFY_OK;
-
-	switch (cmd) {
-	case CPU_PM_ENTER:
-		val = get_cpu_iowait_time_us(smp_processor_id(),
-					&last_update_time);
-		/* val could be -1 when NOHZ is not enabled */
-		if (val == (u64)-1)
-			val = 0;
-		per_cpu(iowait_on_cpu, smp_processor_id()) = val;
-		msm_dcvs_idle(info->handle, MSM_DCVS_IDLE_ENTER, 0);
-		break;
-
-	case CPU_PM_ENTER_FAILED:
-	case CPU_PM_EXIT:
-		prev_io_wait_us = per_cpu(iowait_on_cpu, smp_processor_id());
-		val = get_cpu_iowait_time_us(smp_processor_id(),
-				&last_update_time);
-		if (val == (u64)-1)
-			val = 0;
-		io_wait_us = val;
-		iowaited = (io_wait_us - prev_io_wait_us);
-		msm_dcvs_idle(info->handle, MSM_DCVS_IDLE_EXIT, iowaited);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block idle_nb = {
-	.notifier_call = msm_cpuidle_notifier,
-};
-
-static int msm_dcvs_idle_probe(struct platform_device *pdev)
-{
-	int cpu;
-	struct cpu_idle_info *info = NULL;
-	struct msm_dcvs_idle *inotify = NULL;
-
-	for_each_possible_cpu(cpu) {
-		info = &per_cpu(cpu_idle_info, cpu);
-		info->cpu = cpu;
-		inotify = &info->dcvs_notifier;
-		snprintf(core_name[cpu], 10, "cpu%d", cpu);
-		inotify->core_name = core_name[cpu];
-		inotify->enable = msm_dcvs_idle_notifier;
-		info->handle = msm_dcvs_idle_source_register(inotify);
-		BUG_ON(info->handle < 0);
-	}
-
-	latency = *((uint32_t *)pdev->dev.platform_data);
-	pm_qos_add_request(&qos_req, PM_QOS_CPU_DMA_LATENCY,
-				PM_QOS_DEFAULT_VALUE);
-
-	return cpu_pm_register_notifier(&idle_nb);
-}
-
-static int msm_dcvs_idle_remove(struct platform_device *pdev)
-{
-	int ret = 0;
-	int rc = 0;
-	int cpu = 0;
-	struct msm_dcvs_idle *inotify = NULL;
-	struct cpu_idle_info *info = NULL;
-
-	rc = cpu_pm_unregister_notifier(&idle_nb);
-
-	for_each_possible_cpu(cpu) {
-		info = &per_cpu(cpu_idle_info, cpu);
-		inotify = &info->dcvs_notifier;
-		ret = msm_dcvs_idle_source_unregister(inotify);
-		if (ret) {
-			rc = -EFAULT;
-			pr_err("Error de-registering core %d idle notifier.\n",
-					cpu);
-		}
-	}
-
-	return rc;
-}
-
-static struct platform_driver idle_pdrv = {
-	.probe = msm_dcvs_idle_probe,
-	.remove = __devexit_p(msm_dcvs_idle_remove),
-	.driver = {
-		.name  = "msm_cpu_idle",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int msm_dcvs_idle_init(void)
-{
-	return platform_driver_register(&idle_pdrv);
-}
-late_initcall(msm_dcvs_idle_init);
diff --git a/arch/arm/mach-msm/msm_dcvs_scm.c b/arch/arm/mach-msm/msm_dcvs_scm.c
index df6c44f..78d62ac 100644
--- a/arch/arm/mach-msm/msm_dcvs_scm.c
+++ b/arch/arm/mach-msm/msm_dcvs_scm.c
@@ -20,6 +20,7 @@
 #include <mach/memory.h>
 #include <mach/scm.h>
 #include <mach/msm_dcvs_scm.h>
+#include <trace/events/mpdcvs_trace.h>
 
 #define DCVS_CMD_REGISTER_CORE		2
 #define DCVS_CMD_SET_ALGO_PARAM		3
@@ -225,6 +226,9 @@
 	ret = scm_call_atomic4_3(SCM_SVC_DCVS, DCVS_CMD_EVENT,
 			core_id, event_id, param0, param1, ret0, ret1);
 
+	trace_msm_dcvs_scm_event(core_id, (int)event_id, param0, param1,
+							*ret0, *ret1);
+
 	return ret;
 }
 EXPORT_SYMBOL(msm_dcvs_scm_event);
diff --git a/arch/arm/mach-msm/msm_mpdecision.c b/arch/arm/mach-msm/msm_mpdecision.c
index 056e4eb..184ec61 100644
--- a/arch/arm/mach-msm/msm_mpdecision.c
+++ b/arch/arm/mach-msm/msm_mpdecision.c
@@ -37,6 +37,8 @@
 #include <asm/page.h>
 #include <mach/msm_dcvs.h>
 #include <mach/msm_dcvs_scm.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mpdcvs_trace.h>
 
 #define DEFAULT_RQ_AVG_POLL_MS    (1)
 
@@ -168,6 +170,8 @@
 	if (nr > num_present_hundreds)
 		nr = num_present_hundreds;
 
+	trace_msm_mp_runq("nr_running", nr);
+
 	if (ok_to_update_tz(nr, last_nr)) {
 		hrtimer_try_to_cancel(&msm_mpd.slack_timer);
 		msm_mpd.data.nr = nr;
@@ -263,6 +267,8 @@
 		return ret;
 	}
 
+	trace_msm_mp_cpusonline("cpu_online_mp", req_cpu_mask);
+	trace_msm_mp_slacktime("slack_time_mp", slack_us);
 	msm_mpd.slack_us = slack_us;
 	atomic_set(&msm_mpd.algo_cpu_mask, req_cpu_mask);
 	msm_mpd.hpupdate = HPUPDATE_SCHEDULED;
@@ -286,6 +292,8 @@
 {
 	unsigned long flags;
 
+	trace_printk("mpd:slack_timer_fired!\n");
+
 	spin_lock_irqsave(&rq_avg_lock, flags);
 	if (msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE)
 		goto out;
diff --git a/arch/arm/mach-msm/pil-q6v4-lpass.c b/arch/arm/mach-msm/pil-q6v4-lpass.c
new file mode 100644
index 0000000..222bd10
--- /dev/null
+++ b/arch/arm/mach-msm/pil-q6v4-lpass.c
@@ -0,0 +1,143 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v4.h"
+#include "scm-pas.h"
+
+static int pil_q6v4_lpass_boot(struct pil_desc *pil)
+{
+	struct q6v4_data *drv = pil_to_q6v4_data(pil);
+	int err;
+
+	err = pil_q6v4_power_up(drv);
+	if (err)
+		return err;
+
+	return pil_q6v4_boot(pil);
+}
+
+static int pil_q6v4_lpass_shutdown(struct pil_desc *pil)
+{
+	struct q6v4_data *drv = pil_to_q6v4_data(pil);
+	int ret;
+
+	ret = pil_q6v4_shutdown(pil);
+	if (ret)
+		return ret;
+	pil_q6v4_power_down(drv);
+	return 0;
+}
+
+static struct pil_reset_ops pil_q6v4_lpass_ops = {
+	.init_image = pil_q6v4_init_image,
+	.auth_and_reset = pil_q6v4_lpass_boot,
+	.shutdown = pil_q6v4_lpass_shutdown,
+	.proxy_vote = pil_q6v4_make_proxy_votes,
+	.proxy_unvote = pil_q6v4_remove_proxy_votes,
+};
+
+static struct pil_reset_ops pil_q6v4_lpass_ops_trusted = {
+	.init_image = pil_q6v4_init_image_trusted,
+	.auth_and_reset = pil_q6v4_boot_trusted,
+	.shutdown = pil_q6v4_shutdown_trusted,
+	.proxy_vote = pil_q6v4_make_proxy_votes,
+	.proxy_unvote = pil_q6v4_remove_proxy_votes,
+};
+
+static int __devinit pil_q6v4_lpass_driver_probe(struct platform_device *pdev)
+{
+	const struct pil_q6v4_pdata *pdata = pdev->dev.platform_data;
+	struct q6v4_data *drv;
+	struct pil_desc *desc;
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, drv);
+
+	drv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!drv->base)
+		return -ENOMEM;
+
+	drv->vreg = devm_regulator_get(&pdev->dev, "core_vdd");
+	if (IS_ERR(drv->vreg))
+		return PTR_ERR(drv->vreg);
+
+	drv->xo = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(drv->xo))
+		return PTR_ERR(drv->xo);
+
+	desc = &drv->desc;
+	desc->name = pdata->name;
+	desc->dev = &pdev->dev;
+	desc->owner = THIS_MODULE;
+	desc->proxy_timeout = 10000;
+	pil_q6v4_init(drv, pdata);
+
+	if (pas_supported(pdata->pas_id) > 0) {
+		desc->ops = &pil_q6v4_lpass_ops_trusted;
+		dev_info(&pdev->dev, "using secure boot\n");
+	} else {
+		desc->ops = &pil_q6v4_lpass_ops;
+		dev_info(&pdev->dev, "using non-secure boot\n");
+	}
+
+	drv->pil = msm_pil_register(desc);
+	if (IS_ERR(drv->pil))
+		return PTR_ERR(drv->pil);
+	return 0;
+}
+
+static int __devexit pil_q6v4_lpass_driver_exit(struct platform_device *pdev)
+{
+	struct q6v4_data *drv = platform_get_drvdata(pdev);
+	msm_pil_unregister(drv->pil);
+	return 0;
+}
+
+static struct platform_driver pil_q6v4_lpass_driver = {
+	.probe = pil_q6v4_lpass_driver_probe,
+	.remove = __devexit_p(pil_q6v4_lpass_driver_exit),
+	.driver = {
+		.name = "pil-q6v4-lpass",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pil_q6v4_lpass_init(void)
+{
+	return platform_driver_register(&pil_q6v4_lpass_driver);
+}
+module_init(pil_q6v4_lpass_init);
+
+static void __exit pil_q6v4_lpass_exit(void)
+{
+	platform_driver_unregister(&pil_q6v4_lpass_driver);
+}
+module_exit(pil_q6v4_lpass_exit);
+
+MODULE_DESCRIPTION("Support for booting QDSP6v4 (Hexagon) processors");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/pil-q6v4-mss.c b/arch/arm/mach-msm/pil-q6v4-mss.c
new file mode 100644
index 0000000..65b56d3
--- /dev/null
+++ b/arch/arm/mach-msm/pil-q6v4-mss.c
@@ -0,0 +1,264 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <mach/msm_iomap.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v4.h"
+#include "scm-pas.h"
+
+#define MSS_S_HCLK_CTL		(MSM_CLK_CTL_BASE + 0x2C70)
+#define MSS_SLP_CLK_CTL		(MSM_CLK_CTL_BASE + 0x2C60)
+#define SFAB_MSS_M_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2340)
+#define SFAB_MSS_S_HCLK_CTL	(MSM_CLK_CTL_BASE + 0x2C00)
+#define MSS_RESET		(MSM_CLK_CTL_BASE + 0x2C64)
+
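+/* the modem subsystem hosts two Q6 cores: firmware (fw) and software (sw) */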
+struct q6v4_modem {
+	struct q6v4_data q6_fw;
+	struct q6v4_data q6_sw;
+	void __iomem *modem_base;
+};
+
+static DEFINE_MUTEX(pil_q6v4_modem_lock);
+static unsigned pil_q6v4_modem_count;
+
+/* Bring modem subsystem out of reset */
+static void pil_q6v4_init_modem(void __iomem *base, void __iomem *jtag_clk)
+{
+	mutex_lock(&pil_q6v4_modem_lock);
+	if (!pil_q6v4_modem_count) {
+		/* Enable MSS clocks */
+		writel_relaxed(0x10, SFAB_MSS_M_ACLK_CTL);
+		writel_relaxed(0x10, SFAB_MSS_S_HCLK_CTL);
+		writel_relaxed(0x10, MSS_S_HCLK_CTL);
+		writel_relaxed(0x10, MSS_SLP_CLK_CTL);
+		/* Wait for clocks to enable */
+		mb();
+		udelay(10);
+
+		/* De-assert MSS reset */
+		writel_relaxed(0x0, MSS_RESET);
+		mb();
+		udelay(10);
+		/* Enable MSS */
+		writel_relaxed(0x7, base);
+	}
+
+	/* Enable JTAG clocks */
+	/* TODO: Remove if/when Q6 software enables them? */
+	writel_relaxed(0x10, jtag_clk);
+
+	pil_q6v4_modem_count++;
+	mutex_unlock(&pil_q6v4_modem_lock);
+}
+
+/* Put modem subsystem back into reset */
+static void pil_q6v4_shutdown_modem(void)
+{
+	mutex_lock(&pil_q6v4_modem_lock);
+	if (pil_q6v4_modem_count)
+		pil_q6v4_modem_count--;
+	if (pil_q6v4_modem_count == 0)
+		writel_relaxed(0x1, MSS_RESET);
+	mutex_unlock(&pil_q6v4_modem_lock);
+}
+
+static int pil_q6v4_modem_boot(struct pil_desc *pil)
+{
+	struct q6v4_data *drv = pil_to_q6v4_data(pil);
+	struct q6v4_modem *mdm = dev_get_drvdata(pil->dev);
+	int err;
+
+	err = pil_q6v4_power_up(drv);
+	if (err)
+		return err;
+
+	pil_q6v4_init_modem(mdm->modem_base, drv->jtag_clk_reg);
+	return pil_q6v4_boot(pil);
+}
+
+static int pil_q6v4_modem_shutdown(struct pil_desc *pil)
+{
+	struct q6v4_data *drv = pil_to_q6v4_data(pil);
+	int ret;
+
+	ret = pil_q6v4_shutdown(pil);
+	if (ret)
+		return ret;
+	pil_q6v4_shutdown_modem();
+	pil_q6v4_power_down(drv);
+	return 0;
+}
+
+static struct pil_reset_ops pil_q6v4_modem_ops = {
+	.init_image = pil_q6v4_init_image,
+	.auth_and_reset = pil_q6v4_modem_boot,
+	.shutdown = pil_q6v4_modem_shutdown,
+	.proxy_vote = pil_q6v4_make_proxy_votes,
+	.proxy_unvote = pil_q6v4_remove_proxy_votes,
+};
+
+static struct pil_reset_ops pil_q6v4_modem_ops_trusted = {
+	.init_image = pil_q6v4_init_image_trusted,
+	.auth_and_reset = pil_q6v4_boot_trusted,
+	.shutdown = pil_q6v4_shutdown_trusted,
+	.proxy_vote = pil_q6v4_make_proxy_votes,
+	.proxy_unvote = pil_q6v4_remove_proxy_votes,
+};
+
+static int __devinit
+pil_q6v4_proc_init(struct q6v4_data *drv, struct platform_device *pdev, int i)
+{
+	static const char *name[2] = { "fw", "sw" };
+	const struct pil_q6v4_pdata *pdata_p = pdev->dev.platform_data;
+	const struct pil_q6v4_pdata *pdata = pdata_p + i;
+	char reg_name[12];
+	struct pil_desc *desc;
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
+	if (!res)
+		return -EINVAL;
+
+	drv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!drv->base)
+		return -ENOMEM;
+
+	snprintf(reg_name, sizeof(reg_name), "%s_core_vdd", name[i]);
+	drv->vreg = devm_regulator_get(&pdev->dev, reg_name);
+	if (IS_ERR(drv->vreg))
+		return PTR_ERR(drv->vreg);
+
+	drv->xo = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(drv->xo))
+		return PTR_ERR(drv->xo);
+
+	desc = &drv->desc;
+	desc->name = pdata->name;
+	desc->depends_on = pdata->depends;
+	desc->dev = &pdev->dev;
+	desc->owner = THIS_MODULE;
+	desc->proxy_timeout = 10000;
+	pil_q6v4_init(drv, pdata);
+
+	if (pas_supported(pdata->pas_id) > 0) {
+		desc->ops = &pil_q6v4_modem_ops_trusted;
+		dev_info(&pdev->dev, "using secure boot for %s\n", name[i]);
+	} else {
+		desc->ops = &pil_q6v4_modem_ops;
+		dev_info(&pdev->dev, "using non-secure boot for %s\n", name[i]);
+	}
+	return 0;
+}
+
+static int __devinit pil_q6v4_modem_driver_probe(struct platform_device *pdev)
+{
+	struct q6v4_data *drv_fw, *drv_sw;
+	struct q6v4_modem *drv;
+	struct resource *res;
+	struct regulator *pll_supply;
+	int ret;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, drv);
+
+	drv_fw = &drv->q6_fw;
+	drv_sw = &drv->q6_sw;
+
+	ret = pil_q6v4_proc_init(drv_fw, pdev, 0);
+	if (ret)
+		return ret;
+
+	ret = pil_q6v4_proc_init(drv_sw, pdev, 1);
+	if (ret)
+		return ret;
+
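+	/* the PLL supply is shared by the fw and sw Q6 instances */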
+	pll_supply = devm_regulator_get(&pdev->dev, "pll_vdd");
+	drv_fw->pll_supply = drv_sw->pll_supply = pll_supply;
+	if (IS_ERR(pll_supply))
+		return PTR_ERR(pll_supply);
+
+	ret = regulator_set_voltage(pll_supply, 1800000, 1800000);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to set pll voltage\n");
+		return ret;
+	}
+
+	ret = regulator_set_optimum_mode(pll_supply, 100000);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to set pll optimum mode\n");
+		return ret;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	drv->modem_base = devm_ioremap(&pdev->dev, res->start,
+			resource_size(res));
+	if (!drv->modem_base)
+		return -ENOMEM;
+
+	drv_fw->pil = msm_pil_register(&drv_fw->desc);
+	if (IS_ERR(drv_fw->pil))
+		return PTR_ERR(drv_fw->pil);
+
+	drv_sw->pil = msm_pil_register(&drv_sw->desc);
+	if (IS_ERR(drv_sw->pil)) {
+		msm_pil_unregister(drv_fw->pil);
+		return PTR_ERR(drv_sw->pil);
+	}
+	return 0;
+}
+
+static int __devexit pil_q6v4_modem_driver_exit(struct platform_device *pdev)
+{
+	struct q6v4_modem *drv = platform_get_drvdata(pdev);
+	msm_pil_unregister(drv->q6_sw.pil);
+	msm_pil_unregister(drv->q6_fw.pil);
+	return 0;
+}
+
+static struct platform_driver pil_q6v4_modem_driver = {
+	.probe = pil_q6v4_modem_driver_probe,
+	.remove = __devexit_p(pil_q6v4_modem_driver_exit),
+	.driver = {
+		.name = "pil-q6v4-modem",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pil_q6v4_modem_init(void)
+{
+	return platform_driver_register(&pil_q6v4_modem_driver);
+}
+module_init(pil_q6v4_modem_init);
+
+static void __exit pil_q6v4_modem_exit(void)
+{
+	platform_driver_unregister(&pil_q6v4_modem_driver);
+}
+module_exit(pil_q6v4_modem_exit);
+
+MODULE_DESCRIPTION("Support for booting QDSP6v4 (Hexagon) processors");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/pil-q6v4.c b/arch/arm/mach-msm/pil-q6v4.c
index 32cce1d..47033fc 100644
--- a/arch/arm/mach-msm/pil-q6v4.c
+++ b/arch/arm/mach-msm/pil-q6v4.c
@@ -22,7 +22,6 @@
 #include <linux/clk.h>
 
 #include <mach/msm_bus.h>
-#include <mach/msm_iomap.h>
 
 #include "peripheral-loader.h"
 #include "pil-q6v4.h"
@@ -35,12 +34,6 @@
 #define QDSP6SS_GFMUX_CTL	0x30
 #define QDSP6SS_PWR_CTL		0x38
 
-#define MSS_S_HCLK_CTL		(MSM_CLK_CTL_BASE + 0x2C70)
-#define MSS_SLP_CLK_CTL		(MSM_CLK_CTL_BASE + 0x2C60)
-#define SFAB_MSS_M_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2340)
-#define SFAB_MSS_S_HCLK_CTL	(MSM_CLK_CTL_BASE + 0x2C00)
-#define MSS_RESET		(MSM_CLK_CTL_BASE + 0x2C64)
-
 #define Q6SS_SS_ARES		BIT(0)
 #define Q6SS_CORE_ARES		BIT(1)
 #define Q6SS_ISDB_ARES		BIT(2)
@@ -59,29 +52,19 @@
 #define Q6SS_CLK_ENA		BIT(1)
 #define Q6SS_SRC_SWITCH_CLK_OVR	BIT(8)
 
-struct q6v4_data {
-	void __iomem *base;
-	void __iomem *modem_base;
-	unsigned long start_addr;
-	struct regulator *vreg;
-	struct regulator *pll_supply;
-	bool vreg_enabled;
-	struct clk *xo;
-	struct pil_device *pil;
-};
-
-static int pil_q6v4_init_image(struct pil_desc *pil, const u8 *metadata,
+int pil_q6v4_init_image(struct pil_desc *pil, const u8 *metadata,
 		size_t size)
 {
 	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
-	struct q6v4_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v4_data *drv = pil_to_q6v4_data(pil);
 	drv->start_addr = ehdr->e_entry;
 	return 0;
 }
+EXPORT_SYMBOL(pil_q6v4_init_image);
 
-static int pil_q6v4_make_proxy_votes(struct pil_desc *pil)
+int pil_q6v4_make_proxy_votes(struct pil_desc *pil)
 {
-	const struct q6v4_data *drv = dev_get_drvdata(pil->dev);
+	const struct q6v4_data *drv = pil_to_q6v4_data(pil);
 	int ret;
 
 	ret = clk_prepare_enable(drv->xo);
@@ -102,19 +85,21 @@
 err:
 	return ret;
 }
+EXPORT_SYMBOL(pil_q6v4_make_proxy_votes);
 
-static void pil_q6v4_remove_proxy_votes(struct pil_desc *pil)
+void pil_q6v4_remove_proxy_votes(struct pil_desc *pil)
 {
-	const struct q6v4_data *drv = dev_get_drvdata(pil->dev);
+	const struct q6v4_data *drv = pil_to_q6v4_data(pil);
 	if (drv->pll_supply)
 		regulator_disable(drv->pll_supply);
 	clk_disable_unprepare(drv->xo);
 }
+EXPORT_SYMBOL(pil_q6v4_remove_proxy_votes);
 
-static int pil_q6v4_power_up(struct device *dev)
+int pil_q6v4_power_up(struct q6v4_data *drv)
 {
 	int err;
-	struct q6v4_data *drv = dev_get_drvdata(dev);
+	struct device *dev = drv->desc.dev;
 
 	err = regulator_set_voltage(drv->vreg, 743750, 743750);
 	if (err) {
@@ -141,68 +126,27 @@
 	drv->vreg_enabled = true;
 	return 0;
 }
+EXPORT_SYMBOL(pil_q6v4_power_up);
 
-static DEFINE_MUTEX(pil_q6v4_modem_lock);
-static unsigned pil_q6v4_modem_count;
-
-/* Bring modem subsystem out of reset */
-static void pil_q6v4_init_modem(void __iomem *base, void __iomem *jtag_clk)
+void pil_q6v4_power_down(struct q6v4_data *drv)
 {
-	mutex_lock(&pil_q6v4_modem_lock);
-	if (!pil_q6v4_modem_count) {
-		/* Enable MSS clocks */
-		writel_relaxed(0x10, SFAB_MSS_M_ACLK_CTL);
-		writel_relaxed(0x10, SFAB_MSS_S_HCLK_CTL);
-		writel_relaxed(0x10, MSS_S_HCLK_CTL);
-		writel_relaxed(0x10, MSS_SLP_CLK_CTL);
-		/* Wait for clocks to enable */
-		mb();
-		udelay(10);
-
-		/* De-assert MSS reset */
-		writel_relaxed(0x0, MSS_RESET);
-		mb();
-		udelay(10);
-		/* Enable MSS */
-		writel_relaxed(0x7, base);
+	if (drv->vreg_enabled) {
+		regulator_disable(drv->vreg);
+		drv->vreg_enabled = false;
 	}
-
-	/* Enable JTAG clocks */
-	/* TODO: Remove if/when Q6 software enables them? */
-	writel_relaxed(0x10, jtag_clk);
-
-	pil_q6v4_modem_count++;
-	mutex_unlock(&pil_q6v4_modem_lock);
 }
+EXPORT_SYMBOL(pil_q6v4_power_down);
 
-/* Put modem subsystem back into reset */
-static void pil_q6v4_shutdown_modem(void)
-{
-	mutex_lock(&pil_q6v4_modem_lock);
-	if (pil_q6v4_modem_count)
-		pil_q6v4_modem_count--;
-	if (pil_q6v4_modem_count == 0)
-		writel_relaxed(0x1, MSS_RESET);
-	mutex_unlock(&pil_q6v4_modem_lock);
-}
-
-static int pil_q6v4_reset(struct pil_desc *pil)
+int pil_q6v4_boot(struct pil_desc *pil)
 {
 	u32 reg, err;
-	const struct q6v4_data *drv = dev_get_drvdata(pil->dev);
-	const struct pil_q6v4_pdata *pdata = pil->dev->platform_data;
+	const struct q6v4_data *drv = pil_to_q6v4_data(pil);
 
-	err = pil_q6v4_power_up(pil->dev);
-	if (err)
-		return err;
 	/* Enable Q6 ACLK */
-	writel_relaxed(0x10, pdata->aclk_reg);
-
-	if (drv->modem_base)
-		pil_q6v4_init_modem(drv->modem_base, pdata->jtag_clk_reg);
+	writel_relaxed(0x10, drv->aclk_reg);
 
 	/* Unhalt bus port */
-	err = msm_bus_axi_portunhalt(pdata->bus_port);
+	err = msm_bus_axi_portunhalt(drv->bus_port);
 	if (err)
 		dev_err(pil->dev, "Failed to unhalt bus port\n");
 
@@ -216,8 +160,8 @@
 			drv->base + QDSP6SS_RST_EVB);
 
 	/* Program TCM and AHB address ranges */
-	writel_relaxed(pdata->strap_tcm_base, drv->base + QDSP6SS_STRAP_TCM);
-	writel_relaxed(pdata->strap_ahb_upper | pdata->strap_ahb_lower,
+	writel_relaxed(drv->strap_tcm_base, drv->base + QDSP6SS_STRAP_TCM);
+	writel_relaxed(drv->strap_ahb_upper | drv->strap_ahb_lower,
 		       drv->base + QDSP6SS_STRAP_AHB);
 
 	/* Turn off Q6 core clock */
@@ -257,15 +201,15 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(pil_q6v4_boot);
 
-static int pil_q6v4_shutdown(struct pil_desc *pil)
+int pil_q6v4_shutdown(struct pil_desc *pil)
 {
 	u32 reg;
-	struct q6v4_data *drv = dev_get_drvdata(pil->dev);
-	const struct pil_q6v4_pdata *pdata = pil->dev->platform_data;
+	struct q6v4_data *drv = pil_to_q6v4_data(pil);
 
 	/* Make sure bus port is halted */
-	msm_bus_axi_porthalt(pdata->bus_port);
+	msm_bus_axi_porthalt(drv->bus_port);
 
 	/* Turn off Q6 core clock */
 	writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
@@ -279,188 +223,67 @@
 	/* Turn off Q6 memories */
 	writel_relaxed(Q6SS_CLAMP_IO, drv->base + QDSP6SS_PWR_CTL);
 
-	if (drv->modem_base)
-		pil_q6v4_shutdown_modem();
-
-	if (drv->vreg_enabled) {
-		regulator_disable(drv->vreg);
-		drv->vreg_enabled = false;
-	}
-
 	return 0;
 }
+EXPORT_SYMBOL(pil_q6v4_shutdown);
 
-static struct pil_reset_ops pil_q6v4_ops = {
-	.init_image = pil_q6v4_init_image,
-	.auth_and_reset = pil_q6v4_reset,
-	.shutdown = pil_q6v4_shutdown,
-	.proxy_vote = pil_q6v4_make_proxy_votes,
-	.proxy_unvote = pil_q6v4_remove_proxy_votes,
-};
-
-static int pil_q6v4_init_image_trusted(struct pil_desc *pil,
+int pil_q6v4_init_image_trusted(struct pil_desc *pil,
 		const u8 *metadata, size_t size)
 {
-	const struct pil_q6v4_pdata *pdata = pil->dev->platform_data;
-	return pas_init_image(pdata->pas_id, metadata, size);
+	struct q6v4_data *drv = pil_to_q6v4_data(pil);
+	return pas_init_image(drv->pas_id, metadata, size);
 }
+EXPORT_SYMBOL(pil_q6v4_init_image_trusted);
 
-static int pil_q6v4_reset_trusted(struct pil_desc *pil)
+int pil_q6v4_boot_trusted(struct pil_desc *pil)
 {
-	const struct pil_q6v4_pdata *pdata = pil->dev->platform_data;
+	struct q6v4_data *drv = pil_to_q6v4_data(pil);
 	int err;
 
-	err = pil_q6v4_power_up(pil->dev);
+	err = pil_q6v4_power_up(drv);
 	if (err)
 		return err;
 
 	/* Unhalt bus port */
-	err = msm_bus_axi_portunhalt(pdata->bus_port);
+	err = msm_bus_axi_portunhalt(drv->bus_port);
 	if (err)
 		dev_err(pil->dev, "Failed to unhalt bus port\n");
-	return pas_auth_and_reset(pdata->pas_id);
+	return pas_auth_and_reset(drv->pas_id);
 }
+EXPORT_SYMBOL(pil_q6v4_boot_trusted);
 
-static int pil_q6v4_shutdown_trusted(struct pil_desc *pil)
+int pil_q6v4_shutdown_trusted(struct pil_desc *pil)
 {
 	int ret;
-	struct q6v4_data *drv = dev_get_drvdata(pil->dev);
-	struct pil_q6v4_pdata *pdata = pil->dev->platform_data;
+	struct q6v4_data *drv = pil_to_q6v4_data(pil);
 
 	/* Make sure bus port is halted */
-	msm_bus_axi_porthalt(pdata->bus_port);
+	msm_bus_axi_porthalt(drv->bus_port);
 
-	ret = pas_shutdown(pdata->pas_id);
+	ret = pas_shutdown(drv->pas_id);
 	if (ret)
 		return ret;
 
-	if (drv->vreg_enabled) {
-		regulator_disable(drv->vreg);
-		drv->vreg_enabled = false;
-	}
+	pil_q6v4_power_down(drv);
 
 	return ret;
 }
+EXPORT_SYMBOL(pil_q6v4_shutdown_trusted);
 
-static struct pil_reset_ops pil_q6v4_ops_trusted = {
-	.init_image = pil_q6v4_init_image_trusted,
-	.auth_and_reset = pil_q6v4_reset_trusted,
-	.shutdown = pil_q6v4_shutdown_trusted,
-	.proxy_vote = pil_q6v4_make_proxy_votes,
-	.proxy_unvote = pil_q6v4_remove_proxy_votes,
-};
-
-static int __devinit pil_q6v4_driver_probe(struct platform_device *pdev)
+void __devinit
+pil_q6v4_init(struct q6v4_data *drv, const struct pil_q6v4_pdata *pdata)
 {
-	const struct pil_q6v4_pdata *pdata = pdev->dev.platform_data;
-	struct q6v4_data *drv;
-	struct resource *res;
-	struct pil_desc *desc;
-	int ret;
+	drv->strap_tcm_base	= pdata->strap_tcm_base;
+	drv->strap_ahb_upper	= pdata->strap_ahb_upper;
+	drv->strap_ahb_lower	= pdata->strap_ahb_lower;
+	drv->aclk_reg		= pdata->aclk_reg;
+	drv->jtag_clk_reg	= pdata->jtag_clk_reg;
+	drv->pas_id		= pdata->pas_id;
+	drv->bus_port		= pdata->bus_port;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
-
-	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
-	if (!drv)
-		return -ENOMEM;
-	platform_set_drvdata(pdev, drv);
-
-	drv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (!drv->base)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (res) {
-		drv->modem_base = devm_ioremap(&pdev->dev, res->start,
-				resource_size(res));
-		if (!drv->modem_base)
-			return -ENOMEM;
-	}
-
-	desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
-	if (!desc)
-		return -ENOMEM;
-
-	drv->pll_supply = devm_regulator_get(&pdev->dev, "pll_vdd");
-	if (IS_ERR(drv->pll_supply)) {
-		drv->pll_supply = NULL;
-	} else {
-		ret = regulator_set_voltage(drv->pll_supply, 1800000, 1800000);
-		if (ret) {
-			dev_err(&pdev->dev, "failed to set pll voltage\n");
-			return ret;
-		}
-
-		ret = regulator_set_optimum_mode(drv->pll_supply, 100000);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "failed to set pll optimum mode\n");
-			return ret;
-		}
-	}
-
-	desc->name = pdata->name;
-	desc->depends_on = pdata->depends;
-	desc->dev = &pdev->dev;
-	desc->owner = THIS_MODULE;
-	desc->proxy_timeout = 10000;
-
-	if (pas_supported(pdata->pas_id) > 0) {
-		desc->ops = &pil_q6v4_ops_trusted;
-		dev_info(&pdev->dev, "using secure boot\n");
-	} else {
-		desc->ops = &pil_q6v4_ops;
-		dev_info(&pdev->dev, "using non-secure boot\n");
-	}
-
-	drv->vreg = devm_regulator_get(&pdev->dev, "core_vdd");
-	if (IS_ERR(drv->vreg))
-		return PTR_ERR(drv->vreg);
-
-	ret = regulator_set_optimum_mode(drv->vreg, 100000);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Failed to set regulator's mode.\n");
-		return ret;
-	}
-
-	drv->xo = devm_clk_get(&pdev->dev, "xo");
-	if (IS_ERR(drv->xo))
-		return PTR_ERR(drv->xo);
-
-	drv->pil = msm_pil_register(desc);
-	if (IS_ERR(drv->pil))
-		return PTR_ERR(drv->pil);
-	return 0;
+	regulator_set_optimum_mode(drv->vreg, 100000);
 }
-
-static int __devexit pil_q6v4_driver_exit(struct platform_device *pdev)
-{
-	struct q6v4_data *drv = platform_get_drvdata(pdev);
-	msm_pil_unregister(drv->pil);
-	return 0;
-}
-
-static struct platform_driver pil_q6v4_driver = {
-	.probe = pil_q6v4_driver_probe,
-	.remove = __devexit_p(pil_q6v4_driver_exit),
-	.driver = {
-		.name = "pil_qdsp6v4",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init pil_q6v4_init(void)
-{
-	return platform_driver_register(&pil_q6v4_driver);
-}
-module_init(pil_q6v4_init);
-
-static void __exit pil_q6v4_exit(void)
-{
-	platform_driver_unregister(&pil_q6v4_driver);
-}
-module_exit(pil_q6v4_exit);
+EXPORT_SYMBOL(pil_q6v4_init);
 
 MODULE_DESCRIPTION("Support for booting QDSP6v4 (Hexagon) processors");
 MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/pil-q6v4.h b/arch/arm/mach-msm/pil-q6v4.h
index b0b97d0..d280d84 100644
--- a/arch/arm/mach-msm/pil-q6v4.h
+++ b/arch/arm/mach-msm/pil-q6v4.h
@@ -12,6 +12,8 @@
 #ifndef __MSM_PIL_Q6V4_H
 #define __MSM_PIL_Q6V4_H
 
+#include "peripheral-loader.h"
+
 struct pil_q6v4_pdata {
 	const unsigned long strap_tcm_base;
 	const unsigned long strap_ahb_upper;
@@ -23,4 +25,50 @@
 	const unsigned pas_id;
 	int bus_port;
 };
+
+struct clk;
+struct pil_device;
+struct regulator;
+
+/**
+ * struct q6v4_data - per-Q6v4 (Hexagon) subsystem driver state
+ */
+struct q6v4_data {
+	void __iomem *base;
+	unsigned long start_addr;
+	unsigned long strap_tcm_base;
+	unsigned long strap_ahb_upper;
+	unsigned long strap_ahb_lower;
+	void __iomem *aclk_reg;
+	void __iomem *jtag_clk_reg;
+	unsigned pas_id;
+	int bus_port;
+
+	struct regulator *vreg;
+	struct regulator *pll_supply;
+	bool vreg_enabled;
+	struct clk *xo;
+
+	struct pil_device *pil;
+	struct pil_desc desc;
+};
+
+#define pil_to_q6v4_data(p) container_of(p, struct q6v4_data, desc)
+
+extern int pil_q6v4_init_image(struct pil_desc *pil, const u8 *metadata,
+		size_t size);
+extern int pil_q6v4_make_proxy_votes(struct pil_desc *pil);
+extern void pil_q6v4_remove_proxy_votes(struct pil_desc *pil);
+extern int pil_q6v4_power_up(struct q6v4_data *drv);
+extern void pil_q6v4_power_down(struct q6v4_data *drv);
+extern int pil_q6v4_boot(struct pil_desc *pil);
+extern int pil_q6v4_shutdown(struct pil_desc *pil);
+
+extern int pil_q6v4_init_image_trusted(struct pil_desc *pil,
+		const u8 *metadata, size_t size);
+extern int pil_q6v4_boot_trusted(struct pil_desc *pil);
+extern int pil_q6v4_shutdown_trusted(struct pil_desc *pil);
+extern void __devinit
+pil_q6v4_init(struct q6v4_data *drv, const struct pil_q6v4_pdata *p);
+
 #endif
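
The refactor above turns pil-q6v4.c from a self-contained platform driver into a small library: struct q6v4_data embeds the pil_desc, pil_to_q6v4_data() recovers it with container_of(), and the boot/shutdown/proxy helpers are exported so SoC-specific front-end drivers can reuse them. A minimal sketch of such a front end follows; it mirrors the probe code removed above, but the driver itself, the "core_vdd"/"xo" names and the single MEM resource are illustrative assumptions, not part of this patch.

/*
 * Hypothetical consumer of the exported pil_q6v4_* helpers above.
 * Driver name, supply/clock names and resource layout are assumptions.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "peripheral-loader.h"
#include "pil-q6v4.h"

static struct pil_reset_ops pil_my_q6_ops = {
	.init_image	= pil_q6v4_init_image,
	.auth_and_reset	= pil_q6v4_boot,
	.shutdown	= pil_q6v4_shutdown,
	.proxy_vote	= pil_q6v4_make_proxy_votes,
	.proxy_unvote	= pil_q6v4_remove_proxy_votes,
};

static int __devinit pil_my_q6_probe(struct platform_device *pdev)
{
	const struct pil_q6v4_pdata *pdata = pdev->dev.platform_data;
	struct q6v4_data *drv;
	struct resource *res;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;
	platform_set_drvdata(pdev, drv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	drv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!drv->base)
		return -ENOMEM;

	drv->vreg = devm_regulator_get(&pdev->dev, "core_vdd");
	if (IS_ERR(drv->vreg))
		return PTR_ERR(drv->vreg);
	drv->xo = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(drv->xo))
		return PTR_ERR(drv->xo);

	/* Copy the strap/clock/bus fields out of the platform data */
	pil_q6v4_init(drv, pdata);

	drv->desc.name = pdata->name;
	drv->desc.dev = &pdev->dev;
	drv->desc.owner = THIS_MODULE;
	drv->desc.proxy_timeout = 10000;
	drv->desc.ops = &pil_my_q6_ops;

	drv->pil = msm_pil_register(&drv->desc);
	return IS_ERR(drv->pil) ? PTR_ERR(drv->pil) : 0;
}
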
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index 7c31e76..f97eb9a 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -943,7 +943,7 @@
 
 static bool msm_rpm_set_standalone(void)
 {
-	if (machine_is_msm8974()) {
+	if (machine_is_msm9625()) {
 		pr_warn("%s(): Running in standalone mode, requests "
 				"will not be sent to RPM\n", __func__);
 		standalone = true;
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index ac077e9..969af98 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -286,6 +286,9 @@
 	/* 8092 IDs */
 	[146] = MSM_CPU_8092,
 
+	/* 8910 IDs */
+	[147] = MSM_CPU_8910,
+
 	/* 8064AB IDs */
 	[153] = MSM_CPU_8064AB,
 
@@ -732,6 +735,10 @@
 		dummy_socinfo.id = 146;
 		strlcpy(dummy_socinfo.build_id, "mpq8092 - ",
 		sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8910()) {
+		dummy_socinfo.id = 147;
+		strlcpy(dummy_socinfo.build_id, "msm8910 - ",
+			sizeof(dummy_socinfo.build_id));
 	}
 	strlcat(dummy_socinfo.build_id, "Dummy socinfo",
 		sizeof(dummy_socinfo.build_id));
diff --git a/drivers/cpufreq/cpufreq_gov_msm.c b/drivers/cpufreq/cpufreq_gov_msm.c
index 11eb9f5..6ddbf4e 100644
--- a/drivers/cpufreq/cpufreq_gov_msm.c
+++ b/drivers/cpufreq/cpufreq_gov_msm.c
@@ -18,20 +18,114 @@
 #include <linux/kobject.h>
 #include <linux/cpufreq.h>
 #include <linux/platform_device.h>
+#include <linux/cpu_pm.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
 #include <mach/msm_dcvs.h>
 
+struct cpu_idle_info {
+	int			enabled;
+	int			dcvs_core_id;
+	struct pm_qos_request	pm_qos_req;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_idle_info, cpu_idle_info);
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, iowait_on_cpu);
+static uint32_t latency;
+
+static int msm_dcvs_idle_notifier(int core_num,
+		enum msm_core_control_event event)
+{
+	struct cpu_idle_info *info = &per_cpu(cpu_idle_info, core_num);
+
+	switch (event) {
+	case MSM_DCVS_ENABLE_IDLE_PULSE:
+		info->enabled = true;
+		break;
+
+	case MSM_DCVS_DISABLE_IDLE_PULSE:
+		info->enabled = false;
+		break;
+
+	case MSM_DCVS_ENABLE_HIGH_LATENCY_MODES:
+		pm_qos_update_request(&info->pm_qos_req, PM_QOS_DEFAULT_VALUE);
+		break;
+
+	case MSM_DCVS_DISABLE_HIGH_LATENCY_MODES:
+		pm_qos_update_request(&info->pm_qos_req, latency);
+		break;
+	}
+
+	return 0;
+}
+
+static int msm_cpuidle_notifier(struct notifier_block *self, unsigned long cmd,
+		void *v)
+{
+	struct cpu_idle_info *info =
+		&per_cpu(cpu_idle_info, smp_processor_id());
+	u64 io_wait_us = 0;
+	u64 prev_io_wait_us = 0;
+	u64 last_update_time = 0;
+	u64 val = 0;
+	uint32_t iowaited = 0;
+
+	if (!info->enabled)
+		return NOTIFY_OK;
+
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		val = get_cpu_iowait_time_us(smp_processor_id(),
+					&last_update_time);
+		/* val could be -1 when NOHZ is not enabled */
+		if (val == (u64)-1)
+			val = 0;
+		per_cpu(iowait_on_cpu, smp_processor_id()) = val;
+		msm_dcvs_idle(info->dcvs_core_id, MSM_DCVS_IDLE_ENTER, 0);
+		break;
+
+	case CPU_PM_EXIT:
+		prev_io_wait_us = per_cpu(iowait_on_cpu, smp_processor_id());
+		val = get_cpu_iowait_time_us(smp_processor_id(),
+				&last_update_time);
+		if (val == (u64)-1)
+			val = 0;
+		io_wait_us = val;
+		iowaited = (io_wait_us - prev_io_wait_us);
+		msm_dcvs_idle(info->dcvs_core_id, MSM_DCVS_IDLE_EXIT, iowaited);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block idle_nb = {
+	.notifier_call = msm_cpuidle_notifier,
+};
+
+static void msm_gov_idle_source_init(int cpu, int dcvs_core_id)
+{
+	struct cpu_idle_info *info = NULL;
+
+	info = &per_cpu(cpu_idle_info, cpu);
+	info->dcvs_core_id = dcvs_core_id;
+
+	pm_qos_add_request(&info->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+				PM_QOS_DEFAULT_VALUE);
+}
+
 struct msm_gov {
-	int cpu;
-	unsigned int cur_freq;
-	unsigned int min_freq;
-	unsigned int max_freq;
-	struct msm_dcvs_freq gov_notifier;
-	struct cpufreq_policy *policy;
+	int			cpu;
+	unsigned int		cur_freq;
+	unsigned int		min_freq;
+	unsigned int		max_freq;
+	struct cpufreq_policy	*policy;
+	int			dcvs_core_id;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct mutex, gov_mutex);
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_gov, msm_gov_info);
-static char core_name[NR_CPUS][10];
 
 static void msm_gov_check_limits(struct cpufreq_policy *policy)
 {
@@ -40,7 +134,7 @@
 	if (policy->max < gov->cur_freq)
 		__cpufreq_driver_target(policy, policy->max,
 				CPUFREQ_RELATION_H);
-	else if (policy->min > gov->min_freq)
+	else if (policy->min > gov->cur_freq)
 		__cpufreq_driver_target(policy, policy->min,
 				CPUFREQ_RELATION_L);
 	else
@@ -50,14 +144,14 @@
 	gov->cur_freq = policy->cur;
 	gov->min_freq = policy->min;
 	gov->max_freq = policy->max;
+	msm_dcvs_update_limits(gov->dcvs_core_id);
 }
 
-static int msm_dcvs_freq_set(struct msm_dcvs_freq *self,
+static int msm_dcvs_freq_set(int core_num,
 		unsigned int freq)
 {
 	int ret = -EINVAL;
-	struct msm_gov *gov =
-		container_of(self, struct msm_gov, gov_notifier);
+	struct msm_gov *gov = &per_cpu(msm_gov_info, core_num);
 
 	mutex_lock(&per_cpu(gov_mutex, gov->cpu));
 
@@ -81,17 +175,15 @@
 	return ret;
 }
 
-static unsigned int msm_dcvs_freq_get(struct msm_dcvs_freq *self)
+static unsigned int msm_dcvs_freq_get(int core_num)
 {
-	struct msm_gov *gov =
-		container_of(self, struct msm_gov, gov_notifier);
-
+	struct msm_gov *gov = &per_cpu(msm_gov_info, core_num);
 	/*
 	 * the rw_sem in cpufreq is always held when this is called.
 	 * The policy->cur won't be updated in this case - so it is safe to
 	 * access policy->cur
 	 */
-	return gov->cur_freq;
+	return gov->policy->cur;
 }
 
 static int cpufreq_governor_msm(struct cpufreq_policy *policy,
@@ -101,8 +193,6 @@
 	int ret = 0;
 	int handle = 0;
 	struct msm_gov *gov = &per_cpu(msm_gov_info, policy->cpu);
-	struct msm_dcvs_freq *dcvs_notifier =
-			&(per_cpu(msm_gov_info, cpu).gov_notifier);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -112,17 +202,14 @@
 		mutex_lock(&per_cpu(gov_mutex, cpu));
 		per_cpu(msm_gov_info, cpu).cpu = cpu;
 		gov->policy = policy;
-		dcvs_notifier->core_name = core_name[cpu];
-		dcvs_notifier->set_frequency = msm_dcvs_freq_set;
-		dcvs_notifier->get_frequency = msm_dcvs_freq_get;
-		handle = msm_dcvs_freq_sink_register(dcvs_notifier);
+		handle = msm_dcvs_freq_sink_start(gov->dcvs_core_id);
 		BUG_ON(handle < 0);
 		msm_gov_check_limits(policy);
 		mutex_unlock(&per_cpu(gov_mutex, cpu));
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		msm_dcvs_freq_sink_unregister(dcvs_notifier);
+		msm_dcvs_freq_sink_stop(gov->dcvs_core_id);
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
@@ -143,23 +230,40 @@
 
 static int __devinit msm_gov_probe(struct platform_device *pdev)
 {
-	int ret = 0;
 	int cpu;
 	struct msm_dcvs_core_info *core = NULL;
+	struct msm_dcvs_core_info *core_info = NULL;
+	struct msm_gov_platform_data *pdata = pdev->dev.platform_data;
 	int sensor = 0;
 
 	core = pdev->dev.platform_data;
+	core_info = pdata->info;
+	latency = pdata->latency;
 
 	for_each_possible_cpu(cpu) {
+		struct msm_gov *gov = &per_cpu(msm_gov_info, cpu);
+
 		mutex_init(&per_cpu(gov_mutex, cpu));
-		snprintf(core_name[cpu], 10, "cpu%d", cpu);
 		if (cpu < core->num_cores)
-			sensor = core->sensors[cpu];
-		ret = msm_dcvs_register_core(core_name[cpu], core, sensor);
-		if (ret)
+			sensor = core_info->sensors[cpu];
+		gov->dcvs_core_id = msm_dcvs_register_core(
+						MSM_DCVS_CORE_TYPE_CPU,
+						cpu,
+						core_info,
+						msm_dcvs_freq_set,
+						msm_dcvs_freq_get,
+						msm_dcvs_idle_notifier,
+						sensor);
+		if (gov->dcvs_core_id < 0) {
 			pr_err("Unable to register core for %d\n", cpu);
+			return -EINVAL;
+		}
+
+		msm_gov_idle_source_init(cpu, gov->dcvs_core_id);
 	}
 
+	cpu_pm_register_notifier(&idle_nb);
+
 	return cpufreq_register_governor(&cpufreq_gov_msm);
 }
 
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 97711e2..4eca776 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2263,12 +2263,15 @@
 	static uint io_cnt;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct adreno_context *adreno_ctx = context ? context->devctxt : NULL;
 	int retries = 0;
 	unsigned int ts_issued;
 	unsigned int context_id = _get_context_id(context);
 	unsigned int time_elapsed = 0;
 	unsigned int prev_reg_val[hang_detect_regs_count];
 	unsigned int wait;
+	unsigned int retry_ts_cmp = 0;
+	unsigned int retry_ts_cmp_msecs = KGSL_SYNCOBJ_SERVER_TIMEOUT;
 
 	memset(prev_reg_val, 0, sizeof(prev_reg_val));
 
@@ -2278,12 +2281,20 @@
 	if (msecs == KGSL_TIMEOUT_DEFAULT)
 		msecs = adreno_dev->wait_timeout;
 
+	/*
+	 * With user-generated timestamps, if this check fails, perform it
+	 * again after 'retry_ts_cmp_msecs' milliseconds.
+	 */
 	if (timestamp_cmp(timestamp, ts_issued) > 0) {
-		KGSL_DRV_ERR(device, "Cannot wait for invalid ts <%d:0x%x>, "
-			"last issued ts <%d:0x%x>\n",
-			context_id, timestamp, context_id, ts_issued);
-		status = -EINVAL;
-		goto done;
+		if (!adreno_ctx ||
+		    !(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS)) {
+			KGSL_DRV_ERR(device,
+				"Cannot wait for invalid ts <%d:0x%x>, "
+				"last issued ts <%d:0x%x>\n",
+				context_id, timestamp, context_id, ts_issued);
+			status = -EINVAL;
+			goto done;
+		} else
+			retry_ts_cmp = 1;
 	}
 
 	/*
@@ -2353,7 +2364,22 @@
 		time_elapsed += wait;
 		wait = KGSL_TIMEOUT_PART;
 
-		retries++;
+		if (!retry_ts_cmp)
+			retries++;
+		else if (time_elapsed >= retry_ts_cmp_msecs) {
+			ts_issued =
+				adreno_dev->ringbuffer.timestamp[context_id];
+			if (timestamp_cmp(timestamp, ts_issued) > 0) {
+				KGSL_DRV_ERR(device,
+				"Cannot wait for user-generated ts <%d:0x%x>, "
+				"not submitted within server timeout period. "
+				"last issued ts <%d:0x%x>\n",
+				context_id, timestamp, context_id, ts_issued);
+				status = -EINVAL;
+				goto done;
+			}
+			retry_ts_cmp = 0;
+		}
 
 	} while (!msecs || time_elapsed < msecs);
 
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 66402fe..c525943 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -33,7 +33,7 @@
 /* Flags to control command packet settings */
 #define KGSL_CMD_FLAGS_NONE             0x00000000
 #define KGSL_CMD_FLAGS_PMODE		0x00000001
-#define KGSL_CMD_FLAGS_DUMMY_INTR_CMD	0x00000002
+#define KGSL_CMD_FLAGS_INTERNAL_ISSUE	0x00000002
 
 /* Command identifiers */
 #define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0x2EADBEEF
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 7cbc7a8..a107a27 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -169,6 +169,14 @@
 	if (flags & KGSL_CONTEXT_PER_CONTEXT_TS)
 		drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
 
+	if (flags & KGSL_CONTEXT_USER_GENERATED_TS) {
+		if (!(flags & KGSL_CONTEXT_PER_CONTEXT_TS)) {
+			ret = -EINVAL;
+			goto err;
+		}
+		drawctxt->flags |= CTXT_FLAGS_USER_GENERATED_TS;
+	}
+
 	ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
 	if (ret)
 		goto err;
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 034d6e9..58e4791 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -48,6 +48,8 @@
 #define CTXT_FLAGS_GPU_HANG_RECOVERED	BIT(12)
 /* Context is being destroyed so dont save it */
 #define CTXT_FLAGS_BEING_DESTROYED	BIT(13)
+/* User mode generated timestamps enabled */
+#define CTXT_FLAGS_USER_GENERATED_TS    BIT(14)
 
 struct kgsl_device;
 struct adreno_device;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 0dd140b..da9daf7 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -487,11 +487,10 @@
 adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 				struct adreno_context *context,
 				unsigned int flags, unsigned int *cmds,
-				int sizedwords)
+				int sizedwords, uint32_t timestamp)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
 	unsigned int *ringcmds;
-	unsigned int timestamp;
 	unsigned int total_sizedwords = sizedwords;
 	unsigned int i;
 	unsigned int rcmd_gpu;
@@ -506,19 +505,38 @@
 	if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
 		context_id = context->id;
 
+	if (context && (context->flags & CTXT_FLAGS_USER_GENERATED_TS) &&
+			!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+		if (timestamp_cmp(rb->timestamp[context_id],
+						timestamp) >= 0) {
+			KGSL_DRV_ERR(rb->device,
+				"Invalid user generated ts <%d:0x%x>, "
+				"less than last issued ts <%d:0x%x>\n",
+				context_id, timestamp, context_id,
+				rb->timestamp[context_id]);
+			return -ERANGE;
+		}
+	}
+
 	/* reserve space to temporarily turn off protected mode
 	*  error checking if needed
 	*/
 	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
 	/* 2 dwords to store the start of command sequence */
 	total_sizedwords += 2;
-	total_sizedwords += context ? 7 : 0;
+	/*
+	 * Add CP_COND_EXEC commands to generate CP_INTERRUPT only
+	 * for submissions from userspace.
+	 */
+	total_sizedwords += (context &&
+			!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) ? 7 : 0;
 
 	if (adreno_is_a3xx(adreno_dev))
 		total_sizedwords += 7;
 
 	total_sizedwords += 2; /* scratchpad ts for recovery */
-	if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS) {
+	if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS &&
+			!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
 		total_sizedwords += 3; /* sop timestamp */
 		total_sizedwords += 4; /* eop timestamp */
 		total_sizedwords += 3; /* global timestamp without cache
@@ -564,10 +582,13 @@
 	/* always increment the global timestamp. once. */
 	rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;
 
-	if (context && !(flags & KGSL_CMD_FLAGS_DUMMY_INTR_CMD)) {
+	/* Do not update context's timestamp for internal submissions */
+	if (context && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
 		if (context_id == KGSL_MEMSTORE_GLOBAL)
 			rb->timestamp[context->id] =
 				rb->timestamp[KGSL_MEMSTORE_GLOBAL];
+		else if (context->flags & CTXT_FLAGS_USER_GENERATED_TS)
+			rb->timestamp[context_id] = timestamp;
 		else
 			rb->timestamp[context_id]++;
 	}
@@ -591,7 +612,8 @@
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
 	}
 
-	if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS) {
+	if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS
+			&& !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
 		/* start-of-pipeline timestamp */
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_MEM_WRITE, 2));
@@ -619,11 +641,13 @@
 			cp_type3_packet(CP_EVENT_WRITE, 3));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[context_id]);
+			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+						eoptimestamp)));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+				rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
 	}
 
-	if (context) {
+	if (context && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
 		/* Conditional execution based on memory values */
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_COND_EXEC, 4));
@@ -675,8 +699,8 @@
 		device->state & KGSL_STATE_HUNG)
 		return;
 
-	adreno_ringbuffer_addcmds(rb, a_ctxt, KGSL_CMD_FLAGS_DUMMY_INTR_CMD,
-			cmds, sizedwords);
+	adreno_ringbuffer_addcmds(rb, a_ctxt, KGSL_CMD_FLAGS_INTERNAL_ISSUE,
+					cmds, sizedwords, 0);
 }
 
 unsigned int
@@ -692,7 +716,11 @@
 	if (device->state & KGSL_STATE_HUNG)
 		return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
 					KGSL_TIMESTAMP_RETIRED);
-	return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds, sizedwords);
+
+	flags |= KGSL_CMD_FLAGS_INTERNAL_ISSUE;
+
+	return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds,
+							sizedwords, 0);
 }
 
 static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
@@ -973,8 +1001,9 @@
 	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
 
 	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
-					drawctxt, 0,
-					&link[0], (cmds - link));
+					drawctxt,
+					0,
+					&link[0], (cmds - link), *timestamp);
 
 	KGSL_CMD_INFO(device, "<%d:0x%x> g %08x numibs %d\n",
 		context->id, *timestamp, (unsigned int)ibdesc, numibs);
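
Taken together, the adreno_ringbuffer_addcmds() changes update the per-context timestamp under three mutually exclusive rules: internal driver submissions never touch it, user-generated timestamps are stored verbatim (after the monotonicity check near the top of the function), and everything else auto-increments. The helper below is only a summary sketch of that decision using the flag values defined in this patch; the KGSL_MEMSTORE_GLOBAL copy case is omitted.

/*
 * Summary sketch of the timestamp-selection rule in
 * adreno_ringbuffer_addcmds() after this patch; illustrative only.
 * Flag values match the definitions added in adreno.h/adreno_drawctxt.h.
 */
#define KGSL_CMD_FLAGS_INTERNAL_ISSUE	0x00000002
#define CTXT_FLAGS_USER_GENERATED_TS	(1U << 14)	/* BIT(14) */

static unsigned int pick_context_ts(unsigned int ctxt_flags,
				    unsigned int cmd_flags,
				    unsigned int current_ts,
				    unsigned int user_ts)
{
	/* Internal (driver-issued) commands never advance the context ts */
	if (cmd_flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)
		return current_ts;

	/*
	 * User-generated timestamps are used verbatim; addcmds() has
	 * already rejected values that do not move forward.
	 */
	if (ctxt_flags & CTXT_FLAGS_USER_GENERATED_TS)
		return user_ts;

	/* Default: driver-managed, monotonically increasing */
	return current_ts + 1;
}
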
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 5ba844a..0c61d7f 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -722,6 +722,7 @@
 	list_add(&private->list, &kgsl_driver.process_list);
 
 	kgsl_process_init_sysfs(private);
+	kgsl_process_init_debugfs(private);
 
 out:
 	mutex_unlock(&kgsl_driver.process_mutex);
@@ -744,6 +745,7 @@
 		goto unlock;
 
 	kgsl_process_uninit_sysfs(private);
+	debugfs_remove_recursive(private->debug_root);
 
 	list_del(&private->list);
 
@@ -1080,6 +1082,7 @@
 				      unsigned int cmd, void *data)
 {
 	int result = 0;
+	int i = 0;
 	struct kgsl_ringbuffer_issueibcmds *param = data;
 	struct kgsl_ibdesc *ibdesc;
 	struct kgsl_context *context;
@@ -1141,6 +1144,16 @@
 		param->numibs = 1;
 	}
 
+	for (i = 0; i < param->numibs; i++) {
+		if (!kgsl_mmu_gpuaddr_in_range(ibdesc[i].gpuaddr)) {
+			result = -ERANGE;
+			KGSL_DRV_ERR(dev_priv->device,
+				     "invalid ib base GPU virtual addr %x\n",
+				     ibdesc[i].gpuaddr);
+			goto free_ibdesc;
+		}
+	}
+
 	result = dev_priv->device->ftbl->issueibcmds(dev_priv,
 					     context,
 					     ibdesc,
@@ -1632,11 +1645,16 @@
 #endif
 
 static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
-		struct kgsl_pagetable *pagetable, int fd)
+		struct kgsl_pagetable *pagetable, void *data)
 {
 	struct ion_handle *handle;
 	struct scatterlist *s;
 	struct sg_table *sg_table;
+	struct kgsl_map_user_mem *param = data;
+	int fd = param->fd;
+
+	if (!param->len)
+		return -EINVAL;
 
 	if (IS_ERR_OR_NULL(kgsl_ion_client))
 		return -ENODEV;
@@ -1741,8 +1759,7 @@
 		entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
 		break;
 	case KGSL_USER_MEM_TYPE_ION:
-		result = kgsl_setup_ion(entry, private->pagetable,
-			param->fd);
+		result = kgsl_setup_ion(entry, private->pagetable, data);
 		break;
 	default:
 		KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 416eda9..472474b 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -182,6 +182,8 @@
 	struct kgsl_process_private *private, unsigned int gpuaddr,
 	size_t size);
 
+void kgsl_get_memory_usage(char *str, size_t len, unsigned int memflags);
+
 int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
 	void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
 	void *owner);
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 545d2b3..40ed7ca 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -16,6 +16,7 @@
 
 #include "kgsl.h"
 #include "kgsl_device.h"
+#include "kgsl_sharedmem.h"
 
 /*default log levels is error for everything*/
 #define KGSL_LOG_LEVEL_DEFAULT 3
@@ -23,6 +24,7 @@
 
 struct dentry *kgsl_debugfs_dir;
 static struct dentry *pm_d_debugfs;
+struct dentry *proc_d_debugfs;
 
 static int pm_dump_set(void *data, u64 val)
 {
@@ -146,9 +148,80 @@
 
 }
 
+static const char * const memtype_strings[] = {
+	"gpumem",
+	"pmem",
+	"ashmem",
+	"usermap",
+	"ion",
+};
+
+static const char *memtype_str(int memtype)
+{
+	if (memtype < ARRAY_SIZE(memtype_strings))
+		return memtype_strings[memtype];
+	return "unknown";
+}
+
+static int process_mem_print(struct seq_file *s, void *unused)
+{
+	struct kgsl_mem_entry *entry;
+	struct rb_node *node;
+	struct kgsl_process_private *private = s->private;
+	char flags[3];
+	char usage[16];
+
+	spin_lock(&private->mem_lock);
+	seq_printf(s, "%8s %8s %5s %10s %16s %5s\n",
+		   "gpuaddr", "size", "flags", "type", "usage", "sglen");
+	for (node = rb_first(&private->mem_rb); node; node = rb_next(node)) {
+		struct kgsl_memdesc *m;
+
+		entry = rb_entry(node, struct kgsl_mem_entry, node);
+		m = &entry->memdesc;
+
+		flags[0] = m->priv & KGSL_MEMFLAGS_GLOBAL ?  'g' : '-';
+		flags[1] = m->priv & KGSL_MEMFLAGS_GPUREADONLY ? 'r' : '-';
+		flags[2] = '\0';
+
+		kgsl_get_memory_usage(usage, sizeof(usage), m->priv);
+
+		seq_printf(s, "%08x %8d %5s %10s %16s %5d\n",
+			   m->gpuaddr, m->size, flags,
+			   memtype_str(entry->memtype), usage, m->sglen);
+	}
+	spin_unlock(&private->mem_lock);
+	return 0;
+}
+
+static int process_mem_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, process_mem_print, inode->i_private);
+}
+
+static const struct file_operations process_mem_fops = {
+	.open = process_mem_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void
+kgsl_process_init_debugfs(struct kgsl_process_private *private)
+{
+	unsigned char name[16];
+
+	snprintf(name, sizeof(name), "%d", private->pid);
+
+	private->debug_root = debugfs_create_dir(name, proc_d_debugfs);
+	debugfs_create_file("mem", 0400, private->debug_root, private,
+			    &process_mem_fops);
+}
+
 void kgsl_core_debugfs_init(void)
 {
 	kgsl_debugfs_dir = debugfs_create_dir("kgsl", 0);
+	proc_d_debugfs = debugfs_create_dir("proc", kgsl_debugfs_dir);
 }
 
 void kgsl_core_debugfs_close(void)
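
kgsl_process_init_debugfs() gives each process a directory named after its PID under the new kgsl "proc" debugfs directory, containing a read-only "mem" file that lists its GPU allocations. A small user-space reader is sketched below; the /sys/kernel/debug mount point is the usual location but is an assumption, as is the choice to simply dump the file.

/*
 * Tiny reader for the new per-process kgsl debugfs file.
 * The debugfs mount point and the target PID are illustrative assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	char path[128], line[256];
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/kgsl/proc/%s/mem", argv[1]);

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}

	/* Dumps gpuaddr, size, flags, type, usage and sglen per entry */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}
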
diff --git a/drivers/gpu/msm/kgsl_debugfs.h b/drivers/gpu/msm/kgsl_debugfs.h
index 5e10988..898c4e9 100644
--- a/drivers/gpu/msm/kgsl_debugfs.h
+++ b/drivers/gpu/msm/kgsl_debugfs.h
@@ -15,6 +15,7 @@
 #define _KGSL_DEBUGFS_H
 
 struct kgsl_device;
+struct kgsl_process_private;
 
 #ifdef CONFIG_DEBUG_FS
 void kgsl_core_debugfs_init(void);
@@ -28,11 +29,16 @@
 	return kgsl_debugfs_dir;
 }
 
+void kgsl_process_init_debugfs(struct kgsl_process_private *priv);
 #else
 static inline void kgsl_core_debugfs_init(void) { }
 static inline void kgsl_device_debugfs_init(struct kgsl_device *device) { }
 static inline void kgsl_core_debugfs_close(void) { }
 static inline struct dentry *kgsl_get_debugfs_dir(void) { return NULL; }
+static inline void
+kgsl_process_init_debugfs(struct kgsl_process_private *priv)
+{
+}
 
 #endif
 
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index dc597f5..4394118 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -251,6 +251,7 @@
 	struct kgsl_pagetable *pagetable;
 	struct list_head list;
 	struct kobject kobj;
+	struct dentry *debug_root;
 
 	struct {
 		unsigned int cur;
diff --git a/drivers/gpu/msm/kgsl_pwrscale_msm.c b/drivers/gpu/msm/kgsl_pwrscale_msm.c
index 5b66e70..bb4ecd1 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_msm.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_msm.c
@@ -20,21 +20,21 @@
 #include "kgsl_trace.h"
 
 struct msm_priv {
-	struct kgsl_device *device;
-	int enabled;
-	int handle;
-	unsigned int cur_freq;
-	struct msm_dcvs_idle idle_source;
-	struct msm_dcvs_freq freq_sink;
-	struct msm_dcvs_core_info *core_info;
-	int gpu_busy;
+	struct kgsl_device		*device;
+	int				enabled;
+	unsigned int			cur_freq;
+	struct msm_dcvs_core_info	*core_info;
+	int				gpu_busy;
+	int				dcvs_core_id;
 };
 
-static int msm_idle_enable(struct msm_dcvs_idle *self,
-					enum msm_core_control_event event)
+/* reference to be used in idle and freq callbacks */
+static struct msm_priv *the_msm_priv;
+
+static int msm_idle_enable(int type_core_num,
+		enum msm_core_control_event event)
 {
-	struct msm_priv *priv = container_of(self, struct msm_priv,
-								idle_source);
+	struct msm_priv *priv = the_msm_priv;
 
 	switch (event) {
 	case MSM_DCVS_ENABLE_IDLE_PULSE:
@@ -53,12 +53,10 @@
 /* Set the requested frequency if it is within 5MHz (delta) of a
  * supported frequency.
  */
-static int msm_set_freq(struct msm_dcvs_freq *self,
-						unsigned int freq)
+static int msm_set_freq(int core_num, unsigned int freq)
 {
 	int i, delta = 5000000;
-	struct msm_priv *priv = container_of(self, struct msm_priv,
-								freq_sink);
+	struct msm_priv *priv = the_msm_priv;
 	struct kgsl_device *device = priv->device;
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 
@@ -79,10 +77,10 @@
 	return priv->cur_freq / 1000;
 }
 
-static unsigned int msm_get_freq(struct msm_dcvs_freq *self)
+static unsigned int msm_get_freq(int core_num)
 {
-	struct msm_priv *priv = container_of(self, struct msm_priv,
-								freq_sink);
+	struct msm_priv *priv = the_msm_priv;
+
 	/* return current frequency in kHz */
 	return priv->cur_freq / 1000;
 }
@@ -92,7 +90,7 @@
 {
 	struct msm_priv *priv = pwrscale->priv;
 	if (priv->enabled && !priv->gpu_busy) {
-		msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_EXIT, 0);
+		msm_dcvs_idle(priv->dcvs_core_id, MSM_DCVS_IDLE_EXIT, 0);
 		trace_kgsl_mpdcvs(device, 1);
 		priv->gpu_busy = 1;
 	}
@@ -106,7 +104,8 @@
 
 	if (priv->enabled && priv->gpu_busy)
 		if (device->ftbl->isidle(device)) {
-			msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+			msm_dcvs_idle(priv->dcvs_core_id,
+					MSM_DCVS_IDLE_ENTER, 0);
 			trace_kgsl_mpdcvs(device, 0);
 			priv->gpu_busy = 0;
 		}
@@ -119,7 +118,7 @@
 	struct msm_priv *priv = pwrscale->priv;
 
 	if (priv->enabled && priv->gpu_busy) {
-		msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+		msm_dcvs_idle(priv->dcvs_core_id, MSM_DCVS_IDLE_ENTER, 0);
 		trace_kgsl_mpdcvs(device, 0);
 		priv->gpu_busy = 0;
 	}
@@ -154,7 +153,7 @@
 {
 	struct msm_priv *priv;
 	struct msm_dcvs_freq_entry *tbl;
-	int i, ret, low_level;
+	int i, ret = -EINVAL, low_level;
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	struct platform_device *pdev =
 		container_of(device->parentdev, struct platform_device, dev);
@@ -171,31 +170,24 @@
 	low_level = pwr->num_pwrlevels - KGSL_PWRLEVEL_LAST_OFFSET;
 	for (i = 0; i <= low_level; i++)
 		tbl[i].freq = pwr->pwrlevels[low_level - i].gpu_freq / 1000;
-	ret = msm_dcvs_register_core(device->name, priv->core_info,
-			priv->core_info->sensors[0]);
-	if (ret) {
+	priv->dcvs_core_id = msm_dcvs_register_core(MSM_DCVS_CORE_TYPE_GPU, 0,
+				priv->core_info,
+				msm_set_freq, msm_get_freq, msm_idle_enable,
+				priv->core_info->sensors[0]);
+	if (priv->dcvs_core_id < 0) {
 		KGSL_PWR_ERR(device, "msm_dcvs_register_core failed");
 		goto err;
 	}
 
 	priv->device = device;
-	priv->idle_source.enable = msm_idle_enable;
-	priv->idle_source.core_name = device->name;
-	priv->handle = msm_dcvs_idle_source_register(&priv->idle_source);
-	if (priv->handle < 0) {
-		ret = priv->handle;
-		KGSL_PWR_ERR(device, "msm_dcvs_idle_source_register failed\n");
-		goto err;
-	}
 
-	priv->freq_sink.core_name = device->name;
-	priv->freq_sink.set_frequency = msm_set_freq;
-	priv->freq_sink.get_frequency = msm_get_freq;
-	ret = msm_dcvs_freq_sink_register(&priv->freq_sink);
+	the_msm_priv = priv;
+	ret = msm_dcvs_freq_sink_start(priv->dcvs_core_id);
 	if (ret >= 0) {
 		if (device->ftbl->isidle(device)) {
 			priv->gpu_busy = 0;
-			msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+			msm_dcvs_idle(priv->dcvs_core_id,
+					MSM_DCVS_IDLE_ENTER, 0);
 		} else {
 			priv->gpu_busy = 1;
 		}
@@ -204,7 +196,6 @@
 	}
 
 	KGSL_PWR_ERR(device, "msm_dcvs_freq_sink_register failed\n");
-	msm_dcvs_idle_source_unregister(&priv->idle_source);
 
 err:
 	kfree(pwrscale->priv);
@@ -220,8 +211,7 @@
 
 	if (pwrscale->priv == NULL)
 		return;
-	msm_dcvs_idle_source_unregister(&priv->idle_source);
-	msm_dcvs_freq_sink_unregister(&priv->freq_sink);
+	msm_dcvs_freq_sink_stop(priv->dcvs_core_id);
 	kfree(pwrscale->priv);
 	pwrscale->priv = NULL;
 	msm_restore_io_fraction(device);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index bdc5686..d48337a 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -919,3 +919,42 @@
 	return 0;
 }
 EXPORT_SYMBOL(kgsl_sharedmem_map_vma);
+
+static const char * const memtype_str[] = {
+	[KGSL_MEMTYPE_OBJECTANY] = "any(0)",
+	[KGSL_MEMTYPE_FRAMEBUFFER] = "framebuffer",
+	[KGSL_MEMTYPE_RENDERBUFFER] = "renderbuffer",
+	[KGSL_MEMTYPE_ARRAYBUFFER] = "arraybuffer",
+	[KGSL_MEMTYPE_ELEMENTARRAYBUFFER] = "elementarraybuffer",
+	[KGSL_MEMTYPE_VERTEXARRAYBUFFER] = "vertexarraybuffer",
+	[KGSL_MEMTYPE_TEXTURE] = "texture",
+	[KGSL_MEMTYPE_SURFACE] = "surface",
+	[KGSL_MEMTYPE_EGL_SURFACE] = "egl_surface",
+	[KGSL_MEMTYPE_GL] = "gl",
+	[KGSL_MEMTYPE_CL] = "cl",
+	[KGSL_MEMTYPE_CL_BUFFER_MAP] = "cl_buffer_map",
+	[KGSL_MEMTYPE_CL_BUFFER_NOMAP] = "cl_buffer_nomap",
+	[KGSL_MEMTYPE_CL_IMAGE_MAP] = "cl_image_map",
+	[KGSL_MEMTYPE_CL_IMAGE_NOMAP] = "cl_image_nomap",
+	[KGSL_MEMTYPE_CL_KERNEL_STACK] = "cl_kernel_stack",
+	[KGSL_MEMTYPE_COMMAND] = "command",
+	[KGSL_MEMTYPE_2D] = "2d",
+	[KGSL_MEMTYPE_EGL_IMAGE] = "egl_image",
+	[KGSL_MEMTYPE_EGL_SHADOW] = "egl_shadow",
+	[KGSL_MEMTYPE_MULTISAMPLE] = "egl_multisample",
+	/* KGSL_MEMTYPE_KERNEL handled below, to avoid huge array */
+};
+
+void kgsl_get_memory_usage(char *name, size_t name_size, unsigned int memflags)
+{
+	unsigned char type;
+
+	type = (memflags & KGSL_MEMTYPE_MASK) >> KGSL_MEMTYPE_SHIFT;
+	if (type == KGSL_MEMTYPE_KERNEL)
+		strlcpy(name, "kernel", name_size);
+	else if (type < ARRAY_SIZE(memtype_str) && memtype_str[type] != NULL)
+		strlcpy(name, memtype_str[type], name_size);
+	else
+		snprintf(name, name_size, "unknown(%3d)", type);
+}
+EXPORT_SYMBOL(kgsl_get_memory_usage);
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index bba06bc..81cb34f 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -316,17 +316,21 @@
 		__field(unsigned int, gpuaddr)
 		__field(unsigned int, size)
 		__field(unsigned int, tgid)
+		__array(char, usage, 16)
 	),
 
 	TP_fast_assign(
 		__entry->gpuaddr = mem_entry->memdesc.gpuaddr;
 		__entry->size = mem_entry->memdesc.size;
 		__entry->tgid = mem_entry->priv->pid;
+		kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+				     mem_entry->memdesc.priv);
 	),
 
 	TP_printk(
-		"gpuaddr=0x%08x size=%d tgid=%d",
-		__entry->gpuaddr, __entry->size, __entry->tgid
+		"gpuaddr=0x%08x size=%d tgid=%d usage=%s",
+		__entry->gpuaddr, __entry->size, __entry->tgid,
+		__entry->usage
 	)
 );
 
@@ -342,6 +346,7 @@
 		__field(int, fd)
 		__field(int, type)
 		__field(unsigned int, tgid)
+		__array(char, usage, 16)
 	),
 
 	TP_fast_assign(
@@ -350,12 +355,15 @@
 		__entry->fd = fd;
 		__entry->type = mem_entry->memtype;
 		__entry->tgid = mem_entry->priv->pid;
+		kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+				     mem_entry->memdesc.priv);
 	),
 
 	TP_printk(
-		"gpuaddr=0x%08x size=%d type=%d fd=%d tgid=%d",
+		"gpuaddr=0x%08x size=%d type=%d fd=%d tgid=%d usage %s",
 		__entry->gpuaddr, __entry->size,
-		__entry->type, __entry->fd, __entry->tgid
+		__entry->type, __entry->fd, __entry->tgid,
+		__entry->usage
 	)
 );
 
@@ -371,6 +379,7 @@
 		__field(int, type)
 		__field(int, fd)
 		__field(unsigned int, tgid)
+		__array(char, usage, 16)
 	),
 
 	TP_fast_assign(
@@ -378,12 +387,14 @@
 		__entry->size = mem_entry->memdesc.size;
 		__entry->type = mem_entry->memtype;
 		__entry->tgid = mem_entry->priv->pid;
+		kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+				     mem_entry->memdesc.priv);
 	),
 
 	TP_printk(
-		"gpuaddr=0x%08x size=%d type=%d tgid=%d",
+		"gpuaddr=0x%08x size=%d type=%d tgid=%d usage=%s",
 		__entry->gpuaddr, __entry->size, __entry->type,
-		__entry->tgid
+		__entry->tgid, __entry->usage
 	)
 );
 
@@ -399,6 +410,7 @@
 		__field(unsigned int, gpuaddr)
 		__field(unsigned int, size)
 		__field(int, type)
+		__array(char, usage, 16)
 		__field(unsigned int, drawctxt_id)
 		__field(unsigned int, curr_ts)
 		__field(unsigned int, free_ts)
@@ -408,6 +420,8 @@
 		__assign_str(device_name, device->name);
 		__entry->gpuaddr = mem_entry->memdesc.gpuaddr;
 		__entry->size = mem_entry->memdesc.size;
+		kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+				     mem_entry->memdesc.priv);
 		__entry->drawctxt_id = id;
 		__entry->type = mem_entry->memtype;
 		__entry->curr_ts = curr_ts;
@@ -415,12 +429,13 @@
 	),
 
 	TP_printk(
-		"d_name=%s gpuaddr=0x%08x size=%d type=%d ctx=%u"
+		"d_name=%s gpuaddr=0x%08x size=%d type=%d usage=%s ctx=%u"
 		" curr_ts=0x%x free_ts=0x%x",
 		__get_str(device_name),
 		__entry->gpuaddr,
 		__entry->size,
 		__entry->type,
+		__entry->usage,
 		__entry->drawctxt_id,
 		__entry->curr_ts,
 		__entry->free_ts
diff --git a/drivers/iommu/msm_iommu-v2.c b/drivers/iommu/msm_iommu-v2.c
index f49d009..9d88fdd 100644
--- a/drivers/iommu/msm_iommu-v2.c
+++ b/drivers/iommu/msm_iommu-v2.c
@@ -148,9 +148,9 @@
 	return ret;
 }
 
-static void __reset_iommu(void __iomem *base, int smt_size)
+static void __reset_iommu(void __iomem *base)
 {
-	int i;
+	int i, smt_size;
 
 	SET_ACR(base, 0);
 	SET_NSACR(base, 0);
@@ -162,6 +162,7 @@
 	SET_PMCR(base, 0);
 	SET_SCR1(base, 0);
 	SET_SSDR_N(base, 0, 0);
+	smt_size = GET_IDR0_NUMSMRG(base);
 
 	for (i = 0; i < smt_size; i++)
 		SET_SMR_VALID(base, i, 0);
@@ -169,11 +170,11 @@
 	mb();
 }
 
-static void __program_iommu(void __iomem *base, int smt_size,
+static void __program_iommu(void __iomem *base,
 			    struct msm_iommu_bfb_settings *bfb_settings)
 {
 	int i;
-	__reset_iommu(base, smt_size);
+	__reset_iommu(base);
 
 	SET_CR0_SMCFCFG(base, 1);
 	SET_CR0_USFCFG(base, 1);
@@ -208,9 +209,10 @@
 	mb();
 }
 
-static void __release_smg(void __iomem *base, int ctx, int smt_size)
+static void __release_smg(void __iomem *base, int ctx)
 {
-	int i;
+	int i, smt_size;
+	smt_size = GET_IDR0_NUMSMRG(base);
 
 	/* Invalidate any SMGs associated with this context */
 	for (i = 0; i < smt_size; i++)
@@ -221,14 +223,14 @@
 
 static void __program_context(void __iomem *base, int ctx, int ncb,
 				phys_addr_t pgtable, int redirect,
-				u32 *sids, int len, int smt_size)
+				u32 *sids, int len)
 {
 	unsigned int prrr, nmrr;
 	unsigned int pn;
-	int i, j, found, num = 0;
+	int i, j, found, num = 0, smt_size;
 
 	__reset_context(base, ctx);
-
+	smt_size = GET_IDR0_NUMSMRG(base);
 	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
 	SET_TTBCR(base, ctx, 0);
 	SET_CB_TTBR0_ADDR(base, ctx, pn);
@@ -435,13 +437,12 @@
 	}
 
 	if (!msm_iommu_ctx_attached(dev->parent))
-		__program_iommu(iommu_drvdata->base, iommu_drvdata->nsmr,
+		__program_iommu(iommu_drvdata->base,
 				iommu_drvdata->bfb_settings);
 
 	__program_context(iommu_drvdata->base, ctx_drvdata->num,
 		iommu_drvdata->ncb, __pa(priv->pt.fl_table),
-		priv->pt.redirect, ctx_drvdata->sids, ctx_drvdata->nsid,
-		iommu_drvdata->nsmr);
+		priv->pt.redirect, ctx_drvdata->sids, ctx_drvdata->nsid);
 	__disable_clocks(iommu_drvdata);
 
 	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
@@ -478,8 +479,7 @@
 		GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));
 
 	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
-	__release_smg(iommu_drvdata->base, ctx_drvdata->num,
-		      iommu_drvdata->nsmr);
+	__release_smg(iommu_drvdata->base, ctx_drvdata->num);
 
 	__disable_clocks(iommu_drvdata);
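
__reset_iommu(), __release_smg() and __program_context() now size the stream-mapping table from the hardware's IDR0 register via GET_IDR0_NUMSMRG() instead of the removed qcom,iommu-smt-size DT property. That macro comes from the msm_iommu register headers, which are not part of this diff; the sketch below only illustrates the idea of pulling the SMR group count out of IDR0, with the register offset and field mask stated as assumptions based on the ARM SMMU programming model.

/*
 * Illustration only: what "read the SMT size from IDR0" amounts to.
 * SMMU_IDR0_OFFSET and the 8-bit field mask are assumptions; the real
 * accessor is the GET_IDR0_NUMSMRG() macro from the msm_iommu headers.
 */
#include <linux/io.h>

#define SMMU_IDR0_OFFSET	0x20	/* assumed global register offset */
#define IDR0_NUMSMRG_MASK	0xff	/* assumed field width */

static inline int example_smt_size(void __iomem *base)
{
	u32 idr0 = readl_relaxed(base + SMMU_IDR0_OFFSET);

	return idr0 & IDR0_NUMSMRG_MASK;
}
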
 
diff --git a/drivers/iommu/msm_iommu_dev-v2.c b/drivers/iommu/msm_iommu_dev-v2.c
index 237d601..68612ba 100644
--- a/drivers/iommu/msm_iommu_dev-v2.c
+++ b/drivers/iommu/msm_iommu_dev-v2.c
@@ -94,7 +94,6 @@
 {
 	struct device_node *child;
 	int ret = 0;
-	u32 nsmr;
 
 	ret = device_move(&pdev->dev, &msm_iommu_root_dev->dev, DPM_ORDER_NONE);
 	if (ret)
@@ -104,18 +103,6 @@
 	if (ret)
 		goto fail;
 
-	ret = of_property_read_u32(pdev->dev.of_node, "qcom,iommu-smt-size",
-				   &nsmr);
-	if (ret)
-		goto fail;
-
-	if (nsmr > MAX_NUM_SMR) {
-		pr_err("Invalid SMT size: %d\n", nsmr);
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	drvdata->nsmr = nsmr;
 	for_each_child_of_node(pdev->dev.of_node, child) {
 		drvdata->ncb++;
 		if (!of_platform_device_create(child, NULL, &pdev->dev))
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
index 948676a..525835e9 100644
--- a/drivers/media/video/msm_vidc/msm_venc.c
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -626,7 +626,9 @@
 		}
 		*num_planes = 1;
 		spin_lock_irqsave(&inst->lock, flags);
-		*num_buffers = inst->buff_req.buffer[0].buffer_count_actual;
+		*num_buffers = inst->buff_req.buffer[0].buffer_count_actual =
+			max(*num_buffers,
+			    inst->buff_req.buffer[0].buffer_count_actual);
 		spin_unlock_irqrestore(&inst->lock, flags);
 		dprintk(VIDC_DBG, "size = %d, alignment = %d, count = %d\n",
 				inst->buff_req.buffer[0].buffer_size,
diff --git a/drivers/media/video/msm_vidc/msm_vidc_debug.c b/drivers/media/video/msm_vidc/msm_vidc_debug.c
index 7921f84..fa62988 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_debug.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_debug.c
@@ -104,6 +104,7 @@
 		dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
 		goto failed_create_dir;
 	}
+	msm_vidc_debug = 0x3;
 failed_create_dir:
 	return dir;
 }
diff --git a/drivers/media/video/msm_vidc/msm_vidc_debug.h b/drivers/media/video/msm_vidc/msm_vidc_debug.h
index b7928e9..f7aa742 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_debug.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_debug.h
@@ -18,19 +18,24 @@
 
 #define VIDC_DBG_TAG "msm_vidc: %d: "
 
+/* To enable messages, OR these values and
+ * echo the result to the debugfs file */
+
 enum vidc_msg_prio {
-	VIDC_ERR,
-	VIDC_WARN,
-	VIDC_INFO,
-	VIDC_DBG,
+	VIDC_ERR  = 0x0001,
+	VIDC_WARN = 0x0002,
+	VIDC_INFO = 0x0004,
+	VIDC_DBG  = 0x0008,
+	VIDC_PROF = 0x0010,
+	VIDC_FW   = 0x1000,
 };
 
 extern int msm_vidc_debug;
-#define dprintk(level, fmt, arg...)	\
-	do {							\
-		if (msm_vidc_debug >= level) \
-			printk(KERN_DEBUG VIDC_DBG_TAG fmt, \
-				level, ## arg); \
+#define dprintk(__level, __fmt, arg...)	\
+	do { \
+		if (msm_vidc_debug & __level) \
+			printk(KERN_DEBUG VIDC_DBG_TAG __fmt,\
+				__level, ## arg); \
 	} while (0)
 
 struct dentry *msm_vidc_debugfs_init_core(struct msm_vidc_core *core,
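
The priorities above are now independent bits rather than an ordered level, so dprintk() only fires when a message's bit is present in msm_vidc_debug; the debugfs init above sets 0x3 (errors plus warnings) as the default. A trivial stand-alone illustration of how the masks combine, not driver code:

/*
 * Stand-alone illustration of the new bitmask priorities.
 * The values mirror the enum above; nothing here is driver code.
 */
#include <stdio.h>

#define VIDC_ERR	0x0001
#define VIDC_WARN	0x0002
#define VIDC_INFO	0x0004
#define VIDC_DBG	0x0008

int main(void)
{
	unsigned int dflt = VIDC_ERR | VIDC_WARN;	/* 0x3, the new default */
	unsigned int verbose = dflt | VIDC_DBG;		/* 0xb */

	/* dprintk(VIDC_DBG, ...) only fires once VIDC_DBG is in the mask */
	printf("default=%#x verbose=%#x dbg_enabled=%d\n",
	       dflt, verbose, !!(verbose & VIDC_DBG));
	return 0;
}
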
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 0ea843c..e7e11d0 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -263,6 +263,7 @@
 	{{0x0, 0x0, 0x2, 0x1}, taiko_devs, ARRAY_SIZE(taiko_devs)},
 	{{0x0, 0x0, 0x0, 0x1}, sitar_devs, ARRAY_SIZE(sitar_devs)},
 	{{0x1, 0x0, 0x1, 0x1}, sitar_devs, ARRAY_SIZE(sitar_devs)},
+	{{0x2, 0x0, 0x1, 0x1}, sitar_devs, ARRAY_SIZE(sitar_devs)},
 };
 
 static void wcd9xxx_bring_up(struct wcd9xxx *wcd9xxx)
@@ -314,18 +315,17 @@
 					struct mfd_cell **wcd9xxx_dev,
 					int *wcd9xxx_dev_size)
 {
-	struct wcd9xx_codec_type *cdc = wcd9xxx_codecs;
-	int index;
+	int i;
 	int ret;
-	index = WCD9XXX_A_CHIP_ID_BYTE_0;
-	while (index <= WCD9XXX_A_CHIP_ID_BYTE_3) {
-		ret = wcd9xxx_reg_read(wcd9xxx, index);
+	i = WCD9XXX_A_CHIP_ID_BYTE_0;
+	while (i <= WCD9XXX_A_CHIP_ID_BYTE_3) {
+		ret = wcd9xxx_reg_read(wcd9xxx, i);
 		if (ret < 0)
 			goto exit;
-		wcd9xxx->idbyte[index-WCD9XXX_A_CHIP_ID_BYTE_0] = (u8)ret;
+		wcd9xxx->idbyte[i-WCD9XXX_A_CHIP_ID_BYTE_0] = (u8)ret;
 		pr_debug("%s: wcd9xx read = %x, byte = %x\n", __func__, ret,
-			index);
-		index++;
+			i);
+		i++;
 	}
 
 	/* Read codec version */
@@ -333,18 +333,19 @@
 	if (ret < 0)
 		goto exit;
 	wcd9xxx->version = (u8)ret & 0x1F;
-
-	while (cdc < (cdc + ARRAY_SIZE(wcd9xxx_codecs)) && cdc != NULL) {
-		if ((cdc->byte[0] == wcd9xxx->idbyte[0]) &&
-		    (cdc->byte[1] == wcd9xxx->idbyte[1]) &&
-		    (cdc->byte[2] == wcd9xxx->idbyte[2]) &&
-		    (cdc->byte[3] == wcd9xxx->idbyte[3])) {
-			pr_info("%s: codec is %s", __func__, cdc->dev->name);
-			*wcd9xxx_dev = cdc->dev;
-			*wcd9xxx_dev_size = cdc->size;
+	i = 0;
+	while (i < ARRAY_SIZE(wcd9xxx_codecs)) {
+		if ((wcd9xxx_codecs[i].byte[0] == wcd9xxx->idbyte[0]) &&
+		    (wcd9xxx_codecs[i].byte[1] == wcd9xxx->idbyte[1]) &&
+		    (wcd9xxx_codecs[i].byte[2] == wcd9xxx->idbyte[2]) &&
+		    (wcd9xxx_codecs[i].byte[3] == wcd9xxx->idbyte[3])) {
+			pr_info("%s: codec is %s", __func__,
+				wcd9xxx_codecs[i].dev->name);
+			*wcd9xxx_dev = wcd9xxx_codecs[i].dev;
+			*wcd9xxx_dev_size = wcd9xxx_codecs[i].size;
 			break;
 		}
-		cdc++;
+		i++;
 	}
 	if (*wcd9xxx_dev == NULL || *wcd9xxx_dev_size == 0)
 		ret = -ENODEV;
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index 68c4557..e9b2ef3 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -25,6 +25,8 @@
 #define BYTE_BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_BYTE))
 #define BIT_BYTE(nr)			((nr) / BITS_PER_BYTE)
 
+#define WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS 100
+
 struct wcd9xxx_irq {
 	bool level;
 };
@@ -106,11 +108,17 @@
 {
 	enum wcd9xxx_pm_state os;
 
-	/* wcd9xxx_{lock/unlock}_sleep will be called by wcd9xxx_irq_thread
+	/*
+	 * wcd9xxx_{lock/unlock}_sleep will be called by wcd9xxx_irq_thread
 	 * and its subroutines only, mostly.
 	 * but btn0_lpress_fn is not wcd9xxx_irq_thread's subroutine and
-	 * it can race with wcd9xxx_irq_thread.
-	 * so need to embrace wlock_holders with mutex.
+	 * It can race with wcd9xxx_irq_thread.
+	 * So wlock_holders needs to be protected with the mutex.
+	 *
+	 * If the system didn't resume, we can simply return false so the
+	 * codec driver's IRQ handler can return without handling the IRQ.
+	 * As the interrupt line is still active, the codec will get another
+	 * IRQ to retry shortly.
 	 */
 	mutex_lock(&wcd9xxx->pm_lock);
 	if (wcd9xxx->wlock_holders++ == 0) {
@@ -124,11 +132,11 @@
 						WCD9XXX_PM_AWAKE)) ==
 						    WCD9XXX_PM_SLEEPABLE ||
 			 (os == WCD9XXX_PM_AWAKE)),
-			5 * HZ)) {
-		pr_err("%s: system didn't resume within 5000ms, state %d, "
-		       "wlock %d\n", __func__, wcd9xxx->pm_state,
-		       wcd9xxx->wlock_holders);
-		WARN_ON(1);
+			msecs_to_jiffies(WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
+		pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
+			__func__,
+			WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx->pm_state,
+			wcd9xxx->wlock_holders);
 		wcd9xxx_unlock_sleep(wcd9xxx);
 		return false;
 	}
@@ -141,8 +149,14 @@
 {
 	mutex_lock(&wcd9xxx->pm_lock);
 	if (--wcd9xxx->wlock_holders == 0) {
-		wcd9xxx->pm_state = WCD9XXX_PM_SLEEPABLE;
-		pr_debug("%s: releasing wake lock\n", __func__);
+		pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
+			 __func__, wcd9xxx->pm_state, WCD9XXX_PM_SLEEPABLE);
+		/*
+		 * if wcd9xxx_lock_sleep failed, pm_state would be still
+		 * WCD9XXX_PM_ASLEEP, don't overwrite
+		 */
+		if (likely(wcd9xxx->pm_state == WCD9XXX_PM_AWAKE))
+			wcd9xxx->pm_state = WCD9XXX_PM_SLEEPABLE;
 		pm_qos_update_request(&wcd9xxx->pm_qos_req,
 				PM_QOS_DEFAULT_VALUE);
 	}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a9f1b53..989c53a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -115,6 +115,7 @@
 	unsigned int	part_curr;
 	struct device_attribute force_ro;
 	struct device_attribute power_ro_lock;
+	struct device_attribute num_wr_reqs_to_start_packing;
 	int	area_type;
 };
 
@@ -279,6 +280,38 @@
 	return ret;
 }
 
+static ssize_t
+num_wr_reqs_to_start_packing_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int num_wr_reqs_to_start_packing;
+	int ret;
+
+	num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
+
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t
+num_wr_reqs_to_start_packing_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	int value;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+	if (sscanf(buf, "%d", &value) == 1 && value >= 0)
+		md->queue.num_wr_reqs_to_start_packing = value;
+
+	mmc_blk_put(md);
+	return count;
+}
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -1324,6 +1357,48 @@
 	mmc_queue_bounce_pre(mqrq);
 }
 
+static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+					  struct request *req)
+{
+	struct mmc_host *host = mq->card->host;
+	int data_dir;
+
+	if (!(host->caps2 & MMC_CAP2_PACKED_WR))
+		return;
+
+	/*
+	 * If the packing control is not supported by the host, it should
+	 * have no effect on the write packing. Therefore we have to keep
+	 * the write packing enabled.
+	 */
+	if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
+		mq->wr_packing_enabled = true;
+		return;
+	}
+
+	if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
+		if (mq->num_of_potential_packed_wr_reqs >
+				mq->num_wr_reqs_to_start_packing)
+			mq->wr_packing_enabled = true;
+		return;
+	}
+
+	data_dir = rq_data_dir(req);
+
+	if (data_dir == READ) {
+		mq->num_of_potential_packed_wr_reqs = 0;
+		mq->wr_packing_enabled = false;
+		return;
+	} else if (data_dir == WRITE) {
+		mq->num_of_potential_packed_wr_reqs++;
+	}
+
+	if (mq->num_of_potential_packed_wr_reqs >
+			mq->num_wr_reqs_to_start_packing)
+		mq->wr_packing_enabled = true;
+
+}
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
 	struct request_queue *q = mq->queue;
@@ -1343,6 +1418,9 @@
 			!card->ext_csd.packed_event_en)
 		goto no_packed;
 
+	if (!mq->wr_packing_enabled)
+		goto no_packed;
+
 	if ((rq_data_dir(cur) == WRITE) &&
 			(card->host->caps2 & MMC_CAP2_PACKED_WR))
 		max_packed_rw = card->ext_csd.max_packed_writes;
@@ -1416,6 +1494,8 @@
 			break;
 		}
 
+		if (rq_data_dir(next) == WRITE)
+			mq->num_of_potential_packed_wr_reqs++;
 		list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
 		cur = next;
 		reqs++;
@@ -1797,6 +1877,8 @@
 		goto out;
 	}
 
+	mmc_blk_write_packing_control(mq, req);
+
 	if (req && req->cmd_flags & REQ_SANITIZE) {
 		/* complete ongoing async transfer before issuing sanitize */
 		if (card->host && card->host->areq)
@@ -2028,6 +2110,8 @@
 
 	if (md) {
 		card = md->queue.card;
+		device_remove_file(disk_to_dev(md->disk),
+				   &md->num_wr_reqs_to_start_packing);
 		if (md->disk->flags & GENHD_FL_UP) {
 			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
 			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -2095,6 +2179,16 @@
 			goto power_ro_lock_fail;
 	}
 
+	md->num_wr_reqs_to_start_packing.show =
+		num_wr_reqs_to_start_packing_show;
+	md->num_wr_reqs_to_start_packing.store =
+		num_wr_reqs_to_start_packing_store;
+	sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
+	md->num_wr_reqs_to_start_packing.attr.name =
+		"num_wr_reqs_to_start_packing";
+	md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(disk_to_dev(md->disk),
+				 &md->num_wr_reqs_to_start_packing);
 	if (ret)
 		goto power_ro_lock_fail;
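
Because the attribute is created on the gendisk device, the packing threshold appears as a writable file in the block device's sysfs directory. A minimal user-space sketch for tuning it follows; the mmcblk0 device name and the value written are assumptions for illustration.

/*
 * Hedged sketch: writing a new packing threshold from user space.
 * "mmcblk0" is an assumed device name; adjust for the target board.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/block/mmcblk0/num_wr_reqs_to_start_packing";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}

	/* Start packing once more than 32 back-to-back writes are queued */
	fprintf(f, "%d\n", 32);
	fclose(f);
	return 0;
}
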
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index d818fc4..f3692a9 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -25,6 +25,13 @@
 #define MMC_QUEUE_SUSPENDED	(1 << 0)
 
 /*
+ * The default number of requests that triggers write packing was determined
+ * from benchmark tests, chosen to keep read latency as low as possible while
+ * still maintaining high write throughput.
+ */
+#define DEFAULT_NUM_REQS_TO_START_PACK 17
+
+/*
  * Prepare a MMC request. This just filters out odd stuff.
  */
 static int mmc_prep_request(struct request_queue *q, struct request *req)
@@ -189,6 +196,7 @@
 	mq->mqrq_cur = mqrq_cur;
 	mq->mqrq_prev = mqrq_prev;
 	mq->queue->queuedata = mq;
+	mq->num_wr_reqs_to_start_packing = DEFAULT_NUM_REQS_TO_START_PACK;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 5e04938..93e4b59 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -45,6 +45,9 @@
 	struct mmc_queue_req	mqrq[2];
 	struct mmc_queue_req	*mqrq_cur;
 	struct mmc_queue_req	*mqrq_prev;
+	bool			wr_packing_enabled;
+	int			num_of_potential_packed_wr_reqs;
+	int			num_wr_reqs_to_start_packing;
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
diff --git a/drivers/platform/msm/qpnp-pwm.c b/drivers/platform/msm/qpnp-pwm.c
index 6f9af36..8809abe 100644
--- a/drivers/platform/msm/qpnp-pwm.c
+++ b/drivers/platform/msm/qpnp-pwm.c
@@ -856,7 +856,7 @@
 
 	return qpnp_lpg_save_and_write(value, mask,
 		&pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
-		lpg_config->base_addr, QPNP_RAMP_CONTROL, 1, chip);
+		lpg_config->base_addr, QPNP_ENABLE_CONTROL, 1, chip);
 }
 
 static int qpnp_disable_pwm(struct pwm_device *pwm)
@@ -873,7 +873,7 @@
 
 	return qpnp_lpg_save_and_write(value, mask,
 		&pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
-		lpg_config->base_addr, QPNP_RAMP_CONTROL, 1, chip);
+		lpg_config->base_addr, QPNP_ENABLE_CONTROL, 1, chip);
 }
 
 static int _pwm_config(struct pwm_device *pwm, int duty_us, int period_us)
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index e6e2f30..a9b0dbb 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -2332,12 +2332,6 @@
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t pm8921_bms_vsense_avg_handler(int irq, void *data)
-{
-	pr_debug("irq = %d triggered", irq);
-	return IRQ_HANDLED;
-}
-
 struct pm_bms_irq_init_data {
 	unsigned int	irq_id;
 	char		*name;
@@ -2366,8 +2360,6 @@
 				pm8921_bms_ocv_for_r_handler),
 	BMS_IRQ(PM8921_BMS_GOOD_OCV, IRQF_TRIGGER_RISING,
 				pm8921_bms_good_ocv_handler),
-	BMS_IRQ(PM8921_BMS_VSENSE_AVG, IRQF_TRIGGER_RISING,
-				pm8921_bms_vsense_avg_handler),
 };
 
 static void free_irqs(struct pm8921_bms_chip *chip)
diff --git a/drivers/usb/misc/ks_bridge.c b/drivers/usb/misc/ks_bridge.c
index 656e379..61e6891 100644
--- a/drivers/usb/misc/ks_bridge.c
+++ b/drivers/usb/misc/ks_bridge.c
@@ -48,6 +48,7 @@
 #define BOOT_BRIDGE_INDEX	0
 #define EFS_BRIDGE_INDEX	1
 #define MAX_DATA_PKT_SIZE	16384
+#define PENDING_URB_TIMEOUT	10
 
 struct ks_bridge {
 	char			*name;
@@ -58,7 +59,10 @@
 	struct list_head	to_mdm_list;
 	struct list_head	to_ks_list;
 	wait_queue_head_t	ks_wait_q;
+	wait_queue_head_t	pending_urb_wait;
 	struct miscdevice	*fs_dev;
+	atomic_t		tx_pending_cnt;
+	atomic_t		rx_pending_cnt;
 
 	/* usb specific */
 	struct usb_device	*udev;
@@ -207,13 +211,16 @@
 	dbg_log_event(ksb, "C TX_URB", urb->status, 0);
 	pr_debug("status:%d", urb->status);
 
-	if (ksb->ifc)
+	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
 		usb_autopm_put_interface_async(ksb->ifc);
 
 	if (urb->status < 0)
 		pr_err_ratelimited("urb failed with err:%d", urb->status);
 
 	ksb_free_data_pkt(pkt);
+
+	atomic_dec(&ksb->tx_pending_cnt);
+	wake_up(&ksb->pending_urb_wait);
 }
 
 static void ksb_tomdm_work(struct work_struct *w)
@@ -252,6 +259,7 @@
 
 		dbg_log_event(ksb, "S TX_URB", pkt->len, 0);
 
+		atomic_inc(&ksb->tx_pending_cnt);
 		ret = usb_submit_urb(urb, GFP_KERNEL);
 		if (ret) {
 			pr_err("out urb submission failed");
@@ -259,6 +267,8 @@
 			usb_free_urb(urb);
 			ksb_free_data_pkt(pkt);
 			usb_autopm_put_interface(ksb->ifc);
+			atomic_dec(&ksb->tx_pending_cnt);
+			wake_up(&ksb->pending_urb_wait);
 			return;
 		}
 
@@ -420,8 +430,15 @@
 			ksb_rx_cb, pkt);
 	usb_anchor_urb(urb, &ksb->submitted);
 
-	dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
+		usb_unanchor_urb(urb);
+		usb_free_urb(urb);
+		ksb_free_data_pkt(pkt);
+		ksb->alloced_read_pkts--;
+		return;
+	}
 
+	atomic_inc(&ksb->rx_pending_cnt);
 	ret = usb_submit_urb(urb, GFP_ATOMIC);
 	if (ret) {
 		pr_err("in urb submission failed");
@@ -429,9 +446,13 @@
 		usb_free_urb(urb);
 		ksb_free_data_pkt(pkt);
 		ksb->alloced_read_pkts--;
+		atomic_dec(&ksb->rx_pending_cnt);
+		wake_up(&ksb->pending_urb_wait);
 		return;
 	}
 
+	dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+
 	usb_free_urb(urb);
 }
 static void ksb_rx_cb(struct urb *urb)
@@ -454,7 +475,7 @@
 					urb->status);
 		ksb_free_data_pkt(pkt);
 		ksb->alloced_read_pkts--;
-		return;
+		goto done;
 	}
 
 	if (urb->actual_length == 0) {
@@ -474,7 +495,9 @@
 
 resubmit_urb:
 	submit_one_urb(ksb);
-
+done:
+	atomic_dec(&ksb->rx_pending_cnt);
+	wake_up(&ksb->pending_urb_wait);
 }
 
 static void ksb_start_rx_work(struct work_struct *w)
@@ -487,6 +510,10 @@
 	int ret;
 
 	for (i = 0; i < NO_RX_REQS; i++) {
+
+		if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+			return;
+
 		pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
 		if (IS_ERR(pkt)) {
 			pr_err("unable to allocate data pkt");
@@ -516,6 +543,7 @@
 
 		dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
 
+		atomic_inc(&ksb->rx_pending_cnt);
 		ret = usb_submit_urb(urb, GFP_KERNEL);
 		if (ret) {
 			pr_err("in urb submission failed");
@@ -524,6 +552,8 @@
 			ksb_free_data_pkt(pkt);
 			ksb->alloced_read_pkts--;
 			usb_autopm_put_interface(ksb->ifc);
+			atomic_dec(&ksb->rx_pending_cnt);
+			wake_up(&ksb->pending_urb_wait);
 			return;
 		}
 
@@ -590,6 +620,8 @@
 
 	usb_set_intfdata(ifc, ksb);
 	set_bit(USB_DEV_CONNECTED, &ksb->flags);
+	atomic_set(&ksb->tx_pending_cnt, 0);
+	atomic_set(&ksb->rx_pending_cnt, 0);
 
 	dbg_log_event(ksb, "PID-ATT", id->idProduct, 0);
 
@@ -640,9 +672,16 @@
 	clear_bit(USB_DEV_CONNECTED, &ksb->flags);
 	wake_up(&ksb->ks_wait_q);
 	cancel_work_sync(&ksb->to_mdm_work);
+	cancel_work_sync(&ksb->start_rx_work);
 
 	usb_kill_anchored_urbs(&ksb->submitted);
 
+	wait_event_interruptible_timeout(
+					ksb->pending_urb_wait,
+					!atomic_read(&ksb->tx_pending_cnt) &&
+					!atomic_read(&ksb->rx_pending_cnt),
+					msecs_to_jiffies(PENDING_URB_TIMEOUT));
+
 	spin_lock_irqsave(&ksb->lock, flags);
 	while (!list_empty(&ksb->to_ks_list)) {
 		pkt = list_first_entry(&ksb->to_ks_list,
@@ -741,6 +780,7 @@
 		INIT_LIST_HEAD(&ksb->to_mdm_list);
 		INIT_LIST_HEAD(&ksb->to_ks_list);
 		init_waitqueue_head(&ksb->ks_wait_q);
+		init_waitqueue_head(&ksb->pending_urb_wait);
 		ksb->wq = create_singlethread_workqueue(ksb->name);
 		if (!ksb->wq) {
 			pr_err("unable to allocate workqueue");
diff --git a/drivers/video/msm/mddi_quickvx.c b/drivers/video/msm/mddi_quickvx.c
index 95e7d41..37c147d 100644
--- a/drivers/video/msm/mddi_quickvx.c
+++ b/drivers/video/msm/mddi_quickvx.c
@@ -263,22 +263,10 @@
 
 int ql_mddi_write(uint32 address, uint32 value)
 {
-	uint32 regval = 0;
 	int ret = 0;
 
 	ret = mddi_queue_register_write(address, value, TRUE, 0);
 
-	if (!ret) {
-		ret = mddi_queue_register_read(address, &regval, TRUE, 0);
-		if (regval != value) {
-			MDDI_MSG_DEBUG("\nMismatch: ql_mddi_write[0x%x]->0x%x "
-				"r0x%x\n", address, value, regval);
-		} else {
-			MDDI_MSG_DEBUG("\nMatch: ql_mddi_write[0x%x]->0x%x "
-				"r0x%x\n", address, value, regval);
-		}
-	}
-
 	return ret;
 }
 
@@ -294,8 +282,6 @@
 
 int ql_send_spi_cmd_to_lcd(uint32 index, uint32 cmd)
 {
-	int retry, ret;
-	uint32 readval;
 
 	MDDI_MSG_DEBUG("\n %s(): index 0x%x, cmd 0x%x", __func__, index, cmd);
 	/* do the index phase */
@@ -308,18 +294,6 @@
 
 	/* set start */
 	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
-	retry = 0;
-
-	do {
-		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
-
-		if (ret || ++retry > 5) {
-			MDDI_MSG_DEBUG("\n ql_send_spi_cmd_to_lcd: retry "
-				"timeout at index phase, ret = %d", ret);
-			return -EIO;
-		}
-		mddi_wait(1);
-	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
 
 	/* do the command phase */
 	/* send 24 bits in the cmd phase */
@@ -331,18 +305,6 @@
 
 	/* set start */
 	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
-	retry = 0;
-
-	do {
-		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
-
-		if (ret || ++retry > 5) {
-			MDDI_MSG_DEBUG("\n ql_send_spi_cmd_to_lcd: retry "
-				"timeout at cmd phase, ret = %d", ret);
-			return -EIO;
-		}
-		mddi_wait(1);
-	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
 
 	return 0;
 }
@@ -350,8 +312,6 @@
 
 int ql_send_spi_data_from_lcd(uint32 index, uint32 *value)
 {
-	int retry, ret;
-	uint32 readval;
 
 	MDDI_MSG_DEBUG("\n %s(): index 0x%x", __func__, index);
 	/* do the index phase */
@@ -364,19 +324,6 @@
 
 	/* set start */
 	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
-	retry = 0;
-
-	do {
-		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
-
-		if (ret || ++retry > 5) {
-			MDDI_MSG_DEBUG("\n ql_send_spi_cmd_to_lcd: retry "
-				"timeout at index phase, ret = %d", ret);
-			return -EIO;
-		}
-		mddi_wait(1);
-	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
-
 	/* do the command phase */
 	/* send 8 bits  and read 24 bits in the cmd phase, so total 32 bits */
 	ql_mddi_write(QUICKVX_SPI_TLEN_REG, 31);
@@ -387,29 +334,9 @@
 
 	/* set start */
 	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
-	retry = 0;
 
-	do {
-		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
+	return 0;
 
-		if (ret || ++retry > 5) {
-			MDDI_MSG_DEBUG("\n ql_send_spi_cmd_to_lcd: retry "
-				"timeout at cmd phase, ret = %d", ret);
-			return -EIO;
-		}
-		mddi_wait(1);
-	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
-
-	/* value will appear at lower 16 bits */
-	ret = ql_mddi_read(QUICKVX_SPI_RX0_REG, value);
-
-	if (!ret) {
-		*value = *value & 0xffff;
-		MDDI_MSG_DEBUG("\n QUICKVX_SPI_RX0_REG value = 0x%x", *value);
-	} else
-		MDDI_MSG_DEBUG("\n Read QUICKVX_SPI_RX0_REG Failed");
-
-	return ret;
 }
 
 /* Global Variables */
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 6c0d08d..f8ccee9 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -2214,6 +2214,8 @@
 			mfd->panel.type == LCDC_PANEL ||
 			mfd->panel.type == LVDS_PANEL)
 		mdp4_lcdc_off(pdev);
+	else if (mfd->panel.type == MDDI_PANEL)
+		mdp4_mddi_off(pdev);
 
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 	ret = panel_next_off(pdev);
@@ -2268,6 +2270,9 @@
 				mfd->panel.type == LCDC_PANEL ||
 				mfd->panel.type == LVDS_PANEL) {
 			mdp4_lcdc_on(pdev);
+		} else if (mfd->panel.type == MDDI_PANEL) {
+			mdp_vsync_cfg_regs(mfd, FALSE);
+			mdp4_mddi_on(pdev);
 		}
 
 		mdp_clk_ctrl(0);
@@ -2706,6 +2711,9 @@
 			  mdp_vsync_resync_workqueue_handler);
 		mfd->hw_refresh = FALSE;
 
+		if (mfd->panel.type == MDDI_PANEL)
+			mdp4_mddi_rdptr_init(0);
+
 		if (mfd->panel.type == EXT_MDDI_PANEL) {
 			/* 15 fps -> 66 msec */
 			mfd->refresh_timer_duration = (66 * HZ / 1000);
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index d939c62..b4a7f79 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -796,6 +796,10 @@
 {
 	return 0;
 }
+static inline int mdp4_mddi_off(struct platform_device *pdev)
+{
+	return 0;
+}
 static inline int mdp4_dsi_cmd_on(struct platform_device *pdev)
 {
 	return 0;
@@ -808,6 +812,19 @@
 {
 	return 0;
 }
+static inline int mdp4_mddi_on(struct platform_device *pdev)
+{
+	return 0;
+}
+#endif
+
+
+#ifndef CONFIG_FB_MSM_MDDI
+static inline void mdp4_mddi_rdptr_init(int cndx)
+{
+	/* empty */
+}
+
 #endif
 
 void set_cont_splashScreen_status(int);
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index 180a18a..67a0429 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -596,9 +596,10 @@
 int mdp4_overlay_pipe_staged(int mixer);
 void mdp4_lcdc_primary_vsyn(void);
 void mdp4_overlay0_done_lcdc(int cndx);
-void mdp4_overlay0_done_mddi(struct mdp_dma_data *dma);
+void mdp4_overlay0_done_mddi(int cndx);
 void mdp4_dma_p_done_mddi(struct mdp_dma_data *dma);
 void mdp4_dmap_done_dsi_cmd(int cndx);
+void mdp4_dmap_done_mddi(int cndx);
 void mdp4_dmap_done_dsi_video(int cndx);
 void mdp4_dmap_done_lcdc(int cndx);
 void mdp4_overlay1_done_dtv(void);
@@ -672,6 +673,12 @@
 void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd,
 					struct msmfb_overlay_blt *req);
 void mdp4_dsi_video_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
+static inline void mdp4_mddi_blt_start(struct msm_fb_data_type *mfd)
+{
+}
+static inline void mdp4_mddi_blt_stop(struct msm_fb_data_type *mfd)
+{
+}
 
 #ifdef CONFIG_FB_MSM_MDP40
 static inline void mdp3_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
@@ -680,6 +687,8 @@
 }
 #endif
 #else     /* CONFIG_FB_MSM_MIPI_DSI */
+void mdp4_mddi_blt_start(struct msm_fb_data_type *mfd);
+void mdp4_mddi_blt_stop(struct msm_fb_data_type *mfd);
 int mdp4_mddi_overlay_blt_offset(struct msm_fb_data_type *mfd,
 					struct msmfb_overlay_blt *req);
 void mdp4_mddi_overlay_blt(struct msm_fb_data_type *mfd,
@@ -687,6 +696,7 @@
 int mdp4_mddi_overlay_blt_start(struct msm_fb_data_type *mfd);
 int mdp4_mddi_overlay_blt_stop(struct msm_fb_data_type *mfd);
 void mdp4_mddi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd);
+void mdp4_mddi_rdptr_init(int cndx);
 static inline int mdp4_dsi_overlay_blt_start(struct msm_fb_data_type *mfd)
 {
 	return -ENODEV;
@@ -776,11 +786,37 @@
 {
 	/* empty */
 }
-#else /* CONFIG_FB_MSM_MIPI_DSI */
+#else /* CONFIG_FB_MSM_MDP303 */
 void mdp4_dsi_cmd_del_timer(void);
+static inline int mdp4_mddi_on(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline int mdp4_mddi_off(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline void mdp4_mddi_wait4vsync(int cndx, long long *vtime)
+{
+}
+static inline void mdp4_mddi_vsync_ctrl(struct fb_info *info,
+				int enable)
+{
+}
+static inline void mdp4_mddi_pipe_queue(int cndx,
+			struct mdp4_overlay_pipe *pipe)
+{
+}
 #endif
 #else  /* CONFIG_FB_MSM_MIPI_DSI */
 
+int mdp4_mddi_off(struct platform_device *pdev);
+int mdp4_mddi_on(struct platform_device *pdev);
+void mdp4_mddi_wait4vsync(int cndx, long long *vtime);
+void mdp4_mddi_vsync_ctrl(struct fb_info *info, int enable);
+void mdp4_mddi_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_overlay_update_mddi(struct msm_fb_data_type *mfd);
+
 static inline int mdp4_dsi_cmd_on(struct platform_device *pdev)
 {
 	return 0;
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 8499e3e..2188704 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -2868,6 +2868,8 @@
 				mdp4_dsi_video_blt_start(mfd);
 			else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
 				mdp4_dsi_cmd_blt_start(mfd);
+			else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+				mdp4_mddi_blt_start(mfd);
 			pr_info("%s mixer0 start blt [%d] from %d to %d.\n",
 				__func__,
 				flag,
@@ -2916,6 +2918,8 @@
 				mdp4_dsi_video_blt_stop(mfd);
 			else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
 				mdp4_dsi_cmd_blt_stop(mfd);
+			else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+				mdp4_mddi_blt_stop(mfd);
 			pr_info("%s mixer0 stop blt [%d] from %d to %d.\n",
 				__func__,
 				flag,
@@ -3180,20 +3184,14 @@
 	else {
 		/* mixer 0 */
 		ctrl->mixer0_played = 0;
-		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
-			if (mfd->panel_power_on)
-				mdp4_mddi_blt_dmap_busy_wait(mfd);
-		}
+
 	}
 
 	mdp4_overlay_reg_flush(pipe, 1);
 	mdp4_mixer_stage_down(pipe, 0);
 
 	if (pipe->mixer_num == MDP4_MIXER0) {
-		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
-			if (mfd->panel_power_on)
-				mdp4_mddi_overlay_restore();
-		}
+
 	} else {	/* mixer1, DTV, ATV */
 		if (ctrl->panel_mode & MDP4_PANEL_DTV)
 			mdp4_overlay_dtv_unset(mfd, pipe);
@@ -3217,6 +3215,8 @@
 			mdp4_dsi_cmd_wait4vsync(0, vtime);
 		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
 			mdp4_lcdc_wait4vsync(0, vtime);
+		else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+			mdp4_mddi_wait4vsync(0, vtime);
 	} else if (hdmi_prim_display || info->node == 1) {
 		mdp4_dtv_wait4vsync(0, vtime);
 	}
@@ -3240,6 +3240,8 @@
 			mdp4_dsi_cmd_vsync_ctrl(info, cmd);
 		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
 			mdp4_lcdc_vsync_ctrl(info, cmd);
+		else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+			mdp4_mddi_vsync_ctrl(info, cmd);
 	} else if (hdmi_prim_display || info->node == 1)
 		mdp4_dtv_vsync_ctrl(info, cmd);
 
@@ -3460,9 +3462,6 @@
 
 	mdp4_overlay_mdp_perf_req(mfd, ctrl->plist);
 
-	if (pipe->mixer_num == MDP4_MIXER2 ||
-				ctrl->panel_mode & MDP4_PANEL_MDDI)
-		goto mddi;
 
 	if (pipe->mixer_num == MDP4_MIXER0) {
 		if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
@@ -3475,43 +3474,35 @@
 			/* cndx = 0 */
 			mdp4_lcdc_pipe_queue(0, pipe);
 		}
+		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+			/* cndx = 0 */
+			mdp4_mddi_pipe_queue(0, pipe);
+		}
 	} else if (pipe->mixer_num == MDP4_MIXER1) {
 		if (ctrl->panel_mode & MDP4_PANEL_DTV)
 			mdp4_dtv_pipe_queue(0, pipe);/* cndx = 0 */
-	}
+	} else if (pipe->mixer_num == MDP4_MIXER2) {
 
-	mutex_unlock(&mfd->dma->ov_mutex);
-	return ret;
+		if (pipe->pipe_type == OVERLAY_TYPE_VIDEO)
+			mdp4_overlay_vg_setup(pipe);  /* video/graphic pipe */
+		else
+			mdp4_overlay_rgb_setup(pipe);	/* rgb pipe */
 
-mddi:
-	if (pipe->pipe_type == OVERLAY_TYPE_VIDEO) {
-		mdp4_overlay_vg_setup(pipe);    /* video/graphic pipe */
-	} else {
-		mdp4_overlay_rgb_setup(pipe);	/* rgb pipe */
-	}
+		mdp4_mixer_stage_up(pipe, 0);
 
-	mdp4_mixer_stage_up(pipe, 0);
-
-	if (pipe->mixer_num == MDP4_MIXER2) {
 		ctrl->mixer2_played++;
 		if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK) {
 			mdp4_writeback_dma_busy_wait(mfd);
 			mdp4_writeback_kickoff_video(mfd, pipe);
 		}
-	} else if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
-		if (pipe->flags & MDP_OV_PLAY_NOWAIT) {
-			mdp4_stat.overlay_play[pipe->mixer_num]++;
-			mutex_unlock(&mfd->dma->ov_mutex);
-			goto end;
-		}
-		mdp4_mixer_stage_commit(pipe->mixer_num);
-		mdp4_mddi_dma_busy_wait(mfd);
-		mdp4_mddi_kickoff_video(mfd, pipe);
+
+		if (!(pipe->flags & MDP_OV_PLAY_NOWAIT))
+			mdp4_iommu_unmap(pipe);
+		mdp4_stat.overlay_play[pipe->mixer_num]++;
 	}
 
-	if (!(pipe->flags & MDP_OV_PLAY_NOWAIT))
-		mdp4_iommu_unmap(pipe);
-	mdp4_stat.overlay_play[pipe->mixer_num]++;
+	mutex_unlock(&mfd->dma->ov_mutex);
+	return ret;
 
 end:
 	mutex_unlock(&mfd->dma->ov_mutex);
diff --git a/drivers/video/msm/mdp4_overlay_mddi.c b/drivers/video/msm/mdp4_overlay_mddi.c
index be4a89a..ca84eca 100644
--- a/drivers/video/msm/mdp4_overlay_mddi.c
+++ b/drivers/video/msm/mdp4_overlay_mddi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009-2012, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,38 +17,680 @@
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/hrtimer.h>
 #include <linux/delay.h>
-#include <mach/hardware.h>
 #include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
 #include <linux/semaphore.h>
 #include <linux/spinlock.h>
-
 #include <linux/fb.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
 
 #include "mdp.h"
 #include "msm_fb.h"
 #include "mdp4.h"
 
-static struct mdp4_overlay_pipe *mddi_pipe;
-static struct msm_fb_data_type *mddi_mfd;
-static int busy_wait_cnt;
+static int mddi_state;
+
+#define TOUT_PERIOD	HZ	/* 1 second */
+#define MS_100		(HZ/10)	/* 100 ms */
 
 static int vsync_start_y_adjust = 4;
 
-static int dmap_vsync_enable;
+#define MAX_CONTROLLER	1
+#define VSYNC_EXPIRE_TICK 8
 
-void mdp_dmap_vsync_set(int enable)
+static struct vsycn_ctrl {
+	struct device *dev;
+	int inited;
+	int update_ndx;
+	int expire_tick;
+	int blt_wait;
+	u32 ov_koff;
+	u32 ov_done;
+	u32 dmap_koff;
+	u32 dmap_done;
+	uint32 rdptr_intr_tot;
+	uint32 rdptr_sirq_tot;
+	atomic_t suspend;
+	int wait_vsync_cnt;
+	int blt_change;
+	int blt_free;
+	int blt_end;
+	int uevent;
+	struct mutex update_lock;
+	struct completion ov_comp;
+	struct completion dmap_comp;
+	struct completion vsync_comp;
+	spinlock_t spin_lock;
+	struct msm_fb_data_type *mfd;
+	struct mdp4_overlay_pipe *base_pipe;
+	struct vsync_update vlist[2];
+	int vsync_enabled;
+	int clk_enabled;
+	int clk_control;
+	int new_update;
+	ktime_t vsync_time;
+	struct work_struct vsync_work;
+	struct work_struct clk_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+static void vsync_irq_enable(int intr, int term)
 {
-	dmap_vsync_enable = enable;
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	/* no need to clear other interrupts for command mode */
+	mdp_intr_mask |= intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_enable_irq(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 }
 
-int mdp_dmap_vsync_get(void)
+static void vsync_irq_disable(int intr, int term)
 {
-	return dmap_vsync_enable;
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	/* no need to clear other interrupts for command mode */
+	mdp_intr_mask &= ~intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_disable_irq_nosync(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+static void mdp4_mddi_blt_ov_update(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 off, addr;
+	int bpp;
+	char *overlay_base;
+
+	if (pipe->ov_blt_addr == 0)
+		return;
+
+	bpp = 3; /* overlay output is RGB888 */
+	off = 0;
+	if (pipe->ov_cnt & 0x01)
+		off = pipe->src_height * pipe->src_width * bpp;
+	addr = pipe->ov_blt_addr + off;
+	/* overlay 0 */
+	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
+	outpdw(overlay_base + 0x000c, addr);
+	outpdw(overlay_base + 0x001c, addr);
+}
+
+static void mdp4_mddi_blt_dmap_update(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 off, addr;
+	int bpp;
+
+	if (pipe->ov_blt_addr == 0)
+		return;
+
+	bpp = 3; /* overlay output is RGB888 */
+	off = 0;
+	if (pipe->dmap_cnt & 0x01)
+		off = pipe->src_height * pipe->src_width * bpp;
+	addr = pipe->dma_blt_addr + off;
+
+	/* dmap */
+	MDP_OUTP(MDP_BASE + 0x90008, addr);
+}
+
+static void mdp4_mddi_wait4dmap(int cndx);
+static void mdp4_mddi_wait4ov(int cndx);
+
+static void mdp4_mddi_do_blt(struct msm_fb_data_type *mfd, int enable)
+{
+	unsigned long flags;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	int need_wait = 0;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
+
+	if (mfd->ov0_wb_buf->write_addr == 0) {
+		pr_err("%s: no blt_base assigned\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (enable && pipe->ov_blt_addr == 0) {
+		vctrl->blt_change++;
+		if (vctrl->dmap_koff != vctrl->dmap_done) {
+			INIT_COMPLETION(vctrl->dmap_comp);
+			need_wait = 1;
+		}
+	} else if (enable == 0 && pipe->ov_blt_addr) {
+		vctrl->blt_change++;
+		if (vctrl->ov_koff != vctrl->dmap_done) {
+			INIT_COMPLETION(vctrl->dmap_comp);
+			need_wait = 1;
+		}
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	if (need_wait)
+		mdp4_mddi_wait4dmap(0);
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (enable && pipe->ov_blt_addr == 0) {
+		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+		pipe->ov_cnt = 0;
+		pipe->dmap_cnt = 0;
+		vctrl->ov_koff = vctrl->dmap_koff;
+		vctrl->ov_done = vctrl->dmap_done;
+		vctrl->blt_free = 0;
+		vctrl->blt_wait = 0;
+		vctrl->blt_end = 0;
+		mdp4_stat.blt_mddi++;
+	} else if (enable == 0 && pipe->ov_blt_addr) {
+		pipe->ov_blt_addr = 0;
+		pipe->dma_blt_addr =  0;
+		vctrl->blt_end = 1;
+		vctrl->blt_free = 4;	/* 4 commits to free wb buf */
+	}
+
+	pr_debug("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
+		vctrl->blt_change, enable, (int)pipe->ov_blt_addr);
+
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+}
+
+/*
+ * mdp4_mddi_pipe_queue:
+ * called from thread context
+ */
+void mdp4_mddi_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pp;
+	int undx;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+
+	pp = &vp->plist[pipe->pipe_ndx - 1];	/* ndx starts from 1 */
+
+	pr_debug("%s: vndx=%d pipe_ndx=%d expire=%x pid=%d\n", __func__,
+		undx, pipe->pipe_ndx, vctrl->expire_tick, current->pid);
+
+	*pp = *pipe;	/* clone it */
+	vp->update_cnt++;
+
+	mutex_unlock(&vctrl->update_lock);
+	mdp4_stat.overlay_play[pipe->mixer_num]++;
+}
+
+static void mdp4_mddi_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+
+int mdp4_mddi_pipe_commit(void)
+{
+	int  i, undx;
+	int mixer = 0;
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pipe;
+	struct mdp4_overlay_pipe *real_pipe;
+	unsigned long flags;
+	int need_dmap_wait = 0;
+	int need_ov_wait = 0;
+	int cnt = 0;
+
+	vctrl = &vsync_ctrl_db[0];
+
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+	pipe = vctrl->base_pipe;
+	mixer = pipe->mixer_num;
+
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return cnt;
+	}
+
+	vctrl->update_ndx++;
+	vctrl->update_ndx &= 0x01;
+	vp->update_cnt = 0;     /* reset */
+	if (vctrl->blt_free) {
+		vctrl->blt_free--;
+		if (vctrl->blt_free == 0)
+			mdp4_free_writeback_buf(vctrl->mfd, mixer);
+	}
+	mutex_unlock(&vctrl->update_lock);
+
+	/* free previous committed iommu back to pool */
+	mdp4_overlay_iommu_unmap_freelist(mixer);
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (pipe->ov_blt_addr) {
+		/* Blt */
+		if (vctrl->blt_wait)
+			need_dmap_wait = 1;
+		if (vctrl->ov_koff != vctrl->ov_done) {
+			INIT_COMPLETION(vctrl->ov_comp);
+			need_ov_wait = 1;
+		}
+	} else {
+		/* direct out */
+		if (vctrl->dmap_koff != vctrl->dmap_done) {
+			INIT_COMPLETION(vctrl->dmap_comp);
+			pr_debug("%s: wait, ok=%d od=%d dk=%d dd=%d cpu=%d\n",
+			 __func__, vctrl->ov_koff, vctrl->ov_done,
+			vctrl->dmap_koff, vctrl->dmap_done, smp_processor_id());
+			need_dmap_wait = 1;
+		}
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	if (need_dmap_wait) {
+		pr_debug("%s: wait4dmap\n", __func__);
+		mdp4_mddi_wait4dmap(0);
+	}
+
+	if (need_ov_wait) {
+		pr_debug("%s: wait4ov\n", __func__);
+		mdp4_mddi_wait4ov(0);
+	}
+
+	if (pipe->ov_blt_addr) {
+		if (vctrl->blt_end) {
+			vctrl->blt_end = 0;
+			pipe->ov_blt_addr = 0;
+			pipe->dma_blt_addr =  0;
+		}
+	}
+
+	if (vctrl->blt_change) {
+		mdp4_overlayproc_cfg(pipe);
+		mdp4_overlay_dmap_xy(pipe);
+		vctrl->blt_change = 0;
+	}
+
+	pipe = vp->plist;
+	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+		if (pipe->pipe_used) {
+			cnt++;
+			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
+			if (real_pipe && real_pipe->pipe_used) {
+				/* pipe not unset */
+				mdp4_overlay_vsync_commit(pipe);
+			}
+			/* free previous iommu to freelist
+			 * which will be freed at next
+			 * pipe_commit
+			 */
+			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+			pipe->pipe_used = 0; /* clear */
+		}
+	}
+
+	mdp4_mixer_stage_commit(mixer);
+
+	pipe = vctrl->base_pipe;
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (pipe->ov_blt_addr) {
+		mdp4_mddi_blt_ov_update(pipe);
+		pipe->ov_cnt++;
+		vctrl->ov_koff++;
+		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+	} else {
+		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+		vctrl->dmap_koff++;
+	}
+	pr_debug("%s: kickoff\n", __func__);
+	/* kickoff overlay engine */
+	mdp4_stat.kickoff_ov0++;
+	outpdw(MDP_BASE + 0x0004, 0);
+	mb(); /* make sure kickoff executed */
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	mdp4_stat.overlay_commit[pipe->mixer_num]++;
+
+	return cnt;
+}
+
+void mdp4_mddi_vsync_ctrl(struct fb_info *info, int enable)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct vsycn_ctrl *vctrl;
+	unsigned long flags;
+	int clk_set_on = 0;
+	int cndx = 0;
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	pr_debug("%s: clk_enabled=%d vsync_enabled=%d req=%d\n", __func__,
+		vctrl->clk_enabled, vctrl->vsync_enabled, enable);
+
+	mutex_lock(&vctrl->update_lock);
+
+	if (vctrl->vsync_enabled == enable) {
+		mutex_unlock(&vctrl->update_lock);
+		return;
+	}
+
+	vctrl->vsync_enabled = enable;
+
+	if (enable) {
+		if (vctrl->clk_enabled == 0) {
+			pr_debug("%s: SET_CLK_ON\n", __func__);
+			mdp_clk_ctrl(1);
+			vctrl->clk_enabled = 1;
+			clk_set_on = 1;
+		}
+		spin_lock_irqsave(&vctrl->spin_lock, flags);
+		vctrl->clk_control = 0;
+		vctrl->expire_tick = 0;
+		vctrl->uevent = 1;
+		vctrl->new_update = 1;
+		if (clk_set_on) {
+			vsync_irq_enable(INTR_PRIMARY_RDPTR,
+						MDP_PRIM_RDPTR_TERM);
+		}
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+		mdp4_overlay_update_mddi(mfd);
+	} else {
+		spin_lock_irqsave(&vctrl->spin_lock, flags);
+		vctrl->clk_control = 1;
+		vctrl->uevent = 0;
+		if (vctrl->clk_enabled)
+			vctrl->expire_tick = VSYNC_EXPIRE_TICK;
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+	}
+	mutex_unlock(&vctrl->update_lock);
+}
+
+void mdp4_mddi_wait4vsync(int cndx, long long *vtime)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	if (atomic_read(&vctrl->suspend) > 0) {
+		*vtime = -1;
+		return;
+	}
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (vctrl->wait_vsync_cnt == 0)
+		INIT_COMPLETION(vctrl->vsync_comp);
+	vctrl->wait_vsync_cnt++;
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	wait_for_completion(&vctrl->vsync_comp);
+	mdp4_stat.wait4vsync0++;
+
+	*vtime = ktime_to_ns(vctrl->vsync_time);
+}
+
+static void mdp4_mddi_wait4dmap(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->dmap_comp);
+}
+
+static void mdp4_mddi_wait4ov(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->ov_comp);
+}
+
+/*
+ * primary_rdptr_isr:
+ * called from interrupt context
+ */
+static void primary_rdptr_isr(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pr_debug("%s: ISR, cpu=%d\n", __func__, smp_processor_id());
+	vctrl->rdptr_intr_tot++;
+	vctrl->vsync_time = ktime_get();
+
+	spin_lock(&vctrl->spin_lock);
+
+	if (vctrl->uevent)
+		schedule_work(&vctrl->vsync_work);
+
+	if (vctrl->wait_vsync_cnt) {
+		complete(&vctrl->vsync_comp);
+		vctrl->wait_vsync_cnt = 0;
+	}
+
+	if (vctrl->expire_tick) {
+		vctrl->expire_tick--;
+		if (vctrl->expire_tick == 0)
+			schedule_work(&vctrl->clk_work);
+	}
+	spin_unlock(&vctrl->spin_lock);
+}
+
+void mdp4_dmap_done_mddi(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	int diff;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	/* blt enabled */
+	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+	vctrl->dmap_done++;
+	diff = vctrl->ov_done - vctrl->dmap_done;
+	pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n",
+		__func__, vctrl->ov_koff, vctrl->ov_done, vctrl->dmap_koff,
+		vctrl->dmap_done, smp_processor_id());
+	complete_all(&vctrl->dmap_comp);
+	if (diff <= 0) {
+		if (vctrl->blt_wait)
+			vctrl->blt_wait = 0;
+		spin_unlock(&vctrl->spin_lock);
+		return;
+	}
+
+	/* kick dmap */
+	mdp4_mddi_blt_dmap_update(pipe);
+	pipe->dmap_cnt++;
+	mdp4_stat.kickoff_dmap++;
+	vctrl->dmap_koff++;
+	vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+	outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
+	mb(); /* make sure kickoff executed */
+	spin_unlock(&vctrl->spin_lock);
+}
+
+/*
+ * mdp4_overlay0_done_mddi: called from isr
+ */
+void mdp4_overlay0_done_mddi(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	int diff;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+	vctrl->ov_done++;
+	complete_all(&vctrl->ov_comp);
+	diff = vctrl->ov_done - vctrl->dmap_done;
+
+	pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n",
+		__func__, vctrl->ov_koff, vctrl->ov_done, vctrl->dmap_koff,
+		vctrl->dmap_done, smp_processor_id());
+
+	if (pipe->ov_blt_addr == 0) {
+		/* blt disabled */
+		spin_unlock(&vctrl->spin_lock);
+		return;
+	}
+
+	if (diff > 1) {
+		/*
+		 * two overlay_done interrupts but no dmap_done yet;
+		 * let dmap_done kick off dmap
+		 * and make pipe_commit wait
+		 */
+		vctrl->blt_wait = 1;
+		pr_debug("%s: blt_wait set\n", __func__);
+		spin_unlock(&vctrl->spin_lock);
+		return;
+	}
+	mdp4_mddi_blt_dmap_update(pipe);
+	pipe->dmap_cnt++;
+	mdp4_stat.kickoff_dmap++;
+	vctrl->dmap_koff++;
+	vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+	outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
+	mb(); /* make sure kickoff executed */
+	spin_unlock(&vctrl->spin_lock);
+}
+
+static void clk_ctrl_work(struct work_struct *work)
+{
+	struct vsycn_ctrl *vctrl =
+		container_of(work, typeof(*vctrl), clk_work);
+	unsigned long flags;
+
+	mutex_lock(&vctrl->update_lock);
+	if (vctrl->clk_control && vctrl->clk_enabled) {
+		pr_debug("%s: SET_CLK_OFF\n", __func__);
+		mdp_clk_ctrl(0);
+		spin_lock_irqsave(&vctrl->spin_lock, flags);
+		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
+		vctrl->clk_enabled = 0;
+		vctrl->clk_control = 0;
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+	}
+	mutex_unlock(&vctrl->update_lock);
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+	struct vsycn_ctrl *vctrl =
+		container_of(work, typeof(*vctrl), vsync_work);
+	char buf[64];
+	char *envp[2];
+
+	snprintf(buf, sizeof(buf), "VSYNC=%llu",
+			ktime_to_ns(vctrl->vsync_time));
+	envp[0] = buf;
+	envp[1] = NULL;
+	kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+
+void mdp4_mddi_rdptr_init(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->inited)
+		return;
+
+	vctrl->inited = 1;
+	vctrl->update_ndx = 0;
+	mutex_init(&vctrl->update_lock);
+	init_completion(&vctrl->ov_comp);
+	init_completion(&vctrl->dmap_comp);
+	init_completion(&vctrl->vsync_comp);
+	spin_lock_init(&vctrl->spin_lock);
+	INIT_WORK(&vctrl->vsync_work, send_vsync_work);
+	INIT_WORK(&vctrl->clk_work, clk_ctrl_work);
+}
+
+void mdp4_primary_rdptr(void)
+{
+	primary_rdptr_isr(0);
+}
+
+void mdp4_overlay_mddi_state_set(int state)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mddi_state = state;
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+int mdp4_overlay_mddi_state_get(void)
+{
+	return mddi_state;
+}
+
+static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
+{
+	/*
+	 * The adreno GPU hardware requires that the pitch be aligned to
+	 * 32 pixels for color buffers, so for the cases where the GPU
+	 * is writing directly to fb0, the framebuffer pitch
+	 * also needs to be 32 pixel aligned
+	 */
+
+	if (fb_index == 0)
+		return ALIGN(xres, 32) * bpp;
+	else
+		return xres * bpp;
 }
 
 void mdp4_mddi_vsync_enable(struct msm_fb_data_type *mfd,
@@ -61,13 +703,6 @@
 	if ((mfd->use_mdp_vsync) && (mfd->ibuf.vsync_enable) &&
 		(mfd->panel_info.lcd.vsync_enable)) {
 
-		if (mdp_hw_revision < MDP4_REVISION_V2_1) {
-			/* need dmas dmap switch */
-			if (which == 0 && dmap_vsync_enable == 0 &&
-				mfd->panel_info.lcd.rev < 2) /* dma_p */
-				return;
-		}
-
 		if (vsync_start_y_adjust <= pipe->dst_y)
 			start_y = pipe->dst_y - vsync_start_y_adjust;
 		else
@@ -88,633 +723,337 @@
 	}
 }
 
-#define WHOLESCREEN
+void mdp4_mddi_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+	struct vsycn_ctrl *vctrl;
 
-void mdp4_overlay_update_lcd(struct msm_fb_data_type *mfd)
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	vctrl->base_pipe = pipe;
+}
+
+static void mdp4_overlay_setup_pipe_addr(struct msm_fb_data_type *mfd,
+			struct mdp4_overlay_pipe *pipe)
 {
 	MDPIBUF *iBuf = &mfd->ibuf;
+	struct fb_info *fbi;
+	int bpp;
 	uint8 *src;
+
+	/* whole screen for base layer */
+	src = (uint8 *) iBuf->buf;
+	fbi = mfd->fbi;
+
+	if (pipe->is_3d) {
+		bpp = fbi->var.bits_per_pixel / 8;
+		pipe->src_height = pipe->src_height_3d;
+		pipe->src_width = pipe->src_width_3d;
+		pipe->src_h = pipe->src_height_3d;
+		pipe->src_w = pipe->src_width_3d;
+		pipe->dst_h = pipe->src_height_3d;
+		pipe->dst_w = pipe->src_width_3d;
+		pipe->srcp0_ystride = msm_fb_line_length(0,
+						pipe->src_width, bpp);
+	} else {
+		 /* 2D */
+		pipe->src_height = fbi->var.yres;
+		pipe->src_width = fbi->var.xres;
+		pipe->src_h = fbi->var.yres;
+		pipe->src_w = fbi->var.xres;
+		pipe->dst_h = fbi->var.yres;
+		pipe->dst_w = fbi->var.xres;
+		pipe->srcp0_ystride = fbi->fix.line_length;
+	}
+	pipe->src_y = 0;
+	pipe->src_x = 0;
+	pipe->dst_y = 0;
+	pipe->dst_x = 0;
+	pipe->srcp0_addr = (uint32)src;
+}
+
+void mdp4_overlay_update_mddi(struct msm_fb_data_type *mfd)
+{
 	int ptype;
 	uint32 mddi_ld_param;
 	uint16 mddi_vdo_packet_reg;
 	struct mdp4_overlay_pipe *pipe;
+	uint32	data;
 	int ret;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
 
 	if (mfd->key != MFD_KEY)
 		return;
 
-	mddi_mfd = mfd;		/* keep it */
+	vctrl = &vsync_ctrl_db[cndx];
 
-	/* MDP cmd block enable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
-	if (mddi_pipe == NULL) {
+	if (vctrl->base_pipe == NULL) {
 		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
+
 		if (ptype < 0)
-			printk(KERN_INFO "%s: format2type failed\n", __func__);
+			pr_err("%s: format2type failed\n", __func__);
+
 		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
-		if (pipe == NULL)
-			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
+		if (pipe == NULL) {
+			pr_err("%s: pipe_alloc failed\n", __func__);
+			return;
+		}
 		pipe->pipe_used++;
+		pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
 		pipe->mixer_num  = MDP4_MIXER0;
 		pipe->src_format = mfd->fb_imgType;
 		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_MDDI);
 		ret = mdp4_overlay_format2pipe(pipe);
 		if (ret < 0)
-			printk(KERN_INFO "%s: format2type failed\n", __func__);
+			pr_err("%s: format2pipe failed\n", __func__);
 
-		mddi_pipe = pipe; /* keep it */
-		mddi_ld_param = 0;
-		mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
-
-		if (mdp_hw_revision == MDP4_REVISION_V2_1) {
-			uint32	data;
-
-			data = inpdw(MDP_BASE + 0x0028);
-			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
-			if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
-				data |= 0x0200;
-			else
-				data |= 0x0100;
-
-			MDP_OUTP(MDP_BASE + 0x00028, data);
-		}
-
-		if (mfd->panel_info.type == MDDI_PANEL) {
-			if (mfd->panel_info.pdest == DISPLAY_1)
-				mddi_ld_param = 0;
-			else
-				mddi_ld_param = 1;
-		} else {
-			mddi_ld_param = 2;
-		}
-
-		MDP_OUTP(MDP_BASE + 0x00090, mddi_ld_param);
-
-		if (mfd->panel_info.bpp == 24)
-			MDP_OUTP(MDP_BASE + 0x00094,
-			 (MDDI_VDO_PACKET_DESC_24 << 16) | mddi_vdo_packet_reg);
-		else if (mfd->panel_info.bpp == 16)
-			MDP_OUTP(MDP_BASE + 0x00094,
-			 (MDDI_VDO_PACKET_DESC_16 << 16) | mddi_vdo_packet_reg);
-		else
-			MDP_OUTP(MDP_BASE + 0x00094,
-			 (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
-
-		MDP_OUTP(MDP_BASE + 0x00098, 0x01);
+		vctrl->base_pipe = pipe; /* keep it */
 		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
 		pipe->ov_blt_addr = 0;
 		pipe->dma_blt_addr = 0;
 	} else {
-		pipe = mddi_pipe;
+		pipe = vctrl->base_pipe;
 	}
 
-	/* 0 for dma_p, client_id = 0 */
-	MDP_OUTP(MDP_BASE + 0x00090, 0);
+	MDP_OUTP(MDP_BASE + 0x021c, 10); /* read pointer */
 
+	mddi_ld_param = 0;
+	mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
 
-	src = (uint8 *) iBuf->buf;
+	if (mdp_hw_revision == MDP4_REVISION_V2_1) {
+		data = inpdw(MDP_BASE + 0x0028);
+		data &= ~0x0300;	/* bit 8, 9, MASTER4 */
+		if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
+			data |= 0x0200;
+		else
+			data |= 0x0100;
 
-#ifdef WHOLESCREEN
-
-	{
-		struct fb_info *fbi;
-
-		fbi = mfd->fbi;
-		pipe->src_height = fbi->var.yres;
-		pipe->src_width = fbi->var.xres;
-		pipe->src_h = fbi->var.yres;
-		pipe->src_w = fbi->var.xres;
-		pipe->src_y = 0;
-		pipe->src_x = 0;
-		pipe->dst_h = fbi->var.yres;
-		pipe->dst_w = fbi->var.xres;
-		pipe->dst_y = 0;
-		pipe->dst_x = 0;
-		pipe->srcp0_addr = (uint32)src;
-		pipe->srcp0_ystride = fbi->fix.line_length;
+		MDP_OUTP(MDP_BASE + 0x00028, data);
 	}
 
-#else
-	if (mdp4_overlay_active(MDP4_MIXER0)) {
-		struct fb_info *fbi;
-
-		fbi = mfd->fbi;
-		pipe->src_height = fbi->var.yres;
-		pipe->src_width = fbi->var.xres;
-		pipe->src_h = fbi->var.yres;
-		pipe->src_w = fbi->var.xres;
-		pipe->src_y = 0;
-		pipe->src_x = 0;
-		pipe->dst_h = fbi->var.yres;
-		pipe->dst_w = fbi->var.xres;
-		pipe->dst_y = 0;
-		pipe->dst_x = 0;
-		pipe->srcp0_addr = (uint32) src;
-		pipe->srcp0_ystride = fbi->fix.line_length;
+	if (mfd->panel_info.type == MDDI_PANEL) {
+		if (mfd->panel_info.pdest == DISPLAY_1)
+			mddi_ld_param = 0;
+		else
+			mddi_ld_param = 1;
 	} else {
-		/* starting input address */
-		src += (iBuf->dma_x + iBuf->dma_y * iBuf->ibuf_width)
-					* iBuf->bpp;
-
-		pipe->src_height = iBuf->dma_h;
-		pipe->src_width = iBuf->dma_w;
-		pipe->src_h = iBuf->dma_h;
-		pipe->src_w = iBuf->dma_w;
-		pipe->src_y = 0;
-		pipe->src_x = 0;
-		pipe->dst_h = iBuf->dma_h;
-		pipe->dst_w = iBuf->dma_w;
-		pipe->dst_y = iBuf->dma_y;
-		pipe->dst_x = iBuf->dma_x;
-		pipe->srcp0_addr = (uint32) src;
-		pipe->srcp0_ystride = iBuf->ibuf_width * iBuf->bpp;
+		mddi_ld_param = 2;
 	}
-#endif
 
-	pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
+	MDP_OUTP(MDP_BASE + 0x00090, mddi_ld_param);
+
+	if (mfd->panel_info.bpp == 24)
+		MDP_OUTP(MDP_BASE + 0x00094,
+		 (MDDI_VDO_PACKET_DESC_24 << 16) | mddi_vdo_packet_reg);
+	else if (mfd->panel_info.bpp == 16)
+		MDP_OUTP(MDP_BASE + 0x00094,
+		 (MDDI_VDO_PACKET_DESC_16 << 16) | mddi_vdo_packet_reg);
+	else
+		MDP_OUTP(MDP_BASE + 0x00094,
+		 (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
+
+	MDP_OUTP(MDP_BASE + 0x00098, 0x01);
+
+
+	mdp4_overlay_setup_pipe_addr(mfd, pipe);
 
 	mdp4_overlay_rgb_setup(pipe);
 
-	mdp4_mixer_stage_up(pipe, 1);
+	mdp4_overlay_reg_flush(pipe, 1);
+
+	mdp4_mixer_stage_up(pipe, 0);
 
 	mdp4_overlayproc_cfg(pipe);
 
 	mdp4_overlay_dmap_xy(pipe);
 
 	mdp4_overlay_dmap_cfg(mfd, 0);
+
 	mdp4_mixer_stage_commit(pipe->mixer_num);
-	mdp4_mddi_vsync_enable(mfd, pipe, 0);
 
-	/* MDP cmd block disable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	wmb();
 }
 
-int mdp4_mddi_overlay_blt_start(struct msm_fb_data_type *mfd)
+void mdp4_mddi_blt_start(struct msm_fb_data_type *mfd)
 {
-	unsigned long flag;
-
-	pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
-		__func__, mddi_pipe->blt_end,
-		(int)mddi_pipe->ov_blt_addr, current->pid);
-
-	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
-
-	if (mfd->ov0_wb_buf->write_addr == 0) {
-		pr_info("%s: no blt_base assigned\n", __func__);
-		return -EBUSY;
-	}
-
-	if (mddi_pipe->ov_blt_addr == 0) {
-		mdp4_mddi_dma_busy_wait(mfd);
-		spin_lock_irqsave(&mdp_spin_lock, flag);
-		mddi_pipe->blt_end = 0;
-		mddi_pipe->blt_cnt = 0;
-		mddi_pipe->ov_cnt = 0;
-		mddi_pipe->dmap_cnt = 0;
-		mddi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
-		mddi_pipe->dma_blt_addr = mfd->ov0_wb_buf->write_addr;
-		mdp4_stat.blt_mddi++;
-		spin_unlock_irqrestore(&mdp_spin_lock, flag);
-	return 0;
+	mdp4_mddi_do_blt(mfd, 1);
 }
 
-	return -EBUSY;
-}
-
-int mdp4_mddi_overlay_blt_stop(struct msm_fb_data_type *mfd)
+void mdp4_mddi_blt_stop(struct msm_fb_data_type *mfd)
 {
-	unsigned long flag;
-
-	pr_debug("%s: blt_end=%d blt_addr=%x\n",
-		 __func__, mddi_pipe->blt_end, (int)mddi_pipe->ov_blt_addr);
-
-	if ((mddi_pipe->blt_end == 0) && mddi_pipe->ov_blt_addr) {
-		spin_lock_irqsave(&mdp_spin_lock, flag);
-		mddi_pipe->blt_end = 1;	/* mark as end */
-		spin_unlock_irqrestore(&mdp_spin_lock, flag);
-		return 0;
-	}
-
-	return -EBUSY;
-}
-
-int mdp4_mddi_overlay_blt_offset(struct msm_fb_data_type *mfd,
-					struct msmfb_overlay_blt *req)
-{
-	req->offset = 0;
-	req->width = mddi_pipe->src_width;
-	req->height = mddi_pipe->src_height;
-	req->bpp = mddi_pipe->bpp;
-
-	return sizeof(*req);
+	mdp4_mddi_do_blt(mfd, 0);
 }
 
 void mdp4_mddi_overlay_blt(struct msm_fb_data_type *mfd,
 					struct msmfb_overlay_blt *req)
 {
-	if (req->enable)
-		mdp4_mddi_overlay_blt_start(mfd);
-	else if (req->enable == 0)
-		mdp4_mddi_overlay_blt_stop(mfd);
-
+	mdp4_mddi_do_blt(mfd, req->enable);
 }
 
-void mdp4_blt_xy_update(struct mdp4_overlay_pipe *pipe)
+int mdp4_mddi_on(struct platform_device *pdev)
 {
-	uint32 off, addr, addr2;
-	int bpp;
-	char *overlay_base;
+	int ret = 0;
+	int cndx = 0;
+	struct msm_fb_data_type *mfd;
+	struct vsycn_ctrl *vctrl;
 
-	if (pipe->ov_blt_addr == 0)
-		return;
+	pr_debug("%s+:\n", __func__);
 
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
-#ifdef BLT_RGB565
-	bpp = 2; /* overlay ouput is RGB565 */
-#else
-	bpp = 3; /* overlay ouput is RGB888 */
-#endif
-	off = 0;
-	if (pipe->dmap_cnt & 0x01)
-		off = pipe->src_height * pipe->src_width * bpp;
+	vctrl = &vsync_ctrl_db[cndx];
+	vctrl->mfd = mfd;
+	vctrl->dev = mfd->fbi->dev;
 
-	addr = pipe->ov_blt_addr + off;
+	mdp_clk_ctrl(1);
+	mdp4_overlay_update_mddi(mfd);
+	mdp_clk_ctrl(0);
 
-	/* dmap */
-	MDP_OUTP(MDP_BASE + 0x90008, addr);
+	mdp4_iommu_attach();
 
-	off = 0;
-	if (pipe->ov_cnt & 0x01)
-		off = pipe->src_height * pipe->src_width * bpp;
-	addr2 = pipe->ov_blt_addr + off;
-	/* overlay 0 */
-	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
-	outpdw(overlay_base + 0x000c, addr2);
-	outpdw(overlay_base + 0x001c, addr2);
+	atomic_set(&vctrl->suspend, 0);
+	pr_debug("%s-:\n", __func__);
+
+	return ret;
 }
 
-void mdp4_primary_rdptr(void)
+int mdp4_mddi_off(struct platform_device *pdev)
 {
-}
+	int ret = 0;
+	int cndx = 0;
+	struct msm_fb_data_type *mfd;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
-/*
- * mdp4_dmap_done_mddi: called from isr
- */
-void mdp4_dma_p_done_mddi(struct mdp_dma_data *dma)
-{
-	int diff;
+	pr_debug("%s+:\n", __func__);
 
-	mddi_pipe->dmap_cnt++;
-	diff = mddi_pipe->ov_cnt - mddi_pipe->dmap_cnt;
-	pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
-			__func__, mddi_pipe->ov_cnt, mddi_pipe->dmap_cnt);
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
-	if (diff <= 0) {
-		spin_lock(&mdp_spin_lock);
-		dma->dmap_busy = FALSE;
-		complete(&dma->dmap_comp);
-		spin_unlock(&mdp_spin_lock);
-
-		if (mddi_pipe->blt_end) {
-			mddi_pipe->blt_end = 0;
-			mddi_pipe->ov_blt_addr = 0;
-			mddi_pipe->dma_blt_addr = 0;
-			pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n", __func__,
-				mddi_pipe->ov_cnt, mddi_pipe->dmap_cnt);
-			mdp_intr_mask &= ~INTR_DMA_P_DONE;
-			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-		}
-
-		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-		mdp_disable_irq_nosync(MDP_DMA2_TERM);  /* disable intr */
-		return;
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+	if (pipe == NULL) {
+		pr_err("%s: NO base pipe\n", __func__);
+		return ret;
 	}
 
-	spin_lock(&mdp_spin_lock);
-	dma->busy = FALSE;
-	spin_unlock(&mdp_spin_lock);
-	complete(&dma->comp);
-	if (busy_wait_cnt)
-		busy_wait_cnt--;
+	atomic_set(&vctrl->suspend, 1);
 
-	pr_debug("%s: kickoff dmap\n", __func__);
+	/* sanity check, free pipes besides base layer */
+	mdp4_overlay_unset_mixer(pipe->mixer_num);
+	mdp4_mixer_stage_down(pipe, 1);
+	mdp4_overlay_pipe_free(pipe);
+	vctrl->base_pipe = NULL;
 
-	mdp4_blt_xy_update(mddi_pipe);
-	/* kick off dmap */
-	outpdw(MDP_BASE + 0x000c, 0x0);
-	mdp4_stat.kickoff_dmap++;
-	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-}
-
-/*
- * mdp4_overlay0_done_mddi: called from isr
- */
-void mdp4_overlay0_done_mddi(struct mdp_dma_data *dma)
-{
-	int diff;
-
-	if (mddi_pipe->ov_blt_addr == 0) {
-		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-		spin_lock(&mdp_spin_lock);
-		dma->busy = FALSE;
-		spin_unlock(&mdp_spin_lock);
-		complete(&dma->comp);
-
-		if (busy_wait_cnt)
-			busy_wait_cnt--;
-		mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-
-		return;
+	if (vctrl->clk_enabled) {
+		/*
+		 * in case of suspend, the vsync ctrl off call is not
+		 * received from the framework, which leaves the clock on;
+		 * the clock needs to be turned off here
+		 */
+		mdp_clk_ctrl(0);
 	}
 
-	/* blt enabled */
-	if (mddi_pipe->blt_end == 0)
-		mddi_pipe->ov_cnt++;
+	vctrl->clk_enabled = 0;
+	vctrl->vsync_enabled = 0;
+	vctrl->clk_control = 0;
+	vctrl->expire_tick = 0;
+	vctrl->uevent = 0;
 
-	pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
-			__func__, mddi_pipe->ov_cnt, mddi_pipe->dmap_cnt);
+	vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
 
-	if (mddi_pipe->blt_cnt == 0) {
-		/* first kickoff since blt enabled */
-		mdp_intr_mask |= INTR_DMA_P_DONE;
-		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-	}
+	pr_debug("%s-:\n", __func__);
 
-	mddi_pipe->blt_cnt++;
-
-	diff = mddi_pipe->ov_cnt - mddi_pipe->dmap_cnt;
-	if (diff >= 2) {
-		mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-		return;
-	}
-
-	spin_lock(&mdp_spin_lock);
-	dma->busy = FALSE;
-	dma->dmap_busy = TRUE;
-	spin_unlock(&mdp_spin_lock);
-	complete(&dma->comp);
-
-	if (busy_wait_cnt)
-		busy_wait_cnt--;
-
-	pr_debug("%s: kickoff dmap\n", __func__);
-
-	mdp4_blt_xy_update(mddi_pipe);
-	mdp_enable_irq(MDP_DMA2_TERM);	/* enable intr */
-	/* kick off dmap */
-	outpdw(MDP_BASE + 0x000c, 0x0);
-	mdp4_stat.kickoff_dmap++;
-	mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-}
-
-void mdp4_mddi_overlay_restore(void)
-{
-	if (mddi_mfd == NULL)
-		return;
-
-	pr_debug("%s: resotre, pid=%d\n", __func__, current->pid);
-
-	if (mddi_mfd->panel_power_on == 0)
-		return;
-	if (mddi_mfd && mddi_pipe) {
-		mdp4_mddi_dma_busy_wait(mddi_mfd);
-		mdp4_overlay_update_lcd(mddi_mfd);
-
-		if (mddi_pipe->ov_blt_addr)
-			mdp4_mddi_blt_dmap_busy_wait(mddi_mfd);
-		mdp4_mddi_overlay_kickoff(mddi_mfd, mddi_pipe);
-		mddi_mfd->dma_update_flag = 1;
-	}
-	if (mdp_hw_revision < MDP4_REVISION_V2_1) /* need dmas dmap switch */
-		mdp4_mddi_overlay_dmas_restore();
-}
-
-void mdp4_mddi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd)
-{
-	unsigned long flag;
-	int need_wait = 0;
-
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (mfd->dma->dmap_busy == TRUE) {
-		INIT_COMPLETION(mfd->dma->dmap_comp);
-		need_wait++;
-	}
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
-	if (need_wait) {
-		/* wait until DMA finishes the current job */
-		wait_for_completion(&mfd->dma->dmap_comp);
-	}
-}
-
-/*
- * mdp4_mddi_cmd_dma_busy_wait: check mddi link activity
- * mddi link is a shared resource and it can only be used
- * while it is in idle state.
- * ov_mutex need to be acquired before call this function.
- */
-void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd)
-{
-	unsigned long flag;
-	int need_wait = 0;
-
-	pr_debug("%s: START, pid=%d\n", __func__, current->pid);
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (mfd->dma->busy == TRUE) {
-		if (busy_wait_cnt == 0)
-			INIT_COMPLETION(mfd->dma->comp);
-		busy_wait_cnt++;
-		need_wait++;
-	}
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
-
-	if (need_wait) {
-		/* wait until DMA finishes the current job */
-		pr_debug("%s: PENDING, pid=%d\n", __func__, current->pid);
-		wait_for_completion(&mfd->dma->comp);
-	}
-	pr_debug("%s: DONE, pid=%d\n", __func__, current->pid);
-}
-
-void mdp4_mddi_kickoff_video(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
-{
 	/*
-	 * a video kickoff may happen before UI kickoff after
-	 * blt enabled. mdp4_overlay_update_lcd() need
-	 * to be called before kickoff.
-	 * vice versa for blt disabled.
+	 * footswitch off
+	 * this will cause all MDP registers
+	 * to be reset to default
+	 * after footswitch on later
 	 */
-	if (mddi_pipe->ov_blt_addr && mddi_pipe->blt_cnt == 0)
-		mdp4_overlay_update_lcd(mfd); /* first time */
-	else if (mddi_pipe->ov_blt_addr == 0  && mddi_pipe->blt_cnt) {
-		mdp4_overlay_update_lcd(mfd); /* last time */
-		mddi_pipe->blt_cnt = 0;
-	}
 
-	pr_debug("%s: blt_addr=%d blt_cnt=%d\n",
-		__func__, (int)mddi_pipe->ov_blt_addr, mddi_pipe->blt_cnt);
-
-	if (mddi_pipe->ov_blt_addr)
-		mdp4_mddi_blt_dmap_busy_wait(mddi_mfd);
-	mdp4_mddi_overlay_kickoff(mfd, pipe);
+	return ret;
 }
 
-void mdp4_mddi_kickoff_ui(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
+void mdp_mddi_overlay_suspend(struct msm_fb_data_type *mfd)
 {
-	pr_debug("%s: pid=%d\n", __func__, current->pid);
-	mdp4_mddi_overlay_kickoff(mfd, pipe);
-}
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+	/* dis-engage rgb0 from mixer0 */
+	if (pipe) {
+		if (mfd->ref_cnt == 0) {
+			/* adb stop */
+			if (pipe->pipe_type == OVERLAY_TYPE_BF)
+				mdp4_overlay_borderfill_stage_down(pipe);
 
-void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
-{
-	unsigned long flag;
-
-	mdp_enable_irq(MDP_OVERLAY0_TERM);
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	mfd->dma->busy = TRUE;
-	if (mddi_pipe->ov_blt_addr)
-		mfd->dma->dmap_busy = TRUE;
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-	/* start OVERLAY pipe */
-	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
-	mdp4_stat.kickoff_ov0++;
-}
-
-void mdp4_dma_s_update_lcd(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
-{
-	MDPIBUF *iBuf = &mfd->ibuf;
-	uint32 outBpp = iBuf->bpp;
-	uint16 mddi_vdo_packet_reg;
-	uint32 dma_s_cfg_reg;
-
-	dma_s_cfg_reg = 0;
-
-	if (mfd->fb_imgType == MDP_RGBA_8888)
-		dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR; /* on purpose */
-	else if (mfd->fb_imgType == MDP_BGR_565)
-		dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR;
-	else
-		dma_s_cfg_reg |= DMA_PACK_PATTERN_RGB;
-
-	if (outBpp == 4)
-		dma_s_cfg_reg |= (1 << 26); /* xRGB8888 */
-	else if (outBpp == 2)
-		dma_s_cfg_reg |= DMA_IBUF_FORMAT_RGB565;
-
-	dma_s_cfg_reg |= DMA_DITHER_EN;
-
-	/* MDP cmd block enable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-	/* PIXELSIZE */
-	MDP_OUTP(MDP_BASE + 0xa0004, (pipe->dst_h << 16 | pipe->dst_w));
-	MDP_OUTP(MDP_BASE + 0xa0008, pipe->srcp0_addr);	/* ibuf address */
-	MDP_OUTP(MDP_BASE + 0xa000c, pipe->srcp0_ystride);/* ystride */
-
-	if (mfd->panel_info.bpp == 24) {
-		dma_s_cfg_reg |= DMA_DSTC0G_8BITS |	/* 666 18BPP */
-		    DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
-	} else if (mfd->panel_info.bpp == 18) {
-		dma_s_cfg_reg |= DMA_DSTC0G_6BITS |	/* 666 18BPP */
-		    DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
-	} else {
-		dma_s_cfg_reg |= DMA_DSTC0G_6BITS |	/* 565 16BPP */
-		    DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
-	}
-
-	MDP_OUTP(MDP_BASE + 0xa0010, (pipe->dst_y << 16) | pipe->dst_x);
-
-	/* 1 for dma_s, client_id = 0 */
-	MDP_OUTP(MDP_BASE + 0x00090, 1);
-
-	mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
-
-	if (mfd->panel_info.bpp == 24)
-		MDP_OUTP(MDP_BASE + 0x00094,
-			(MDDI_VDO_PACKET_DESC_24 << 16) | mddi_vdo_packet_reg);
-	else if (mfd->panel_info.bpp == 16)
-		MDP_OUTP(MDP_BASE + 0x00094,
-			 (MDDI_VDO_PACKET_DESC_16 << 16) | mddi_vdo_packet_reg);
-	else
-		MDP_OUTP(MDP_BASE + 0x00094,
-			 (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
-
-	MDP_OUTP(MDP_BASE + 0x00098, 0x01);
-
-	MDP_OUTP(MDP_BASE + 0xa0000, dma_s_cfg_reg);
-
-	mdp4_mddi_vsync_enable(mfd, pipe, 1);
-
-	/* MDP cmd block disable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-}
-
-void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
-{
-	mdp_enable_irq(MDP_DMA_S_TERM);
-
-	if (mddi_pipe->ov_blt_addr == 0)
-		mfd->dma->busy = TRUE;
-
-	mfd->ibuf_flushed = TRUE;
-	/* start dma_s pipe */
-	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
-	mdp4_stat.kickoff_dmas++;
-
-	/* wait until DMA finishes the current job */
-	wait_for_completion(&mfd->dma->comp);
-	mdp_disable_irq(MDP_DMA_S_TERM);
-}
-
-void mdp4_mddi_overlay_dmas_restore(void)
-{
-	/* mutex held by caller */
-	if (mddi_mfd && mddi_pipe) {
-		mdp4_mddi_dma_busy_wait(mddi_mfd);
-		mdp4_dma_s_update_lcd(mddi_mfd, mddi_pipe);
-		mdp4_mddi_dma_s_kickoff(mddi_mfd, mddi_pipe);
-		mddi_mfd->dma_update_flag = 1;
+			/* pipe == rgb1 */
+			mdp4_overlay_unset_mixer(pipe->mixer_num);
+			vctrl->base_pipe = NULL;
+		} else {
+			mdp4_mixer_stage_down(pipe, 1);
+			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 1);
+		}
 	}
 }
 
 void mdp4_mddi_overlay(struct msm_fb_data_type *mfd)
 {
-	mutex_lock(&mfd->dma->ov_mutex);
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+	long long xx;
 
-	if (mfd && mfd->panel_power_on) {
-		mdp4_mddi_dma_busy_wait(mfd);
+	vctrl = &vsync_ctrl_db[cndx];
 
-		if (mddi_pipe && mddi_pipe->ov_blt_addr)
-			mdp4_mddi_blt_dmap_busy_wait(mfd);
-		mdp4_overlay_mdp_perf_upd(mfd, 0);
-		mdp4_overlay_update_lcd(mfd);
+	if (!mfd->panel_power_on)
+		return;
 
-		mdp4_overlay_mdp_perf_upd(mfd, 1);
-		if (mdp_hw_revision < MDP4_REVISION_V2_1) {
-			/* dmas dmap switch */
-			if (mdp4_overlay_mixer_play(mddi_pipe->mixer_num)
-						== 0) {
-				mdp4_dma_s_update_lcd(mfd, mddi_pipe);
-				mdp4_mddi_dma_s_kickoff(mfd, mddi_pipe);
-			} else
-				mdp4_mddi_kickoff_ui(mfd, mddi_pipe);
-		} else	/* no dams dmap switch  */
-			mdp4_mddi_kickoff_ui(mfd, mddi_pipe);
-
-	/* signal if pan function is waiting for the update completion */
-		if (mfd->pan_waiting) {
-			mfd->pan_waiting = FALSE;
-			complete(&mfd->pan_comp);
-		}
+	pipe = vctrl->base_pipe;
+	if (pipe == NULL) {
+		pr_err("%s: NO base pipe\n", __func__);
+		return;
 	}
+
+	mutex_lock(&vctrl->update_lock);
+	if (!vctrl->clk_enabled) {
+		pr_err("%s: mdp clocks disabled\n", __func__);
+		mutex_unlock(&vctrl->update_lock);
+		return;
+
+	}
+	mutex_unlock(&vctrl->update_lock);
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (vctrl->expire_tick) {
+		/*
+		 * in the middle of shutting clocks down
+		 * delay to allow pan display to go through
+		 */
+		vctrl->expire_tick = VSYNC_EXPIRE_TICK;
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
+		mdp4_mddi_vsync_enable(mfd, pipe, 0);
+		mdp4_overlay_setup_pipe_addr(mfd, pipe);
+		mdp4_mddi_pipe_queue(0, pipe);
+	}
+
+	mdp4_overlay_mdp_perf_upd(mfd, 1);
+
+	mutex_lock(&mfd->dma->ov_mutex);
+	mdp4_mddi_pipe_commit();
 	mutex_unlock(&mfd->dma->ov_mutex);
+	mdp4_mddi_wait4vsync(0, &xx);
+
+	mdp4_overlay_mdp_perf_upd(mfd, 0);
 }
 
 int mdp4_mddi_overlay_cursor(struct fb_info *info, struct fb_cursor *cursor)
@@ -722,7 +1061,6 @@
 	struct msm_fb_data_type *mfd = info->par;
 	mutex_lock(&mfd->dma->ov_mutex);
 	if (mfd && mfd->panel_power_on) {
-		mdp4_mddi_dma_busy_wait(mfd);
 		mdp_hw_cursor_update(info, cursor);
 	}
 	mutex_unlock(&mfd->dma->ov_mutex);
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
index 87921e6..82b4e80 100644
--- a/drivers/video/msm/mdp4_util.c
+++ b/drivers/video/msm/mdp4_util.c
@@ -557,7 +557,7 @@
 			mdp4_dmap_done_dsi_cmd(0);
 #else
 		else { /* MDDI */
-			mdp4_dma_p_done_mddi(dma);
+			mdp4_dmap_done_mddi(0);
 			mdp_pipe_ctrl(MDP_DMA2_BLOCK,
 				MDP_BLOCK_POWER_OFF, TRUE);
 			complete(&dma->comp);
@@ -608,7 +608,7 @@
 				mdp4_overlay0_done_dsi_cmd(0);
 #else
 			if (panel & MDP4_PANEL_MDDI)
-				mdp4_overlay0_done_mddi(dma);
+				mdp4_overlay0_done_mddi(0);
 #endif
 		}
 		mdp_hw_cursor_done();
diff --git a/drivers/video/msm/mdp_debugfs.c b/drivers/video/msm/mdp_debugfs.c
index 0fad0a7..54f5ef5 100644
--- a/drivers/video/msm/mdp_debugfs.c
+++ b/drivers/video/msm/mdp_debugfs.c
@@ -719,84 +719,6 @@
 	.write = pmdh_reg_write,
 };
 
-
-
-#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
-static int vsync_reg_open(struct inode *inode, struct file *file)
-{
-	/* non-seekable */
-	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
-	return 0;
-}
-
-static int vsync_reg_release(struct inode *inode, struct file *file)
-{
-	return 0;
-}
-
-static ssize_t vsync_reg_write(
-	struct file *file,
-	const char __user *buff,
-	size_t count,
-	loff_t *ppos)
-{
-	uint32 enable;
-	int cnt;
-
-	if (count >= sizeof(debug_buf))
-		return -EFAULT;
-
-	if (copy_from_user(debug_buf, buff, count))
-		return -EFAULT;
-
-	debug_buf[count] = 0;	/* end of string */
-
-	cnt = sscanf(debug_buf, "%x", &enable);
-
-	mdp_dmap_vsync_set(enable);
-
-	return count;
-}
-
-static ssize_t vsync_reg_read(
-	struct file *file,
-	char __user *buff,
-	size_t count,
-	loff_t *ppos)
-{
-	char *bp;
-	int len = 0;
-	int tot = 0;
-	int dlen;
-
-	if (*ppos)
-		return 0;	/* the end */
-
-	bp = debug_buf;
-	dlen = sizeof(debug_buf);
-	len = snprintf(bp, dlen, "%x\n", mdp_dmap_vsync_get());
-	tot += len;
-	bp += len;
-	*bp = 0;
-	tot++;
-
-	if (copy_to_user(buff, debug_buf, tot))
-		return -EFAULT;
-
-	*ppos += tot;	/* increase offset */
-
-	return tot;
-}
-
-
-static const struct file_operations vsync_fops = {
-	.open = vsync_reg_open,
-	.release = vsync_reg_release,
-	.read = vsync_reg_read,
-	.write = vsync_reg_write,
-};
-#endif
-
 static ssize_t emdh_reg_write(
 	struct file *file,
 	const char __user *buff,
@@ -1342,15 +1264,6 @@
 		return -1;
 	}
 
-#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
-	if (debugfs_create_file("vsync", 0644, dent, 0, &vsync_fops)
-			== NULL) {
-		printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
-			__FILE__, __LINE__);
-		return -1;
-	}
-#endif
-
 	dent = debugfs_create_dir("emdh", NULL);
 
 	if (IS_ERR(dent)) {
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index 6e332ef..5fdee02 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -920,7 +920,7 @@
 	u32 i;
 	u32 found = false;
 
-	for (i = 0; i <= pool->count && !found; i++) {
+	for (i = 1; i <= pool->count && !found; i++) {
 		if (pool->entries[i].virtual == addr)
 			found = true;
 
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 714cc76..9e536be 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -245,6 +245,8 @@
 #define MMC_CAP2_PACKED_WR	(1 << 11)	/* Allow packed write */
 #define MMC_CAP2_PACKED_CMD	(MMC_CAP2_PACKED_RD | \
 				 MMC_CAP2_PACKED_WR) /* Allow packed commands */
+#define MMC_CAP2_PACKED_WR_CONTROL (1 << 12) /* Allow write packing control */
+
 #define MMC_CAP2_SANITIZE	(1 << 13)		/* Support Sanitize */
 #define MMC_CAP2_INIT_BKOPS	    (1 << 15)	/* Need to set BKOPS_EN */
 #define MMC_CAP2_POWER_OFF_VCCQ_DURING_SUSPEND	(1 << 16)
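
For context, a minimal sketch (not part of this series) of how a host controller driver would opt in to write packing control: the new MMC_CAP2_PACKED_WR_CONTROL bit is ORed into caps2 alongside the existing packed-command capabilities. The function name below is illustrative only.

	#include <linux/mmc/host.h>

	/* Hypothetical host driver helper: advertise packed commands plus
	 * runtime write packing control on this controller. */
	static void example_host_set_caps(struct mmc_host *host)
	{
		host->caps2 |= MMC_CAP2_PACKED_CMD |
			       MMC_CAP2_PACKED_WR_CONTROL;
	}
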
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index 5e1395e..62b8a73 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -2,7 +2,7 @@
 #define _MSM_KGSL_H
 
 #define KGSL_VERSION_MAJOR        3
-#define KGSL_VERSION_MINOR        13
+#define KGSL_VERSION_MINOR        14
 
 /*context flags */
 #define KGSL_CONTEXT_SAVE_GMEM		0x00000001
@@ -12,6 +12,7 @@
 #define KGSL_CONTEXT_PREAMBLE		0x00000010
 #define KGSL_CONTEXT_TRASH_STATE	0x00000020
 #define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
+#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
 
 #define KGSL_CONTEXT_INVALID 0xffffffff
 
@@ -66,6 +67,9 @@
 #define KGSL_CLK_MEM_IFACE 0x00000010
 #define KGSL_CLK_AXI	0x00000020
 
+/* Server Side Sync Timeout in milliseconds */
+#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000
+
 /*
  * Reset status values for context
  */
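
For context, a hedged userspace sketch (not part of this series) of how the new KGSL_CONTEXT_USER_GENERATED_TS flag would be requested at context creation through the existing draw-context ioctl; error handling is minimal and kgsl_fd is assumed to be an already open KGSL device node.

	#include <sys/ioctl.h>
	#include <linux/msm_kgsl.h>

	static int example_create_user_ts_context(int kgsl_fd)
	{
		struct kgsl_drawctxt_create ctxt = {
			/* typically requested together with per-context
			 * timestamps */
			.flags = KGSL_CONTEXT_PER_CONTEXT_TS |
				 KGSL_CONTEXT_USER_GENERATED_TS,
		};

		if (ioctl(kgsl_fd, IOCTL_KGSL_DRAWCTXT_CREATE, &ctxt) < 0)
			return -1;
		return ctxt.drawctxt_id;
	}
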
diff --git a/include/trace/events/mpdcvs_trace.h b/include/trace/events/mpdcvs_trace.h
new file mode 100644
index 0000000..0db1378
--- /dev/null
+++ b/include/trace/events/mpdcvs_trace.h
@@ -0,0 +1,156 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM  mpdcvs_trace
+
+#if !defined(_TRACE_MPDCVS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MPDCVS_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(msm_mp,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, mp_val)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->mp_val = mp_val;
+	),
+
+	TP_printk("ev_name=%s ev_level=%d",
+		__get_str(name),
+		__entry->mp_val)
+);
+
+/* Core function of run_q */
+
+DEFINE_EVENT(msm_mp, msm_mp_runq,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val)
+);
+
+DEFINE_EVENT(msm_mp, msm_mp_cpusonline,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val)
+);
+
+DEFINE_EVENT(msm_mp, msm_mp_slacktime,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val)
+);
+
+DECLARE_EVENT_CLASS(msm_dcvs,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__string(cpuid, cpuid)
+		__field(int, val)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__assign_str(cpuid, cpuid);
+		__entry->val = val;
+	),
+
+	TP_printk("ev_name=%s d_name=%s ev_level=%d",
+		__get_str(name),
+		__get_str(cpuid),
+		__entry->val)
+);
+
+/* Core function of dcvs */
+
+DEFINE_EVENT(msm_dcvs, msm_dcvs_idle,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val)
+);
+
+DEFINE_EVENT(msm_dcvs, msm_dcvs_iowait,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val)
+);
+
+DEFINE_EVENT(msm_dcvs, msm_dcvs_slack_time,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val)
+);
+
+DECLARE_EVENT_CLASS(msm_dcvs_scm,
+
+	TP_PROTO(unsigned long cpuid, int ev_type, unsigned long param0,
+		unsigned long param1, unsigned long ret0, unsigned long ret1),
+
+	TP_ARGS(cpuid, ev_type, param0, param1, ret0, ret1),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, cpuid)
+		__field(int, ev_type)
+		__field(unsigned long, param0)
+		__field(unsigned long, param1)
+		__field(unsigned long, ret0)
+		__field(unsigned long, ret1)
+	),
+
+	TP_fast_assign(
+		__entry->cpuid = cpuid;
+		__entry->ev_type = ev_type;
+		__entry->param0 = param0;
+		__entry->param1 = param1;
+		__entry->ret0 = ret0;
+		__entry->ret1 = ret1;
+	),
+
+	TP_printk("dev=%lu ev_type=%d ev_param0=%lu ev_param1=%lu ev_ret0=%lu ev_ret1=%lu",
+		__entry->cpuid,
+		__entry->ev_type,
+		__entry->param0,
+		__entry->param1,
+		__entry->ret0,
+		__entry->ret1)
+);
+
+DEFINE_EVENT(msm_dcvs_scm, msm_dcvs_scm_event,
+
+	TP_PROTO(unsigned long cpuid, int ev_type, unsigned long param0,
+		unsigned long param1, unsigned long ret0, unsigned long ret1),
+
+	TP_ARGS(cpuid, ev_type, param0, param1, ret0, ret1)
+);
+
+#endif /* _TRACE_MPDCVS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
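
For context, a hedged sketch (not part of this series) of how the tracepoints declared above are wired up and emitted: exactly one compilation unit defines CREATE_TRACE_POINTS before including the header, and each DEFINE_EVENT() generates a trace_<event>() helper matching its TP_PROTO signature. The callers shown here are illustrative.

	/* In exactly one .c file (e.g. the mp-decision/dcvs core): */
	#define CREATE_TRACE_POINTS
	#include <trace/events/mpdcvs_trace.h>

	static void example_report_runq(int nr_running)
	{
		/* Logged as "ev_name=run_q ev_level=<nr_running>" per the
		 * TP_printk() format of the msm_mp event class. */
		trace_msm_mp_runq("run_q", nr_running);
	}

	static void example_report_scm(unsigned long cpu, int ev,
				       unsigned long p0, unsigned long p1,
				       unsigned long r0, unsigned long r1)
	{
		trace_msm_dcvs_scm_event(cpu, ev, p0, p1, r0, r1);
	}
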
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 4c655c2..d2f60a0 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1629,6 +1629,8 @@
 			 $exec_file =~ /^.+\.ihex$/ or
 			 $exec_file =~ /^.+\.hex$/ or
 			 $exec_file =~ /^.+\.HEX$/ or
+			 $exec_file =~ /^.+\.dts$/ or
+			 $exec_file =~ /^.+\.dtsi$/ or
 			 $exec_file =~ /^.+defconfig$/ or
 			 $exec_file =~ /^Makefile$/ or
 			 $exec_file =~ /^Kconfig$/) &&
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index deddbe8..5a819c9 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -33,6 +33,9 @@
 #include <linux/pm_runtime.h>
 #include <linux/kernel.h>
 #include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/wakelock.h>
+#include <linux/suspend.h>
 #include "wcd9310.h"
 
 static int cfilt_adjust_ms = 10;
@@ -340,6 +343,9 @@
 	 */
 	struct work_struct hs_correct_plug_work_nogpio;
 
+	bool gpio_irq_resend;
+	struct wake_lock irq_resend_wlock;
+
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs_poke;
 	struct dentry *debugfs_mbhc;
@@ -7352,9 +7358,18 @@
 {
 	int r = IRQ_HANDLED;
 	struct snd_soc_codec *codec = data;
+	struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
 
 	if (unlikely(wcd9xxx_lock_sleep(codec->control_data) == false)) {
 		pr_warn("%s: failed to hold suspend\n", __func__);
+		/*
+		 * Give up this IRQ for now and mark it for resend so that it
+		 * can be handled after the system resumes
+		 */
+		TABLA_ACQUIRE_LOCK(tabla->codec_resource_lock);
+		tabla->gpio_irq_resend = true;
+		TABLA_RELEASE_LOCK(tabla->codec_resource_lock);
+		wake_lock_timeout(&tabla->irq_resend_wlock, HZ);
 		r = IRQ_NONE;
 	} else {
 		tabla_hs_gpio_handler(codec);
@@ -8267,6 +8282,15 @@
 		goto err_hphr_ocp_irq;
 	}
 	wcd9xxx_disable_irq(codec->control_data, TABLA_IRQ_HPH_PA_OCPR_FAULT);
+
+	/*
+	 * Register the suspend wake lock used when resending edge-triggered
+	 * gpio IRQs after system resume
+	 */
+	wake_lock_init(&tabla->irq_resend_wlock, WAKE_LOCK_SUSPEND,
+		       "tabla_gpio_irq_resend");
+	tabla->gpio_irq_resend = false;
+
 	for (i = 0; i < ARRAY_SIZE(tabla_dai); i++) {
 		switch (tabla_dai[i].id) {
 		case AIF1_PB:
@@ -8331,6 +8355,9 @@
 {
 	int i;
 	struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+
+	wake_lock_destroy(&tabla->irq_resend_wlock);
+
 	wcd9xxx_free_irq(codec->control_data, TABLA_IRQ_SLIMBUS, tabla);
 	wcd9xxx_free_irq(codec->control_data, TABLA_IRQ_MBHC_RELEASE, tabla);
 	wcd9xxx_free_irq(codec->control_data, TABLA_IRQ_MBHC_POTENTIAL, tabla);
@@ -8380,11 +8407,29 @@
 
 static int tabla_resume(struct device *dev)
 {
+	int irq;
 	struct platform_device *pdev = to_platform_device(dev);
 	struct tabla_priv *tabla = platform_get_drvdata(pdev);
+
 	dev_dbg(dev, "%s: system resume tabla %p\n", __func__, tabla);
-	if (tabla)
+	if (tabla) {
+		TABLA_ACQUIRE_LOCK(tabla->codec_resource_lock);
 		tabla->mbhc_last_resume = jiffies;
+		if (tabla->gpio_irq_resend) {
+			WARN_ON(!tabla->mbhc_cfg.gpio_irq);
+			tabla->gpio_irq_resend = false;
+
+			irq = tabla->mbhc_cfg.gpio_irq;
+			pr_debug("%s: Resending GPIO IRQ %d\n", __func__, irq);
+			irq_set_pending(irq);
+			check_irq_resend(irq_to_desc(irq), irq);
+
+			/* release suspend lock */
+			wake_unlock(&tabla->irq_resend_wlock);
+		}
+		TABLA_RELEASE_LOCK(tabla->codec_resource_lock);
+	}
+
 	return 0;
 }
 
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 62257b4..fac55b4 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -116,8 +116,18 @@
 				wake_up(&this_adm.wait[index]);
 				break;
 			case ADM_CMD_SHARED_MEM_MAP_REGIONS:
-				/* Block until memory handle comes back */
-				/* via ADM_CMDRSP_SHARED_MEM_MAP_REGIONS */
+				pr_debug("%s: ADM_CMD_SHARED_MEM_MAP_REGIONS\n",
+					__func__);
+				/* Should only come here if there is an APR */
+				/* error or malformed APR packet. Otherwise */
+				/* response will be returned as */
+				/* ADM_CMDRSP_SHARED_MEM_MAP_REGIONS */
+				if (payload[1] != 0) {
+					pr_err("%s: ADM map error, resuming\n",
+						__func__);
+					atomic_set(&this_adm.copp_stat[0], 1);
+					wake_up(&this_adm.wait[index]);
+				}
 				break;
 			default:
 				pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
@@ -247,24 +257,27 @@
 	get_audproc_cal(acdb_path, &aud_cal);
 
 	/* map & cache buffers used */
+	atomic_set(&mem_map_index, acdb_path);
 	if (((mem_addr_audproc[acdb_path].cal_paddr != aud_cal.cal_paddr)  &&
 		(aud_cal.cal_size > 0)) ||
 		(aud_cal.cal_size > mem_addr_audproc[acdb_path].cal_size)) {
 
-		atomic_set(&mem_map_index, acdb_path);
 		if (mem_addr_audproc[acdb_path].cal_paddr != 0)
 			adm_memory_unmap_regions(port_id,
 				&mem_addr_audproc[acdb_path].cal_paddr,
 				&size, 1);
 
 		result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
-						0, &aud_cal.cal_size, 1);
-		if (result < 0)
+						0, &size, 1);
+		if (result < 0) {
 			pr_err("ADM audproc mmap did not work! path = %d, addr = 0x%x, size = %d\n",
 				acdb_path, aud_cal.cal_paddr,
 				aud_cal.cal_size);
-		else
-			mem_addr_audproc[acdb_path] = aud_cal;
+		} else {
+			mem_addr_audproc[acdb_path].cal_paddr =
+							aud_cal.cal_paddr;
+			mem_addr_audproc[acdb_path].cal_size = size;
+		}
 	}
 
 	if (!send_adm_cal_block(port_id, &aud_cal))
@@ -278,24 +291,27 @@
 	get_audvol_cal(acdb_path, &aud_cal);
 
 	/* map & cache buffers used */
+	atomic_set(&mem_map_index, (acdb_path + MAX_AUDPROC_TYPES));
 	if (((mem_addr_audvol[acdb_path].cal_paddr != aud_cal.cal_paddr)  &&
 		(aud_cal.cal_size > 0))  ||
 		(aud_cal.cal_size > mem_addr_audvol[acdb_path].cal_size)) {
 
-		atomic_set(&mem_map_index, (acdb_path + MAX_AUDPROC_TYPES));
 		if (mem_addr_audvol[acdb_path].cal_paddr != 0)
 			adm_memory_unmap_regions(port_id,
 				&mem_addr_audvol[acdb_path].cal_paddr,
 				&size, 1);
 
 		result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
-						0, &aud_cal.cal_size, 1);
-		if (result < 0)
+						0, &size, 1);
+		if (result < 0) {
 			pr_err("ADM audvol mmap did not work! path = %d, addr = 0x%x, size = %d\n",
 				acdb_path, aud_cal.cal_paddr,
 				aud_cal.cal_size);
-		else
-			mem_addr_audvol[acdb_path] = aud_cal;
+		} else {
+			mem_addr_audvol[acdb_path].cal_paddr =
+							aud_cal.cal_paddr;
+			mem_addr_audvol[acdb_path].cal_size = size;
+		}
 	}
 
 	if (!send_adm_cal_block(port_id, &aud_cal))
@@ -664,8 +680,8 @@
 								APR_PKT_VER);
 	mmap_regions->hdr.pkt_size = cmd_size;
 	mmap_regions->hdr.src_port = 0;
-	mmap_regions->hdr.dest_port = 0;
-	mmap_regions->hdr.token = 0;
+	mmap_regions->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
+	mmap_regions->hdr.token = port_id;
 	mmap_regions->hdr.opcode = ADM_CMD_SHARED_MEM_MAP_REGIONS;
 	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
 	mmap_regions->num_regions = bufcnt & 0x00ff;
@@ -733,8 +749,8 @@
 							APR_PKT_VER);
 	unmap_regions.hdr.pkt_size = cmd_size;
 	unmap_regions.hdr.src_port = 0;
-	unmap_regions.hdr.dest_port = 0;
-	unmap_regions.hdr.token = 0;
+	unmap_regions.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
+	unmap_regions.hdr.token = port_id;
 	unmap_regions.hdr.opcode = ADM_CMD_SHARED_MEM_UNMAP_REGIONS;
 	unmap_regions.mem_map_handle = atomic_read(&mem_map_handles[
 						atomic_read(&mem_map_index)]);