Merge "msm: ADSPRPC: Fix error paths to avoid double-frees and other badness"
diff --git a/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt b/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt
index 88d69e0..86d863c 100644
--- a/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt
+++ b/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt
@@ -5,12 +5,26 @@
 Required properties:
   - compatible: Should be "qca,ar3002"
   - qca,bt-reset-gpio: GPIO pin to bring BT Controller out of reset
+  - qca,bt-vdd-io-supply: Bluetooth VDD IO regulator handle
+  - qca,bt-vdd-pa-supply: Bluetooth VDD PA regulator handle
 
 Optional properties:
-  None
+  - qca,bt-vdd-ldo-supply: Bluetooth VDD LDO regulator handle. Kept under optional
+		properties as some chipsets do not require an LDO or may use the same VDD IO supply.
+  - qca,bt-chip-pwd-supply: Chip power-down supply. Required when the Bluetooth module
+		and other modules such as WiFi co-exist on a single chip and share a
+		common GPIO to bring the chip out of reset.
+  - qca,bt-vdd-io-voltage-level: min and max voltages for the vdd io regulator
+  - qca,bt-vdd-pa-voltage-level: min and max voltages for the vdd pa regulator
+  - qca,bt-vdd-ldo-voltage-level: min and max voltages for the vdd ldo regulator
 
 Example:
   bt-ar3002 {
     compatible = "qca,ar3002";
     qca,bt-reset-gpio = <&pm8941_gpios 34 0>;
+    qca,bt-vdd-io-supply = <&pm8941_s3>;
+    qca,bt-vdd-pa-supply = <&pm8941_l19>;
+    qca,bt-chip-pwd-supply = <&ath_chip_pwd_l>;
+    qca,bt-vdd-io-voltage-level = <1800000 1800000>;
+    qca,bt-vdd-pa-voltage-level = <2900000 2900000>;
   };
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
index ac8ea73..4cbff52 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
@@ -16,6 +16,7 @@
 - qcom,firmware-name: Base name of the firmware image. Ex. "lpass"
 - qcom,gpio-err-fatal: GPIO used by the lpass to indicate error fatal to the apps.
 - qcom,gpio-force-stop: GPIO used by the apps to force the lpass to shutdown.
+- qcom,gpio-proxy-unvote: GPIO used by the lpass to indicate apps clock is ready.
 
 Optional properties:
 - vdd_pll-supply:     Reference to the regulator that supplies the PLL's rail.
@@ -32,8 +33,9 @@
 		vdd_cx-supply = <&pm8841_s2>;
 	        qcom,firmware-name = "lpass";
 
-		/* GPIO input from lpass */
+		/* GPIO inputs from lpass */
 		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
 
 		/* GPIO output to lpass */
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
diff --git a/arch/arm/boot/dts/apq8074-v2.dtsi b/arch/arm/boot/dts/apq8074-v2.dtsi
new file mode 100644
index 0000000..0f867e0
--- /dev/null
+++ b/arch/arm/boot/dts/apq8074-v2.dtsi
@@ -0,0 +1,48 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file. However, device definitions should be placed inside the
+ * msm8974.dtsi file.
+ */
+
+/include/ "msm8974-v2.dtsi"
+
+/ {
+	qcom,qseecom@7f00000 {
+		compatible = "qcom,qseecom";
+		reg = <0x07f00000 0x500000>;
+		reg-names = "secapp-region";
+		qcom,disk-encrypt-pipe-pair = <2>;
+		qcom,hlos-ce-hw-instance = <1>;
+		qcom,qsee-ce-hw-instance = <0>;
+		qcom,msm-bus,name = "qseecom-noc";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,active-only = <0>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<55 512 0 0>,
+				<55 512 3936000 393600>,
+				<55 512 3936000 393600>,
+				<55 512 3936000 393600>;
+	};
+};
+
+&memory_hole {
+	qcom,memblock-remove = <0x07f00000 0x8000000>; /* Address and size of the hole */
+};
+
+&qseecom {
+	status = "enabled";
+};
+
diff --git a/arch/arm/boot/dts/msmzinc-ion.dtsi b/arch/arm/boot/dts/apq8084-ion.dtsi
similarity index 100%
rename from arch/arm/boot/dts/msmzinc-ion.dtsi
rename to arch/arm/boot/dts/apq8084-ion.dtsi
diff --git a/arch/arm/boot/dts/msmzinc-sim.dts b/arch/arm/boot/dts/apq8084-sim.dts
similarity index 83%
rename from arch/arm/boot/dts/msmzinc-sim.dts
rename to arch/arm/boot/dts/apq8084-sim.dts
index e410344..cb16c07 100644
--- a/arch/arm/boot/dts/msmzinc-sim.dts
+++ b/arch/arm/boot/dts/apq8084-sim.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-/include/ "msmzinc.dtsi"
+/include/ "apq8084.dtsi"
 
 / {
-	model = "Qualcomm MSM ZINC Simulator";
-	compatible = "qcom,msmzinc-sim", "qcom,msmzinc", "qcom,sim";
+	model = "Qualcomm APQ 8084 Simulator";
+	compatible = "qcom,apq8084-sim", "qcom,apq8084", "qcom,sim";
 	qcom,msm-id = <178 0 0>;
 
 	aliases {
diff --git a/arch/arm/boot/dts/msmzinc.dtsi b/arch/arm/boot/dts/apq8084.dtsi
similarity index 95%
rename from arch/arm/boot/dts/msmzinc.dtsi
rename to arch/arm/boot/dts/apq8084.dtsi
index 642597d..c2e5ef4 100644
--- a/arch/arm/boot/dts/msmzinc.dtsi
+++ b/arch/arm/boot/dts/apq8084.dtsi
@@ -11,11 +11,11 @@
  */
 
 /include/ "skeleton.dtsi"
-/include/ "msmzinc-ion.dtsi"
+/include/ "apq8084-ion.dtsi"
 
 / {
-	model = "Qualcomm MSM ZINC";
-	compatible = "qcom,msmzinc";
+	model = "Qualcomm APQ 8084";
+	compatible = "qcom,apq8084";
 	interrupt-parent = <&intc>;
 
 	intc: interrupt-controller@f9000000 {
diff --git a/arch/arm/boot/dts/msm-pm8110.dtsi b/arch/arm/boot/dts/msm-pm8110.dtsi
index fe4cada..b88b991 100644
--- a/arch/arm/boot/dts/msm-pm8110.dtsi
+++ b/arch/arm/boot/dts/msm-pm8110.dtsi
@@ -29,16 +29,18 @@
 			#size-cells = <1>;
 			status = "disabled";
 
-			qcom,chg-vddmax-mv = <4200>;
-			qcom,chg-vddsafe-mv = <4200>;
-			qcom,chg-vinmin-mv = <4200>;
-			qcom,chg-vbatdet-mv = <4100>;
-			qcom,chg-ibatmax-ma = <1500>;
-			qcom,chg-ibatterm-ma = <200>;
-			qcom,chg-ibatsafe-ma = <1500>;
-			qcom,chg-thermal-mitigation = <1500 700 600 325>;
+			qcom,vddmax-mv = <4200>;
+			qcom,vddsafe-mv = <4200>;
+			qcom,vinmin-mv = <4200>;
+			qcom,vbatdet-mv = <4100>;
+			qcom,ibatmax-ma = <1500>;
+			qcom,ibatterm-ma = <200>;
+			qcom,ibatsafe-ma = <1500>;
+			qcom,thermal-mitigation = <1500 700 600 325>;
+			qcom,vbatdet-delta-mv = <350>;
+			qcom,tchg-mins = <150>;
 
-			qcom,chg-chgr@1000 {
+			qcom,chgr@1000 {
 				status = "disabled";
 				reg = <0x1000 0x100>;
 				interrupts =	<0x0 0x10 0x0>,
@@ -60,7 +62,7 @@
 							"chg-done";
 			};
 
-			qcom,chg-buck@1100 {
+			qcom,buck@1100 {
 				status = "disabled";
 				reg = <0x1100 0x100>;
 				interrupts =	<0x0 0x11 0x0>,
@@ -80,7 +82,7 @@
 							"vdd-loop";
 			};
 
-			qcom,chg-bat-if@1200 {
+			qcom,bat-if@1200 {
 				status = "disabled";
 				reg = <0x1200 0x100>;
 				interrupts =	<0x0 0x12 0x0>,
@@ -96,7 +98,7 @@
 							"psi";
 			};
 
-			qcom,chg-usb-chgpth@1300 {
+			qcom,usb-chgpth@1300 {
 				status = "disabled";
 				reg = <0x1300 0x100>;
 				interrupts =	<0 0x13 0x0>,
diff --git a/arch/arm/boot/dts/msm8226-ion.dtsi b/arch/arm/boot/dts/msm8226-ion.dtsi
index 9cef5a9..f433a49 100644
--- a/arch/arm/boot/dts/msm8226-ion.dtsi
+++ b/arch/arm/boot/dts/msm8226-ion.dtsi
@@ -50,5 +50,19 @@
 			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
 			qcom,memory-reservation-size = <0x314000>;
 		};
+		qcom,ion-heap@23 { /* OTHER PIL HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <23>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-fixed = <0x06400000 0x2000000>;
+		};
+		qcom,ion-heap@26 { /* MODEM PIL HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <26>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-fixed = <0x08400000 0x4E00000>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index b949d3b..9ee78a4 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -756,8 +756,9 @@
 
 		qcom,firmware-name = "adsp";
 
-		/* GPIO input from lpass */
+		/* GPIO inputs from lpass */
 		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
 
 		/* GPIO output to lpass */
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
@@ -794,7 +795,7 @@
 
 	qcom,msm-mem-hole {
 		compatible = "qcom,msm-mem-hole";
-		qcom,memblock-remove = <0x8400000 0x7b00000>; /* Address and Size of Hole */
+		qcom,memblock-remove = <0x6400000 0x9b00000>; /* Address and Size of Hole */
 	};
 
 	tsens: tsens@fc4a8000 {
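
A quick cross-check of the widened memory hole above against the fixed ION heaps added in
msm8226-ion.dtsi (editorial arithmetic, not part of the patch; the remainder of the hole is
assumed to still cover the same carve-outs it did before):

	old hole:       0x08400000 + 0x07b00000 = 0x0ff00000  (end of hole)
	new hole:       0x06400000 + 0x09b00000 = 0x0ff00000  (same end, start lowered by 0x2000000)
	OTHER PIL heap: 0x06400000 + 0x02000000 = 0x08400000  (ends where the MODEM PIL heap starts)
	MODEM PIL heap: 0x08400000 + 0x04e00000 = 0x0d200000  (inside the hole)
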
diff --git a/arch/arm/boot/dts/msm8610-cdp.dts b/arch/arm/boot/dts/msm8610-cdp.dts
index a8a2446..9b114cc 100644
--- a/arch/arm/boot/dts/msm8610-cdp.dts
+++ b/arch/arm/boot/dts/msm8610-cdp.dts
@@ -17,7 +17,8 @@
 / {
 	model = "Qualcomm MSM 8610 CDP";
 	compatible = "qcom,msm8610-cdp", "qcom,msm8610", "qcom,cdp";
-	qcom,msm-id = <147 1 0>, <165 1 0>;
+	qcom,msm-id = <147 1 0>, <165 1 0>, <161 1 0>, <162 1 0>,
+		      <163 1 0>, <164 1 0>, <166 1 0>;
 
 	serial@f991e000 {
 		status = "ok";
@@ -100,18 +101,18 @@
 
 &pm8110_chg {
 	status = "ok";
-	qcom,chg-charging-disabled;
-	qcom,chg-use-default-batt-values;
+	qcom,charging-disabled;
+	qcom,use-default-batt-values;
 
-	qcom,chg-chgr@1000 {
+	qcom,chgr@1000 {
 		status = "ok";
 	};
 
-	qcom,chg-buck@1100 {
+	qcom,buck@1100 {
 		status = "ok";
 	};
 
-	qcom,chg-usb-chgpth@1300 {
+	qcom,usb-chgpth@1300 {
 		status = "ok";
 	};
 
diff --git a/arch/arm/boot/dts/msm8610-gpu.dtsi b/arch/arm/boot/dts/msm8610-gpu.dtsi
index 5e57430..5580f73 100644
--- a/arch/arm/boot/dts/msm8610-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8610-gpu.dtsi
@@ -27,8 +27,9 @@
 		qcom,idle-timeout = <8>; /* <HZ/12> */
 		qcom,nap-allowed = <1>;
 		qcom,strtstp-sleepwake;
-		qcom,clk-map = <0x000001E>; /* KGSL_CLK_CORE |
-			KGSL_CLK_IFACE | KGSL_CLK_MEM | KGSL_CLK_MEM_IFACE */
+		qcom,clk-map = <0x000005E>; /* KGSL_CLK_CORE |
+			KGSL_CLK_IFACE | KGSL_CLK_MEM | KGSL_CLK_MEM_IFACE |
+			KGSL_CLK_ALT_MEM_IFACE */
 
 		/* Bus Scale Settings */
 		qcom,msm-bus,name = "grp3d";
diff --git a/arch/arm/boot/dts/msm8610-mtp.dts b/arch/arm/boot/dts/msm8610-mtp.dts
index b1511ff..3a26376 100644
--- a/arch/arm/boot/dts/msm8610-mtp.dts
+++ b/arch/arm/boot/dts/msm8610-mtp.dts
@@ -17,9 +17,10 @@
 / {
 	model = "Qualcomm MSM 8610 MTP";
 	compatible = "qcom,msm8610-mtp", "qcom,msm8610", "qcom,mtp";
-	qcom,msm-id = <147 8 0>, <165 8 0>;
+	qcom,msm-id = <147 8 0>, <165 8 0>, <161 8 0>, <162 8 0>,
+		      <163 8 0>, <164 8 0>, <166 8 0>;
 
-	serial@f991f000 {
+	serial@f991e000 {
 		status = "ok";
 	};
 };
@@ -100,21 +101,21 @@
 
 &pm8110_chg {
 	status = "ok";
-	qcom,chg-charging-disabled;
+	qcom,charging-disabled;
 
-	qcom,chg-chgr@1000 {
+	qcom,chgr@1000 {
 		status = "ok";
 	};
 
-	qcom,chg-buck@1100 {
+	qcom,buck@1100 {
 		status = "ok";
 	};
 
-	qcom,chg-bat-if@1200 {
+	qcom,bat-if@1200 {
 		status = "ok";
 	};
 
-	qcom,chg-usb-chgpth@1300 {
+	qcom,usb-chgpth@1300 {
 		status = "ok";
 	};
 
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index b6a4315..44ba72f 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -414,7 +414,6 @@
 		/* 190,ee0_krait_hlos_spmi_periph_irq */
 		/* 187,channel_0_krait_hlos_trans_done_irq */
 		interrupts = <0 190 0>, <0 187 0>;
-		qcom,not-wakeup;
 		qcom,pmic-arb-ee = <0>;
 		qcom,pmic-arb-channel = <0>;
 	};
@@ -640,8 +639,9 @@
 		vdd_cx-supply = <&pm8110_s1_corner>;
 		qcom,firmware-name = "adsp";
 
-		/* GPIO input from lpass */
+		/* GPIO inputs from lpass */
 		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
 
 		/* GPIO output to lpass */
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor-cdp.dtsi
index df0db7e..b574a31 100644
--- a/arch/arm/boot/dts/msm8974-camera-sensor-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera-sensor-cdp.dtsi
@@ -111,7 +111,7 @@
 		reg = <0x6c 0x0>;
 		qcom,slave-id = <0x6c 0x300A 0x2720>;
 		qcom,csiphy-sd-index = <2>;
-		qcom,csid-sd-index = <0>;
+		qcom,csid-sd-index = <2>;
 		qcom,mount-angle = <90>;
 		qcom,sensor-name = "ov2720";
 		cam_vdig-supply = <&pm8941_l3>;
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor-fluid.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor-fluid.dtsi
index f58c1e2..748d5f7 100644
--- a/arch/arm/boot/dts/msm8974-camera-sensor-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera-sensor-fluid.dtsi
@@ -112,7 +112,7 @@
 		reg = <0x6c>;
 		qcom,slave-id = <0x6c 0x300A 0x2720>;
 		qcom,csiphy-sd-index = <2>;
-		qcom,csid-sd-index = <0>;
+		qcom,csid-sd-index = <2>;
 		qcom,mount-angle = <90>;
 		qcom,sensor-name = "ov2720";
 		cam_vdig-supply = <&pm8941_l3>;
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor-mtp.dtsi
index 767a705..53f6e9e 100644
--- a/arch/arm/boot/dts/msm8974-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera-sensor-mtp.dtsi
@@ -113,7 +113,7 @@
 		reg = <0x6c>;
 		qcom,slave-id = <0x6c 0x300A 0x2720>;
 		qcom,csiphy-sd-index = <2>;
-		qcom,csid-sd-index = <0>;
+		qcom,csid-sd-index = <2>;
 		qcom,mount-angle = <90>;
 		qcom,sensor-name = "ov2720";
 		cam_vdig-supply = <&pm8941_l3>;
diff --git a/arch/arm/boot/dts/msm8974-camera.dtsi b/arch/arm/boot/dts/msm8974-camera.dtsi
index 3a78a15..94a28f7 100644
--- a/arch/arm/boot/dts/msm8974-camera.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera.dtsi
@@ -23,8 +23,9 @@
 	qcom,csiphy@fda0ac00 {
 		cell-index = <0>;
 		compatible = "qcom,csiphy";
-		reg = <0xfda0ac00 0x200>;
-		reg-names = "csiphy";
+		reg = <0xfda0ac00 0x200>,
+			<0xfda00030 0x4>;
+		reg-names = "csiphy", "csiphy_clk_mux";
 		interrupts = <0 78 0>;
 		interrupt-names = "csiphy";
 	};
@@ -32,8 +33,9 @@
 	qcom,csiphy@fda0b000 {
 		cell-index = <1>;
 		compatible = "qcom,csiphy";
-		reg = <0xfda0b000 0x200>;
-		reg-names = "csiphy";
+		reg = <0xfda0b000 0x200>,
+			<0xfda00038 0x4>;
+		reg-names = "csiphy", "csiphy_clk_mux";
 		interrupts = <0 79 0>;
 		interrupt-names = "csiphy";
 	};
@@ -41,8 +43,9 @@
 	qcom,csiphy@fda0b400 {
 		cell-index = <2>;
 		compatible = "qcom,csiphy";
-		reg = <0xfda0b400 0x200>;
-		reg-names = "csiphy";
+		reg = <0xfda0b400 0x200>,
+			<0xfda00040 0x4>;
+		reg-names = "csiphy", "csiphy_clk_mux";
 		interrupts = <0 80 0>;
 		interrupt-names = "csiphy";
 	};
@@ -94,8 +97,9 @@
 	qcom,ispif@fda0A000 {
 		cell-index = <0>;
 		compatible = "qcom,ispif";
-		reg = <0xfda0A000 0x500>;
-		reg-names = "ispif";
+		reg = <0xfda0A000 0x500>,
+			<0xfda00020 0x10>;
+		reg-names = "ispif", "csi_clk_mux";
 		interrupts = <0 55 0>;
 		interrupt-names = "ispif";
 	};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index a9685cc..c4645f3 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -834,8 +834,9 @@
 
 		qcom,firmware-name = "adsp";
 
-		/* GPIO input from lpass */
+		/* GPIO inputs from lpass */
 		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
 
 		/* GPIO output to lpass */
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
@@ -1197,7 +1198,7 @@
                reg = <0xf9bff000 0x200>;
         };
 
-	qcom,qseecom@fe806000 {
+	qseecom: qcom,qseecom@7f00000 {
 		compatible = "qcom,qseecom";
 		reg = <0x7f00000 0x500000>;
 		reg-names = "secapp-region";
@@ -1369,8 +1370,8 @@
 		qcom,core-control-mask = <0xe>;
 		qcom,vdd-restriction-temp = <5>;
 		qcom,vdd-restriction-temp-hysteresis = <10>;
-		qcom,pmic-sw-mode-temp = <90>;
-		qcom,pmic-sw-mode-temp-hysteresis = <70>;
+		qcom,pmic-sw-mode-temp = <85>;
+		qcom,pmic-sw-mode-temp-hysteresis = <75>;
 		qcom,pmic-sw-mode-regs = "vdd_dig";
 		vdd_dig-supply = <&pm8841_s2_floor_corner>;
 		vdd_gfx-supply = <&pm8841_s4_floor_corner>;
@@ -1396,7 +1397,7 @@
 		qcom,rx-ring-size = <64>;
 	};
 
-        qcom,msm-mem-hole {
+        memory_hole: qcom,msm-mem-hole {
                 compatible = "qcom,msm-mem-hole";
                 qcom,memblock-remove = <0x7f00000 0x8000000>; /* Address and Size of Hole */
         };
diff --git a/arch/arm/configs/msmzinc_defconfig b/arch/arm/configs/apq8084_defconfig
similarity index 99%
rename from arch/arm/configs/msmzinc_defconfig
rename to arch/arm/configs/apq8084_defconfig
index d0ea87a..c47117e 100644
--- a/arch/arm/configs/msmzinc_defconfig
+++ b/arch/arm/configs/apq8084_defconfig
@@ -34,7 +34,7 @@
 CONFIG_EFI_PARTITION=y
 CONFIG_IOSCHED_TEST=y
 CONFIG_ARCH_MSM=y
-CONFIG_ARCH_MSMZINC=y
+CONFIG_ARCH_APQ8084=y
 CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER=y
 CONFIG_MSM_RPM_SMD=y
 # CONFIG_MSM_STACKED_MEMORY is not set
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index aa2c028..23244dc 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -239,6 +239,7 @@
 CONFIG_CMA=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
+CONFIG_TSPP=m
 CONFIG_HAPTIC_ISA1200=y
 CONFIG_QSEECOM=y
 CONFIG_QPNP_MISC=y
@@ -321,6 +322,7 @@
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_DEV=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_DVB_CORE=m
 # CONFIG_MSM_CAMERA is not set
 CONFIG_MT9M114=y
 CONFIG_OV2720=y
@@ -338,6 +340,8 @@
 CONFIG_MSMB_JPEG=y
 CONFIG_MSM_VIDC_V4L2=y
 CONFIG_MSM_WFD=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
 CONFIG_VIDEOBUF2_MSM_MEM=y
 CONFIG_USB_VIDEO_CLASS=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
@@ -387,6 +391,8 @@
 CONFIG_USB_STORAGE_CYPRESS_ATACB=y
 CONFIG_USB_STORAGE_ENE_UB6250=y
 CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_QCOM_DIAG_BRIDGE=y
+CONFIG_USB_QCOM_KS_BRIDGE=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_CI13XXX_MSM=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index c9f068a..df1837f 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -85,6 +85,7 @@
 CONFIG_MSM_ENABLE_WDOG_DEBUG_CONTROL=y
 CONFIG_MSM_UARTDM_Core_v14=y
 CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_XPU_ERR_FATAL=y
 CONFIG_STRICT_MEMORY_RWX=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -396,6 +397,8 @@
 CONFIG_USB_STORAGE_CYPRESS_ATACB=y
 CONFIG_USB_STORAGE_ENE_UB6250=y
 CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_QCOM_DIAG_BRIDGE=y
+CONFIG_USB_QCOM_KS_BRIDGE=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_CI13XXX_MSM=y
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index a6e6914..cc85d70 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -286,8 +286,8 @@
 	select MSM_RPM_LOG
 	select ARCH_WANT_KMAP_ATOMIC_FLUSH
 
-config ARCH_MSMZINC
-	bool "MSMZINC"
+config ARCH_APQ8084
+	bool "APQ8084"
 	select ARCH_MSM_KRAITMP
 	select GPIO_MSM_V3
 	select ARM_GIC
@@ -1086,7 +1086,7 @@
 	default "0x80200000" if ARCH_MSM8960
 	default "0x80200000" if ARCH_MSM8930
 	default "0x00000000" if ARCH_MSM8974
-	default "0x00000000" if ARCH_MSMZINC
+	default "0x00000000" if ARCH_APQ8084
 	default "0x00000000" if ARCH_MPQ8092
 	default "0x00000000" if ARCH_MSM8226
 	default "0x00000000" if ARCH_MSM8610
@@ -1239,13 +1239,13 @@
                   Say Y here if you want the debug print routines to direct
                   their output to the serial port on MPQ8092 devices.
 
-	config DEBUG_MSMZINC_UART
-                bool "Kernel low-level debugging messages via MSMZINC UART"
-                depends on ARCH_MSMZINC
+	config DEBUG_APQ8084_UART
+                bool "Kernel low-level debugging messages via APQ8084 UART"
+                depends on ARCH_APQ8084
                 select MSM_HAS_DEBUG_UART_HS_V14
                 help
                   Say Y here if you want the debug print routines to direct
-                  their output to the serial port on MSMZINC devices.
+                  their output to the serial port on APQ8084 devices.
 endchoice
 
 choice
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 7c78395..a45f5ec 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -120,7 +120,7 @@
 ifndef CONFIG_ARCH_MSM9625
 ifndef CONFIG_ARCH_MPQ8092
 ifndef CONFIG_ARCH_MSM8610
-ifndef CONFIG_ARCH_MSMZINC
+ifndef CONFIG_ARCH_APQ8084
 ifndef CONFIG_ARCH_MSMKRYPTON
 	obj-y += nand_partitions.o
 endif
@@ -297,7 +297,7 @@
 obj-$(CONFIG_MACH_MPQ8064_DTV) += board-8064-all.o board-8064-regulator.o
 obj-$(CONFIG_ARCH_MSM9615) += board-9615.o devices-9615.o board-9615-regulator.o board-9615-gpiomux.o board-9615-storage.o board-9615-display.o
 obj-$(CONFIG_ARCH_MSM9615) += clock-local.o clock-9615.o acpuclock-9615.o clock-rpm.o clock-pll.o
-obj-$(CONFIG_ARCH_MSMZINC) += board-zinc.o board-zinc-gpiomux.o
+obj-$(CONFIG_ARCH_APQ8084) += board-8084.o board-8084-gpiomux.o
 obj-$(CONFIG_ARCH_MSM8974) += board-8974.o board-8974-gpiomux.o
 obj-$(CONFIG_ARCH_MSM8974) += acpuclock-8974.o
 obj-$(CONFIG_ARCH_MSM8974) += clock-local2.o clock-pll.o clock-8974.o clock-rpm.o clock-voter.o clock-mdss-8974.o
@@ -375,7 +375,7 @@
 obj-$(CONFIG_ARCH_MPQ8092) += gpiomux-v2.o gpiomux.o
 obj-$(CONFIG_ARCH_MSM8226) += gpiomux-v2.o gpiomux.o
 obj-$(CONFIG_ARCH_MSM8610) += gpiomux-v2.o gpiomux.o
-obj-$(CONFIG_ARCH_MSMZINC) += gpiomux-v2.o gpiomux.o
+obj-$(CONFIG_ARCH_APQ8084) += gpiomux-v2.o gpiomux.o
 
 obj-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += idle_stats_device.o
 obj-$(CONFIG_MSM_DCVS) += msm_dcvs_scm.o msm_dcvs.o msm_mpdecision.o
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 8c366da..75191d6 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -58,9 +58,9 @@
         dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2-liquid.dtb
         dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2-mtp.dtb
 
-# MSMZINC
-   zreladdr-$(CONFIG_ARCH_MSMZINC)	:= 0x00008000
-        dtb-$(CONFIG_ARCH_MSMZINC)	+= msmzinc-sim.dtb
+# APQ8084
+   zreladdr-$(CONFIG_ARCH_APQ8084)	:= 0x00008000
+        dtb-$(CONFIG_ARCH_APQ8084)	+= apq8084-sim.dtb
 
 # MSMKRYPTON
    zreladdr-$(CONFIG_ARCH_MSMKRYPTON)	:= 0x00208000
diff --git a/arch/arm/mach-msm/board-zinc-gpiomux.c b/arch/arm/mach-msm/board-8084-gpiomux.c
similarity index 94%
rename from arch/arm/mach-msm/board-zinc-gpiomux.c
rename to arch/arm/mach-msm/board-8084-gpiomux.c
index ac4daa8..8d5bb49 100644
--- a/arch/arm/mach-msm/board-zinc-gpiomux.c
+++ b/arch/arm/mach-msm/board-8084-gpiomux.c
@@ -17,7 +17,7 @@
 #include <mach/board.h>
 #include <mach/gpiomux.h>
 
-void __init msmzinc_init_gpiomux(void)
+void __init apq8084_init_gpiomux(void)
 {
 	int rc;
 
diff --git a/arch/arm/mach-msm/board-zinc.c b/arch/arm/mach-msm/board-8084.c
similarity index 67%
rename from arch/arm/mach-msm/board-zinc.c
rename to arch/arm/mach-msm/board-8084.c
index 444444f..828ae9c 100644
--- a/arch/arm/mach-msm/board-zinc.c
+++ b/arch/arm/mach-msm/board-8084.c
@@ -32,7 +32,7 @@
 #include "devices.h"
 #include "platsmp.h"
 
-static struct memtype_reserve msmzinc_reserve_table[] __initdata = {
+static struct memtype_reserve apq8084_reserve_table[] __initdata = {
 	[MEMTYPE_SMI] = {
 	},
 	[MEMTYPE_EBI0] = {
@@ -43,27 +43,27 @@
 	},
 };
 
-static int msmzinc_paddr_to_memtype(phys_addr_t paddr)
+static int apq8084_paddr_to_memtype(phys_addr_t paddr)
 {
 	return MEMTYPE_EBI1;
 }
 
-static struct reserve_info msmzinc_reserve_info __initdata = {
-	.memtype_reserve_table = msmzinc_reserve_table,
-	.paddr_to_memtype = msmzinc_paddr_to_memtype,
+static struct reserve_info apq8084_reserve_info __initdata = {
+	.memtype_reserve_table = apq8084_reserve_table,
+	.paddr_to_memtype = apq8084_paddr_to_memtype,
 };
 
-void __init msmzinc_reserve(void)
+void __init apq8084_reserve(void)
 {
-	reserve_info = &msmzinc_reserve_info;
-	of_scan_flat_dt(dt_scan_for_memory_reserve, msmzinc_reserve_table);
+	reserve_info = &apq8084_reserve_info;
+	of_scan_flat_dt(dt_scan_for_memory_reserve, apq8084_reserve_table);
 	msm_reserve();
 }
 
-static void __init msmzinc_early_memory(void)
+static void __init apq8084_early_memory(void)
 {
-	reserve_info = &msmzinc_reserve_info;
-	of_scan_flat_dt(dt_scan_for_memory_hole, msmzinc_reserve_table);
+	reserve_info = &apq8084_reserve_info;
+	of_scan_flat_dt(dt_scan_for_memory_hole, apq8084_reserve_table);
 }
 
 static struct clk_lookup msm_clocks_dummy[] = {
@@ -82,46 +82,46 @@
  * into this category, and thus the driver should not be added here. The
  * EPROBE_DEFER can satisfy most dependency problems.
  */
-void __init msmzinc_add_drivers(void)
+void __init apq8084_add_drivers(void)
 {
 	msm_smd_init();
 	msm_clock_init(&msm_dummy_clock_init_data);
 }
 
-static void __init msmzinc_map_io(void)
+static void __init apq8084_map_io(void)
 {
-	msm_map_zinc_io();
+	msm_map_8084_io();
 }
 
-void __init msmzinc_init(void)
+void __init apq8084_init(void)
 {
 	if (socinfo_init() < 0)
 		pr_err("%s: socinfo_init() failed\n", __func__);
 
-	msmzinc_init_gpiomux();
+	apq8084_init_gpiomux();
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-	msmzinc_add_drivers();
+	apq8084_add_drivers();
 }
 
-void __init msmzinc_init_very_early(void)
+void __init apq8084_init_very_early(void)
 {
-	msmzinc_early_memory();
+	apq8084_early_memory();
 }
 
-static const char *msmzinc_dt_match[] __initconst = {
-	"qcom,msmzinc",
+static const char *apq8084_dt_match[] __initconst = {
+	"qcom,apq8084",
 	NULL
 };
 
-DT_MACHINE_START(MSMZINC_DT, "Qualcomm MSM ZINC (Flattened Device Tree)")
-	.map_io = msmzinc_map_io,
+DT_MACHINE_START(APQ8084_DT, "Qualcomm APQ 8084 (Flattened Device Tree)")
+	.map_io = apq8084_map_io,
 	.init_irq = msm_dt_init_irq,
-	.init_machine = msmzinc_init,
+	.init_machine = apq8084_init,
 	.handle_irq = gic_handle_irq,
 	.timer = &msm_dt_timer,
-	.dt_compat = msmzinc_dt_match,
-	.reserve = msmzinc_reserve,
-	.init_very_early = msmzinc_init_very_early,
+	.dt_compat = apq8084_dt_match,
+	.reserve = apq8084_reserve,
+	.init_very_early = apq8084_init_very_early,
 	.restart = msm_restart,
 	.smp = &msm8974_smp_ops,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index cfa1628..3eed219 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -174,6 +174,7 @@
 
 static const char *msm8974_dt_match[] __initconst = {
 	"qcom,msm8974",
+	"qcom,apq8074",
 	NULL
 };
 
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index 18c922d..6d76c41 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -2697,7 +2697,6 @@
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
 		.dbg_name = "q6ss_xo_clk",
-		.parent = &xo.c,
 		.ops = &clk_ops_branch,
 		CLK_INIT(q6ss_xo_clk.c),
 	},
diff --git a/arch/arm/mach-msm/clock-8610.c b/arch/arm/mach-msm/clock-8610.c
index fae09d0..3cb3ea4 100644
--- a/arch/arm/mach-msm/clock-8610.c
+++ b/arch/arm/mach-msm/clock-8610.c
@@ -2245,7 +2245,6 @@
 	.bcr_reg = LPASS_Q6SS_BCR,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
-		.parent = &gcc_xo_clk_src.c,
 		.dbg_name = "q6ss_xo_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(q6ss_xo_clk.c),
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 2de8799..707e6b6 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -4947,68 +4947,75 @@
 		"fda0b400.qcom,csiphy"),
 	CLK_LOOKUP("csiphy_timer_clk", camss_phy2_csi2phytimer_clk.c,
 		"fda0b400.qcom,csiphy"),
+
 	/* CSID clocks */
-	CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
-		"fda08000.qcom,csid"),
 	CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
-		"fda08000.qcom,csid"),
-	CLK_LOOKUP("csi0_ahb_clk", camss_csi0_ahb_clk.c, "fda08000.qcom,csid"),
-	CLK_LOOKUP("csi0_src_clk", csi0_clk_src.c, "fda08000.qcom,csid"),
-	CLK_LOOKUP("csi0_phy_clk", camss_csi0phy_clk.c, "fda08000.qcom,csid"),
-	CLK_LOOKUP("csi0_clk", camss_csi0_clk.c, "fda08000.qcom,csid"),
-	CLK_LOOKUP("csi0_pix_clk", camss_csi0pix_clk.c, "fda08000.qcom,csid"),
-	CLK_LOOKUP("csi0_rdi_clk", camss_csi0rdi_clk.c, "fda08000.qcom,csid"),
+					"fda08000.qcom,csid"),
+	CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
+					"fda08000.qcom,csid"),
+	CLK_LOOKUP("csi_ahb_clk", camss_csi0_ahb_clk.c,
+					"fda08000.qcom,csid"),
+	CLK_LOOKUP("csi_src_clk", csi0_clk_src.c,
+					"fda08000.qcom,csid"),
+	CLK_LOOKUP("csi_phy_clk", camss_csi0phy_clk.c,
+					"fda08000.qcom,csid"),
+	CLK_LOOKUP("csi_clk", camss_csi0_clk.c,
+					"fda08000.qcom,csid"),
+	CLK_LOOKUP("csi_pix_clk", camss_csi0pix_clk.c,
+					"fda08000.qcom,csid"),
+	CLK_LOOKUP("csi_rdi_clk", camss_csi0rdi_clk.c,
+					"fda08000.qcom,csid"),
 
-	CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
-		"fda08400.qcom,csid"),
 	CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
-		"fda08400.qcom,csid"),
-	CLK_LOOKUP("csi0_ahb_clk", camss_csi0_ahb_clk.c, "fda08400.qcom,csid"),
-	CLK_LOOKUP("csi1_ahb_clk", camss_csi1_ahb_clk.c, "fda08400.qcom,csid"),
-	CLK_LOOKUP("csi0_src_clk", csi0_clk_src.c, "fda08400.qcom,csid"),
-	CLK_LOOKUP("csi1_src_clk", csi1_clk_src.c, "fda08400.qcom,csid"),
-	CLK_LOOKUP("csi0_phy_clk", camss_csi0phy_clk.c, "fda08400.qcom,csid"),
-	CLK_LOOKUP("csi1_phy_clk", camss_csi1phy_clk.c, "fda08400.qcom,csid"),
-	CLK_LOOKUP("csi0_clk", camss_csi0_clk.c, "fda08400.qcom,csid"),
-	CLK_LOOKUP("csi1_clk", camss_csi1_clk.c, "fda08400.qcom,csid"),
-	CLK_LOOKUP("csi0_pix_clk", camss_csi0pix_clk.c, "fda08400.qcom,csid"),
-	CLK_LOOKUP("csi1_pix_clk", camss_csi1pix_clk.c, "fda08400.qcom,csid"),
-	CLK_LOOKUP("csi0_rdi_clk", camss_csi0rdi_clk.c, "fda08400.qcom,csid"),
-	CLK_LOOKUP("csi1_rdi_clk", camss_csi1rdi_clk.c, "fda08400.qcom,csid"),
+					"fda08400.qcom,csid"),
+	CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
+					"fda08400.qcom,csid"),
+	CLK_LOOKUP("csi_ahb_clk", camss_csi1_ahb_clk.c,
+					"fda08400.qcom,csid"),
+	CLK_LOOKUP("csi_src_clk", csi1_clk_src.c,
+					"fda08400.qcom,csid"),
+	CLK_LOOKUP("csi_phy_clk", camss_csi1phy_clk.c,
+					"fda08400.qcom,csid"),
+	CLK_LOOKUP("csi_clk", camss_csi1_clk.c,
+					"fda08400.qcom,csid"),
+	CLK_LOOKUP("csi_pix_clk", camss_csi1pix_clk.c,
+					"fda08400.qcom,csid"),
+	CLK_LOOKUP("csi_rdi_clk", camss_csi1rdi_clk.c,
+					"fda08400.qcom,csid"),
 
-	CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
-		"fda08800.qcom,csid"),
 	CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
-		"fda08800.qcom,csid"),
-	CLK_LOOKUP("csi0_ahb_clk", camss_csi0_ahb_clk.c, "fda08800.qcom,csid"),
-	CLK_LOOKUP("csi2_ahb_clk", camss_csi2_ahb_clk.c, "fda08800.qcom,csid"),
-	CLK_LOOKUP("csi0_src_clk", csi0_clk_src.c, "fda08800.qcom,csid"),
-	CLK_LOOKUP("csi2_src_clk", csi2_clk_src.c, "fda08800.qcom,csid"),
-	CLK_LOOKUP("csi0_phy_clk", camss_csi0phy_clk.c, "fda08800.qcom,csid"),
-	CLK_LOOKUP("csi2_phy_clk", camss_csi2phy_clk.c, "fda08800.qcom,csid"),
-	CLK_LOOKUP("csi0_clk", camss_csi0_clk.c, "fda08800.qcom,csid"),
-	CLK_LOOKUP("csi2_clk", camss_csi2_clk.c, "fda08800.qcom,csid"),
-	CLK_LOOKUP("csi0_pix_clk", camss_csi0pix_clk.c, "fda08800.qcom,csid"),
-	CLK_LOOKUP("csi2_pix_clk", camss_csi2pix_clk.c, "fda08800.qcom,csid"),
-	CLK_LOOKUP("csi0_rdi_clk", camss_csi0rdi_clk.c, "fda08800.qcom,csid"),
-	CLK_LOOKUP("csi2_rdi_clk", camss_csi2rdi_clk.c, "fda08800.qcom,csid"),
+					"fda08800.qcom,csid"),
+	CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
+					"fda08800.qcom,csid"),
+	CLK_LOOKUP("csi_ahb_clk", camss_csi2_ahb_clk.c,
+					"fda08800.qcom,csid"),
+	CLK_LOOKUP("csi_src_clk", csi2_clk_src.c,
+					"fda08800.qcom,csid"),
+	CLK_LOOKUP("csi_phy_clk", camss_csi2phy_clk.c,
+					"fda08800.qcom,csid"),
+	CLK_LOOKUP("csi_clk", camss_csi2_clk.c,
+					"fda08800.qcom,csid"),
+	CLK_LOOKUP("csi_pix_clk", camss_csi2pix_clk.c,
+					"fda08800.qcom,csid"),
+	CLK_LOOKUP("csi_rdi_clk", camss_csi2rdi_clk.c,
+					"fda08800.qcom,csid"),
 
-	CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
-		"fda08c00.qcom,csid"),
 	CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
-		"fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi0_ahb_clk", camss_csi0_ahb_clk.c, "fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi3_ahb_clk", camss_csi3_ahb_clk.c, "fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi0_src_clk", csi0_clk_src.c, "fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi3_src_clk", csi3_clk_src.c, "fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi0_phy_clk", camss_csi0phy_clk.c, "fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi3_phy_clk", camss_csi3phy_clk.c, "fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi0_clk", camss_csi0_clk.c, "fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi3_clk", camss_csi3_clk.c, "fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi0_pix_clk", camss_csi0pix_clk.c, "fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi3_pix_clk", camss_csi3pix_clk.c, "fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi0_rdi_clk", camss_csi0rdi_clk.c, "fda08c00.qcom,csid"),
-	CLK_LOOKUP("csi3_rdi_clk", camss_csi3rdi_clk.c, "fda08c00.qcom,csid"),
+					"fda08c00.qcom,csid"),
+	CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
+					"fda08c00.qcom,csid"),
+	CLK_LOOKUP("csi_ahb_clk", camss_csi3_ahb_clk.c,
+					"fda08c00.qcom,csid"),
+	CLK_LOOKUP("csi_src_clk", csi3_clk_src.c,
+					"fda08c00.qcom,csid"),
+	CLK_LOOKUP("csi_phy_clk", camss_csi3phy_clk.c,
+					"fda08c00.qcom,csid"),
+	CLK_LOOKUP("csi_clk", camss_csi3_clk.c,
+					"fda08c00.qcom,csid"),
+	CLK_LOOKUP("csi_pix_clk", camss_csi3pix_clk.c,
+					"fda08c00.qcom,csid"),
+	CLK_LOOKUP("csi_rdi_clk", camss_csi3rdi_clk.c,
+					"fda08c00.qcom,csid"),
 
 	/* ISPIF clocks */
 	CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 72f5051..22f74c8 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -600,7 +600,7 @@
 void msm_map_msm7x30_io(void);
 void msm_map_fsm9xxx_io(void);
 void msm_map_8974_io(void);
-void msm_map_zinc_io(void);
+void msm_map_8084_io(void);
 void msm_map_msmkrypton_io(void);
 void msm_map_msm8625_io(void);
 void msm_map_msm9625_io(void);
@@ -610,7 +610,7 @@
 void msm_8974_reserve(void);
 void msm_8974_very_early(void);
 void msm_8974_init_gpiomux(void);
-void msmzinc_init_gpiomux(void);
+void apq8084_init_gpiomux(void);
 void msm9625_init_gpiomux(void);
 void msmkrypton_init_gpiomux(void);
 void msm_map_mpq8092_io(void);
@@ -653,7 +653,7 @@
 void msm_snddev_tx_route_config(void);
 void msm_snddev_tx_route_deconfig(void);
 
-extern unsigned int msm_shared_ram_phys; /* defined in arch/arm/mach-msm/io.c */
+extern phys_addr_t msm_shared_ram_phys; /* defined in arch/arm/mach-msm/io.c */
 
 
 #endif
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index f750dc8..23d204a 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -129,6 +129,7 @@
  * @iommu_power_off:    Turn off power to unit
  * @iommu_clk_on:       Turn on clks to unit
  * @iommu_clk_off:      Turn off clks to unit
+ * @iommu_lock_initialize: Initialize the remote lock
  * @iommu_lock_acquire: Acquire any locks needed
  * @iommu_lock_release: Release locks needed
  */
@@ -137,6 +138,7 @@
 	void (*iommu_power_off)(struct msm_iommu_drvdata *);
 	int (*iommu_clk_on)(struct msm_iommu_drvdata *);
 	void (*iommu_clk_off)(struct msm_iommu_drvdata *);
+	void * (*iommu_lock_initialize)(void);
 	void (*iommu_lock_acquire)(void);
 	void (*iommu_lock_release)(void);
 };
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
index cc03c48..90757b6 100644
--- a/arch/arm/mach-msm/include/mach/ipa.h
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -557,17 +557,6 @@
 int ipa_set_single_ndp_per_mbim(bool enable);
 
 /*
- * rmnet bridge
- */
-int rmnet_bridge_init(void);
-
-int rmnet_bridge_disconnect(void);
-
-int rmnet_bridge_connect(u32 producer_hdl,
-			 u32 consumer_hdl,
-			 int wwan_logical_channel_id);
-
-/*
  * SW bridge (between IPA and A2)
  */
 int ipa_bridge_setup(enum ipa_bridge_dir dir, enum ipa_bridge_type type,
@@ -917,26 +906,6 @@
 }
 
 /*
- * rmnet bridge
- */
-static inline int rmnet_bridge_init(void)
-{
-	return -EPERM;
-}
-
-static inline int rmnet_bridge_disconnect(void)
-{
-	return -EPERM;
-}
-
-static inline int rmnet_bridge_connect(u32 producer_hdl,
-			 u32 consumer_hdl,
-			 int wwan_logical_channel_id)
-{
-	return -EPERM;
-}
-
-/*
  * SW bridge (between IPA and A2)
  */
 static inline int ipa_bridge_setup(enum ipa_bridge_dir dir,
diff --git a/arch/arm/mach-msm/include/mach/kgsl.h b/arch/arm/mach-msm/include/mach/kgsl.h
index b68aff8..349dbe7 100644
--- a/arch/arm/mach-msm/include/mach/kgsl.h
+++ b/arch/arm/mach-msm/include/mach/kgsl.h
@@ -20,6 +20,7 @@
 #define KGSL_CLK_MEM	0x00000008
 #define KGSL_CLK_MEM_IFACE 0x00000010
 #define KGSL_CLK_AXI	0x00000020
+#define KGSL_CLK_ALT_MEM_IFACE 0x00000040
 
 #define KGSL_MAX_PWRLEVELS 10
 
@@ -50,9 +51,19 @@
 	enum kgsl_iommu_context_id ctx_id;
 };
 
+/*
+ * struct kgsl_device_iommu_data - Struct holding iommu context data obtained
+ * from dtsi file
+ * @iommu_ctxs:		Pointer to an array of structs holding context name and id
+ * @iommu_ctx_count:	Number of contexts defined in the dtsi file
+ * @iommu_halt_enable:	Indicates whether the SMMU halt h/w feature is supported
+ * @physstart:		Start of iommu registers physical address
+ * @physend:		End of iommu registers physical address
+ */
 struct kgsl_device_iommu_data {
 	const struct kgsl_iommu_ctx *iommu_ctxs;
 	int iommu_ctx_count;
+	int iommu_halt_enable;
 	unsigned int physstart;
 	unsigned int physend;
 };
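
The new KGSL_CLK_ALT_MEM_IFACE bit is what the msm8610-gpu.dtsi hunk above relies on. A minimal
sketch verifying the 0x5E clk-map value; the 0x02 and 0x04 masks for CORE and IFACE are inferred
from the old 0x1E map and are not shown in this hunk, so treat them as assumptions:

	/* Sketch only; CORE/IFACE mask values are assumptions. */
	#define KGSL_CLK_CORE          0x00000002
	#define KGSL_CLK_IFACE         0x00000004
	#define KGSL_CLK_MEM           0x00000008
	#define KGSL_CLK_MEM_IFACE     0x00000010
	#define KGSL_CLK_ALT_MEM_IFACE 0x00000040

	/* 0x02 | 0x04 | 0x08 | 0x10 | 0x40 == 0x5E, matching qcom,clk-map above */
	unsigned int clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM |
			       KGSL_CLK_MEM_IFACE | KGSL_CLK_ALT_MEM_IFACE;
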
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-zinc.h b/arch/arm/mach-msm/include/mach/msm_iomap-8084.h
similarity index 74%
rename from arch/arm/mach-msm/include/mach/msm_iomap-zinc.h
rename to arch/arm/mach-msm/include/mach/msm_iomap-8084.h
index 0a33055..43f1de0 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-zinc.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8084.h
@@ -11,8 +11,8 @@
  */
 
 
-#ifndef __ASM_ARCH_MSM_IOMAP_zinc_H
-#define __ASM_ARCH_MSM_IOMAP_zinc_H
+#ifndef __ASM_ARCH_MSM_IOMAP_8084_H
+#define __ASM_ARCH_MSM_IOMAP_8084_H
 
 /* Physical base address and size of peripherals.
  * Ordered by the virtual base addresses they will be mapped at.
@@ -23,15 +23,15 @@
  *
  */
 
-#define MSMZINC_SHARED_RAM_PHYS     0x0FA00000
+#define APQ8084_SHARED_RAM_PHYS     0x0FA00000
 
-#define MSMZINC_QGIC_DIST_PHYS	0xF9000000
-#define MSMZINC_QGIC_DIST_SIZE	SZ_4K
+#define APQ8084_QGIC_DIST_PHYS	0xF9000000
+#define APQ8084_QGIC_DIST_SIZE	SZ_4K
 
-#define MSMZINC_TLMM_PHYS	0xFD510000
-#define MSMZINC_TLMM_SIZE	SZ_16K
+#define APQ8084_TLMM_PHYS	0xFD510000
+#define APQ8084_TLMM_SIZE	SZ_16K
 
-#ifdef CONFIG_DEBUG_MSMZINC_UART
+#ifdef CONFIG_DEBUG_APQ8084_UART
 #define MSM_DEBUG_UART_BASE	IOMEM(0xFA71E000)
 #define MSM_DEBUG_UART_PHYS	0xF991E000
 #endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index f27eb36..a90e78a 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -129,7 +129,7 @@
 #include "msm_iomap-8064.h"
 #include "msm_iomap-9615.h"
 #include "msm_iomap-8974.h"
-#include "msm_iomap-zinc.h"
+#include "msm_iomap-8084.h"
 #include "msm_iomap-9625.h"
 #include "msm_iomap-8092.h"
 #include "msm_iomap-8226.h"
diff --git a/arch/arm/mach-msm/include/mach/msm_smsm.h b/arch/arm/mach-msm/include/mach/msm_smsm.h
index d983ce5..81a6399 100644
--- a/arch/arm/mach-msm/include/mach/msm_smsm.h
+++ b/arch/arm/mach-msm/include/mach/msm_smsm.h
@@ -256,6 +256,18 @@
 int smsm_check_for_modem_crash(void);
 void *smem_find(unsigned id, unsigned size);
 void *smem_get_entry(unsigned id, unsigned *size);
+
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Virtual address returned by smem_alloc()/smem_alloc2()
+ * @returns: Physical address (or 0 if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address);
+
 #else
 static inline void *smem_alloc(unsigned id, unsigned size)
 {
@@ -339,5 +351,9 @@
 {
 	return NULL;
 }
+static inline phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+	return 0;
+}
 #endif
 #endif
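
A hypothetical caller of the new export, to show the intended hand-off of an SMEM item to a DMA
engine. This is a sketch, not code from the patch: SMEM_ID_VENDOR0 is used here as a generic item
id, and setup_dma_transfer() is a made-up placeholder.

	static int example_smem_dma(void)
	{
		void *buf = smem_alloc(SMEM_ID_VENDOR0, 1024);
		phys_addr_t pa;

		if (!buf)
			return -ENOMEM;

		pa = smem_virt_to_phys(buf);
		if (!pa)
			return -EINVAL;

		/* The DMA engine needs the physical address, not the virtual one. */
		return setup_dma_transfer(pa, 1024);
	}
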
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 7c9882e..d4ea4ac 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -46,8 +46,8 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8610")
 #define early_machine_is_mpq8092()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mpq8092")
-#define early_machine_is_msmzinc()	\
-	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmzinc")
+#define early_machine_is_apq8084()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,apq8084")
 #define early_machine_is_msmkrypton()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmkrypton")
 #else
@@ -63,7 +63,7 @@
 
 #define early_machine_is_msm8610()	0
 #define early_machine_is_mpq8092()	0
-#define early_machine_is_msmzinc()	0
+#define early_machine_is_apq8084()	0
 #define early_machine_is_msmkrypton()	0
 #endif
 
@@ -102,7 +102,7 @@
 	MSM_CPU_8226,
 	MSM_CPU_8610,
 	MSM_CPU_8625Q,
-	MSM_CPU_ZINC,
+	MSM_CPU_8084,
 	MSM_CPU_KRYPTON,
 };
 
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index ecac4a5..37dbbab 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -45,7 +45,7 @@
 /* msm_shared_ram_phys default value of 0x00100000 is the most common value
  * and should work as-is for any target without stacked memory.
  */
-unsigned int msm_shared_ram_phys = 0x00100000;
+phys_addr_t msm_shared_ram_phys = 0x00100000;
 
 static void __init msm_map_io(struct map_desc *io_desc, int size)
 {
@@ -321,27 +321,27 @@
 }
 #endif /* CONFIG_ARCH_MSM8974 */
 
-#ifdef CONFIG_ARCH_MSMZINC
-static struct map_desc msm_zinc_io_desc[] __initdata = {
-	MSM_CHIP_DEVICE(QGIC_DIST, MSMZINC),
-	MSM_CHIP_DEVICE(TLMM, MSMZINC),
+#ifdef CONFIG_ARCH_APQ8084
+static struct map_desc msm_8084_io_desc[] __initdata = {
+	MSM_CHIP_DEVICE(QGIC_DIST, APQ8084),
+	MSM_CHIP_DEVICE(TLMM, APQ8084),
 	{
 		.virtual =  (unsigned long) MSM_SHARED_RAM_BASE,
 		.length =   MSM_SHARED_RAM_SIZE,
 		.type =     MT_DEVICE,
 	},
-#ifdef CONFIG_DEBUG_MSMZINC_UART
+#ifdef CONFIG_DEBUG_APQ8084_UART
 	MSM_DEVICE(DEBUG_UART),
 #endif
 };
 
-void __init msm_map_zinc_io(void)
+void __init msm_map_8084_io(void)
 {
-	msm_shared_ram_phys = MSMZINC_SHARED_RAM_PHYS;
-	msm_map_io(msm_zinc_io_desc, ARRAY_SIZE(msm_zinc_io_desc));
+	msm_shared_ram_phys = APQ8084_SHARED_RAM_PHYS;
+	msm_map_io(msm_8084_io_desc, ARRAY_SIZE(msm_8084_io_desc));
 	of_scan_flat_dt(msm_scan_dt_map_imem, NULL);
 }
-#endif /* CONFIG_ARCH_MSMZINC */
+#endif /* CONFIG_ARCH_APQ8084 */
 
 #ifdef CONFIG_ARCH_MSM7X30
 static struct map_desc msm7x30_io_desc[] __initdata = {
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index 3c8348d..cd6693e 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -477,9 +477,11 @@
 #define M_MODE_ADDR(b, n) \
 		(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
 enum bimc_m_mode {
-	M_MODE_RMSK				= 0xf0000001,
+	M_MODE_RMSK				= 0xf0000011,
 	M_MODE_WR_GATHER_BEATS_BMSK		= 0xf0000000,
 	M_MODE_WR_GATHER_BEATS_SHFT		= 0x1c,
+	M_MODE_NARROW_WR_BMSK			= 0x10,
+	M_MODE_NARROW_WR_SHFT			= 0x4,
 	M_MODE_ORDERING_MODEL_BMSK		= 0x1,
 	M_MODE_ORDERING_MODEL_SHFT		= 0x0,
 };
@@ -1526,10 +1528,10 @@
 		reg_val = readl_relaxed(M_PRIOLVL_OVERRIDE_ADDR(binfo->
 			base, mas_index)) & M_PRIOLVL_OVERRIDE_RMSK;
 		val =  qmode->fixed.prio_level <<
-			M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT;
+			M_PRIOLVL_OVERRIDE_SHFT;
 		writel_relaxed(((reg_val &
-			~(M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK)) | (val
-			& M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK)),
+			~(M_PRIOLVL_OVERRIDE_BMSK)) | (val
+			& M_PRIOLVL_OVERRIDE_BMSK)),
 			M_PRIOLVL_OVERRIDE_ADDR(binfo->base, mas_index));
 
 		reg_val = readl_relaxed(M_RD_CMD_OVERRIDE_ADDR(binfo->
diff --git a/arch/arm/mach-msm/msm_watchdog.c b/arch/arm/mach-msm/msm_watchdog.c
index b1c8b30..dcfe13c 100644
--- a/arch/arm/mach-msm/msm_watchdog.c
+++ b/arch/arm/mach-msm/msm_watchdog.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
 #include <linux/suspend.h>
 #include <linux/percpu.h>
 #include <linux/interrupt.h>
+#include <linux/reboot.h>
 #include <asm/fiq.h>
 #include <asm/hardware/gic.h>
 #include <mach/msm_iomap.h>
@@ -64,6 +65,12 @@
 module_param(enable, int, 0);
 
 /*
+ * Watchdog bark reboot timeout in seconds.
+ * Can be specified in kernel command line.
+ */
+static int reboot_bark_timeout = 22;
+module_param(reboot_bark_timeout, int, 0644);
+/*
  * If the watchdog is enabled at bootup (enable=1),
  * the runtime_disable sysfs node at
  * /sys/module/msm_watchdog/runtime_disable
@@ -154,6 +161,27 @@
 	.notifier_call	= panic_wdog_handler,
 };
 
+#define get_sclk_hz(t_ms) (((t_ms) / 1000) * WDT_HZ)
+#define get_reboot_bark_timeout(t_s) (((t_s) * MSEC_PER_SEC) < bark_time ? \
+		get_sclk_hz(bark_time) : get_sclk_hz((t_s) * MSEC_PER_SEC))
+
+static int msm_watchdog_reboot_notifier(struct notifier_block *this,
+		unsigned long code, void *unused)
+{
+	u64 timeout = get_reboot_bark_timeout(reboot_bark_timeout);
+
+	__raw_writel(timeout, msm_wdt_base + WDT_BARK_TIME);
+	__raw_writel(timeout + 3 * WDT_HZ,
+			msm_wdt_base + WDT_BITE_TIME);
+	__raw_writel(1, msm_wdt_base + WDT_RST);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block msm_reboot_notifier = {
+	.notifier_call = msm_watchdog_reboot_notifier,
+};
+
 struct wdog_disable_work_data {
 	struct work_struct work;
 	struct completion complete;
@@ -177,6 +205,7 @@
 	}
 	enable = 0;
 	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_blk);
+	unregister_reboot_notifier(&msm_reboot_notifier);
 	cancel_delayed_work(&dogwork_struct);
 	/* may be suspended after the first write above */
 	__raw_writel(0, msm_wdt_base + WDT_EN);
@@ -373,6 +402,10 @@
 	atomic_notifier_chain_register(&panic_notifier_list,
 				       &panic_blk);
 
+	ret = register_reboot_notifier(&msm_reboot_notifier);
+	if (ret)
+		pr_err("Failed to register reboot notifier\n");
+
 	__raw_writel(1, msm_wdt_base + WDT_EN);
 	__raw_writel(1, msm_wdt_base + WDT_RST);
 	last_pet = sched_clock();
@@ -395,6 +428,11 @@
 	}
 
 	bark_time = pdata->bark_time;
+	/* reboot_bark_timeout (in seconds) might have been supplied as a
+	 * module parameter.
+	 */
+	if ((reboot_bark_timeout * MSEC_PER_SEC) < bark_time)
+		reboot_bark_timeout = (bark_time / MSEC_PER_SEC);
 	has_vic = pdata->has_vic;
 	if (!pdata->has_secure) {
 		appsbark = 1;
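
A rough sketch of what the reboot-notifier arithmetic works out to, assuming WDT_HZ is the
32768 Hz sleep clock used elsewhere in this driver (not visible in this hunk):

	/* Illustration only; mirrors get_reboot_bark_timeout() above. */
	static u64 reboot_bark_ticks(int timeout_s, unsigned int bark_time_ms)
	{
		u64 ms = (u64)timeout_s * 1000;

		if (ms < bark_time_ms)		/* never shorten below the normal bark time */
			ms = bark_time_ms;
		return (ms / 1000) * 32768;	/* default 22 s -> 720896 sleep-clock ticks */
	}
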
diff --git a/arch/arm/mach-msm/pil-q6v5-lpass.c b/arch/arm/mach-msm/pil-q6v5-lpass.c
index 608c5e0..277addc 100644
--- a/arch/arm/mach-msm/pil-q6v5-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v5-lpass.c
@@ -391,7 +391,7 @@
 	struct q6v5_data *q6;
 	struct pil_desc *desc;
 	struct resource *res;
-	int ret;
+	int ret, gpio_clk_ready;
 
 	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
 	if (!drv)
@@ -408,6 +408,12 @@
 		return ret;
 	drv->err_fatal_irq = ret;
 
+	ret = gpio_to_irq(of_get_named_gpio(pdev->dev.of_node,
+					    "qcom,gpio-proxy-unvote", 0));
+	if (ret < 0)
+		return ret;
+	gpio_clk_ready = ret;
+
 	drv->force_stop_gpio = of_get_named_gpio(pdev->dev.of_node,
 						"qcom,gpio-force-stop", 0);
 	if (drv->force_stop_gpio < 0)
@@ -421,6 +427,7 @@
 	desc = &q6->desc;
 	desc->owner = THIS_MODULE;
 	desc->proxy_timeout = PROXY_TIMEOUT_MS;
+	desc->proxy_unvote_irq = gpio_clk_ready;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
 	q6->restart_reg = devm_request_and_ioremap(&pdev->dev, res);
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index a213112..3590e6b 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -34,7 +34,6 @@
 #include <linux/kfifo.h>
 #include <linux/wakelock.h>
 #include <linux/notifier.h>
-#include <linux/sort.h>
 #include <linux/suspend.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
@@ -47,6 +46,7 @@
 #include <mach/proc_comm.h>
 #include <mach/msm_ipc_logging.h>
 #include <mach/ramdump.h>
+#include <mach/board.h>
 
 #include <asm/cacheflush.h>
 
@@ -184,7 +184,7 @@
 static struct smem_area *smem_areas;
 static struct ramdump_segment *smem_ramdump_segments;
 static void *smem_ramdump_dev;
-static void *smem_range_check(phys_addr_t base, unsigned offset);
+static void *smem_phys_to_virt(phys_addr_t base, unsigned offset);
 static void *smd_dev;
 
 struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
@@ -244,6 +244,17 @@
 #define SMx_POWER_INFO(x...) do { } while (0)
 #endif
 
+/**
+ * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
+ *
+ * @type: type to check for overflow
+ * @a: left value to use
+ * @b: right value to use
+ * @returns: true if a + b will result in overflow; false otherwise
+ */
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+	(((type)~0 - (a)) < (b) ? true : false)
+
 static unsigned last_heap_free = 0xffffffff;
 
 static inline void smd_write_intr(unsigned int val,
@@ -2400,37 +2411,107 @@
 
 /* -------------------------------------------------------------------------- */
 
-/*
- * Shared Memory Range Check
- *
- * Takes a physical address and an offset and checks if the resulting physical
- * address would fit into one of the aux smem regions.  If so, returns the
- * corresponding virtual address.  Otherwise returns NULL.  Expects the array
- * of smem regions to be in ascending physical address order.
+/**
+ * smem_phys_to_virt() - Convert a physical base and offset to virtual address
  *
  * @base: physical base address to check
  * @offset: offset from the base to get the final address
+ * @returns: virtual SMEM address; NULL for failure
+ *
+ * Takes a physical address and an offset and checks if the resulting physical
+ * address would fit into one of the smem regions.  If so, returns the
+ * corresponding virtual address.  Otherwise returns NULL.
  */
-static void *smem_range_check(phys_addr_t base, unsigned offset)
+static void *smem_phys_to_virt(phys_addr_t base, unsigned offset)
 {
 	int i;
 	phys_addr_t phys_addr;
 	resource_size_t size;
 
+	if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
+		return NULL;
+
+	if (!smem_areas) {
+		/*
+		 * Early boot - no area configuration yet, so default
+		 * to using the main memory region.
+		 *
+		 * To remove the MSM_SHARED_RAM_BASE and the static
+		 * mapping of SMEM in the future, add dump_stack()
+		 * to identify the early callers of smem_get_entry()
+		 * (which calls this function) and replace those calls
+		 * with a new function that knows how to lookup the
+		 * SMEM base address before SMEM has been probed.
+		 */
+		phys_addr = msm_shared_ram_phys;
+		size = MSM_SHARED_RAM_SIZE;
+
+		if (base >= phys_addr && base + offset < phys_addr + size) {
+			if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+				(uintptr_t)MSM_SHARED_RAM_BASE, offset)) {
+				pr_err("%s: overflow %p %x\n", __func__,
+					MSM_SHARED_RAM_BASE, offset);
+				return NULL;
+			}
+
+			return MSM_SHARED_RAM_BASE + offset;
+		} else {
+			return NULL;
+		}
+	}
 	for (i = 0; i < num_smem_areas; ++i) {
 		phys_addr = smem_areas[i].phys_addr;
 		size = smem_areas[i].size;
-		if (base < phys_addr)
-			return NULL;
-		if (base > phys_addr + size)
+
+		if (base < phys_addr || base + offset >= phys_addr + size)
 			continue;
-		if (base >= phys_addr && base + offset < phys_addr + size)
-			return smem_areas[i].virt_addr + offset;
+
+		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+				(uintptr_t)smem_areas[i].virt_addr, offset)) {
+			pr_err("%s: overflow %p %x\n", __func__,
+				smem_areas[i].virt_addr, offset);
+			return NULL;
+		}
+
+		return smem_areas[i].virt_addr + offset;
 	}
 
 	return NULL;
 }
 
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
+ * @returns: Physical address (or 0 if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+	phys_addr_t phys_addr = 0;
+	int i;
+	void *vend;
+
+	if (!smem_areas)
+		return phys_addr;
+
+	for (i = 0; i < num_smem_areas; ++i) {
+		vend = (void *)(smem_areas[i].virt_addr + smem_areas[i].size);
+
+		if (smem_address >= smem_areas[i].virt_addr &&
+				smem_address < vend) {
+			phys_addr = smem_address - smem_areas[i].virt_addr;
+			phys_addr += smem_areas[i].phys_addr;
+			break;
+		}
+	}
+
+	return phys_addr;
+}
+EXPORT_SYMBOL(smem_virt_to_phys);
+
 /* smem_alloc returns the pointer to smem item if it is already allocated.
  * Otherwise, it returns NULL.
  */
@@ -2504,14 +2585,15 @@
 		remote_spin_lock_irqsave(&remote_spinlock, flags);
 	/* toc is in device memory and cannot be speculatively accessed */
 	if (toc[id].allocated) {
+		phys_addr_t phys_base;
+
 		*size = toc[id].size;
 		barrier();
-		if (!(toc[id].reserved & BASE_ADDR_MASK))
-			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
-		else
-			ret = smem_range_check(
-				toc[id].reserved & BASE_ADDR_MASK,
-				toc[id].offset);
+
+		phys_base = toc[id].reserved & BASE_ADDR_MASK;
+		if (!phys_base)
+			phys_base = (phys_addr_t)msm_shared_ram_phys;
+		ret = smem_phys_to_virt(phys_base, toc[id].offset);
 	} else {
 		*size = 0;
 	}
@@ -3464,14 +3546,6 @@
 	return ret;
 }
 
-int sort_cmp_func(const void *a, const void *b)
-{
-	struct smem_area *left = (struct smem_area *)(a);
-	struct smem_area *right = (struct smem_area *)(b);
-
-	return left->phys_addr - right->phys_addr;
-}
-
 int smd_core_platform_init(struct platform_device *pdev)
 {
 	int i;
@@ -3482,7 +3556,8 @@
 	struct smd_subsystem_config *cfg;
 	int err_ret = 0;
 	struct smd_smem_regions *smd_smem_areas;
-	int smem_idx = 0;
+	struct smem_area *smem_areas_tmp = NULL;
+	int smem_idx;
 
 	smd_platform_data = pdev->dev.platform_data;
 	num_ss = smd_platform_data->num_ss_configs;
@@ -3493,37 +3568,54 @@
 			   smd_ssr_config->disable_smsm_reset_handshake;
 
 	smd_smem_areas = smd_platform_data->smd_smem_areas;
-	if (smd_smem_areas) {
-		num_smem_areas = smd_platform_data->num_smem_areas;
-		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
-						GFP_KERNEL);
-		if (!smem_areas) {
-			pr_err("%s: smem_areas kmalloc failed\n", __func__);
+	num_smem_areas = smd_platform_data->num_smem_areas + 1;
+
+	/* Initialize main SMEM region */
+	smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
+				GFP_KERNEL);
+	if (!smem_areas_tmp) {
+		pr_err("%s: smem_areas kmalloc failed\n", __func__);
+		err_ret = -ENOMEM;
+		goto smem_areas_alloc_fail;
+	}
+
+	smem_areas_tmp[0].phys_addr =  msm_shared_ram_phys;
+	smem_areas_tmp[0].size = MSM_SHARED_RAM_SIZE;
+	smem_areas_tmp[0].virt_addr = MSM_SHARED_RAM_BASE;
+
+	/* Configure auxiliary SMEM regions */
+	for (smem_idx = 1; smem_idx < num_smem_areas; ++smem_idx) {
+		smem_areas_tmp[smem_idx].phys_addr =
+				smd_smem_areas[smem_idx].phys_addr;
+		smem_areas_tmp[smem_idx].size =
+				smd_smem_areas[smem_idx].size;
+		smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
+			(unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
+			smem_areas_tmp[smem_idx].size);
+		if (!smem_areas_tmp[smem_idx].virt_addr) {
+			pr_err("%s: ioremap_nocache() of addr: %pa size: %pa\n",
+				__func__,
+				&smem_areas_tmp[smem_idx].phys_addr,
+				&smem_areas_tmp[smem_idx].size);
 			err_ret = -ENOMEM;
-			goto smem_areas_alloc_fail;
+			goto smem_failed;
 		}
 
-		for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
-			smem_areas[smem_idx].phys_addr =
-					smd_smem_areas[smem_idx].phys_addr;
-			smem_areas[smem_idx].size =
-					smd_smem_areas[smem_idx].size;
-			smem_areas[smem_idx].virt_addr = ioremap_nocache(
-				(unsigned long)(smem_areas[smem_idx].phys_addr),
-				smem_areas[smem_idx].size);
-			if (!smem_areas[smem_idx].virt_addr) {
-				pr_err("%s: ioremap_nocache() of addr: %pa size: %pa\n",
-					__func__,
-					&smem_areas[smem_idx].phys_addr,
-					&smem_areas[smem_idx].size);
-				err_ret = -ENOMEM;
-				++smem_idx;
-				goto smem_failed;
-			}
+		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+				(uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
+				smem_areas_tmp[smem_idx].size)) {
+			pr_err("%s: invalid virtual address block %i: %p:%pa\n",
+					__func__, smem_idx,
+					smem_areas_tmp[smem_idx].virt_addr,
+					&smem_areas_tmp[smem_idx].size);
+			++smem_idx;
+			err_ret = -EINVAL;
+			goto smem_failed;
 		}
-		sort(smem_areas, num_smem_areas,
-				sizeof(struct smem_area),
-				sort_cmp_func, NULL);
+
+		SMD_DBG("%s: %d = %pa %pa", __func__, smem_idx,
+				&smd_smem_areas[smem_idx - 1].phys_addr,
+				&smd_smem_areas[smem_idx - 1].size);
 	}
 
 	for (i = 0; i < num_ss; i++) {
@@ -3567,8 +3659,9 @@
 				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
 	}
 
-
 	SMD_INFO("smd_core_platform_init() done\n");
+
+	smem_areas = smem_areas_tmp;
 	return 0;
 
 intr_failed:
@@ -3586,9 +3679,12 @@
 				);
 	}
 smem_failed:
-	for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
-		iounmap(smem_areas[smem_idx].virt_addr);
-	kfree(smem_areas);
+	for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
+		iounmap(smem_areas_tmp[smem_idx].virt_addr);
+
+	num_smem_areas = 0;
+	kfree(smem_areas_tmp);
+
 smem_areas_alloc_fail:
 	return err_ret;
 }
@@ -3762,12 +3858,14 @@
 	resource_size_t aux_mem_size;
 	int temp_string_size = 11; /* max 3 digit count */
 	char temp_string[temp_string_size];
-	int count;
 	struct device_node *node;
 	int ret;
 	const char *compatible;
-	struct ramdump_segment *ramdump_segments_tmp;
+	struct ramdump_segment *ramdump_segments_tmp = NULL;
+	struct smem_area *smem_areas_tmp = NULL;
+	int smem_idx = 0;
 	int subnode_num = 0;
+	int i;
 	resource_size_t irq_out_size;
 
 	disable_smsm_reset_handshake = 1;
@@ -3787,97 +3885,105 @@
 	}
 	SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);
 
-	count = 1;
+	num_smem_areas = 1;
 	while (1) {
-		scnprintf(temp_string, temp_string_size, "aux-mem%d", count);
+		scnprintf(temp_string, temp_string_size, "aux-mem%d",
+				num_smem_areas);
 		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 								temp_string);
 		if (!r)
 			break;
 
 		++num_smem_areas;
-		++count;
-		if (count > 999) {
+		if (num_smem_areas > 999) {
 			pr_err("%s: max num aux mem regions reached\n",
 								__func__);
 			break;
 		}
 	}
 
-	/* initialize SSR ramdump regions */
+	/* Initialize main SMEM region and SSR ramdump region */
 	key = "smem";
 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
 	if (!r) {
 		pr_err("%s: missing '%s'\n", __func__, key);
 		return -ENODEV;
 	}
-	ramdump_segments_tmp = kmalloc_array(num_smem_areas + 1,
-			sizeof(struct ramdump_segment), GFP_KERNEL);
 
+	smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
+				GFP_KERNEL);
+	if (!smem_areas_tmp) {
+		pr_err("%s: smem areas kmalloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto free_smem_areas;
+	}
+
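+	/*
+	 * One ramdump segment is kept per SMEM region: index 0 covers the
+	 * main region and the remaining entries cover the aux-mem areas.
+	 */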
+	ramdump_segments_tmp = kmalloc_array(num_smem_areas,
+			sizeof(struct ramdump_segment), GFP_KERNEL);
 	if (!ramdump_segments_tmp) {
 		pr_err("%s: ramdump segment kmalloc failed\n", __func__);
 		ret = -ENOMEM;
 		goto free_smem_areas;
 	}
-	ramdump_segments_tmp[0].address = r->start;
-	ramdump_segments_tmp[0].size = resource_size(r);
 
-	if (num_smem_areas) {
+	smem_areas_tmp[smem_idx].phys_addr = r->start;
+	smem_areas_tmp[smem_idx].size = resource_size(r);
+	smem_areas_tmp[smem_idx].virt_addr = MSM_SHARED_RAM_BASE;
 
-		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
-					GFP_KERNEL);
+	ramdump_segments_tmp[smem_idx].address = r->start;
+	ramdump_segments_tmp[smem_idx].size = resource_size(r);
+	++smem_idx;
 
-		if (!smem_areas) {
-			pr_err("%s: smem areas kmalloc failed\n", __func__);
+	/* Configure auxiliary SMEM regions */
+	while (1) {
+		scnprintf(temp_string, temp_string_size, "aux-mem%d",
+								smem_idx);
+		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							temp_string);
+		if (!r)
+			break;
+		aux_mem_base = r->start;
+		aux_mem_size = resource_size(r);
+
+		ramdump_segments_tmp[smem_idx].address = aux_mem_base;
+		ramdump_segments_tmp[smem_idx].size = aux_mem_size;
+
+		smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
+		smem_areas_tmp[smem_idx].size = aux_mem_size;
+		smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
+			(unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
+			smem_areas_tmp[smem_idx].size);
+		SMD_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
+				&aux_mem_base, &aux_mem_size,
+				smem_areas_tmp[smem_idx].virt_addr);
+
+		if (!smem_areas_tmp[smem_idx].virt_addr) {
+			pr_err("%s: ioremap_nocache() of addr:%pa size: %pa\n",
+				__func__,
+				&smem_areas_tmp[smem_idx].phys_addr,
+				&smem_areas_tmp[smem_idx].size);
 			ret = -ENOMEM;
 			goto free_smem_areas;
 		}
-		count = 1;
-		while (1) {
-			scnprintf(temp_string, temp_string_size, "aux-mem%d",
-									count);
-			r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-								temp_string);
-			if (!r)
-				break;
-			aux_mem_base = r->start;
-			aux_mem_size = resource_size(r);
 
-			/*
-			 * Add to ram-dumps segments.
-			 * ramdump_segments_tmp[0] is the main SMEM region,
-			 * so auxiliary segments are indexed by count
-			 * instead of count - 1.
-			 */
-			ramdump_segments_tmp[count].address = aux_mem_base;
-			ramdump_segments_tmp[count].size = aux_mem_size;
-
-			SMD_DBG("%s: %s = %pa %pa", __func__, temp_string,
-					&aux_mem_base, &aux_mem_size);
-			smem_areas[count - 1].phys_addr = aux_mem_base;
-			smem_areas[count - 1].size = aux_mem_size;
-			smem_areas[count - 1].virt_addr = ioremap_nocache(
-				(unsigned long)(smem_areas[count-1].phys_addr),
-				smem_areas[count - 1].size);
-			if (!smem_areas[count - 1].virt_addr) {
-				pr_err("%s: ioremap_nocache() of addr:%pa size: %pa\n",
-					__func__,
-					&smem_areas[count - 1].phys_addr,
-					&smem_areas[count - 1].size);
-				ret = -ENOMEM;
-				goto free_smem_areas;
-			}
-
-			++count;
-			if (count > 999) {
-				pr_err("%s: max num aux mem regions reached\n",
-								__func__);
-				break;
-			}
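+		/*
+		 * Guard against the mapped range wrapping past the end of
+		 * the virtual address space.
+		 */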
+		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+				(uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
+				smem_areas_tmp[smem_idx].size)) {
+			pr_err("%s: invalid virtual address block %i: %p:%pa\n",
+					__func__, smem_idx,
+					smem_areas_tmp[smem_idx].virt_addr,
+					&smem_areas_tmp[smem_idx].size);
+			++smem_idx;
+			ret = -EINVAL;
+			goto free_smem_areas;
 		}
-		sort(smem_areas, num_smem_areas,
-				sizeof(struct smem_area),
-				sort_cmp_func, NULL);
+
+		++smem_idx;
+		if (smem_idx > 999) {
+			pr_err("%s: max num aux mem regions reached\n",
+							__func__);
+			break;
+		}
 	}
 
 	for_each_child_of_node(pdev->dev.of_node, node) {
@@ -3905,15 +4011,16 @@
 		++subnode_num;
 	}
 
+	smem_areas = smem_areas_tmp;
 	smem_ramdump_segments = ramdump_segments_tmp;
 	return 0;
 
 rollback_subnodes:
-	count = 0;
+	i = 0;
 	for_each_child_of_node(pdev->dev.of_node, node) {
-		if (count >= subnode_num)
+		if (i >= subnode_num)
 			break;
-		++count;
+		++i;
 		compatible = of_get_property(node, "compatible", NULL);
 		if (!strcmp(compatible, "qcom,smd"))
 			unparse_smd_devicetree(node);
@@ -3921,10 +4028,12 @@
 			unparse_smsm_devicetree(node);
 	}
 free_smem_areas:
+	for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
+		iounmap(smem_areas_tmp[smem_idx].virt_addr);
+
 	num_smem_areas = 0;
 	kfree(ramdump_segments_tmp);
-	kfree(smem_areas);
-	smem_areas = NULL;
+	kfree(smem_areas_tmp);
 	return ret;
 }
 
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 83f7a1d..12a3ceb 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -328,7 +328,12 @@
 
 	/* 8610 IDs */
 	[147] = MSM_CPU_8610,
+	[161] = MSM_CPU_8610,
+	[162] = MSM_CPU_8610,
+	[163] = MSM_CPU_8610,
+	[164] = MSM_CPU_8610,
 	[165] = MSM_CPU_8610,
+	[166] = MSM_CPU_8610,
 
 	/* 8064AB IDs */
 	[153] = MSM_CPU_8064AB,
@@ -348,8 +353,8 @@
 	/* 8064AA IDs */
 	[172] = MSM_CPU_8064AA,
 
-	/* zinc IDs */
-	[178] = MSM_CPU_ZINC,
+	/* 8084 IDs */
+	[178] = MSM_CPU_8084,
 
 	/* krypton IDs */
 	[187] = MSM_CPU_KRYPTON,
@@ -853,9 +858,9 @@
 		dummy_socinfo.id = 146;
 		strlcpy(dummy_socinfo.build_id, "mpq8092 - ",
 		sizeof(dummy_socinfo.build_id));
-	} else if (early_machine_is_msmzinc()) {
+	} else if (early_machine_is_apq8084()) {
 		dummy_socinfo.id = 178;
-		strlcpy(dummy_socinfo.build_id, "msmzinc - ",
+		strlcpy(dummy_socinfo.build_id, "apq8084 - ",
 			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_msmkrypton()) {
 		dummy_socinfo.id = 187;
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index d7c69db..13c9080 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -23,38 +23,227 @@
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
 #include <linux/delay.h>
+#include <linux/bluetooth-power.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
 
-static struct of_device_id ar3002_match_table[] = {
+#define BT_PWR_DBG(fmt, arg...)  pr_debug("%s: " fmt "\n" , __func__ , ## arg)
+#define BT_PWR_INFO(fmt, arg...) pr_info("%s: " fmt "\n" , __func__ , ## arg)
+#define BT_PWR_ERR(fmt, arg...)  pr_err("%s: " fmt "\n" , __func__ , ## arg)
+
+
+static struct of_device_id bt_power_match_table[] = {
 	{	.compatible = "qca,ar3002" },
 	{}
 };
 
-static int bt_reset_gpio;
-
+static struct bluetooth_power_platform_data *bt_power_pdata;
+static struct platform_device *btpdev;
 static bool previous;
 
-static int bluetooth_power(int on)
+static int bt_vreg_init(struct bt_power_vreg_data *vreg)
 {
-	int rc;
+	int rc = 0;
+	struct device *dev = &btpdev->dev;
 
-	pr_debug("%s  bt_gpio= %d\n", __func__, bt_reset_gpio);
+	BT_PWR_DBG("vreg_get for : %s", vreg->name);
+
+	/* Get the regulator handle */
+	vreg->reg = regulator_get(dev, vreg->name);
+	if (IS_ERR(vreg->reg)) {
+		rc = PTR_ERR(vreg->reg);
+		pr_err("%s: regulator_get(%s) failed. rc=%d\n",
+			__func__, vreg->name, rc);
+		goto out;
+	}
+
+	if ((regulator_count_voltages(vreg->reg) > 0)
+			&& (vreg->low_vol_level) && (vreg->high_vol_level))
+		vreg->set_voltage_sup = 1;
+
+out:
+	return rc;
+}
+
+static int bt_vreg_enable(struct bt_power_vreg_data *vreg)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("vreg_en for : %s", vreg->name);
+
+	if (!vreg->is_enabled) {
+		if (vreg->set_voltage_sup) {
+			rc = regulator_set_voltage(vreg->reg,
+						vreg->low_vol_level,
+						vreg->high_vol_level);
+			if (rc < 0) {
+				BT_PWR_ERR("vreg_set_vol(%s) failed rc=%d\n",
+						vreg->name, rc);
+				goto out;
+			}
+		}
+
+		rc = regulator_enable(vreg->reg);
+		if (rc < 0) {
+			BT_PWR_ERR("regulator_enable(%s) failed. rc=%d\n",
+					vreg->name, rc);
+			goto out;
+		}
+		vreg->is_enabled = true;
+	}
+out:
+	return rc;
+}
+
+static int bt_vreg_disable(struct bt_power_vreg_data *vreg)
+{
+	int rc = 0;
+
+	if (!vreg)
+		return rc;
+
+	BT_PWR_DBG("vreg_disable for : %s", vreg->name);
+
+	if (vreg->is_enabled) {
+		rc = regulator_disable(vreg->reg);
+		if (rc < 0) {
+			BT_PWR_ERR("regulator_disable(%s) failed. rc=%d\n",
+					vreg->name, rc);
+			goto out;
+		}
+		vreg->is_enabled = false;
+
+		if (vreg->set_voltage_sup) {
+			/* Set the min voltage to 0 */
+			rc = regulator_set_voltage(vreg->reg,
+						0,
+						vreg->high_vol_level);
+			if (rc < 0) {
+				BT_PWR_ERR("vreg_set_vol(%s) failed rc=%d\n",
+						vreg->name, rc);
+				goto out;
+
+			}
+		}
+	}
+out:
+	return rc;
+}
+
+static int bt_configure_vreg(struct bt_power_vreg_data *vreg)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("config %s", vreg->name);
+
+	/* Get the regulator handle for vreg */
+	if (!(vreg->reg)) {
+		rc = bt_vreg_init(vreg);
+		if (rc < 0)
+			return rc;
+	}
+	rc = bt_vreg_enable(vreg);
+
+	return rc;
+}
+
+static int bt_configure_gpios(int on)
+{
+	int rc = 0;
+	int bt_reset_gpio = bt_power_pdata->bt_gpio_sys_rst;
+
+	BT_PWR_DBG("bt_gpio= %d on: %d", bt_reset_gpio, on);
+
 	if (on) {
+		rc = gpio_request(bt_reset_gpio, "bt_sys_rst_n");
+		if (rc) {
+			BT_PWR_ERR("unable to request gpio %d (%d)\n",
+					bt_reset_gpio, rc);
+			return rc;
+		}
+
+		rc = gpio_direction_output(bt_reset_gpio, 0);
+		if (rc) {
+			BT_PWR_ERR("Unable to set direction\n");
+			return rc;
+		}
+
 		rc = gpio_direction_output(bt_reset_gpio, 1);
 		if (rc) {
-			pr_err("%s: Unable to set direction\n", __func__);
+			BT_PWR_ERR("Unable to set direction\n");
 			return rc;
 		}
 		msleep(100);
 	} else {
 		gpio_set_value(bt_reset_gpio, 0);
+
 		rc = gpio_direction_input(bt_reset_gpio);
-		if (rc) {
-			pr_err("%s: Unable to set direction\n", __func__);
-			return rc;
-		}
+		if (rc)
+			BT_PWR_ERR("Unable to set direction\n");
+
 		msleep(100);
 	}
-	return 0;
+	return rc;
+}
+
+static int bluetooth_power(int on)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("on: %d", on);
+
+	if (on) {
+		if (bt_power_pdata->bt_vdd_io) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_io);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddio config failed");
+				goto out;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_ldo) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_ldo);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddldo config failed");
+				goto vdd_ldo_fail;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_pa) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_pa);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddpa config failed");
+				goto vdd_pa_fail;
+			}
+		}
+		if (bt_power_pdata->bt_chip_pwd) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_chip_pwd);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power chippwd config failed");
+				goto chip_pwd_fail;
+			}
+		}
+		if (bt_power_pdata->bt_gpio_sys_rst) {
+			rc = bt_configure_gpios(on);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power gpio config failed");
+				goto gpio_fail;
+			}
+		}
+	} else {
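+		/*
+		 * Power-off path: fall through the labels below so the
+		 * regulators are released in reverse order of power-on.
+		 */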
+		bt_configure_gpios(on);
+gpio_fail:
+		if (bt_power_pdata->bt_gpio_sys_rst)
+			gpio_free(bt_power_pdata->bt_gpio_sys_rst);
+		bt_vreg_disable(bt_power_pdata->bt_chip_pwd);
+chip_pwd_fail:
+		bt_vreg_disable(bt_power_pdata->bt_vdd_pa);
+vdd_pa_fail:
+		bt_vreg_disable(bt_power_pdata->bt_vdd_ldo);
+vdd_ldo_fail:
+		bt_vreg_disable(bt_power_pdata->bt_vdd_io);
+	}
+
+out:
+	return rc;
 }
 
 static int bluetooth_toggle_radio(void *data, bool blocked)
@@ -62,7 +251,9 @@
 	int ret = 0;
 	int (*power_control)(int enable);
 
-	power_control = data;
+	power_control =
+		((struct bluetooth_power_platform_data *)data)->bt_power_setup;
+
 	if (previous != blocked)
 		ret = (*power_control)(!blocked);
 	if (!ret)
@@ -117,47 +308,146 @@
 	platform_set_drvdata(pdev, NULL);
 }
 
+#define MAX_PROP_SIZE 32
+static int bt_dt_parse_vreg_info(struct device *dev,
+		struct bt_power_vreg_data **vreg_data, const char *vreg_name)
+{
+	int len, ret = 0;
+	const __be32 *prop;
+	char prop_name[MAX_PROP_SIZE];
+	struct bt_power_vreg_data *vreg;
+	struct device_node *np = dev->of_node;
+
+	BT_PWR_DBG("vreg dev tree parse for %s", vreg_name);
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+	if (of_parse_phandle(np, prop_name, 0)) {
+		vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+		if (!vreg) {
+			dev_err(dev, "No memory for vreg: %s\n", vreg_name);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		vreg->name = vreg_name;
+
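+		/*
+		 * The voltage-level property, when present, holds two cells:
+		 * <min max>, in microvolts.
+		 */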
+		snprintf(prop_name, MAX_PROP_SIZE,
+				"qcom,%s-voltage-level", vreg_name);
+		prop = of_get_property(np, prop_name, &len);
+		if (!prop || (len != (2 * sizeof(__be32)))) {
+			dev_warn(dev, "%s %s property\n",
+				prop ? "invalid format" : "no", prop_name);
+		} else {
+			vreg->low_vol_level = be32_to_cpup(&prop[0]);
+			vreg->high_vol_level = be32_to_cpup(&prop[1]);
+		}
+
+		*vreg_data = vreg;
+		BT_PWR_DBG("%s: vol=[%d %d]uV\n",
+			vreg->name, vreg->low_vol_level,
+			vreg->high_vol_level);
+	} else
+		BT_PWR_INFO("%s is not provided in device tree", vreg_name);
+
+err:
+	return ret;
+}
+
+static int bt_power_populate_dt_pinfo(struct platform_device *pdev)
+{
+	int rc;
+
+	BT_PWR_DBG("");
+
+	if (!bt_power_pdata)
+		return -ENOMEM;
+
+	if (pdev->dev.of_node) {
+		bt_power_pdata->bt_gpio_sys_rst =
+			of_get_named_gpio(pdev->dev.of_node,
+						"qca,bt-reset-gpio", 0);
+		if (bt_power_pdata->bt_gpio_sys_rst < 0) {
+			BT_PWR_ERR("bt-reset-gpio not provided in device tree");
+			return bt_power_pdata->bt_gpio_sys_rst;
+		}
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_io,
+					"qca,bt-vdd-io");
+		if (rc < 0)
+			return rc;
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_pa,
+					"qca,bt-vdd-pa");
+		if (rc < 0)
+			return rc;
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_ldo,
+					"qca,bt-vdd-ldo");
+		if (rc < 0)
+			return rc;
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_chip_pwd,
+					"qca,bt-chip-pwd");
+		if (rc < 0)
+			return rc;
+
+	}
+
+	bt_power_pdata->bt_power_setup = bluetooth_power;
+
+	return 0;
+}
+
 static int __devinit bt_power_probe(struct platform_device *pdev)
 {
 	int ret = 0;
 
 	dev_dbg(&pdev->dev, "%s\n", __func__);
 
-	if (!pdev->dev.platform_data) {
-		/* Update the platform data if the
-		device node exists as part of device tree.*/
-		if (pdev->dev.of_node) {
-			pdev->dev.platform_data = bluetooth_power;
-		} else {
-			dev_err(&pdev->dev, "device node not set\n");
-			return -ENOSYS;
-		}
+	bt_power_pdata =
+		kzalloc(sizeof(struct bluetooth_power_platform_data),
+			GFP_KERNEL);
+
+	if (!bt_power_pdata) {
+		BT_PWR_ERR("Failed to allocate memory");
+		return -ENOMEM;
 	}
+
 	if (pdev->dev.of_node) {
-		bt_reset_gpio = of_get_named_gpio(pdev->dev.of_node,
-							"qca,bt-reset-gpio", 0);
-		if (bt_reset_gpio < 0) {
-			pr_err("bt-reset-gpio not available");
-			return bt_reset_gpio;
+		ret = bt_power_populate_dt_pinfo(pdev);
+		if (ret < 0) {
+			BT_PWR_ERR("Failed to populate device tree info");
+			goto free_pdata;
 		}
+		pdev->dev.platform_data = bt_power_pdata;
+	} else if (pdev->dev.platform_data) {
+		/* Optional data set to default if not provided */
+		if (!((struct bluetooth_power_platform_data *)
+			(pdev->dev.platform_data))->bt_power_setup)
+			((struct bluetooth_power_platform_data *)
+				(pdev->dev.platform_data))->bt_power_setup =
+						bluetooth_power;
+
+		memcpy(bt_power_pdata, pdev->dev.platform_data,
+			sizeof(struct bluetooth_power_platform_data));
+	} else {
+		BT_PWR_ERR("Failed to get platform data");
+		ret = -ENODEV;
+		goto free_pdata;
 	}
 
-	ret = gpio_request(bt_reset_gpio, "bt sys_rst_n");
-	if (ret) {
-		pr_err("%s: unable to request gpio %d (%d)\n",
-			__func__, bt_reset_gpio, ret);
-		return ret;
-	}
+	ret = bluetooth_power_rfkill_probe(pdev);
+	if (ret < 0)
+		goto free_pdata;
 
-	/* When booting up, de-assert BT reset pin */
-	ret = gpio_direction_output(bt_reset_gpio, 0);
-	if (ret) {
-		pr_err("%s: Unable to set direction\n", __func__);
-		return ret;
-	}
+	btpdev = pdev;
 
-	ret = bluetooth_power_rfkill_probe(pdev);
+	return 0;
 
+free_pdata:
+	kfree(bt_power_pdata);
 	return ret;
 }
 
@@ -167,6 +457,11 @@
 
 	bluetooth_power_rfkill_remove(pdev);
 
+	if (bt_power_pdata->bt_chip_pwd && bt_power_pdata->bt_chip_pwd->reg)
+		regulator_put(bt_power_pdata->bt_chip_pwd->reg);
+
+	kfree(bt_power_pdata);
+
 	return 0;
 }
 
@@ -176,7 +471,7 @@
 	.driver = {
 		.name = "bt_power",
 		.owner = THIS_MODULE,
-		.of_match_table = ar3002_match_table,
+		.of_match_table = bt_power_match_table,
 	},
 };
 
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 6d28042..682d876 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1090,6 +1090,12 @@
 		diag_smd_destructor(&driver->smd_dci[i]);
 
 	platform_driver_unregister(&msm_diag_dci_driver);
+
+	if (driver->dci_client_tbl) {
+		for (i = 0; i < MAX_DCI_CLIENTS; i++)
+			kfree(driver->dci_client_tbl[i].dci_data);
+	}
+
 	kfree(driver->req_tracking_tbl);
 	kfree(driver->dci_client_tbl);
 	kfree(driver->apps_dci_buf);
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 6f37608..684f11d 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -37,6 +37,7 @@
 #define HDLC_OUT_BUF_SIZE	8192
 #define POOL_TYPE_COPY		1
 #define POOL_TYPE_HDLC		2
+#define POOL_TYPE_USER		3
 #define POOL_TYPE_WRITE_STRUCT	4
 #define POOL_TYPE_HSIC		5
 #define POOL_TYPE_HSIC_2	6
@@ -55,7 +56,7 @@
 #define MSG_MASK_SIZE 10000
 #define LOG_MASK_SIZE 8000
 #define EVENT_MASK_SIZE 1000
-#define USER_SPACE_DATA 8000
+#define USER_SPACE_DATA 8192
 #define PKT_SIZE 4096
 #define MAX_EQUIP_ID 15
 #define DIAG_CTRL_MSG_LOG_MASK	9
@@ -234,16 +235,20 @@
 	unsigned int poolsize;
 	unsigned int itemsize_hdlc;
 	unsigned int poolsize_hdlc;
+	unsigned int itemsize_user;
+	unsigned int poolsize_user;
 	unsigned int itemsize_write_struct;
 	unsigned int poolsize_write_struct;
 	unsigned int debug_flag;
 	/* State for the mempool for the char driver */
 	mempool_t *diagpool;
 	mempool_t *diag_hdlc_pool;
+	mempool_t *diag_user_pool;
 	mempool_t *diag_write_struct_pool;
 	struct mutex diagmem_mutex;
 	int count;
 	int count_hdlc_pool;
+	int count_user_pool;
 	int count_write_struct_pool;
 	int used;
 	/* Buffers for masks */
@@ -259,7 +264,6 @@
 	struct diag_smd_info smd_dci[NUM_SMD_DCI_CHANNELS];
 	unsigned char *usb_buf_out;
 	unsigned char *apps_rsp_buf;
-	unsigned char *user_space_data;
 	/* buffer for updating mask to peripherals */
 	unsigned char *buf_msg_mask_update;
 	unsigned char *buf_log_mask_update;
@@ -276,6 +280,8 @@
 	struct usb_diag_ch *legacy_ch;
 	struct work_struct diag_proc_hdlc_work;
 	struct work_struct diag_read_work;
+	struct work_struct diag_usb_connect_work;
+	struct work_struct diag_usb_disconnect_work;
 #endif
 	struct workqueue_struct *diag_wq;
 	struct work_struct diag_drain_work;
@@ -312,6 +318,7 @@
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 	spinlock_t hsic_ready_spinlock;
 	/* common for all bridges */
+	struct work_struct diag_connect_work;
 	struct work_struct diag_disconnect_work;
 	/* SGLTE variables */
 	int lcid;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index a0c32f5..2ebae71 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -56,13 +56,16 @@
 /* The following variables can be specified by module options */
  /* for copy buffer */
 static unsigned int itemsize = 4096; /*Size of item in the mempool */
-static unsigned int poolsize = 10; /*Number of items in the mempool */
+static unsigned int poolsize = 12; /*Number of items in the mempool */
 /* for hdlc buffer */
 static unsigned int itemsize_hdlc = 8192; /*Size of item in the mempool */
-static unsigned int poolsize_hdlc = 8;  /*Number of items in the mempool */
+static unsigned int poolsize_hdlc = 10;  /*Number of items in the mempool */
+/* for user buffer */
+static unsigned int itemsize_user = 8192; /*Size of item in the mempool */
+static unsigned int poolsize_user = 8;  /*Number of items in the mempool */
 /* for write structure buffer */
 static unsigned int itemsize_write_struct = 20; /*Size of item in the mempool */
-static unsigned int poolsize_write_struct = 8; /* Num of items in the mempool */
+static unsigned int poolsize_write_struct = 10;/* Num of items in the mempool */
 /* This is the max number of user-space clients supported at initialization*/
 static unsigned int max_clients = 15;
 static unsigned int threshold_client_limit = 30;
@@ -781,10 +784,26 @@
 {
 	int i, temp, success = -EINVAL, status;
 	int temp_realtime_mode = driver->real_time_mode;
+	int requested_mode = (int)ioarg;
+
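+	/* Only accept logging modes that the driver knows how to handle. */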
+	switch (requested_mode) {
+	case USB_MODE:
+	case MEMORY_DEVICE_MODE:
+	case NO_LOGGING_MODE:
+	case UART_MODE:
+	case SOCKET_MODE:
+	case CALLBACK_MODE:
+	case MEMORY_DEVICE_MODE_NRT:
+		break;
+	default:
+		pr_err("diag: In %s, request to switch to invalid mode: %d\n",
+			__func__, requested_mode);
+		return -EINVAL;
+	}
 
 	mutex_lock(&driver->diagchar_mutex);
 	temp = driver->logging_mode;
-	driver->logging_mode = (int)ioarg;
+	driver->logging_mode = requested_mode;
 
 	if (driver->logging_mode == MEMORY_DEVICE_MODE_NRT) {
 		diag_send_diag_mode_update(MODE_NONREALTIME);
@@ -1013,6 +1032,8 @@
 					 current->tgid)
 					driver->req_tracking_tbl[i].pid = 0;
 			driver->dci_client_tbl[result].client = NULL;
+			kfree(driver->dci_client_tbl[result].dci_data);
+			driver->dci_client_tbl[result].dci_data = NULL;
 			driver->num_dci_client--;
 		}
 		mutex_unlock(&driver->dci_mutex);
@@ -1362,6 +1383,7 @@
 	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
 	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
 	void *buf_copy = NULL;
+	void *user_space_data = NULL;
 	unsigned int payload_size;
 
 	index = 0;
@@ -1388,31 +1410,50 @@
 	}
 #endif /* DIAG over USB */
 	if (pkt_type == DCI_DATA_TYPE) {
-		err = copy_from_user(driver->user_space_data, buf + 4,
-							 payload_size);
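+		/*
+		 * Stage the DCI payload in a buffer from the USER mempool
+		 * rather than the old shared driver->user_space_data buffer.
+		 */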
+		user_space_data = diagmem_alloc(driver, payload_size,
+								POOL_TYPE_USER);
+		if (!user_space_data) {
+			driver->dropped_count++;
+			return -ENOMEM;
+		}
+		err = copy_from_user(user_space_data, buf + 4, payload_size);
 		if (err) {
 			pr_alert("diag: copy failed for DCI data\n");
 			return DIAG_DCI_SEND_DATA_FAIL;
 		}
-		err = diag_process_dci_transaction(driver->user_space_data,
+		err = diag_process_dci_transaction(user_space_data,
 							payload_size);
+		diagmem_free(driver, user_space_data, POOL_TYPE_USER);
 		return err;
 	}
 	if (pkt_type == CALLBACK_DATA_TYPE) {
-		err = copy_from_user(driver->user_space_data, buf + 4,
-							 payload_size);
-		 if (err) {
+		if (payload_size > itemsize) {
+			pr_err("diag: Dropping packet, packet payload size crosses 4KB limit. Current payload size %d\n",
+				payload_size);
+			driver->dropped_count++;
+			return -EBADMSG;
+		}
+
+		buf_copy = diagmem_alloc(driver, payload_size, POOL_TYPE_COPY);
+		if (!buf_copy) {
+			driver->dropped_count++;
+			return -ENOMEM;
+		}
+
+		err = copy_from_user(buf_copy, buf + 4, payload_size);
+		if (err) {
 			pr_err("diag: copy failed for user space data\n");
 			return -EIO;
 		}
 		/* Check for proc_type */
-		remote_proc = diag_get_remote(*(int *)driver->user_space_data);
+		remote_proc = diag_get_remote(*(int *)buf_copy);
 
 		if (!remote_proc) {
 			wait_event_interruptible(driver->wait_q,
 				 (driver->in_busy_pktdata == 0));
-			return diag_process_apps_pkt(driver->user_space_data,
-							payload_size);
+			ret = diag_process_apps_pkt(buf_copy, payload_size);
+			diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
+			return ret;
 		}
 		/* The packet is for the remote processor */
 		token_offset = 4;
@@ -1420,8 +1461,8 @@
 		buf += 4;
 		/* Perform HDLC encoding on incoming data */
 		send.state = DIAG_STATE_START;
-		send.pkt = (void *)(driver->user_space_data + token_offset);
-		send.last = (void *)(driver->user_space_data + token_offset -
+		send.pkt = (void *)(buf_copy + token_offset);
+		send.last = (void *)(buf_copy + token_offset -
 							1 + payload_size);
 		send.terminate = 1;
 
@@ -1503,21 +1544,30 @@
 			}
 		}
 #endif
+		diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
 		diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
+		buf_copy = NULL;
 		buf_hdlc = NULL;
 		driver->used = 0;
 		mutex_unlock(&driver->diagchar_mutex);
 		return ret;
 	}
 	if (pkt_type == USER_SPACE_DATA_TYPE) {
-		err = copy_from_user(driver->user_space_data, buf + 4,
+		user_space_data = diagmem_alloc(driver, payload_size,
+								POOL_TYPE_USER);
+		if (!user_space_data) {
+			driver->dropped_count++;
+			return -ENOMEM;
+		}
+		err = copy_from_user(user_space_data, buf + 4,
 							 payload_size);
 		if (err) {
 			pr_err("diag: copy failed for user space data\n");
+			diagmem_free(driver, user_space_data, POOL_TYPE_USER);
 			return -EIO;
 		}
 		/* Check for proc_type */
-		remote_proc = diag_get_remote(*(int *)driver->user_space_data);
+		remote_proc = diag_get_remote(*(int *)user_space_data);
 
 		if (remote_proc) {
 			token_offset = 4;
@@ -1527,9 +1577,11 @@
 
 		/* Check masks for On-Device logging */
 		if (driver->mask_check) {
-			if (!mask_request_validate(driver->user_space_data +
+			if (!mask_request_validate(user_space_data +
 							 token_offset)) {
 				pr_alert("diag: mask request Invalid\n");
+				diagmem_free(driver, user_space_data,
+							POOL_TYPE_USER);
 				return -EFAULT;
 			}
 		}
@@ -1537,7 +1589,7 @@
 #ifdef DIAG_DEBUG
 		pr_debug("diag: user space data %d\n", payload_size);
 		for (i = 0; i < payload_size; i++)
-			pr_debug("\t %x", *((driver->user_space_data
+			pr_debug("\t %x", *((user_space_data
 						+ token_offset)+i));
 #endif
 #ifdef CONFIG_DIAG_SDIO_PIPE
@@ -1548,7 +1600,7 @@
 					 payload_size));
 			if (driver->sdio_ch && (payload_size > 0)) {
 				sdio_write(driver->sdio_ch, (void *)
-				   (driver->user_space_data + token_offset),
+				   (user_space_data + token_offset),
 				   payload_size);
 			}
 		}
@@ -1578,8 +1630,8 @@
 				diag_hsic[index].in_busy_hsic_read_on_device =
 									0;
 				err = diag_bridge_write(index,
-						driver->user_space_data +
-						token_offset, payload_size);
+						user_space_data + token_offset,
+						payload_size);
 				if (err) {
 					pr_err("diag: err sending mask to MDM: %d\n",
 					       err);
@@ -1600,11 +1652,13 @@
 						&& driver->lcid) {
 			if (payload_size > 0) {
 				err = msm_smux_write(driver->lcid, NULL,
-					driver->user_space_data + token_offset,
+					user_space_data + token_offset,
 					payload_size);
 				if (err) {
 					pr_err("diag:send mask to MDM err %d",
 							err);
+					diagmem_free(driver, user_space_data,
+								POOL_TYPE_USER);
 					return err;
 				}
 			}
@@ -1613,8 +1667,8 @@
 		/* send masks to 8k now */
 		if (!remote_proc)
 			diag_process_hdlc((void *)
-				(driver->user_space_data + token_offset),
-				 payload_size);
+				(user_space_data + token_offset), payload_size);
+		diagmem_free(driver, user_space_data, POOL_TYPE_USER);
 		return 0;
 	}
 
@@ -1885,6 +1939,11 @@
 }
 
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void diag_connect_work_fn(struct work_struct *w)
+{
+	diagfwd_connect_bridge(1);
+}
+
 static void diag_disconnect_work_fn(struct work_struct *w)
 {
 	diagfwd_disconnect_bridge(1);
@@ -1944,6 +2003,8 @@
 		driver->poolsize = poolsize;
 		driver->itemsize_hdlc = itemsize_hdlc;
 		driver->poolsize_hdlc = poolsize_hdlc;
+		driver->itemsize_user = itemsize_user;
+		driver->poolsize_user = poolsize_user;
 		driver->itemsize_write_struct = itemsize_write_struct;
 		driver->poolsize_write_struct = poolsize_write_struct;
 		driver->num_clients = max_clients;
@@ -1969,6 +2030,8 @@
 			pr_err("diag: could not register HSIC device, ret: %d\n",
 				ret);
 		diagfwd_bridge_init(SMUX);
+		INIT_WORK(&(driver->diag_connect_work),
+						 diag_connect_work_fn);
 		INIT_WORK(&(driver->diag_disconnect_work),
 						 diag_disconnect_work_fn);
 #endif
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index a4003ff..151e304 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -402,7 +402,7 @@
 				return;
 			}
 			if (pkt_len > r) {
-				pr_err("diag: In %s, SMD sending partial pkt %d %d %d %d %d %d\n",
+				pr_debug("diag: In %s, SMD sending partial pkt %d %d %d %d %d %d\n",
 				__func__, pkt_len, r, total_recd, loop_count,
 				smd_info->peripheral, smd_info->type);
 			}
@@ -1282,6 +1282,16 @@
 #define N_LEGACY_WRITE	(driver->poolsize + 6)
 #define N_LEGACY_READ	1
 
+static void diag_usb_connect_work_fn(struct work_struct *w)
+{
+	diagfwd_connect();
+}
+
+static void diag_usb_disconnect_work_fn(struct work_struct *w)
+{
+	diagfwd_disconnect();
+}
+
 int diagfwd_connect(void)
 {
 	int err;
@@ -1448,10 +1458,12 @@
 {
 	switch (event) {
 	case USB_DIAG_CONNECT:
-		diagfwd_connect();
+		queue_work(driver->diag_wq,
+			 &driver->diag_usb_connect_work);
 		break;
 	case USB_DIAG_DISCONNECT:
-		diagfwd_disconnect();
+		queue_work(driver->diag_wq,
+			 &driver->diag_usb_disconnect_work);
 		break;
 	case USB_DIAG_READ_DONE:
 		diagfwd_read_complete(d_req);
@@ -1783,11 +1795,6 @@
 	    && (driver->hdlc_buf = kzalloc(HDLC_MAX, GFP_KERNEL)) == NULL)
 		goto err;
 	kmemleak_not_leak(driver->hdlc_buf);
-	if (driver->user_space_data == NULL)
-		driver->user_space_data = kzalloc(USER_SPACE_DATA, GFP_KERNEL);
-		if (driver->user_space_data == NULL)
-			goto err;
-	kmemleak_not_leak(driver->user_space_data);
 	if (driver->client_map == NULL &&
 	    (driver->client_map = kzalloc
 	     ((driver->num_clients) * sizeof(struct diag_client_map),
@@ -1832,6 +1839,10 @@
 	}
 	driver->diag_wq = create_singlethread_workqueue("diag_wq");
 #ifdef CONFIG_DIAG_OVER_USB
+	INIT_WORK(&(driver->diag_usb_connect_work),
+						 diag_usb_connect_work_fn);
+	INIT_WORK(&(driver->diag_usb_disconnect_work),
+						 diag_usb_disconnect_work_fn);
 	INIT_WORK(&(driver->diag_proc_hdlc_work), diag_process_hdlc_fn);
 	INIT_WORK(&(driver->diag_read_work), diag_read_work_fn);
 	driver->legacy_ch = usb_diag_open(DIAG_LEGACY, driver,
@@ -1862,7 +1873,6 @@
 	kfree(driver->pkt_buf);
 	kfree(driver->usb_read_ptr);
 	kfree(driver->apps_rsp_buf);
-	kfree(driver->user_space_data);
 	if (driver->diag_wq)
 		destroy_workqueue(driver->diag_wq);
 }
@@ -1895,6 +1905,5 @@
 	kfree(driver->pkt_buf);
 	kfree(driver->usb_read_ptr);
 	kfree(driver->apps_rsp_buf);
-	kfree(driver->user_space_data);
 	destroy_workqueue(driver->diag_wq);
 }
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
index 475f5ba..8c07219b 100644
--- a/drivers/char/diag/diagfwd_bridge.c
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -233,7 +233,8 @@
 
 	switch (event) {
 	case USB_DIAG_CONNECT:
-		diagfwd_connect_bridge(1);
+		queue_work(driver->diag_wq,
+			 &driver->diag_connect_work);
 		break;
 	case USB_DIAG_DISCONNECT:
 		queue_work(driver->diag_wq,
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
index 0cd8267..bd339e2 100644
--- a/drivers/char/diag/diagmem.c
+++ b/drivers/char/diag/diagmem.c
@@ -45,6 +45,15 @@
 								 GFP_ATOMIC);
 			}
 		}
+	} else if (pool_type == POOL_TYPE_USER) {
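+		/*
+		 * The USER pool holds buffers that stage data copied in from
+		 * user space before it is processed or forwarded.
+		 */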
+		if (driver->diag_user_pool) {
+			if (driver->count_user_pool < driver->poolsize_user) {
+				atomic_add(1,
+					(atomic_t *)&driver->count_user_pool);
+				buf = mempool_alloc(driver->diag_user_pool,
+					GFP_ATOMIC);
+			}
+		}
 	} else if (pool_type == POOL_TYPE_WRITE_STRUCT) {
 		if (driver->diag_write_struct_pool) {
 			if (driver->count_write_struct_pool <
@@ -98,8 +107,9 @@
 			mempool_destroy(driver->diagpool);
 			driver->diagpool = NULL;
 		} else if (driver->ref_count == 0 && pool_type ==
-							POOL_TYPE_ALL)
-			printk(KERN_ALERT "Unable to destroy COPY mempool");
+							POOL_TYPE_ALL) {
+			pr_err("diag: Unable to destroy COPY mempool");
+		}
 	}
 
 	if (driver->diag_hdlc_pool) {
@@ -107,8 +117,19 @@
 			mempool_destroy(driver->diag_hdlc_pool);
 			driver->diag_hdlc_pool = NULL;
 		} else if (driver->ref_count == 0 && pool_type ==
-							POOL_TYPE_ALL)
-			printk(KERN_ALERT "Unable to destroy HDLC mempool");
+							POOL_TYPE_ALL) {
+			pr_err("diag: Unable to destroy HDLC mempool");
+		}
+	}
+
+	if (driver->diag_user_pool) {
+		if (driver->count_user_pool == 0 && driver->ref_count == 0) {
+			mempool_destroy(driver->diag_user_pool);
+			driver->diag_user_pool = NULL;
+		} else if (driver->ref_count == 0 && pool_type ==
+							POOL_TYPE_ALL) {
+			pr_err("diag: Unable to destroy USER mempool");
+		}
 	}
 
 	if (driver->diag_write_struct_pool) {
@@ -119,8 +140,9 @@
 			mempool_destroy(driver->diag_write_struct_pool);
 			driver->diag_write_struct_pool = NULL;
 		} else if (driver->ref_count == 0 && pool_type ==
-								POOL_TYPE_ALL)
-			printk(KERN_ALERT "Unable to destroy STRUCT mempool");
+							POOL_TYPE_ALL) {
+			pr_err("diag: Unable to destroy STRUCT mempool");
+		}
 	}
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 	for (index = 0; index < MAX_HSIC_CH; index++) {
@@ -163,16 +185,25 @@
 			mempool_free(buf, driver->diagpool);
 			atomic_add(-1, (atomic_t *)&driver->count);
 		} else
-			pr_err("diag: Attempt to free up DIAG driver "
-	       "mempool memory which is already free %d", driver->count);
+			pr_err("diag: Attempt to free up DIAG driver mempool memory which is already free %d",
+							driver->count);
 	} else if (pool_type == POOL_TYPE_HDLC) {
 		if (driver->diag_hdlc_pool != NULL &&
 			 driver->count_hdlc_pool > 0) {
 			mempool_free(buf, driver->diag_hdlc_pool);
 			atomic_add(-1, (atomic_t *)&driver->count_hdlc_pool);
 		} else
-			pr_err("diag: Attempt to free up DIAG driver "
-	"HDLC mempool which is already free %d ", driver->count_hdlc_pool);
+			pr_err("diag: Attempt to free up DIAG driver HDLC mempool which is already free %d ",
+						driver->count_hdlc_pool);
+	} else if (pool_type == POOL_TYPE_USER) {
+		if (driver->diag_user_pool != NULL &&
+			driver->count_user_pool > 0) {
+			mempool_free(buf, driver->diag_user_pool);
+			atomic_add(-1, (atomic_t *)&driver->count_user_pool);
+		} else {
+			pr_err("diag: Attempt to free up DIAG driver USER mempool which is already free %d ",
+						driver->count_user_pool);
+		}
 	} else if (pool_type == POOL_TYPE_WRITE_STRUCT) {
 		if (driver->diag_write_struct_pool != NULL &&
 			 driver->count_write_struct_pool > 0) {
@@ -180,9 +211,8 @@
 			atomic_add(-1,
 				 (atomic_t *)&driver->count_write_struct_pool);
 		} else
-			pr_err("diag: Attempt to free up DIAG driver "
-			   "USB structure mempool which is already free %d ",
-				    driver->count_write_struct_pool);
+			pr_err("diag: Attempt to free up DIAG driver USB structure mempool which is already free %d ",
+					driver->count_write_struct_pool);
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 	} else if (pool_type == POOL_TYPE_HSIC ||
 				pool_type == POOL_TYPE_HSIC_2) {
@@ -229,18 +259,25 @@
 		driver->diag_hdlc_pool = mempool_create_kmalloc_pool(
 				driver->poolsize_hdlc, driver->itemsize_hdlc);
 
+	if (driver->count_user_pool == 0)
+		driver->diag_user_pool = mempool_create_kmalloc_pool(
+				driver->poolsize_user, driver->itemsize_user);
+
 	if (driver->count_write_struct_pool == 0)
 		driver->diag_write_struct_pool = mempool_create_kmalloc_pool(
 		driver->poolsize_write_struct, driver->itemsize_write_struct);
 
 	if (!driver->diagpool)
-		printk(KERN_INFO "Cannot allocate diag mempool\n");
+		pr_err("diag: Cannot allocate diag mempool\n");
 
 	if (!driver->diag_hdlc_pool)
-		printk(KERN_INFO "Cannot allocate diag HDLC mempool\n");
+		pr_err("diag: Cannot allocate diag HDLC mempool\n");
+
+	if (!driver->diag_user_pool)
+		pr_err("diag: Cannot allocate diag USER mempool\n");
 
 	if (!driver->diag_write_struct_pool)
-		printk(KERN_INFO "Cannot allocate diag USB struct mempool\n");
+		pr_err("diag: Cannot allocate diag USB struct mempool\n");
 }
 
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index c768bb7..5f435f3 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -179,6 +179,7 @@
 #define A3XX_CP_MEQ_ADDR 0x1DA
 #define A3XX_CP_MEQ_DATA 0x1DB
 #define A3XX_CP_PERFCOUNTER_SELECT 0x445
+#define A3XX_CP_WFI_PEND_CTR 0x01F5
 #define A3XX_CP_HW_FAULT  0x45C
 #define A3XX_CP_AHB_FAULT 0x54D
 #define A3XX_CP_PROTECT_CTRL 0x45E
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index d5904b9..9a2e901 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -606,41 +606,15 @@
 	return result;
 }
 
-static void adreno_iommu_setstate(struct kgsl_device *device,
-					unsigned int context_id,
-					uint32_t flags)
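+/*
+ * _adreno_iommu_setstate_v0() - Generate the CP commands that switch the
+ * IOMMU-v0 pagetable and flush the TLB, bracketed by the GPU-CPU sync lock.
+ */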
+static unsigned int _adreno_iommu_setstate_v0(struct kgsl_device *device,
+					unsigned int *cmds_orig,
+					unsigned int pt_val,
+					int num_iommu_units, uint32_t flags)
 {
-	unsigned int pt_val, reg_pt_val;
-	unsigned int link[230];
-	unsigned int *cmds = &link[0];
-	int sizedwords = 0;
+	unsigned int reg_pt_val;
+	unsigned int *cmds = cmds_orig;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	int num_iommu_units, i;
-	struct kgsl_context *context;
-	struct adreno_context *adreno_ctx = NULL;
-
-	/*
-	 * If we're idle and we don't need to use the GPU to save context
-	 * state, use the CPU instead of the GPU to reprogram the
-	 * iommu for simplicity's sake.
-	 */
-	 if (!adreno_dev->drawctxt_active || device->ftbl->isidle(device))
-		return kgsl_mmu_device_setstate(&device->mmu, flags);
-
-	num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
-
-	context = idr_find(&device->context_idr, context_id);
-	if (context == NULL)
-		return;
-	adreno_ctx = context->devctxt;
-
-	if (kgsl_mmu_enable_clk(&device->mmu,
-				KGSL_IOMMU_CONTEXT_USER))
-		return;
-
-	cmds += __adreno_add_idle_indirect_cmds(cmds,
-		device->mmu.setstate_memory.gpuaddr +
-		KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+	int i;
 
 	if (cpu_is_msm8960())
 		cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
@@ -657,8 +631,6 @@
 	/* Acquire GPU-CPU sync Lock here */
 	cmds += kgsl_mmu_sync_lock(&device->mmu, cmds);
 
-	pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
-					device->mmu.hwpagetable);
 	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
 		/*
 		 * We need to perfrom the following operations for all
@@ -735,25 +707,169 @@
 
 	cmds += adreno_add_idle_cmds(adreno_dev, cmds);
 
-	sizedwords += (cmds - &link[0]);
-	if (sizedwords) {
-		/* invalidate all base pointers */
-		*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
-		*cmds++ = 0x7fff;
-		sizedwords += 2;
-		/* This returns the per context timestamp but we need to
-		 * use the global timestamp for iommu clock disablement */
-		adreno_ringbuffer_issuecmds(device, adreno_ctx,
-			KGSL_CMD_FLAGS_PMODE,
-			&link[0], sizedwords);
-		kgsl_mmu_disable_clk_on_ts(&device->mmu,
-		adreno_dev->ringbuffer.timestamp[KGSL_MEMSTORE_GLOBAL], true);
+	return cmds - cmds_orig;
+}
+
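+/*
+ * _adreno_iommu_setstate_v1() - Generate the CP commands that write TTBR0 and
+ * invalidate the TLB through the AHB-mapped context registers, optionally
+ * halting the IOMMU while TTBR0 is updated.
+ */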
+static unsigned int _adreno_iommu_setstate_v1(struct kgsl_device *device,
+					unsigned int *cmds_orig,
+					unsigned int pt_val,
+					int num_iommu_units, uint32_t flags)
+{
+	unsigned int reg_pt_val;
+	unsigned int *cmds = cmds_orig;
+	int i;
+	unsigned int ttbr0, tlbiall, tlbstatus, tlbsync, mmu_ctrl;
+
+	for (i = 0; i < num_iommu_units; i++) {
+		reg_pt_val = (pt_val + kgsl_mmu_get_pt_lsb(&device->mmu,
+				i, KGSL_IOMMU_CONTEXT_USER));
+		if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+			mmu_ctrl = kgsl_mmu_get_reg_ahbaddr(
+				&device->mmu, i,
+				KGSL_IOMMU_CONTEXT_USER,
+				KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL) >> 2;
+
+			ttbr0 = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+						KGSL_IOMMU_CONTEXT_USER,
+						KGSL_IOMMU_CTX_TTBR0) >> 2;
+
+			if (kgsl_mmu_hw_halt_supported(&device->mmu, i)) {
+				*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+				*cmds++ = 0;
+				/*
+				 * glue commands together until next
+				 * WAIT_FOR_ME
+				 */
+				cmds += adreno_wait_reg_eq(cmds,
+				A3XX_CP_WFI_PEND_CTR, 1, 0xFFFFFFFF, 0xF);
+
+				/* set the iommu lock bit */
+				*cmds++ = cp_type3_packet(CP_REG_RMW, 3);
+				*cmds++ = mmu_ctrl;
+				/* AND to unmask the lock bit */
+				*cmds++ =
+				 ~(KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT);
+				/* OR to set the IOMMU lock bit */
+				*cmds++ =
+				   KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT;
+				/* wait for smmu to lock */
+				cmds += adreno_wait_reg_eq(cmds, mmu_ctrl,
+				KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE,
+				KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE, 0xF);
+			}
+			/* set ttbr0 */
+			*cmds++ = cp_type0_packet(ttbr0, 1);
+			*cmds++ = reg_pt_val;
+			if (kgsl_mmu_hw_halt_supported(&device->mmu, i)) {
+				/* unlock the IOMMU lock */
+				*cmds++ = cp_type3_packet(CP_REG_RMW, 3);
+				*cmds++ = mmu_ctrl;
+				/* AND to unmask the lock bit */
+				*cmds++ =
+				   ~(KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT);
+				/* OR with 0 so lock bit is unset */
+				*cmds++ = 0;
+				/* release all commands with wait_for_me */
+				*cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
+				*cmds++ = 0;
+			}
+		}
+		if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+			tlbiall = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+						KGSL_IOMMU_CONTEXT_USER,
+						KGSL_IOMMU_CTX_TLBIALL) >> 2;
+			*cmds++ = cp_type0_packet(tlbiall, 1);
+			*cmds++ = 1;
+
+			tlbsync = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+						KGSL_IOMMU_CONTEXT_USER,
+						KGSL_IOMMU_CTX_TLBSYNC) >> 2;
+			*cmds++ = cp_type0_packet(tlbsync, 1);
+			*cmds++ = 0;
+
+			tlbstatus = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+					KGSL_IOMMU_CONTEXT_USER,
+					KGSL_IOMMU_CTX_TLBSTATUS) >> 2;
+			cmds += adreno_wait_reg_eq(cmds, tlbstatus, 0,
+					KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE, 0xF);
+		}
 	}
+	return cmds - cmds_orig;
+}
+
+static void adreno_iommu_setstate(struct kgsl_device *device,
+					unsigned int context_id,
+					uint32_t flags)
+{
+	unsigned int pt_val;
+	unsigned int link[230];
+	unsigned int *cmds = &link[0];
+	int sizedwords = 0;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	int num_iommu_units;
+	struct kgsl_context *context;
+	struct adreno_context *adreno_ctx = NULL;
+
+	if (!adreno_dev->drawctxt_active) {
+		kgsl_mmu_device_setstate(&device->mmu, flags);
+		return;
+	}
+	num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
+
+	context = idr_find(&device->context_idr, context_id);
+	if (context == NULL)
+		return;
+
+	kgsl_context_get(context);
+
+	adreno_ctx = context->devctxt;
+
+	if (kgsl_mmu_enable_clk(&device->mmu,
+				KGSL_IOMMU_CONTEXT_USER)) {
+		kgsl_context_put(context);
+		return;
+	}
+
+	pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
+				device->mmu.hwpagetable);
+
+	cmds += __adreno_add_idle_indirect_cmds(cmds,
+		device->mmu.setstate_memory.gpuaddr +
+		KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+	if (msm_soc_version_supports_iommu_v0())
+		cmds += _adreno_iommu_setstate_v0(device, cmds, pt_val,
+						num_iommu_units, flags);
+	else
+		cmds += _adreno_iommu_setstate_v1(device, cmds, pt_val,
+						num_iommu_units, flags);
+
+	sizedwords += (cmds - &link[0]);
+	if (sizedwords == 0) {
+		KGSL_DRV_ERR(device, "no commands generated\n");
+		BUG();
+	}
+	/* invalidate all base pointers */
+	*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+	*cmds++ = 0x7fff;
+	sizedwords += 2;
 
 	if (sizedwords > (ARRAY_SIZE(link))) {
 		KGSL_DRV_ERR(device, "Temp command buffer overflow\n");
 		BUG();
 	}
+	/*
+	 * This returns the per context timestamp but we need to
+	 * use the global timestamp for iommu clock disablement
+	 */
+	adreno_ringbuffer_issuecmds(device, adreno_ctx, KGSL_CMD_FLAGS_PMODE,
+			&link[0], sizedwords);
+	/* timestamp based clock gating is currently unstable on iommuv1 */
+	if (msm_soc_version_supports_iommu_v0()) {
+		struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+		kgsl_mmu_disable_clk_on_ts(&device->mmu,
+			rb->timestamp[KGSL_MEMSTORE_GLOBAL], true);
+	}
+
+	kgsl_context_put(context);
 }
 
 static void adreno_gpummu_setstate(struct kgsl_device *device,
@@ -1283,6 +1399,8 @@
 
 	data->physstart = reg_val[0];
 	data->physend = data->physstart + reg_val[1] - 1;
+	data->iommu_halt_enable = of_property_read_bool(node,
+					"qcom,iommu-enable-halt");
 
 	data->iommu_ctx_count = 0;
 
@@ -2186,6 +2304,7 @@
 		context->wait_on_invalid_ts = false;
 
 		if (!(adreno_context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
+			ft_data->status = 1;
 			KGSL_FT_ERR(device, "Fault tolerance not supported\n");
 			goto play_good_cmds;
 		}
@@ -2220,6 +2339,7 @@
 
 	/* If long IB detected do not attempt replay of bad cmds */
 	if (long_ib) {
+		ft_data->status = 1;
 		_adreno_debug_ft_info(device, ft_data);
 		goto play_good_cmds;
 	}
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 3935cd8..fa892b9 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -507,4 +507,25 @@
 	return cmds - start;
 }
 
+/*
+ * adreno_wait_reg_eq() - Add a CP_WAIT_REG_EQ command
+ * @cmds:	Pointer to memory where commands are to be added
+ * @addr:	Register address to poll for
+ * @val:	Value to poll for
+ * @mask:	The value against which register value is masked
+ * @interval:	wait interval
+ */
+static inline int adreno_wait_reg_eq(unsigned int *cmds, unsigned int addr,
+					unsigned int val, unsigned int mask,
+					unsigned int interval)
+{
+	unsigned int *start = cmds;
+	*cmds++ = cp_type3_packet(CP_WAIT_REG_EQ, 4);
+	*cmds++ = addr;
+	*cmds++ = val;
+	*cmds++ = mask;
+	*cmds++ = interval;
+	return cmds - start;
+}
+
 #endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index be5c786..a4b3121 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2758,7 +2758,7 @@
 {
 	unsigned int in, out, bit, sel;
 
-	if (countable > 0x7f)
+	if (counter > 1 || countable > 0x7f)
 		return;
 
 	adreno_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
@@ -2767,7 +2767,7 @@
 	if (counter == 0) {
 		bit = VBIF_PERF_CNT_0;
 		sel = (sel & ~VBIF_PERF_CNT_0_SEL_MASK) | countable;
-	} else if (counter == 1) {
+	} else {
 		bit = VBIF_PERF_CNT_1;
 		sel = (sel & ~VBIF_PERF_CNT_1_SEL_MASK)
 			| (countable << VBIF_PERF_CNT_1_SEL);
@@ -2821,10 +2821,10 @@
 	unsigned int val = 0;
 	struct a3xx_perfcounter_register *reg;
 
-	if (group > ARRAY_SIZE(a3xx_perfcounter_reglist))
+	if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist))
 		return;
 
-	if (counter > a3xx_perfcounter_reglist[group].count)
+	if (counter >= a3xx_perfcounter_reglist[group].count)
 		return;
 
 	/* Special cases */
@@ -2858,7 +2858,10 @@
 	unsigned int lo = 0, hi = 0;
 	unsigned int val;
 
-	if (group > ARRAY_SIZE(a3xx_perfcounter_reglist))
+	if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist))
+		return 0;
+
+	if (counter >= a3xx_perfcounter_reglist[group].count)
 		return 0;
 
 	reg = &(a3xx_perfcounter_reglist[group].regs[counter]);
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 5396196..2249907 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -69,6 +69,8 @@
 	{CP_SET_PROTECTED_MODE,	"ST_PRT_M"},
 	{CP_SET_SHADER_BASES,		"ST_SHD_B"},
 	{CP_WAIT_FOR_IDLE,		"WAIT4IDL"},
+	{CP_WAIT_FOR_ME,		"WAIT4ME"},
+	{CP_WAIT_REG_EQ,		"WAITRGEQ"},
 };
 
 static const struct pm_id_name pm3_nop_values[] = {
@@ -854,7 +856,12 @@
 			(num_iommu_units && this_cmd ==
 			kgsl_mmu_get_reg_gpuaddr(&device->mmu, 0,
 						KGSL_IOMMU_CONTEXT_USER,
-						KGSL_IOMMU_CTX_TTBR0))) {
+						KGSL_IOMMU_CTX_TTBR0)) ||
+			(num_iommu_units && this_cmd == cp_type0_packet(
+						kgsl_mmu_get_reg_ahbaddr(
+						&device->mmu, 0,
+						KGSL_IOMMU_CONTEXT_USER,
+						KGSL_IOMMU_CTX_TTBR0), 1))) {
 			KGSL_LOG_DUMP(device, "Current pagetable: %x\t"
 				"pagetable base: %x\n",
 				kgsl_mmu_get_ptname_from_ptbase(&device->mmu,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 8c96884..479f9b8 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -1122,8 +1122,9 @@
 		ret = 0;
 
 done:
-	kgsl_trace_issueibcmds(device, context->id, ibdesc, numibs,
-		*timestamp, flags, ret, drawctxt->type);
+	kgsl_trace_issueibcmds(device, context ? context->id : 0, ibdesc,
+		numibs, *timestamp, flags, ret,
+		drawctxt ? drawctxt->type : 0);
 
 	kfree(link);
 	return ret;
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 5cc0dff..8d071d1 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -759,7 +759,9 @@
 	.mmu_disable_clk_on_ts = NULL,
 	.mmu_get_pt_lsb = NULL,
 	.mmu_get_reg_gpuaddr = NULL,
+	.mmu_get_reg_ahbaddr = NULL,
 	.mmu_get_num_iommu_units = kgsl_gpummu_get_num_iommu_units,
+	.mmu_hw_halt_supported = NULL,
 };
 
 struct kgsl_mmu_pt_ops gpummu_pt_ops = {
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 739fcff..15f35c9 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -37,31 +37,51 @@
 
 
 static struct kgsl_iommu_register_list kgsl_iommuv0_reg[KGSL_IOMMU_REG_MAX] = {
-	{ 0, 0, 0 },				/* GLOBAL_BASE */
-	{ 0x10, 0x0003FFFF, 14 },		/* TTBR0 */
-	{ 0x14, 0x0003FFFF, 14 },		/* TTBR1 */
-	{ 0x20, 0, 0 },				/* FSR */
-	{ 0x800, 0, 0 },			/* TLBIALL */
-	{ 0x820, 0, 0 },			/* RESUME */
-	{ 0x03C, 0, 0 },			/* TLBLKCR */
-	{ 0x818, 0, 0 },			/* V2PUR */
-	{ 0x2C, 0, 0 },                         /* FSYNR0 */
-	{ 0x2C, 0, 0 },                         /* FSYNR0 */
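+	/* Each entry is { AHB register offset, 1 if banked per context } */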
+	{ 0, 0 },			/* GLOBAL_BASE */
+	{ 0x10, 1 },			/* TTBR0 */
+	{ 0x14, 1 },			/* TTBR1 */
+	{ 0x20, 1 },			/* FSR */
+	{ 0x800, 1 },			/* TLBIALL */
+	{ 0x820, 1 },			/* RESUME */
+	{ 0x03C, 1 },			/* TLBLKCR */
+	{ 0x818, 1 },			/* V2PUR */
+	{ 0x2C, 1 },			/* FSYNR0 */
+	{ 0x2C, 1 },			/* FSYNR0 */
+	{ 0, 0 },			/* TLBSYNC, not in v0 */
+	{ 0, 0 },			/* TLBSTATUS, not in v0 */
+	{ 0, 0 }			/* IMPLDEF_MICRO_MMU_CTRL, not in v0 */
 };
 
 static struct kgsl_iommu_register_list kgsl_iommuv1_reg[KGSL_IOMMU_REG_MAX] = {
-	{ 0, 0, 0 },				/* GLOBAL_BASE */
-	{ 0x20, 0x00FFFFFF, 14 },		/* TTBR0 */
-	{ 0x28, 0x00FFFFFF, 14 },		/* TTBR1 */
-	{ 0x58, 0, 0 },				/* FSR */
-	{ 0x618, 0, 0 },			/* TLBIALL */
-	{ 0x008, 0, 0 },			/* RESUME */
-	{ 0, 0, 0 },				/* TLBLKCR */
-	{ 0, 0, 0 },				/* V2PUR */
-	{ 0x68, 0, 0 },				/* FSYNR0 */
-	{ 0x6C, 0, 0 }				/* FSYNR1 */
+	{ 0, 0 },			/* GLOBAL_BASE */
+	{ 0x20, 1 },			/* TTBR0 */
+	{ 0x28, 1 },			/* TTBR1 */
+	{ 0x58, 1 },			/* FSR */
+	{ 0x618, 1 },			/* TLBIALL */
+	{ 0x008, 1 },			/* RESUME */
+	{ 0, 0 },			/* TLBLKCR not in V1 */
+	{ 0, 0 },			/* V2PUR not in V1 */
+	{ 0x68, 0 },			/* FSYNR0 */
+	{ 0x6C, 0 },			/* FSYNR1 */
+	{ 0x7F0, 1 },			/* TLBSYNC */
+	{ 0x7F4, 1 },			/* TLBSTATUS */
+	{ 0x2000, 0 }			/* IMPLDEF_MICRO_MMU_CTRL */
 };
 
+static struct iommu_access_ops *iommu_access_ops;
+
+static void _iommu_lock(void)
+{
+	if (iommu_access_ops && iommu_access_ops->iommu_lock_acquire)
+		iommu_access_ops->iommu_lock_acquire();
+}
+
+static void _iommu_unlock(void)
+{
+	if (iommu_access_ops && iommu_access_ops->iommu_lock_release)
+		iommu_access_ops->iommu_lock_release();
+}
+
 struct remote_iommu_petersons_spinlock kgsl_iommu_sync_lock_vars;
 
 static int get_iommu_unit(struct device *dev, struct kgsl_mmu **mmu_out,
@@ -582,18 +602,13 @@
 				struct kgsl_pagetable *pt,
 				unsigned int pt_base)
 {
-	struct kgsl_iommu *iommu = mmu->priv;
 	struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
 	unsigned int domain_ptbase = iommu_pt ?
 				iommu_get_pt_base_addr(iommu_pt->domain) : 0;
 	/* Only compare the valid address bits of the pt_base */
-	domain_ptbase &=
-		(iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
-		iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
+	domain_ptbase &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
 
-	pt_base &=
-		(iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
-		iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
+	pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
 
 	return domain_ptbase && pt_base &&
 		(domain_ptbase == pt_base);
@@ -649,15 +664,18 @@
 	domain_num = msm_register_domain(&kgsl_layout);
 	if (domain_num >= 0) {
 		iommu_pt->domain = msm_get_iommu_domain(domain_num);
-		iommu_set_fault_handler(iommu_pt->domain,
-			kgsl_iommu_fault_handler, NULL);
-	} else {
-		KGSL_CORE_ERR("Failed to create iommu domain\n");
-		kfree(iommu_pt);
-		return NULL;
+
+		if (iommu_pt->domain) {
+			iommu_set_fault_handler(iommu_pt->domain,
+				kgsl_iommu_fault_handler, NULL);
+
+			return iommu_pt;
+		}
 	}
 
-	return iommu_pt;
+	KGSL_CORE_ERR("Failed to create iommu domain\n");
+	kfree(iommu_pt);
+	return NULL;
 }
 
 /*
@@ -889,11 +907,14 @@
 	if (iommu->sync_lock_initialized)
 		return status;
 
-	/* Get the physical address of the Lock variables */
-	lock_phy_addr = (msm_iommu_lock_initialize()
+	iommu_access_ops = get_iommu_access_ops_v0();
+
+	if (iommu_access_ops && iommu_access_ops->iommu_lock_initialize)
+		lock_phy_addr = (iommu_access_ops->iommu_lock_initialize()
 			- MSM_SHARED_RAM_BASE + msm_shared_ram_phys);
 
 	if (!lock_phy_addr) {
+		iommu_access_ops = NULL;
 		KGSL_DRV_ERR(mmu->device,
 				"GPU CPU sync lock is not supported by kernel\n");
 		return -ENXIO;
@@ -911,8 +932,10 @@
 				 iommu->sync_lock_desc.physaddr,
 				 iommu->sync_lock_desc.size);
 
-	if (status)
+	if (status) {
+		iommu_access_ops = NULL;
 		return status;
+	}
 
 	/* Flag Sync Lock is Initialized  */
 	iommu->sync_lock_initialized = 1;
@@ -1092,6 +1115,9 @@
 				iommu_unit->reg_map.size);
 		if (ret)
 			goto err;
+
+		iommu_unit->iommu_halt_enable = data.iommu_halt_enable;
+		iommu_unit->ahb_base = data.physstart - mmu->device->reg_phys;
 	}
 	iommu->unit_count = pdata_dev->iommu_count;
 	return ret;
@@ -1118,11 +1144,9 @@
 static unsigned int kgsl_iommu_get_pt_base_addr(struct kgsl_mmu *mmu,
 						struct kgsl_pagetable *pt)
 {
-	struct kgsl_iommu *iommu = mmu->priv;
 	struct kgsl_iommu_pt *iommu_pt = pt->priv;
 	return iommu_get_pt_base_addr(iommu_pt->domain) &
-			(iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
-			iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
+			KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
 }
 
 /*
@@ -1241,6 +1265,30 @@
 }
 
 
+/*
+ * kgsl_iommu_get_reg_ahbaddr - Returns the AHB address of a register
+ * @mmu - Pointer to mmu structure
+ * @iommu_unit - The iommu unit for which the base address is requested
+ * @ctx_id - The context ID of the IOMMU ctx
+ * @reg - The register for which the address is required
+ *
+ * Return - The address of the register, which can be used in a type0 packet
+ */
+static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
+					int iommu_unit, int ctx_id,
+					enum kgsl_iommu_reg_map reg)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+
+	if (iommu->iommu_reg_list[reg].ctx_reg)
+		return iommu->iommu_units[iommu_unit].ahb_base +
+			iommu->iommu_reg_list[reg].reg_offset +
+			(ctx_id << KGSL_IOMMU_CTX_SHIFT) + iommu->ctx_offset;
+	else
+		return iommu->iommu_units[iommu_unit].ahb_base +
+			iommu->iommu_reg_list[reg].reg_offset;
+}
+
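The ahb-address helper above distinguishes context-banked registers (which add a per-context stride plus the context offset) from global registers (a flat offset from the unit's AHB base). A minimal standalone sketch of that arithmetic, using hypothetical constants in place of the driver's register tables, could look like:

	#include <stdio.h>

	#define CTX_SHIFT	12	/* hypothetical stand-in for KGSL_IOMMU_CTX_SHIFT */

	static unsigned int reg_ahbaddr(unsigned int ahb_base, unsigned int reg_offset,
					int ctx_reg, int ctx_id, unsigned int ctx_offset)
	{
		if (ctx_reg)	/* context-banked register: add per-context stride */
			return ahb_base + reg_offset +
				(ctx_id << CTX_SHIFT) + ctx_offset;
		return ahb_base + reg_offset;	/* global register: flat offset */
	}

	int main(void)
	{
		/* hypothetical unit base, register offset and context offset */
		printf("0x%x\n", reg_ahbaddr(0xA000, 0x10, 1, 1, 0x8000));
		return 0;
	}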
 static int kgsl_iommu_init(struct kgsl_mmu *mmu)
 {
 	/*
@@ -1265,13 +1313,15 @@
 	status = kgsl_set_register_map(mmu);
 	if (status)
 		goto done;
-	status = kgsl_iommu_init_sync_lock(mmu);
-	if (status)
-		goto done;
 
-	/* We presently do not support per-process for IOMMU-v1 */
+	/*
+	 * IOMMU-v1 requires hardware halt support to do in-stream
+	 * pagetable switching. This check assumes that if there are
+	 * multiple units, they are matching hardware.
+	 */
 	mmu->pt_per_process = KGSL_MMU_USE_PER_PROCESS_PT &&
-				msm_soc_version_supports_iommu_v0();
+				(msm_soc_version_supports_iommu_v0() ||
+				 iommu->iommu_units[0].iommu_halt_enable);
 
 	/*
 	 * For IOMMU per-process pagetables, the allocatable range
@@ -1294,6 +1344,9 @@
 		mmu->use_cpu_map = false;
 	}
 
+	status = kgsl_iommu_init_sync_lock(mmu);
+	if (status)
+		goto done;
 
 	iommu->iommu_reg_list = kgsl_iommuv0_reg;
 	iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V0;
@@ -1535,7 +1588,7 @@
 	 * changing pagetables we can use this lsb value of the pagetable w/o
 	 * having to read it again
 	 */
-	msm_iommu_lock();
+	_iommu_lock();
 	for (i = 0; i < iommu->unit_count; i++) {
 		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
 		for (j = 0; j < iommu_unit->dev_count; j++) {
@@ -1547,7 +1600,7 @@
 		}
 	}
 	kgsl_iommu_lock_rb_in_tlb(mmu);
-	msm_iommu_unlock();
+	_iommu_unlock();
 
 	/* For complete CFF */
 	kgsl_cffdump_setmem(mmu->setstate_memory.gpuaddr +
@@ -1667,12 +1720,12 @@
 				for (j = 0; j < iommu_unit->dev_count; j++) {
 					if (iommu_unit->dev[j].fault) {
 						kgsl_iommu_enable_clk(mmu, j);
-						msm_iommu_lock();
+						_iommu_lock();
 						KGSL_IOMMU_SET_CTX_REG(iommu,
 						iommu_unit,
 						iommu_unit->dev[j].ctx_id,
 						RESUME, 1);
-						msm_iommu_unlock();
+						_iommu_unlock();
 						iommu_unit->dev[j].fault = 0;
 					}
 				}
@@ -1732,9 +1785,7 @@
 					KGSL_IOMMU_CONTEXT_USER,
 					TTBR0);
 	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
-	return pt_base &
-		(iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
-		iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
+	return pt_base & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
 }
 
 /*
@@ -1764,15 +1815,14 @@
 		return;
 	}
 	/* Mask off the lsb of the pt base address since lsb will not change */
-	pt_base &= (iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
-			iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
+	pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
 
 	/* For v0 SMMU GPU needs to be idle for tlb invalidate as well */
 	if (msm_soc_version_supports_iommu_v0())
 		kgsl_idle(mmu->device);
 
 	/* Acquire GPU-CPU sync Lock here */
-	msm_iommu_lock();
+	_iommu_lock();
 
 	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
 		if (!msm_soc_version_supports_iommu_v0())
@@ -1795,15 +1845,42 @@
 	}
 	/* Flush tlb */
 	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+		unsigned long wait_for_flush;
 		for (i = 0; i < iommu->unit_count; i++) {
 			KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]),
 				KGSL_IOMMU_CONTEXT_USER, TLBIALL, 1);
 			mb();
+			/*
+			 * Wait for the flush to complete by polling the
+			 * SACTIVE bit of the TLBSTATUS register for at most
+			 * 2 seconds. If the bit is still set after 2 seconds,
+			 * give up; at that point the SMMU hardware is likely
+			 * stuck and will eventually hang the GPU or bring
+			 * the system down.
+			 */
+			if (!msm_soc_version_supports_iommu_v0()) {
+				wait_for_flush = jiffies +
+						msecs_to_jiffies(2000);
+				KGSL_IOMMU_SET_CTX_REG(iommu,
+					(&iommu->iommu_units[i]),
+					KGSL_IOMMU_CONTEXT_USER, TLBSYNC, 0);
+				while (KGSL_IOMMU_GET_CTX_REG(iommu,
+					(&iommu->iommu_units[i]),
+					KGSL_IOMMU_CONTEXT_USER, TLBSTATUS) &
+					(KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE)) {
+					if (time_after(jiffies,
+						wait_for_flush)) {
+						KGSL_DRV_ERR(mmu->device,
+						"Wait limit reached for IOMMU tlb flush\n");
+						break;
+					}
+					cpu_relax();
+				}
+			}
 		}
 	}
 
 	/* Release GPU-CPU sync Lock here */
-	msm_iommu_unlock();
+	_iommu_unlock();
 
 	/* Disable smmu clock */
 	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
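The new TLBSYNC/TLBSTATUS wait is a bounded poll: trigger the sync, then spin on the SACTIVE bit until it clears or the 2-second deadline expires, giving up rather than hanging the CPU. A self-contained sketch of the same pattern, with a fake register read standing in for KGSL_IOMMU_GET_CTX_REG and a simple loop budget standing in for the jiffies deadline:

	#include <stdbool.h>
	#include <stdio.h>

	#define SACTIVE	(1u << 0)	/* mirrors KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE */

	/* Hypothetical stand-in for reading the TLBSTATUS register. */
	static unsigned int read_tlbstatus(void)
	{
		static int countdown = 5;
		return (--countdown > 0) ? SACTIVE : 0;
	}

	/* Poll until the flush completes or the budget is exhausted. */
	static bool wait_for_tlb_flush(unsigned long budget)
	{
		while (read_tlbstatus() & SACTIVE) {
			if (budget-- == 0) {
				fprintf(stderr, "Wait limit reached for TLB flush\n");
				return false;	/* give up, as the driver does */
			}
			/* cpu_relax() in the kernel; nothing to do here */
		}
		return true;
	}

	int main(void)
	{
		return wait_for_tlb_flush(1000) ? 0 : 1;
	}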
@@ -1816,8 +1893,7 @@
  * @ctx_id - The context ID of the IOMMU ctx
  * @reg - The register for which address is required
  *
- * Return - The number of iommu units which is also the number of register
- * mapped descriptor arrays which the out parameter will have
+ * Return - The GPU address of the register, which can be used in a type3 packet
  */
 static unsigned int kgsl_iommu_get_reg_gpuaddr(struct kgsl_mmu *mmu,
 					int iommu_unit, int ctx_id, int reg)
@@ -1826,10 +1902,25 @@
 
 	if (KGSL_IOMMU_GLOBAL_BASE == reg)
 		return iommu->iommu_units[iommu_unit].reg_map.gpuaddr;
-	else
+
+	if (iommu->iommu_reg_list[reg].ctx_reg)
 		return iommu->iommu_units[iommu_unit].reg_map.gpuaddr +
 			iommu->iommu_reg_list[reg].reg_offset +
 			(ctx_id << KGSL_IOMMU_CTX_SHIFT) + iommu->ctx_offset;
+	else
+		return iommu->iommu_units[iommu_unit].reg_map.gpuaddr +
+			iommu->iommu_reg_list[reg].reg_offset;
+}
+/*
+ * kgsl_iommu_hw_halt_supported - Returns whether IOMMU halt command is
+ * supported
+ * @mmu - Pointer to mmu structure
+ * @iommu_unit - The iommu unit for which the property is requested
+ */
+static int kgsl_iommu_hw_halt_supported(struct kgsl_mmu *mmu, int iommu_unit)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+	return iommu->iommu_units[iommu_unit].iommu_halt_enable;
 }
 
 static int kgsl_iommu_get_num_iommu_units(struct kgsl_mmu *mmu)
@@ -1851,9 +1942,11 @@
 	.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
 	.mmu_get_pt_lsb = kgsl_iommu_get_pt_lsb,
 	.mmu_get_reg_gpuaddr = kgsl_iommu_get_reg_gpuaddr,
+	.mmu_get_reg_ahbaddr = kgsl_iommu_get_reg_ahbaddr,
 	.mmu_get_num_iommu_units = kgsl_iommu_get_num_iommu_units,
 	.mmu_pt_equal = kgsl_iommu_pt_equal,
 	.mmu_get_pt_base_addr = kgsl_iommu_get_pt_base_addr,
+	.mmu_hw_halt_supported = kgsl_iommu_hw_halt_supported,
 	/* These callbacks will be set on some chipsets */
 	.mmu_setup_pt = NULL,
 	.mmu_cleanup_pt = NULL,
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index c09bc4b..b1b83c0 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -46,6 +46,16 @@
 #define KGSL_IOMMU_V1_FSYNR0_WNR_MASK		0x00000001
 #define KGSL_IOMMU_V1_FSYNR0_WNR_SHIFT		4
 
+/* TTBR0 register fields */
+#define KGSL_IOMMU_CTX_TTBR0_ADDR_MASK		0xFFFFC000
+
+/* TLBSTATUS register fields */
+#define KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE BIT(0)
+
+/* IMPLDEF_MICRO_MMU_CTRL register fields */
+#define KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT BIT(2)
+#define KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE BIT(3)
+
 enum kgsl_iommu_reg_map {
 	KGSL_IOMMU_GLOBAL_BASE = 0,
 	KGSL_IOMMU_CTX_TTBR0,
@@ -57,15 +67,31 @@
 	KGSL_IOMMU_CTX_V2PUR,
 	KGSL_IOMMU_CTX_FSYNR0,
 	KGSL_IOMMU_CTX_FSYNR1,
+	KGSL_IOMMU_CTX_TLBSYNC,
+	KGSL_IOMMU_CTX_TLBSTATUS,
+	KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL,
 	KGSL_IOMMU_REG_MAX
 };
 
 struct kgsl_iommu_register_list {
 	unsigned int reg_offset;
-	unsigned int reg_mask;
-	unsigned int reg_shift;
+	int ctx_reg;
 };
 
+#ifdef CONFIG_MSM_IOMMU
+extern struct iommu_access_ops iommu_access_ops_v0;
+
+static inline struct iommu_access_ops *get_iommu_access_ops_v0(void)
+{
+	return &iommu_access_ops_v0;
+}
+#else
+static inline struct iommu_access_ops *get_iommu_access_ops_v0(void)
+{
+	return NULL;
+}
+#endif
+
 /*
  * Max number of iommu units that the gpu core can have
  * On APQ8064, KGSL can control a maximum of 2 IOMMU units.
@@ -91,10 +117,8 @@
 		iommu->ctx_offset)
 
 /* Gets the lsb value of pagetable */
-#define KGSL_IOMMMU_PT_LSB(iommu, pt_val) \
-	(pt_val &							\
-	~(iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<	\
-	iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift))
+#define KGSL_IOMMMU_PT_LSB(iommu, pt_val)				\
+	(pt_val & ~(KGSL_IOMMU_CTX_TTBR0_ADDR_MASK))
 
 /* offset at which a nop command is placed in setstate_memory */
 #define KGSL_IOMMU_SETSTATE_NOP_OFFSET	1024
@@ -130,11 +154,18 @@
  * @dev_count: Number of IOMMU contexts that are valid in the previous field
  * @reg_map: Memory descriptor which holds the mapped address of this IOMMU
  * units register range
+ * @ahb_base: The base address at which this unit's IOMMU registers can be
+ * accessed over the AHB bus
+ * @iommu_halt_enable: Valid only on IOMMU-v1; when set, indicates that the
+ * iommu unit supports halting of the IOMMU, which can be enabled while
+ * programming the IOMMU registers for synchronization
  */
 struct kgsl_iommu_unit {
 	struct kgsl_iommu_device dev[KGSL_IOMMU_MAX_DEVS_PER_UNIT];
 	unsigned int dev_count;
 	struct kgsl_memdesc reg_map;
+	unsigned int ahb_base;
+	int iommu_halt_enable;
 };
 
 /*
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index d7d9516..ef5b0f4 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -14,7 +14,7 @@
 #define __KGSL_MMU_H
 
 #include <mach/iommu.h>
-
+#include "kgsl_iommu.h"
 /*
  * These defines control the address range for allocations that
  * are mapped into all pagetables.
@@ -150,6 +150,9 @@
 				enum kgsl_iommu_context_id ctx_id);
 	unsigned int (*mmu_get_reg_gpuaddr)(struct kgsl_mmu *mmu,
 			int iommu_unit_num, int ctx_id, int reg);
+	unsigned int (*mmu_get_reg_ahbaddr)(struct kgsl_mmu *mmu,
+			int iommu_unit_num, int ctx_id,
+			enum kgsl_iommu_reg_map reg);
 	int (*mmu_get_num_iommu_units)(struct kgsl_mmu *mmu);
 	int (*mmu_pt_equal) (struct kgsl_mmu *mmu,
 			struct kgsl_pagetable *pt,
@@ -165,6 +168,7 @@
 			(struct kgsl_mmu *mmu, unsigned int *cmds);
 	unsigned int (*mmu_sync_unlock)
 			(struct kgsl_mmu *mmu, unsigned int *cmds);
+	int (*mmu_hw_halt_supported)(struct kgsl_mmu *mmu, int iommu_unit_num);
 };
 
 struct kgsl_mmu_pt_ops {
@@ -337,6 +341,29 @@
 		return 0;
 }
 
+/*
+ * kgsl_mmu_get_reg_ahbaddr() - Calls the mmu-specific function pointer to
+ * return the address the GPU can use to access a register
+ * @mmu:		Pointer to the device mmu
+ * @iommu_unit_num:	There can be multiple iommu units used for graphics.
+ *			This parameter is an index to the iommu unit being used
+ * @ctx_id:		The context id within the iommu unit
+ * @reg:		Register whose address is to be returned
+ *
+ * Returns the AHB address of reg, or 0 if the callback is not implemented
+ */
+static inline unsigned int kgsl_mmu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
+						int iommu_unit_num,
+						int ctx_id,
+						enum kgsl_iommu_reg_map reg)
+{
+	if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_reg_ahbaddr)
+		return mmu->mmu_ops->mmu_get_reg_ahbaddr(mmu, iommu_unit_num,
+							ctx_id, reg);
+	else
+		return 0;
+}
+
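Like the other wrappers in this header, kgsl_mmu_get_reg_ahbaddr() only invokes the hook when both the ops table and the specific callback are present, and falls back to a safe default otherwise. A tiny standalone illustration of that optional-callback pattern, with hypothetical names:

	#include <stdio.h>
	#include <stddef.h>

	struct dev;

	/* Hypothetical ops table with one optional hook. */
	struct dev_ops {
		unsigned int (*get_reg_addr)(struct dev *d, int reg);
	};

	struct dev {
		const struct dev_ops *ops;
	};

	static unsigned int dev_get_reg_addr(struct dev *d, int reg)
	{
		if (d->ops && d->ops->get_reg_addr)
			return d->ops->get_reg_addr(d, reg);
		return 0;	/* default when the hook is not implemented */
	}

	static unsigned int real_get_reg_addr(struct dev *d, int reg)
	{
		return 0x1000 + reg;	/* hypothetical backend */
	}

	static const struct dev_ops ops = { .get_reg_addr = real_get_reg_addr };

	int main(void)
	{
		struct dev with = { &ops }, without = { NULL };
		printf("%u %u\n", dev_get_reg_addr(&with, 4),
				  dev_get_reg_addr(&without, 4));
		return 0;
	}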
 static inline int kgsl_mmu_get_num_iommu_units(struct kgsl_mmu *mmu)
 {
 	if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_num_iommu_units)
@@ -346,6 +373,22 @@
 }
 
 /*
+ * kgsl_mmu_hw_halt_supported() - Runtime check for iommu hw halt
+ * @mmu: the mmu
+ * @iommu_unit_num: Index of the iommu unit to check
+ *
+ * Returns non-zero if the iommu unit supports hw halt,
+ * 0 if not.
+ */
+static inline int kgsl_mmu_hw_halt_supported(struct kgsl_mmu *mmu,
+						int iommu_unit_num)
+{
+	if (mmu->mmu_ops && mmu->mmu_ops->mmu_hw_halt_supported)
+		return mmu->mmu_ops->mmu_hw_halt_supported(mmu, iommu_unit_num);
+	else
+		return 0;
+}
+
+/*
  * kgsl_mmu_is_perprocess() - Runtime check for per-process
  * pagetables.
  * @mmu: the mmu
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 5909153..ef22c41 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -59,6 +59,10 @@
 		.name = "mem_iface_clk",
 		.map = KGSL_CLK_MEM_IFACE,
 	},
+	{
+		.name = "alt_mem_iface_clk",
+		.map = KGSL_CLK_ALT_MEM_IFACE,
+	},
 };
 
 /* Update the elapsed time at a particular clock level
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 0fd64c3..bb45829 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -23,7 +23,7 @@
 #define KGSL_PWRLEVEL_NOMINAL 1
 #define KGSL_PWRLEVEL_LAST_OFFSET 2
 
-#define KGSL_MAX_CLKS 5
+#define KGSL_MAX_CLKS 6
 
 struct platform_device;
 
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index 1458bc5..b3b5643 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -638,20 +638,21 @@
 {
 	struct qpnp_vadc_linear_graph vbatt_param;
 	int rc = 0;
+	int64_t low_thr = 0, high_thr = 0;
 
 	rc = qpnp_get_vadc_gain_and_offset(&vbatt_param, CALIB_ABSOLUTE);
 	if (rc < 0)
 		return rc;
 
-	*low_threshold = (((param->low_thr/3) - QPNP_ADC_625_UV) *
+	low_thr = (((param->low_thr/3) - QPNP_ADC_625_UV) *
 				vbatt_param.dy);
-	do_div(*low_threshold, QPNP_ADC_625_UV);
-	*low_threshold += vbatt_param.adc_gnd;
+	do_div(low_thr, QPNP_ADC_625_UV);
+	*low_threshold = low_thr + vbatt_param.adc_gnd;
 
-	*high_threshold = (((param->high_thr/3) - QPNP_ADC_625_UV) *
+	high_thr = (((param->high_thr/3) - QPNP_ADC_625_UV) *
 				vbatt_param.dy);
-	do_div(*high_threshold, QPNP_ADC_625_UV);
-	*high_threshold += vbatt_param.adc_gnd;
+	do_div(high_thr, QPNP_ADC_625_UV);
+	*high_threshold = high_thr + vbatt_param.adc_gnd;
 
 	pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
 				param->low_thr);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index f35f0e7..aa69475 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -16,7 +16,7 @@
 # MSM IOMMU support
 config MSM_IOMMU
 	bool "MSM IOMMU Support"
-	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064 || ARCH_MSM8974 || ARCH_MPQ8092 || ARCH_MSM8610 || ARCH_MSM8226 || ARCH_MSMZINC
+	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064 || ARCH_MSM8974 || ARCH_MPQ8092 || ARCH_MSM8610 || ARCH_MSM8226 || ARCH_APQ8084
 	select IOMMU_API
 	help
 	  Support for the IOMMUs found on certain Qualcomm SOCs.
diff --git a/drivers/iommu/msm_iommu-v0.c b/drivers/iommu/msm_iommu-v0.c
index c0a4720..b1960c6 100644
--- a/drivers/iommu/msm_iommu-v0.c
+++ b/drivers/iommu/msm_iommu-v0.c
@@ -55,6 +55,9 @@
 	.name = "msm_iommu_sec_bus",
 };
 
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+				 unsigned int len);
+
 static inline void clean_pte(unsigned long *start, unsigned long *end,
 			     int redirect)
 {
@@ -167,6 +170,11 @@
 	/* No need to do anything. IOMMUv0 is always on. */
 }
 
+static void *_iommu_lock_initialize(void)
+{
+	return msm_iommu_lock_initialize();
+}
+
 static void _iommu_lock_acquire(void)
 {
 	msm_iommu_lock();
@@ -182,6 +190,7 @@
 	.iommu_power_off = __disable_regulators,
 	.iommu_clk_on = __enable_clocks,
 	.iommu_clk_off = __disable_clocks,
+	.iommu_lock_initialize = _iommu_lock_initialize,
 	.iommu_lock_acquire = _iommu_lock_acquire,
 	.iommu_lock_release = _iommu_lock_release,
 };
@@ -953,6 +962,7 @@
 			       int prot)
 {
 	unsigned int pa;
+	unsigned int start_va = va;
 	unsigned int offset = 0;
 	unsigned long *fl_table;
 	unsigned long *fl_pte;
@@ -1026,12 +1036,6 @@
 				chunk_offset = 0;
 				sg = sg_next(sg);
 				pa = get_phys_addr(sg);
-				if (pa == 0) {
-					pr_debug("No dma address for sg %p\n",
-							sg);
-					ret = -EINVAL;
-					goto fail;
-				}
 			}
 			continue;
 		}
@@ -1085,12 +1089,6 @@
 				chunk_offset = 0;
 				sg = sg_next(sg);
 				pa = get_phys_addr(sg);
-				if (pa == 0) {
-					pr_debug("No dma address for sg %p\n",
-							sg);
-					ret = -EINVAL;
-					goto fail;
-				}
 			}
 		}
 
@@ -1103,6 +1101,8 @@
 	__flush_iotlb(domain);
 fail:
 	mutex_unlock(&msm_iommu_lock);
+	if (ret && offset > 0)
+		msm_iommu_unmap_range(domain, start_va, offset);
 	return ret;
 }
 
diff --git a/drivers/iommu/msm_iommu_pagetable.c b/drivers/iommu/msm_iommu_pagetable.c
index b62bb76..9614692 100644
--- a/drivers/iommu/msm_iommu_pagetable.c
+++ b/drivers/iommu/msm_iommu_pagetable.c
@@ -431,6 +431,7 @@
 		       struct scatterlist *sg, unsigned int len, int prot)
 {
 	phys_addr_t pa;
+	unsigned int start_va = va;
 	unsigned int offset = 0;
 	unsigned long *fl_pte;
 	unsigned long fl_offset;
@@ -495,12 +496,6 @@
 				chunk_offset = 0;
 				sg = sg_next(sg);
 				pa = get_phys_addr(sg);
-				if (pa == 0) {
-					pr_debug("No dma address for sg %p\n",
-							sg);
-					ret = -EINVAL;
-					goto fail;
-				}
 			}
 			continue;
 		}
@@ -553,12 +548,6 @@
 				chunk_offset = 0;
 				sg = sg_next(sg);
 				pa = get_phys_addr(sg);
-				if (pa == 0) {
-					pr_debug("No dma address for sg %p\n",
-							sg);
-					ret = -EINVAL;
-					goto fail;
-				}
 			}
 		}
 
@@ -569,6 +558,9 @@
 	}
 
 fail:
+	if (ret && offset > 0)
+		msm_iommu_pagetable_unmap_range(pt, start_va, offset);
+
 	return ret;
 }
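Both the v0 driver and the pagetable code now apply the same recovery rule when mapping a scatterlist fails partway: remember the starting VA, track how many bytes were actually mapped in offset, and on error unmap exactly that much so no partial mappings are leaked. A standalone sketch of the unwind pattern, with hypothetical map/unmap helpers:

	#include <stdio.h>

	/* Hypothetical chunk mapper: fails once va reaches 0x3000. */
	static int map_chunk(unsigned int va, unsigned int len)
	{
		return (va >= 0x3000) ? -1 : 0;
	}

	static void unmap_range(unsigned int va, unsigned int len)
	{
		printf("unwinding mapping 0x%x..0x%x\n", va, va + len);
	}

	static int map_range(unsigned int va, unsigned int total, unsigned int chunk)
	{
		unsigned int start_va = va, offset = 0;
		int ret = 0;

		while (offset < total) {
			ret = map_chunk(va, chunk);
			if (ret)
				break;
			va += chunk;
			offset += chunk;
		}
		/* On failure, undo exactly what was mapped so far. */
		if (ret && offset > 0)
			unmap_range(start_va, offset);
		return ret;
	}

	int main(void)
	{
		return map_range(0x1000, 0x4000, 0x1000) ? 1 : 0;
	}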
 
diff --git a/drivers/media/dvb/dvb-core/demux.h b/drivers/media/dvb/dvb-core/demux.h
index f0b9b05..89500f9 100644
--- a/drivers/media/dvb/dvb-core/demux.h
+++ b/drivers/media/dvb/dvb-core/demux.h
@@ -127,6 +127,7 @@
 			u32 cont_err_counter;
 			u32 ts_packets_num;
 			u32 ts_dropped_bytes;
+			u64 stc;
 		} buf;
 
 		struct {
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index ca71c06..5e7a09e 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -2406,6 +2406,7 @@
 		event.params.es_data.pts = dmx_data_ready->buf.pts;
 		event.params.es_data.dts_valid = dmx_data_ready->buf.dts_exists;
 		event.params.es_data.dts = dmx_data_ready->buf.dts;
+		event.params.es_data.stc = dmx_data_ready->buf.stc;
 		event.params.es_data.transport_error_indicator_counter =
 				dmx_data_ready->buf.tei_counter;
 		event.params.es_data.continuity_error_counter =
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index 263241b..04afec0 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -660,22 +660,35 @@
 	}
 }
 
-static int msm_isp_attach_ctx(struct msm_isp_buf_mgr *buf_mgr,
-	struct device *iommu_ctx)
+static void msm_isp_register_ctx(struct msm_isp_buf_mgr *buf_mgr,
+	struct device **iommu_ctx, int num_iommu_ctx)
 {
-	int rc;
-	rc = iommu_attach_device(buf_mgr->iommu_domain, iommu_ctx);
-	if (rc) {
-		pr_err("%s: Iommu attach error\n", __func__);
-		return -EINVAL;
+	int i;
+	buf_mgr->num_iommu_ctx = num_iommu_ctx;
+	for (i = 0; i < num_iommu_ctx; i++)
+		buf_mgr->iommu_ctx[i] = iommu_ctx[i];
+}
+
+static int msm_isp_attach_ctx(struct msm_isp_buf_mgr *buf_mgr)
+{
+	int rc, i;
+	for (i = 0; i < buf_mgr->num_iommu_ctx; i++) {
+		rc = iommu_attach_device(buf_mgr->iommu_domain,
+			buf_mgr->iommu_ctx[i]);
+		if (rc) {
+			pr_err("%s: Iommu attach error\n", __func__);
+			return -EINVAL;
+		}
 	}
 	return 0;
 }
 
-static void msm_isp_detach_ctx(struct msm_isp_buf_mgr *buf_mgr,
-	struct device *iommu_ctx)
+static void msm_isp_detach_ctx(struct msm_isp_buf_mgr *buf_mgr)
 {
-	iommu_detach_device(buf_mgr->iommu_domain, iommu_ctx);
+	int i;
+	for (i = 0; i < buf_mgr->num_iommu_ctx; i++)
+		iommu_detach_device(buf_mgr->iommu_domain,
+			buf_mgr->iommu_ctx[i]);
 }
 
 static int msm_isp_init_isp_buf_mgr(
@@ -692,6 +705,7 @@
 	}
 
 	CDBG("%s: E\n", __func__);
+	msm_isp_attach_ctx(buf_mgr);
 	buf_mgr->num_buf_q = num_buf_q;
 	buf_mgr->bufq =
 		kzalloc(sizeof(struct msm_isp_bufq) * num_buf_q,
@@ -716,6 +730,7 @@
 	ion_client_destroy(buf_mgr->client);
 	kfree(buf_mgr->bufq);
 	buf_mgr->num_buf_q = 0;
+	msm_isp_detach_ctx(buf_mgr);
 	return 0;
 }
 
@@ -752,8 +767,7 @@
 	.flush_buf = msm_isp_flush_buf,
 	.buf_done = msm_isp_buf_done,
 	.buf_divert = msm_isp_buf_divert,
-	.attach_ctx = msm_isp_attach_ctx,
-	.detach_ctx = msm_isp_detach_ctx,
+	.register_ctx = msm_isp_register_ctx,
 	.buf_mgr_init = msm_isp_init_isp_buf_mgr,
 	.buf_mgr_deinit = msm_isp_deinit_isp_buf_mgr,
 };
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
index 066d02a..fda1a57 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
@@ -112,10 +112,8 @@
 	int (*buf_divert) (struct msm_isp_buf_mgr *buf_mgr,
 		uint32_t bufq_handle, uint32_t buf_index,
 		struct timeval *tv, uint32_t frame_id);
-	int (*attach_ctx) (struct msm_isp_buf_mgr *buf_mgr,
-		struct device *iommu_ctx);
-	void (*detach_ctx) (struct msm_isp_buf_mgr *buf_mgr,
-		struct device *iommu_ctx);
+	void (*register_ctx) (struct msm_isp_buf_mgr *buf_mgr,
+		struct device **iommu_ctx, int num_iommu_ctx);
 	int (*buf_mgr_init) (struct msm_isp_buf_mgr *buf_mgr,
 		const char *ctx_name, uint16_t num_buf_q);
 	int (*buf_mgr_deinit) (struct msm_isp_buf_mgr *buf_mgr);
@@ -137,6 +135,9 @@
 	/*IOMMU specific*/
 	int iommu_domain_num;
 	struct iommu_domain *iommu_domain;
+
+	int num_iommu_ctx;
+	struct device *iommu_ctx[2];
 };
 
 int msm_isp_create_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 447c752..b31b3f1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -138,6 +138,8 @@
 		kfree(vfe_dev);
 		return -EINVAL;
 	}
+	vfe_dev->buf_mgr->ops->register_ctx(vfe_dev->buf_mgr,
+		&vfe_dev->iommu_ctx[0], vfe_dev->hw_info->num_iommu_ctx);
 	vfe_dev->vfe_open_cnt = 0;
 end:
 	return rc;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 1b762ea..7bc2b7d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -148,7 +148,8 @@
 		struct msm_vfe_stats_stream *stream_info);
 	void (*clear_framedrop) (struct vfe_device *vfe_dev,
 		struct msm_vfe_stats_stream *stream_info);
-	void (*cfg_comp_mask) (struct vfe_device *vfe_dev);
+	void (*cfg_comp_mask) (struct vfe_device *vfe_dev,
+		uint32_t stats_mask, uint8_t enable);
 	void (*cfg_wm_irq_mask) (struct vfe_device *vfe_dev,
 		struct msm_vfe_stats_stream *stream_info);
 	void (*clear_wm_irq_mask) (struct vfe_device *vfe_dev,
@@ -294,6 +295,7 @@
 		composite_info[MAX_NUM_COMPOSITE_MASK];
 	uint8_t num_used_composite_mask;
 	uint32_t stream_update;
+	enum msm_isp_camif_update_state pipeline_update;
 	struct msm_vfe_src_info src_info[VFE_SRC_MAX];
 	uint16_t stream_handle_cnt;
 	unsigned long event_mask;
@@ -312,6 +314,7 @@
 	STATS_ACTIVE,
 	STATS_START_PENDING,
 	STATS_STOP_PENDING,
+	STATS_STARTING,
 	STATS_STOPPING,
 };
 
@@ -319,9 +322,11 @@
 	uint32_t session_id;
 	uint32_t stream_id;
 	uint32_t stream_handle;
+	uint32_t composite_flag;
 	enum msm_isp_stats_type stats_type;
 	enum msm_vfe_stats_state state;
 	uint32_t framedrop_pattern;
+	uint32_t framedrop_period;
 	uint32_t irq_subsample_pattern;
 
 	uint32_t buffer_offset;
@@ -331,11 +336,10 @@
 
 struct msm_vfe_stats_shared_data {
 	struct msm_vfe_stats_stream stream_info[MSM_ISP_STATS_MAX];
-	enum msm_vfe_stats_pipeline_policy stats_pipeline_policy;
-	uint32_t comp_framedrop_pattern;
-	uint32_t comp_irq_subsample_pattern;
 	uint8_t num_active_stream;
+	atomic_t stats_comp_mask;
 	uint16_t stream_handle_cnt;
+	atomic_t stats_update;
 };
 
 struct msm_vfe_tasklet_queue_cmd {
@@ -380,6 +384,7 @@
 	struct completion reset_complete;
 	struct completion halt_complete;
 	struct completion stream_config_complete;
+	struct completion stats_config_complete;
 	struct mutex realtime_mutex;
 	struct mutex core_mutex;
 
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index d4f6a07..a251f0a 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -114,7 +114,7 @@
 	msm_camera_io_w(0x07FFFFFF, vfe_dev->vfe_base + 0xC);
 	/* BUS_CFG */
 	msm_camera_io_w(0x00000001, vfe_dev->vfe_base + 0x3C);
-	msm_camera_io_w(0x00000025, vfe_dev->vfe_base + 0x1C);
+	msm_camera_io_w(0x01000025, vfe_dev->vfe_base + 0x1C);
 	msm_camera_io_w_mb(0x1DFFFFFF, vfe_dev->vfe_base + 0x20);
 	msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
 	msm_camera_io_w_mb(0x1FFFFFFF, vfe_dev->vfe_base + 0x28);
@@ -304,6 +304,8 @@
 
 	if (vfe_dev->axi_data.stream_update)
 		msm_isp_axi_stream_update(vfe_dev);
+	if (atomic_read(&vfe_dev->stats_data.stats_update))
+		msm_isp_stats_stream_update(vfe_dev);
 	msm_isp_update_framedrop_reg(vfe_dev);
 	msm_isp_update_error_frame_count(vfe_dev);
 
@@ -767,7 +769,8 @@
 	}
 }
 
-static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev)
+static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
+	uint32_t stats_mask, uint8_t enable)
 {
 	return;
 }
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 256d136..5a17635 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -266,7 +266,7 @@
 	msm_camera_io_w(0xC001FF7F, vfe_dev->vfe_base + 0x974);
 	/* BUS_CFG */
 	msm_camera_io_w(0x10000001, vfe_dev->vfe_base + 0x50);
-	msm_camera_io_w(0x800000F3, vfe_dev->vfe_base + 0x28);
+	msm_camera_io_w(0xE00000F3, vfe_dev->vfe_base + 0x28);
 	msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x2C);
 	msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
 	msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x34);
@@ -466,6 +466,8 @@
 
 	if (vfe_dev->axi_data.stream_update)
 		msm_isp_axi_stream_update(vfe_dev);
+	if (atomic_read(&vfe_dev->stats_data.stats_update))
+		msm_isp_stats_stream_update(vfe_dev);
 	msm_isp_update_framedrop_reg(vfe_dev);
 	msm_isp_update_error_frame_count(vfe_dev);
 
@@ -1003,12 +1005,16 @@
 	}
 }
 
-static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev)
+static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
+	uint32_t stats_mask, uint8_t enable)
 {
-	if (vfe_dev->stats_data.stats_pipeline_policy == STATS_COMP_ALL)
-		msm_camera_io_w(0x00FF0000, vfe_dev->vfe_base + 0x44);
+	uint32_t comp_mask;
+	comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x44) >> 16;
+	if (enable)
+		comp_mask |= stats_mask;
 	else
-		msm_camera_io_w(0x00000000, vfe_dev->vfe_base + 0x44);
+		comp_mask &= ~stats_mask;
+	msm_camera_io_w(comp_mask << 16, vfe_dev->vfe_base + 0x44);
 }
 
 static void msm_vfe40_stats_cfg_wm_irq_mask(
@@ -1039,9 +1045,10 @@
 	uint32_t stats_base = VFE40_STATS_BASE(stats_idx);
 
 	/*WR_ADDR_CFG*/
-	msm_camera_io_w(0x7C, vfe_dev->vfe_base + stats_base + 0x8);
+	msm_camera_io_w(stream_info->framedrop_period << 2,
+		vfe_dev->vfe_base + stats_base + 0x8);
 	/*WR_IRQ_FRAMEDROP_PATTERN*/
-	msm_camera_io_w(0xFFFFFFFF,
+	msm_camera_io_w(stream_info->framedrop_pattern,
 		vfe_dev->vfe_base + stats_base + 0x10);
 	/*WR_IRQ_SUBSAMPLE_PATTERN*/
 	msm_camera_io_w(0xFFFFFFFF,
@@ -1189,11 +1196,9 @@
 		goto vfe_no_resource;
 	}
 
-	if (vfe_dev->pdev->id == 0)
-		vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe0");
-	else if (vfe_dev->pdev->id == 1)
-		vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe1");
-	if (!vfe_dev->iommu_ctx[0]) {
+	vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe0");
+	vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe1");
+	if (!vfe_dev->iommu_ctx[0] || !vfe_dev->iommu_ctx[1]) {
 		pr_err("%s: cannot get iommu_ctx\n", __func__);
 		rc = -ENODEV;
 		goto vfe_no_resource;
@@ -1245,7 +1250,7 @@
 };
 
 struct msm_vfe_hardware_info vfe40_hw_info = {
-	.num_iommu_ctx = 1,
+	.num_iommu_ctx = 2,
 	.vfe_clk_idx = VFE40_CLK_IDX,
 	.vfe_ops = {
 		.irq_ops = {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 477985d..728e172 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -389,31 +389,6 @@
 	msm_isp_send_event(vfe_dev, ISP_EVENT_SOF, &sof_event);
 }
 
-uint32_t msm_isp_get_framedrop_period(
-	enum msm_vfe_frame_skip_pattern frame_skip_pattern)
-{
-	switch (frame_skip_pattern) {
-	case NO_SKIP:
-	case EVERY_2FRAME:
-	case EVERY_3FRAME:
-	case EVERY_4FRAME:
-	case EVERY_5FRAME:
-	case EVERY_6FRAME:
-	case EVERY_7FRAME:
-	case EVERY_8FRAME:
-		return frame_skip_pattern + 1;
-	case EVERY_16FRAME:
-		return 16;
-		break;
-	case EVERY_32FRAME:
-		return 32;
-		break;
-	default:
-		return 1;
-	}
-	return 1;
-}
-
 void msm_isp_calculate_framedrop(
 	struct msm_vfe_axi_shared_data *axi_data,
 	struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
@@ -611,6 +586,13 @@
 				ACTIVE : INACTIVE;
 		}
 	}
+
+	if (vfe_dev->axi_data.pipeline_update == DISABLE_CAMIF) {
+		vfe_dev->hw_info->vfe_ops.stats_ops.
+			enable_module(vfe_dev, 0xFF, 0);
+		vfe_dev->axi_data.pipeline_update = NO_UPDATE;
+	}
+
 	vfe_dev->axi_data.stream_update--;
 	if (vfe_dev->axi_data.stream_update == 0)
 		complete(&vfe_dev->stream_config_complete);
@@ -849,12 +831,14 @@
 	return rc;
 }
 
-static int msm_isp_axi_wait_for_cfg_done(struct vfe_device *vfe_dev)
+static int msm_isp_axi_wait_for_cfg_done(struct vfe_device *vfe_dev,
+	enum msm_isp_camif_update_state camif_update)
 {
 	int rc;
 	unsigned long flags;
 	spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
 	init_completion(&vfe_dev->stream_config_complete);
+	vfe_dev->axi_data.pipeline_update = camif_update;
 	vfe_dev->axi_data.stream_update = 2;
 	spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
 	rc = wait_for_completion_interruptible_timeout(
@@ -958,7 +942,7 @@
 			update_camif_state(vfe_dev, camif_update);
 
 	if (wait_for_complete)
-		rc = msm_isp_axi_wait_for_cfg_done(vfe_dev);
+		rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
 
 	return rc;
 }
@@ -976,7 +960,7 @@
 		stream_info->state = STOP_PENDING;
 	}
 
-	rc = msm_isp_axi_wait_for_cfg_done(vfe_dev);
+	rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
 	if (rc < 0) {
 		pr_err("%s: wait for config done failed\n", __func__);
 		return rc;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index c47209f..e08dea2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 #include <linux/io.h>
+#include <linux/atomic.h>
 #include <media/v4l2-subdev.h>
 #include "msm_isp_util.h"
 #include "msm_isp_stats_util.h"
@@ -67,6 +68,7 @@
 	struct msm_isp_buffer *done_buf;
 	struct msm_vfe_stats_stream *stream_info = NULL;
 	uint32_t pingpong_status;
+	uint32_t comp_stats_type_mask = 0;
 	uint32_t stats_comp_mask = 0, stats_irq_mask = 0;
 	stats_comp_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
 		get_comp_mask(irq_status0, irq_status1);
@@ -76,13 +78,17 @@
 		return;
 	ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
 
-	if (vfe_dev->stats_data.stats_pipeline_policy == STATS_COMP_ALL) {
-		if (!stats_comp_mask)
-			return;
-		stats_irq_mask = 0xFFFFFFFF;
-	}
+	if (!stats_comp_mask)
+		stats_irq_mask &=
+			~atomic_read(&vfe_dev->stats_data.stats_comp_mask);
+	else
+		stats_irq_mask |=
+			atomic_read(&vfe_dev->stats_data.stats_comp_mask);
 
 	memset(&buf_event, 0, sizeof(struct msm_isp_event_data));
+	buf_event.timestamp = ts->event_time;
+	buf_event.frame_id =
+		vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
 	pingpong_status = vfe_dev->hw_info->
 		vfe_ops.stats_ops.get_pingpong_status(vfe_dev);
 
@@ -98,22 +104,32 @@
 				done_buf->bufq_handle, done_buf->buf_idx,
 				&ts->buf_time, vfe_dev->axi_data.
 				src_info[VFE_PIX_0].frame_id);
-			if (rc == 0) {
-				stats_event->stats_mask |=
+			if (rc != 0)
+				continue;
+
+			stats_event->stats_buf_idxs[stream_info->stats_type] =
+				done_buf->buf_idx;
+			if (!stream_info->composite_flag) {
+				stats_event->stats_mask =
 					1 << stream_info->stats_type;
-				stats_event->stats_buf_idxs[
-					stream_info->stats_type] =
-					done_buf->buf_idx;
+				ISP_DBG("%s: stats event frame id: 0x%x\n",
+					__func__, buf_event.frame_id);
+				msm_isp_send_event(vfe_dev,
+					ISP_EVENT_STATS_NOTIFY +
+					stream_info->stats_type, &buf_event);
+			} else {
+				comp_stats_type_mask |=
+					1 << stream_info->stats_type;
 			}
 		}
 	}
 
-	if (stats_event->stats_mask) {
-		buf_event.timestamp = ts->event_time;
-		buf_event.frame_id =
-			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
-		msm_isp_send_event(vfe_dev, ISP_EVENT_STATS_NOTIFY +
-				stream_info->stats_type, &buf_event);
+	if (comp_stats_type_mask) {
+		ISP_DBG("%s: composite stats event frame id: 0x%x mask: 0x%x\n",
+			__func__, buf_event.frame_id, comp_stats_type_mask);
+		stats_event->stats_mask = comp_stats_type_mask;
+		msm_isp_send_event(vfe_dev,
+			ISP_EVENT_COMP_STATS_NOTIFY, &buf_event);
 	}
 }
 
@@ -140,36 +156,19 @@
 		return rc;
 	}
 
-	if (stats_data->stats_pipeline_policy != STATS_COMP_ALL) {
-		if (stream_req_cmd->framedrop_pattern >= MAX_SKIP) {
-			pr_err("%s: Invalid framedrop pattern\n", __func__);
-			return rc;
-		}
+	if (stream_req_cmd->framedrop_pattern >= MAX_SKIP) {
+		pr_err("%s: Invalid framedrop pattern\n", __func__);
+		return rc;
+	}
 
-		if (stream_req_cmd->irq_subsample_pattern >= MAX_SKIP) {
-			pr_err("%s: Invalid irq subsample pattern\n", __func__);
-			return rc;
-		}
-	} else {
-		if (stats_data->comp_framedrop_pattern >= MAX_SKIP) {
-			pr_err("%s: Invalid comp framedrop pattern\n",
-				__func__);
-			return rc;
-		}
-
-		if (stats_data->comp_irq_subsample_pattern >= MAX_SKIP) {
-			pr_err("%s: Invalid comp irq subsample pattern\n",
-				__func__);
-			return rc;
-		}
-		stream_req_cmd->framedrop_pattern =
-			vfe_dev->stats_data.comp_framedrop_pattern;
-		stream_req_cmd->irq_subsample_pattern =
-			vfe_dev->stats_data.comp_irq_subsample_pattern;
+	if (stream_req_cmd->irq_subsample_pattern >= MAX_SKIP) {
+		pr_err("%s: Invalid irq subsample pattern\n", __func__);
+		return rc;
 	}
 
 	stream_info->session_id = stream_req_cmd->session_id;
 	stream_info->stream_id = stream_req_cmd->stream_id;
+	stream_info->composite_flag = stream_req_cmd->composite_flag;
 	stream_info->stats_type = stream_req_cmd->stats_type;
 	stream_info->buffer_offset = stream_req_cmd->buffer_offset;
 	stream_info->framedrop_pattern = stream_req_cmd->framedrop_pattern;
@@ -193,6 +192,7 @@
 	struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
 	struct msm_vfe_stats_stream *stream_info = NULL;
 	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	uint32_t framedrop_period;
 	uint32_t stats_idx;
 
 	rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd);
@@ -204,31 +204,12 @@
 	stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
 	stream_info = &stats_data->stream_info[stats_idx];
 
-	switch (stream_info->framedrop_pattern) {
-	case NO_SKIP:
-		stream_info->framedrop_pattern = VFE_NO_DROP;
-		break;
-	case EVERY_2FRAME:
-		stream_info->framedrop_pattern = VFE_DROP_EVERY_2FRAME;
-		break;
-	case EVERY_4FRAME:
-		stream_info->framedrop_pattern = VFE_DROP_EVERY_4FRAME;
-		break;
-	case EVERY_8FRAME:
-		stream_info->framedrop_pattern = VFE_DROP_EVERY_8FRAME;
-		break;
-	case EVERY_16FRAME:
-		stream_info->framedrop_pattern = VFE_DROP_EVERY_16FRAME;
-		break;
-	case EVERY_32FRAME:
-		stream_info->framedrop_pattern = VFE_DROP_EVERY_32FRAME;
-		break;
-	default:
-		stream_info->framedrop_pattern = VFE_NO_DROP;
-		break;
-	}
+	framedrop_period = msm_isp_get_framedrop_period(
+	   stream_req_cmd->framedrop_pattern);
+	stream_info->framedrop_pattern = 0x1;
+	stream_info->framedrop_period = framedrop_period - 1;
 
-	if (stats_data->stats_pipeline_policy == STATS_COMP_NONE)
+	if (!stream_info->composite_flag)
 		vfe_dev->hw_info->vfe_ops.stats_ops.
 			cfg_wm_irq_mask(vfe_dev, stream_info);
 
@@ -257,7 +238,7 @@
 		rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
 	}
 
-	if (stats_data->stats_pipeline_policy == STATS_COMP_NONE)
+	if (!stream_info->composite_flag)
 		vfe_dev->hw_info->vfe_ops.stats_ops.
 			clear_wm_irq_mask(vfe_dev, stream_info);
 
@@ -266,18 +247,145 @@
 	return 0;
 }
 
-int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
+static int msm_isp_init_stats_ping_pong_reg(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream *stream_info)
+{
+	int rc = 0;
+	stream_info->bufq_handle =
+		vfe_dev->buf_mgr->ops->get_bufq_handle(
+		vfe_dev->buf_mgr, stream_info->session_id,
+		stream_info->stream_id);
+	if (stream_info->bufq_handle == 0) {
+		pr_err("%s: no buf configured for stream: 0x%x\n",
+			__func__, stream_info->stream_handle);
+		return -EINVAL;
+	}
+
+	rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+		stream_info, VFE_PING_FLAG, NULL);
+	if (rc < 0) {
+		pr_err("%s: No free buffer for ping\n", __func__);
+		return rc;
+	}
+	rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+		stream_info, VFE_PONG_FLAG, NULL);
+	if (rc < 0) {
+		pr_err("%s: No free buffer for pong\n", __func__);
+		return rc;
+	}
+	return rc;
+}
+
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev)
+{
+	int i;
+	uint32_t stats_mask = 0, comp_stats_mask = 0;
+	uint32_t enable = 0;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+		if (stats_data->stream_info[i].state == STATS_START_PENDING ||
+				stats_data->stream_info[i].state ==
+					STATS_STOP_PENDING) {
+			stats_mask |= i;
+			enable = stats_data->stream_info[i].state ==
+				STATS_START_PENDING ? 1 : 0;
+			stats_data->stream_info[i].state =
+				stats_data->stream_info[i].state ==
+				STATS_START_PENDING ?
+				STATS_STARTING : STATS_STOPPING;
+			vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+				vfe_dev, BIT(i), enable);
+			vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+			   vfe_dev, BIT(i), enable);
+		} else if (stats_data->stream_info[i].state == STATS_STARTING ||
+			stats_data->stream_info[i].state == STATS_STOPPING) {
+			if (stats_data->stream_info[i].composite_flag)
+				comp_stats_mask |= i;
+			if (stats_data->stream_info[i].state == STATS_STARTING)
+				atomic_add(BIT(i),
+					&stats_data->stats_comp_mask);
+			else
+				atomic_sub(BIT(i),
+					&stats_data->stats_comp_mask);
+			stats_data->stream_info[i].state =
+				stats_data->stream_info[i].state ==
+				STATS_STARTING ? STATS_ACTIVE : STATS_INACTIVE;
+		}
+	}
+	atomic_sub(1, &stats_data->stats_update);
+	if (!atomic_read(&stats_data->stats_update))
+		complete(&vfe_dev->stats_config_complete);
+}
+
+static int msm_isp_stats_wait_for_cfg_done(struct vfe_device *vfe_dev)
+{
+	int rc;
+	init_completion(&vfe_dev->stats_config_complete);
+	atomic_set(&vfe_dev->stats_data.stats_update, 2);
+	rc = wait_for_completion_interruptible_timeout(
+		&vfe_dev->stats_config_complete,
+		msecs_to_jiffies(500));
+	if (rc == 0) {
+		pr_err("%s: wait timeout\n", __func__);
+		rc = -1;
+	} else {
+		rc = 0;
+	}
+	return rc;
+}
+
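The wait helper above arms a two-pass handshake: stats_update is set to 2, each SOF-driven call to msm_isp_stats_stream_update() decrements it, and the completion fires once it reaches zero (the 500 ms timeout is omitted here for brevity). A userspace pthread sketch of the same count-down-and-signal idea, using a condition variable in place of the kernel completion:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
	static int pending;	/* plays the role of stats_update */

	/* Called from the "interrupt" side once per frame boundary. */
	static void stream_update(void)
	{
		pthread_mutex_lock(&lock);
		if (pending && --pending == 0)
			pthread_cond_signal(&done);	/* complete() */
		pthread_mutex_unlock(&lock);
	}

	static void *irq_thread(void *arg)
	{
		stream_update();
		stream_update();
		return NULL;
	}

	/* Called from the ioctl side: arm for two passes, then wait. */
	static void wait_for_cfg_done(void)
	{
		pthread_t t;

		pthread_mutex_lock(&lock);
		pending = 2;	/* atomic_set(&stats_update, 2) analog */
		pthread_mutex_unlock(&lock);

		pthread_create(&t, NULL, irq_thread, NULL);

		pthread_mutex_lock(&lock);
		while (pending)
			pthread_cond_wait(&done, &lock);
		pthread_mutex_unlock(&lock);
		pthread_join(t, NULL);
	}

	int main(void)
	{
		wait_for_cfg_done();
		printf("config done\n");
		return 0;
	}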
+static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
 {
 	int i, rc = 0;
-	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+	uint32_t stats_mask = 0, comp_stats_mask = 0, idx;
 	struct msm_vfe_stats_stream *stream_info;
 	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
-	int idx;
-	uint32_t stats_mask = 0;
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+		stream_info = &stats_data->stream_info[idx];
+		if (stream_info->stream_handle !=
+				stream_cfg_cmd->stream_handle[i]) {
+			pr_err("%s: Invalid stream handle: 0x%x received\n",
+				__func__, stream_cfg_cmd->stream_handle[i]);
+			continue;
+		}
+		rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
+		if (rc < 0) {
+			pr_err("%s: No buffer for stream %d\n", __func__, idx);
+			return rc;
+		}
 
-	if (stats_data->num_active_stream == 0)
-		vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+			stream_info->state = STATS_START_PENDING;
+		else
+			stream_info->state = STATS_ACTIVE;
 
+		stats_data->num_active_stream++;
+		stats_mask |= 1 << idx;
+		if (stream_info->composite_flag)
+			comp_stats_mask |= 1 << idx;
+	}
+	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+		rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+	} else {
+		vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+			vfe_dev, stats_mask, stream_cfg_cmd->enable);
+		atomic_add(comp_stats_mask, &stats_data->stats_comp_mask);
+		vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+		   vfe_dev, comp_stats_mask, 1);
+	}
+	return rc;
+}
+
+static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i, rc = 0;
+	uint32_t stats_mask = 0, comp_stats_mask = 0, idx;
+	struct msm_vfe_stats_stream *stream_info;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
 	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
 		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
 		stream_info = &stats_data->stream_info[idx];
@@ -288,71 +396,39 @@
 			continue;
 		}
 
-		if (stream_cfg_cmd->enable) {
-			stream_info->bufq_handle =
-				vfe_dev->buf_mgr->ops->get_bufq_handle(
-				vfe_dev->buf_mgr, stream_info->session_id,
-				stream_info->stream_id);
-				if (stream_info->bufq_handle == 0) {
-					pr_err("%s: no buf configured for stream: 0x%x\n",
-						__func__,
-						stream_info->stream_handle);
-					return -EINVAL;
-				}
-
-			msm_isp_stats_cfg_ping_pong_address(vfe_dev,
-				stream_info, VFE_PING_FLAG, NULL);
-			msm_isp_stats_cfg_ping_pong_address(vfe_dev,
-				stream_info, VFE_PONG_FLAG, NULL);
-			stream_info->state = STATS_START_PENDING;
-			stats_data->num_active_stream++;
-		} else {
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
 			stream_info->state = STATS_STOP_PENDING;
-			stats_data->num_active_stream--;
-		}
+		else
+			stream_info->state = STATS_INACTIVE;
+
+		stats_data->num_active_stream--;
 		stats_mask |= 1 << idx;
+		if (stream_info->composite_flag)
+			comp_stats_mask |= 1 << idx;
 	}
-	vfe_dev->hw_info->vfe_ops.stats_ops.
-		enable_module(vfe_dev, stats_mask, stream_cfg_cmd->enable);
+	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+		rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+	} else {
+		vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+			vfe_dev, stats_mask, stream_cfg_cmd->enable);
+		atomic_sub(comp_stats_mask, &stats_data->stats_comp_mask);
+		vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+		   vfe_dev, comp_stats_mask, 0);
+	}
 	return rc;
 }
 
-int msm_isp_cfg_stats_comp_policy(struct vfe_device *vfe_dev, void *arg)
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
 {
-	int rc = -1;
-	struct msm_vfe_stats_comp_policy_cfg *policy_cfg_cmd = arg;
-	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	int rc = 0;
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+	if (vfe_dev->stats_data.num_active_stream == 0)
+		vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
 
-	if (stats_data->num_active_stream != 0) {
-		pr_err("%s: Cannot update policy when there are active streams\n",
-			   __func__);
-		return rc;
-	}
+	if (stream_cfg_cmd->enable)
+		rc = msm_isp_start_stats_stream(vfe_dev, stream_cfg_cmd);
+	else
+		rc = msm_isp_stop_stats_stream(vfe_dev, stream_cfg_cmd);
 
-	if (policy_cfg_cmd->stats_pipeline_policy >= MAX_STATS_POLICY) {
-		pr_err("%s: Invalid stats composite policy\n", __func__);
-		return rc;
-	}
-
-	if (policy_cfg_cmd->comp_framedrop_pattern >= MAX_SKIP) {
-		pr_err("%s: Invalid comp framedrop pattern\n", __func__);
-		return rc;
-	}
-
-	if (policy_cfg_cmd->comp_irq_subsample_pattern >= MAX_SKIP) {
-		pr_err("%s: Invalid comp irq subsample pattern\n", __func__);
-		return rc;
-	}
-
-	stats_data->stats_pipeline_policy =
-		policy_cfg_cmd->stats_pipeline_policy;
-	stats_data->comp_framedrop_pattern =
-		policy_cfg_cmd->comp_framedrop_pattern;
-	stats_data->comp_irq_subsample_pattern =
-		policy_cfg_cmd->comp_irq_subsample_pattern;
-
-	vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(vfe_dev);
-
-	return 0;
+	return rc;
 }
-
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
index 13e1fd6..7b4c4b4 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
@@ -18,8 +18,8 @@
 void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
 	uint32_t irq_status0, uint32_t irq_status1,
 	struct msm_isp_timestamp *ts);
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev);
 int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
 int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg);
 int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg);
-int msm_isp_cfg_stats_comp_policy(struct vfe_device *vfe_dev, void *arg);
 #endif /* __MSM_ISP_STATS_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 2b047ad..c981901 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -12,6 +12,7 @@
 #include <linux/mutex.h>
 #include <linux/io.h>
 #include <media/v4l2-subdev.h>
+#include <linux/ratelimit.h>
 
 #include "msm.h"
 #include "msm_isp_util.h"
@@ -154,6 +155,31 @@
 	mutex_unlock(&bandwidth_mgr_mutex);
 }
 
+uint32_t msm_isp_get_framedrop_period(
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern)
+{
+	switch (frame_skip_pattern) {
+	case NO_SKIP:
+	case EVERY_2FRAME:
+	case EVERY_3FRAME:
+	case EVERY_4FRAME:
+	case EVERY_5FRAME:
+	case EVERY_6FRAME:
+	case EVERY_7FRAME:
+	case EVERY_8FRAME:
+		return frame_skip_pattern + 1;
+	case EVERY_16FRAME:
+		return 16;
+		break;
+	case EVERY_32FRAME:
+		return 32;
+		break;
+	default:
+		return 1;
+	}
+	return 1;
+}
+
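The period returned here feeds the stats write master configuration: the stream stores period - 1 in framedrop_period, and the VFE40 code programs that value shifted left by 2 into WR_ADDR_CFG. A worked standalone example, assuming EVERY_4FRAME sits at enum value 3 so the period is 4:

	#include <stdio.h>

	/* Sketch of the pattern-to-period mapping; enum values are assumed. */
	static unsigned int framedrop_period(unsigned int pattern)
	{
		if (pattern <= 7)	/* NO_SKIP .. EVERY_8FRAME */
			return pattern + 1;
		if (pattern == 8)	/* EVERY_16FRAME (assumed value) */
			return 16;
		if (pattern == 9)	/* EVERY_32FRAME (assumed value) */
			return 32;
		return 1;
	}

	int main(void)
	{
		unsigned int period = framedrop_period(3);	/* EVERY_4FRAME, assumed */
		/* stored as period - 1, programmed as (period - 1) << 2 */
		printf("period=%u reg=0x%x\n", period, (period - 1) << 2);
		return 0;
	}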
 static inline void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp)
 {
 	struct timespec ts;
@@ -355,11 +381,6 @@
 		rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
 		mutex_unlock(&vfe_dev->core_mutex);
 		break;
-	case VIDIOC_MSM_ISP_CFG_STATS_COMP_POLICY:
-		mutex_lock(&vfe_dev->core_mutex);
-		rc = msm_isp_cfg_stats_comp_policy(vfe_dev, arg);
-		mutex_unlock(&vfe_dev->core_mutex);
-		break;
 	case VIDIOC_MSM_ISP_UPDATE_STREAM:
 		mutex_lock(&vfe_dev->core_mutex);
 		rc = msm_isp_update_axi_stream(vfe_dev, arg);
@@ -686,6 +707,11 @@
 {
 	int i;
 	struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+	static DEFINE_RATELIMIT_STATE(rs,
+		DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+	static DEFINE_RATELIMIT_STATE(rs_stats,
+		DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
 	if (error_info->error_count == 1 ||
 		!(error_info->info_dump_frame_count % 100)) {
 		vfe_dev->hw_info->vfe_ops.core_ops.
@@ -695,7 +721,8 @@
 		error_info->camif_status = 0;
 		error_info->violation_status = 0;
 		for (i = 0; i < MAX_NUM_STREAM; i++) {
-			if (error_info->stream_framedrop_count[i] != 0) {
+			if (error_info->stream_framedrop_count[i] != 0 &&
+				__ratelimit(&rs)) {
 				pr_err("%s: Stream[%d]: dropped %d frames\n",
 					__func__, i,
 					error_info->stream_framedrop_count[i]);
@@ -703,7 +730,8 @@
 			}
 		}
 		for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
-			if (error_info->stats_framedrop_count[i] != 0) {
+			if (error_info->stats_framedrop_count[i] != 0 &&
+				__ratelimit(&rs_stats)) {
 				pr_err("%s: Stats stream[%d]: dropped %d frames\n",
 					__func__, i,
 					error_info->stats_framedrop_count[i]);
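The two ratelimit states keep a flood of framedrop reports from spamming the log: __ratelimit() allows a burst of messages per interval and suppresses the rest. A standalone sketch of that interval/burst idea (not the kernel API, just the concept):

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	struct ratelimit {
		time_t window_start;	/* start of the current interval */
		int interval;		/* seconds per window */
		int burst;		/* messages allowed per window */
		int printed;		/* messages emitted this window */
	};

	static bool ratelimit_ok(struct ratelimit *rl)
	{
		time_t now = time(NULL);

		if (now - rl->window_start >= rl->interval) {
			rl->window_start = now;	/* new window */
			rl->printed = 0;
		}
		return rl->printed++ < rl->burst;
	}

	int main(void)
	{
		struct ratelimit rl = { time(NULL), 5, 10, 0 };
		int i;

		for (i = 0; i < 100; i++)
			if (ratelimit_ok(&rl))
				printf("dropped frame report %d\n", i);
		return 0;
	}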
@@ -750,7 +778,8 @@
 	spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
 	queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx];
 	if (queue_cmd->cmd_used) {
-		pr_err("%s: Tasklet queue overflow\n", __func__);
+		pr_err_ratelimited("%s: Tasklet queue overflow: %d\n",
+			__func__, vfe_dev->pdev->id);
 		list_del(&queue_cmd->list);
 	} else {
 		atomic_add(1, &vfe_dev->irq_cnt);
@@ -818,7 +847,6 @@
 
 int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 {
-	uint32_t i;
 	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
 	long rc;
 	ISP_DBG("%s\n", __func__);
@@ -851,9 +879,6 @@
 
 	vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
 
-	for (i = 0; i < vfe_dev->hw_info->num_iommu_ctx; i++)
-		vfe_dev->buf_mgr->ops->attach_ctx(vfe_dev->buf_mgr,
-			vfe_dev->iommu_ctx[i]);
 	vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr, "msm_isp", 28);
 
 	memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
@@ -870,7 +895,6 @@
 
 int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 {
-	int i;
 	long rc;
 	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
 	ISP_DBG("%s\n", __func__);
@@ -888,11 +912,6 @@
 		pr_err("%s: halt timeout\n", __func__);
 
 	vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
-
-	for (i = vfe_dev->hw_info->num_iommu_ctx - 1; i >= 0; i--)
-		vfe_dev->buf_mgr->ops->detach_ctx(vfe_dev->buf_mgr,
-			vfe_dev->iommu_ctx[i]);
-
 	vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
 
 	vfe_dev->vfe_open_cnt--;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
index 7934f26..34b9859 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
@@ -43,6 +43,9 @@
 	struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
 };
 
+uint32_t msm_isp_get_framedrop_period(
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern);
+
 int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client);
 int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
 	uint64_t ab, uint64_t ib);
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 90cc61c..962c079 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -175,9 +175,9 @@
 		data |= (csid << 20);
 		break;
 	}
-	if (data)
-		msm_camera_io_w_mb(data, ispif->base +
-			ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+
+	msm_camera_io_w_mb(data, ispif->base +
+		ISPIF_VFE_m_INPUT_SEL(vfe_intf));
 }
 
 static void msm_ispif_enable_crop(struct ispif_device *ispif,
@@ -294,6 +294,58 @@
 	return rc;
 }
 
+static void msm_ispif_select_clk_mux(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+	uint32_t data = 0;
+
+	switch (intftype) {
+	case PIX0:
+		data = msm_camera_io_r(ispif->clk_mux_base);
+		data &= ~(0xf << (vfe_intf * 8));
+		data |= (csid << (vfe_intf * 8));
+		msm_camera_io_w(data, ispif->clk_mux_base);
+		break;
+
+	case RDI0:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (vfe_intf * 12));
+		data |= (csid << (vfe_intf * 12));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+
+	case PIX1:
+		data = msm_camera_io_r(ispif->clk_mux_base);
+		data &= ~(0xf0 << (vfe_intf * 8));
+		data |= (csid << (4 + (vfe_intf * 8)));
+		msm_camera_io_w(data, ispif->clk_mux_base);
+		break;
+
+	case RDI1:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (4 + (vfe_intf * 12)));
+		data |= (csid << (4 + (vfe_intf * 12)));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+
+	case RDI2:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (8 + (vfe_intf * 12)));
+		data |= (csid << (8 + (vfe_intf * 12)));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+	}
+	CDBG("%s intftype %d data %x\n", __func__, intftype, data);
+	mb();
+	return;
+}
+
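Each interface owns a 4-bit CSID selector inside the clock-mux registers: PIX selectors sit in the base register at 8-bit strides per VFE (PIX1 offset by a further 4 bits), while RDI selectors sit in the register at ISPIF_RDI_CLK_MUX_SEL_ADDR at 12-bit strides per VFE, with RDI1 and RDI2 shifted a further 4 and 8 bits inside the stride. A standalone sketch of the read-modify-write for the RDI1 case:

	#include <stdio.h>

	/* RDI1 on a given VFE: 4-bit field at bit (4 + vfe * 12). */
	static unsigned int set_rdi1_csid(unsigned int reg, unsigned int csid,
					  unsigned int vfe)
	{
		unsigned int shift = 4 + vfe * 12;

		reg &= ~(0xFu << shift);	/* clear the old selector */
		reg |= (csid & 0xF) << shift;	/* install the new one */
		return reg;
	}

	int main(void)
	{
		/* e.g. route CSID 2 to RDI1 of VFE1: field sits at bit 16 */
		printf("0x%08x\n", set_rdi1_csid(0x00000000, 2, 1));
		return 0;
	}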
 static uint16_t msm_ispif_get_cids_mask_from_cfg(
 	struct msm_ispif_params_entry *entry)
 {
@@ -358,6 +410,10 @@
 			return -EINVAL;
 		}
 
+		if (ispif->csid_version >= CSID_VERSION_V3)
+				msm_ispif_select_clk_mux(ispif, intftype,
+				params->entries[i].csid, vfe_intf);
+
 		rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf);
 		if (rc) {
 			pr_err("%s:validate_intf_status failed, rc = %d\n",
@@ -716,6 +772,22 @@
 
 	ispif->csid_version = csid_version;
 
+	if (ispif->csid_version >= CSID_VERSION_V3) {
+		if (!ispif->clk_mux_mem || !ispif->clk_mux_io) {
+			pr_err("%s csi clk mux mem %p io %p\n", __func__,
+				ispif->clk_mux_mem, ispif->clk_mux_io);
+			rc = -ENOMEM;
+			return rc;
+		}
+		ispif->clk_mux_base = ioremap(ispif->clk_mux_mem->start,
+			resource_size(ispif->clk_mux_mem));
+		if (!ispif->clk_mux_base) {
+			pr_err("%s: clk_mux_mem ioremap failed\n", __func__);
+			rc = -ENOMEM;
+			return rc;
+		}
+	}
+
 	ispif->base = ioremap(ispif->mem->start,
 		resource_size(ispif->mem));
 	if (!ispif->base) {
@@ -771,6 +843,8 @@
 
 	iounmap(ispif->base);
 
+	iounmap(ispif->clk_mux_base);
+
 	ispif->ispif_state = ISPIF_POWER_DOWN;
 }
 
@@ -942,6 +1016,16 @@
 		rc = -EBUSY;
 		goto error;
 	}
+	ispif->clk_mux_mem = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "csi_clk_mux");
+	if (ispif->clk_mux_mem) {
+		ispif->clk_mux_io = request_mem_region(
+			ispif->clk_mux_mem->start,
+			resource_size(ispif->clk_mux_mem),
+			ispif->clk_mux_mem->name);
+		if (!ispif->clk_mux_io)
+			pr_err("%s: no valid csi_mux region\n", __func__);
+	}
 
 	ispif->pdev = pdev;
 	ispif->ispif_state = ISPIF_POWER_DOWN;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
index fae7a38..faa32aa 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -42,9 +42,12 @@
 	struct platform_device *pdev;
 	struct msm_sd_subdev msm_sd;
 	struct resource *mem;
+	struct resource *clk_mux_mem;
 	struct resource *irq;
 	struct resource *io;
+	struct resource *clk_mux_io;
 	void __iomem *base;
+	void __iomem *clk_mux_base;
 	struct mutex mutex;
 	uint8_t start_ack_pending;
 	uint32_t csid_version;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
index 9f8b2fa..6396486 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
@@ -50,6 +50,8 @@
 
 
 
+/* CSID CLK MUX SEL REGISTERS */
+#define ISPIF_RDI_CLK_MUX_SEL_ADDR              0x8
 
 /*ISPIF RESET BITS*/
 #define VFE_CLK_DOMAIN_RST                 BIT(31)
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
index 5e61a4d..c805c3d 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
@@ -46,6 +46,9 @@
 #define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n)      (0x2D0 + ISPIF_VFE(m) + 4*(n))
 #define ISPIF_VFE_m_3D_DESKEW_SIZE(m)            (0x2E4 + ISPIF_VFE(m))
 
+/* CSID CLK MUX SEL REGISTERS */
+#define ISPIF_RDI_CLK_MUX_SEL_ADDR               0x8
+
 /*ISPIF RESET BITS*/
 #define VFE_CLK_DOMAIN_RST                       BIT(31)
 #define PIX_1_CLK_DOMAIN_RST                     BIT(30)
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index fbd4a2e..33eaa69 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -185,49 +185,15 @@
 	{"csi_pclk", -1},
 };
 
-static struct msm_cam_clk_info csid0_8974_clk_info[] = {
+static struct msm_cam_clk_info csid_8974_clk_info[] = {
 	{"camss_top_ahb_clk", -1},
 	{"ispif_ahb_clk", -1},
-	{"csi0_ahb_clk", -1},
-	{"csi0_src_clk", 200000000},
-	{"csi0_clk", -1},
-	{"csi0_phy_clk", -1},
-	{"csi0_pix_clk", -1},
-	{"csi0_rdi_clk", -1},
-};
-
-static struct msm_cam_clk_info csid1_8974_clk_info[] = {
-	{"csi1_ahb_clk", -1},
-	{"csi1_src_clk", 200000000},
-	{"csi1_clk", -1},
-	{"csi1_phy_clk", -1},
-	{"csi1_pix_clk", -1},
-	{"csi1_rdi_clk", -1},
-};
-
-static struct msm_cam_clk_info csid2_8974_clk_info[] = {
-	{"csi2_ahb_clk", -1},
-	{"csi2_src_clk", 200000000},
-	{"csi2_clk", -1},
-	{"csi2_phy_clk", -1},
-	{"csi2_pix_clk", -1},
-	{"csi2_rdi_clk", -1},
-};
-
-static struct msm_cam_clk_info csid3_8974_clk_info[] = {
-	{"csi3_ahb_clk", -1},
-	{"csi3_src_clk", 200000000},
-	{"csi3_clk", -1},
-	{"csi3_phy_clk", -1},
-	{"csi3_pix_clk", -1},
-	{"csi3_rdi_clk", -1},
-};
-
-static struct msm_cam_clk_setting csid_8974_clk_info[] = {
-	{&csid0_8974_clk_info[0], ARRAY_SIZE(csid0_8974_clk_info)},
-	{&csid1_8974_clk_info[0], ARRAY_SIZE(csid1_8974_clk_info)},
-	{&csid2_8974_clk_info[0], ARRAY_SIZE(csid2_8974_clk_info)},
-	{&csid3_8974_clk_info[0], ARRAY_SIZE(csid3_8974_clk_info)},
+	{"csi_ahb_clk", -1},
+	{"csi_src_clk", 200000000},
+	{"csi_clk", -1},
+	{"csi_phy_clk", -1},
+	{"csi_pix_clk", -1},
+	{"csi_rdi_clk", -1},
 };
 
 static struct camera_vreg_t csid_8960_vreg_info[] = {
@@ -241,7 +207,6 @@
 static int msm_csid_init(struct csid_device *csid_dev, uint32_t *csid_version)
 {
 	int rc = 0;
-	uint8_t core_id = 0;
 
 	if (!csid_version) {
 		pr_err("%s:%d csid_version NULL\n", __func__, __LINE__);
@@ -306,26 +271,14 @@
 		}
 
 		rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
-			csid_8974_clk_info[0].clk_info, csid_dev->csid0_clk,
-			csid_8974_clk_info[0].num_clk_info, 1);
+			csid_8974_clk_info, csid_dev->csid_clk,
+			ARRAY_SIZE(csid_8974_clk_info), 1);
 		if (rc < 0) {
 			pr_err("%s: clock enable failed\n", __func__);
-			goto csid0_clk_enable_failed;
-		}
-		core_id = csid_dev->pdev->id;
-		if (core_id) {
-			rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
-				csid_8974_clk_info[core_id].clk_info,
-				csid_dev->csid_clk,
-				csid_8974_clk_info[core_id].num_clk_info, 1);
-			if (rc < 0) {
-				pr_err("%s: clock enable failed\n",
-					__func__);
-				goto clk_enable_failed;
-			}
+			goto clk_enable_failed;
 		}
 	}
-
+	CDBG("%s:%d called\n", __func__, __LINE__);
 	csid_dev->hw_version =
 		msm_camera_io_r(csid_dev->base + CSID_HW_VERSION_ADDR);
 	CDBG("%s:%d called csid_dev->hw_version %x\n", __func__, __LINE__,
@@ -341,12 +294,6 @@
 	return rc;
 
 clk_enable_failed:
-	if (CSID_VERSION >= CSID_VERSION_V3) {
-		msm_cam_clk_enable(&csid_dev->pdev->dev,
-			csid_8974_clk_info[0].clk_info, csid_dev->csid0_clk,
-			csid_8974_clk_info[0].num_clk_info, 0);
-	}
-csid0_clk_enable_failed:
 	if (CSID_VERSION <= CSID_VERSION_V2) {
 		msm_camera_enable_vreg(&csid_dev->pdev->dev,
 			csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
@@ -375,7 +322,6 @@
 static int msm_csid_release(struct csid_device *csid_dev)
 {
 	uint32_t irq;
-	uint8_t core_id = 0;
 
 	if (csid_dev->csid_state != CSID_POWER_UP) {
 		pr_err("%s: csid invalid state %d\n", __func__,
@@ -401,16 +347,8 @@
 			csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
 			NULL, 0, &csid_dev->csi_vdd, 0);
 	} else if (csid_dev->hw_version >= CSID_VERSION_V3) {
-		core_id = csid_dev->pdev->id;
-		if (core_id)
-			msm_cam_clk_enable(&csid_dev->pdev->dev,
-				csid_8974_clk_info[core_id].clk_info,
-				csid_dev->csid_clk,
-				csid_8974_clk_info[core_id].num_clk_info, 0);
-
-		msm_cam_clk_enable(&csid_dev->pdev->dev,
-			csid_8974_clk_info[0].clk_info, csid_dev->csid0_clk,
-			csid_8974_clk_info[0].num_clk_info, 0);
+		msm_cam_clk_enable(&csid_dev->pdev->dev, csid_8974_clk_info,
+			csid_dev->csid_clk, ARRAY_SIZE(csid_8974_clk_info), 0);
 
 		msm_camera_enable_vreg(&csid_dev->pdev->dev,
 			csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h
index 7ae1392..fd4db79 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h
@@ -38,7 +38,6 @@
 	uint32_t hw_version;
 	enum msm_csid_state_t csid_state;
 
-	struct clk *csid0_clk[11];
 	struct clk *csid_clk[11];
 };
 
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index df3ee60..7d3a1fc 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -43,14 +43,15 @@
 	uint8_t lane_cnt = 0;
 	uint16_t lane_mask = 0;
 	void __iomem *csiphybase;
+	uint8_t csiphy_id = csiphy_dev->pdev->id;
 	csiphybase = csiphy_dev->base;
 	if (!csiphybase) {
 		pr_err("%s: csiphybase NULL\n", __func__);
 		return -EINVAL;
 	}
 
-	csiphy_dev->lane_mask[csiphy_dev->pdev->id] |= csiphy_params->lane_mask;
-	lane_mask = csiphy_dev->lane_mask[csiphy_dev->pdev->id];
+	csiphy_dev->lane_mask[csiphy_id] |= csiphy_params->lane_mask;
+	lane_mask = csiphy_dev->lane_mask[csiphy_id];
 	lane_cnt = csiphy_params->lane_cnt;
 	if (csiphy_params->lane_cnt < 1 || csiphy_params->lane_cnt > 4) {
 		pr_err("%s: unsupported lane cnt %d\n",
@@ -58,11 +59,28 @@
 		return rc;
 	}
 
-	CDBG("%s csiphy_params, mask = %x, cnt = %d, settle cnt = %x\n",
+	CDBG("%s csiphy_params, mask = %x cnt = %d settle cnt = %x csid %d\n",
 		__func__,
 		csiphy_params->lane_mask,
 		csiphy_params->lane_cnt,
-		csiphy_params->settle_cnt);
+		csiphy_params->settle_cnt,
+		csiphy_params->csid_core);
+
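+	/*
+	 * For CSIPHY v3, program the clock mux with the CSID core to use:
+	 * combo mode on the upper lanes (mask 0x18) writes the high nibble,
+	 * otherwise the low nibble is written.
+	 */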
+	if (csiphy_dev->hw_version >= CSIPHY_VERSION_V3) {
+		val = msm_camera_io_r(csiphy_dev->clk_mux_base);
+		if (csiphy_params->combo_mode &&
+			(csiphy_params->lane_mask & 0x18)) {
+			val &= ~0xf0;
+			val |= csiphy_params->csid_core << 4;
+		} else {
+			val &= ~0xf;
+			val |= csiphy_params->csid_core;
+		}
+		msm_camera_io_w(val, csiphy_dev->clk_mux_base);
+		CDBG("%s clk mux addr %p val 0x%x\n", __func__,
+			csiphy_dev->clk_mux_base, val);
+		mb();
+	}
 	msm_camera_io_w(0x1, csiphybase + MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR);
 	msm_camera_io_w(0x1, csiphybase + MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR);
 
@@ -204,6 +222,22 @@
 			csiphy_8960_clk_info, csiphy_dev->csiphy_clk,
 			ARRAY_SIZE(csiphy_8960_clk_info), 1);
 	} else {
+		if (!csiphy_dev->clk_mux_mem || !csiphy_dev->clk_mux_io) {
+			pr_err("%s clk mux mem %p io %p\n", __func__,
+				csiphy_dev->clk_mux_mem,
+				csiphy_dev->clk_mux_io);
+			rc = -ENOMEM;
+			return rc;
+		}
+		csiphy_dev->clk_mux_base = ioremap(
+			csiphy_dev->clk_mux_mem->start,
+			resource_size(csiphy_dev->clk_mux_mem));
+		if (!csiphy_dev->clk_mux_base) {
+			pr_err("%s: clk_mux ioremap failed\n", __func__);
+			rc = -ENOMEM;
+			return rc;
+		}
+
 		CDBG("%s:%d called\n", __func__, __LINE__);
 		rc = msm_cam_clk_enable(&csiphy_dev->pdev->dev,
 			csiphy_8974_clk_info, csiphy_dev->csiphy_clk,
@@ -269,12 +303,31 @@
 	}
 	CDBG("%s:%d called\n", __func__, __LINE__);
 
 	if (CSIPHY_VERSION < CSIPHY_VERSION_V3) {
 		CDBG("%s:%d called\n", __func__, __LINE__);
 		rc = msm_cam_clk_enable(&csiphy_dev->pdev->dev,
 			csiphy_8960_clk_info, csiphy_dev->csiphy_clk,
 			ARRAY_SIZE(csiphy_8960_clk_info), 1);
 	} else {
+		if (!csiphy_dev->clk_mux_mem || !csiphy_dev->clk_mux_io) {
+			pr_err("%s clk mux mem %p io %p\n", __func__,
+				csiphy_dev->clk_mux_mem,
+				csiphy_dev->clk_mux_io);
+			rc = -ENOMEM;
+			return rc;
+		}
+
+		csiphy_dev->clk_mux_base = ioremap(
+			csiphy_dev->clk_mux_mem->start,
+			resource_size(csiphy_dev->clk_mux_mem));
+		if (!csiphy_dev->clk_mux_base) {
+			pr_err("%s: clk_mux ioremap failed\n", __func__);
+			rc = -ENOMEM;
+			return rc;
+		}
+
 		CDBG("%s:%d called\n", __func__, __LINE__);
 		rc = msm_cam_clk_enable(&csiphy_dev->pdev->dev,
 			csiphy_8974_clk_info, csiphy_dev->csiphy_clk,
@@ -359,14 +412,16 @@
 
 	disable_irq(csiphy_dev->irq->start);
 
-	if (CSIPHY_VERSION < CSIPHY_VERSION_V3)
+	if (CSIPHY_VERSION < CSIPHY_VERSION_V3) {
 		msm_cam_clk_enable(&csiphy_dev->pdev->dev,
 			csiphy_8960_clk_info, csiphy_dev->csiphy_clk,
 			ARRAY_SIZE(csiphy_8960_clk_info), 0);
-	else
+	} else {
 		msm_cam_clk_enable(&csiphy_dev->pdev->dev,
 			csiphy_8974_clk_info, csiphy_dev->csiphy_clk,
 			ARRAY_SIZE(csiphy_8974_clk_info), 0);
+		iounmap(csiphy_dev->clk_mux_base);
+	}
 
 	iounmap(csiphy_dev->base);
 	csiphy_dev->base = NULL;
@@ -426,20 +481,23 @@
 	msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_LNCK_CFG2_ADDR);
 	msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
 
-	if (CSIPHY_VERSION < CSIPHY_VERSION_V3)
+	if (CSIPHY_VERSION < CSIPHY_VERSION_V3) {
 		msm_cam_clk_enable(&csiphy_dev->pdev->dev,
 			csiphy_8960_clk_info, csiphy_dev->csiphy_clk,
 			ARRAY_SIZE(csiphy_8960_clk_info), 0);
-	else
+	} else {
 		msm_cam_clk_enable(&csiphy_dev->pdev->dev,
 			csiphy_8974_clk_info, csiphy_dev->csiphy_clk,
 			ARRAY_SIZE(csiphy_8974_clk_info), 0);
+		iounmap(csiphy_dev->clk_mux_base);
+	}
 
 	iounmap(csiphy_dev->base);
 	csiphy_dev->base = NULL;
 	csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
 	return 0;
 }
+
 #endif
 
 static long msm_csiphy_cmd(struct csiphy_device *csiphy_dev, void *arg)
@@ -588,6 +646,17 @@
 	}
 	disable_irq(new_csiphy_dev->irq->start);
 
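+	/*
+	 * The clk mux region is only needed on the CSIPHY v3 path; a failed
+	 * request here is rejected later when the PHY is initialized.
+	 */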
+	new_csiphy_dev->clk_mux_mem = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "csiphy_clk_mux");
+	if (new_csiphy_dev->clk_mux_mem) {
+		new_csiphy_dev->clk_mux_io = request_mem_region(
+			new_csiphy_dev->clk_mux_mem->start,
+			resource_size(new_csiphy_dev->clk_mux_mem),
+			new_csiphy_dev->clk_mux_mem->name);
+		if (!new_csiphy_dev->clk_mux_io)
+			pr_err("%s: csiphy_clk_mux region request failed\n",
+				__func__);
+	}
+
 	new_csiphy_dev->pdev = pdev;
 	new_csiphy_dev->msm_sd.sd.internal_ops = &msm_csiphy_internal_ops;
 	new_csiphy_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index e19be34..a11b958 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -32,9 +32,12 @@
 	struct msm_sd_subdev msm_sd;
 	struct v4l2_subdev subdev;
 	struct resource *mem;
+	struct resource *clk_mux_mem;
 	struct resource *irq;
 	struct resource *io;
+	struct resource *clk_mux_io;
 	void __iomem *base;
+	void __iomem *clk_mux_base;
 	struct mutex mutex;
 	uint32_t hw_version;
 	enum msm_csiphy_state_t csiphy_state;
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 14d1197..b56378a 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -25,6 +25,9 @@
 /* Length of mandatory fields that must exist in header of video PES */
 #define PES_MANDATORY_FIELDS_LEN			9
 
+/* Index of first byte in TS packet holding STC */
+#define STC_LOCATION_IDX			188
+
 #define MAX_PES_LENGTH	(SZ_64K)
 
 #define MAX_TS_PACKETS_FOR_SDMX_PROCESS	(500)
@@ -2202,6 +2205,14 @@
 	size_t len = 0;
 	struct dmx_pts_dts_info *pts_dts;
 
+	if (meta_data->packet_type == DMX_PES_PACKET)
+		data->buf.stc = meta_data->info.pes.stc;
+	else
+		data->buf.stc = meta_data->info.framing.stc;
+
 	pts_dts = meta_data->packet_type == DMX_PES_PACKET ?
 		&meta_data->info.pes.pts_dts_info :
 		&meta_data->info.framing.pts_dts_info;
@@ -2313,6 +2324,7 @@
 		packet.raw_data_offset = feed_data->frame_offset;
 		meta_data.info.framing.pattern_type =
 			feed_data->last_framing_match_type;
+		meta_data.info.framing.stc = feed_data->last_framing_match_stc;
 
 		mpq_streambuffer_get_buffer_handle(stream_buffer,
 			0, /* current write buffer handle */
@@ -2385,6 +2397,7 @@
 		mpq_dmx_save_pts_dts(feed_data);
 
 		meta_data.packet_type = DMX_PES_PACKET;
+		meta_data.info.pes.stc = feed_data->prev_stc;
 
 		mpq_dmx_update_decoder_stat(mpq_demux);
 
@@ -2411,7 +2424,8 @@
 
 static int mpq_dmx_process_video_packet_framing(
 			struct dvb_demux_feed *feed,
-			const u8 *buf)
+			const u8 *buf,
+			u64 curr_stc)
 {
 	int bytes_avail;
 	u32 ts_payload_offset;
@@ -2596,7 +2610,8 @@
 	 * pass data to decoder only after sequence header
 	 * or equivalent is found. Otherwise the data is dropped.
 	 */
-	if (!(feed_data->found_sequence_header_pattern)) {
+	if (!feed_data->found_sequence_header_pattern) {
+		feed_data->prev_stc = curr_stc;
 		spin_unlock(&feed_data->video_buffer_lock);
 		return 0;
 	}
@@ -2699,6 +2714,12 @@
 					framing_res.info[i].type;
 				feed_data->last_pattern_offset =
 					framing_res.info[i].offset;
+				if (framing_res.info[i].used_prefix_size)
+					feed_data->last_framing_match_stc =
+						feed_data->prev_stc;
+				else
+					feed_data->last_framing_match_stc =
+						curr_stc;
 				continue;
 			}
 			/*
@@ -2744,6 +2765,8 @@
 			packet.raw_data_offset = feed_data->frame_offset;
 			meta_data.info.framing.pattern_type =
 				feed_data->last_framing_match_type;
+			meta_data.info.framing.stc =
+				feed_data->last_framing_match_stc;
 
 			mpq_streambuffer_get_buffer_handle(
 				stream_buffer,
@@ -2784,8 +2807,13 @@
 			framing_res.info[i].type;
 		feed_data->last_pattern_offset =
 			framing_res.info[i].offset;
+		if (framing_res.info[i].used_prefix_size)
+			feed_data->last_framing_match_stc = feed_data->prev_stc;
+		else
+			feed_data->last_framing_match_stc = curr_stc;
 	}
 
+	feed_data->prev_stc = curr_stc;
 	feed_data->first_prefix_size = 0;
 
 	if (pending_data_len) {
@@ -2810,7 +2838,8 @@
 
 static int mpq_dmx_process_video_packet_no_framing(
 			struct dvb_demux_feed *feed,
-			const u8 *buf)
+			const u8 *buf,
+			u64 curr_stc)
 {
 	int bytes_avail;
 	u32 ts_payload_offset;
@@ -2886,6 +2915,7 @@
 				mpq_dmx_save_pts_dts(feed_data);
 
 				meta_data.packet_type = DMX_PES_PACKET;
+				meta_data.info.pes.stc = feed_data->prev_stc;
 
 				mpq_dmx_update_decoder_stat(mpq_demux);
 
@@ -2928,6 +2958,8 @@
 		} else {
 			feed->pusi_seen = 1;
 		}
+
+		feed_data->prev_stc = curr_stc;
 	}
 
 	/*
@@ -3077,10 +3109,25 @@
 			struct dvb_demux_feed *feed,
 			const u8 *buf)
 {
+	u64 curr_stc;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+		(mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+		curr_stc = 0;
+	} else {
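+		/*
+		 * 192-byte TS packets carry a timestamp trailer at offset
+		 * STC_LOCATION_IDX; assemble the 24-bit value and scale it
+		 * to 27MHz units.
+		 */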
+		curr_stc = buf[STC_LOCATION_IDX + 2] << 16;
+		curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+		curr_stc += buf[STC_LOCATION_IDX];
+		curr_stc *= 256; /* convert from 105.47 kHz to 27 MHz */
+	}
+
 	if (mpq_dmx_info.decoder_framing)
-		return mpq_dmx_process_video_packet_no_framing(feed, buf);
+		return mpq_dmx_process_video_packet_no_framing(feed, buf,
+				curr_stc);
 	else
-		return mpq_dmx_process_video_packet_framing(feed, buf);
+		return mpq_dmx_process_video_packet_framing(feed, buf,
+				curr_stc);
 }
 EXPORT_SYMBOL(mpq_dmx_process_video_packet);
 
@@ -3151,9 +3198,9 @@
 		(mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
 		stc = 0;
 	} else {
-		stc = buf[190] << 16;
-		stc += buf[189] << 8;
-		stc += buf[188];
+		stc = buf[STC_LOCATION_IDX + 2] << 16;
+		stc += buf[STC_LOCATION_IDX + 1] << 8;
+		stc += buf[STC_LOCATION_IDX];
 		stc *= 256; /* convert from 105.47 KHZ to 27MHz */
 	}
 
@@ -4190,6 +4237,9 @@
 		pes_header = (struct pes_packet_header *)
 			&metadata_buf[pes_header_offset];
 		meta_data.packet_type = DMX_PES_PACKET;
+		/* TODO - set to real STC when SDMX supports it */
+		meta_data.info.pes.stc = 0;
+
 		if (pes_header->pts_dts_flag & 0x2) {
 			meta_data.info.pes.pts_dts_info.pts_exist = 1;
 			meta_data.info.pes.pts_dts_info.pts =
@@ -4544,7 +4594,7 @@
 	}
 
 	MPQ_DVB_DBG_PRINT(
-		"\n\n%s: Before SDMX_process: input read_offset=%u, fill count=%u\n",
+		"%s: Before SDMX_process: input read_offset=%u, fill count=%u\n",
 		__func__, read_offset, fill_count);
 
 	process_start_time = current_kernel_time();
@@ -4587,14 +4637,19 @@
 int mpq_sdmx_process(struct mpq_demux *mpq_demux,
 	struct sdmx_buff_descr *input,
 	u32 fill_count,
-	u32 read_offset)
+	u32 read_offset,
+	size_t tsp_size)
 {
 	int ret;
 	int todo;
 	int total_bytes_read = 0;
-	int limit = mpq_sdmx_proc_limit * mpq_demux->demux.ts_packet_size;
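+	/* limit each processing iteration to mpq_sdmx_proc_limit TS packets */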
+	int limit = mpq_sdmx_proc_limit * tsp_size;
 
-	while (fill_count >= mpq_demux->demux.ts_packet_size) {
+	MPQ_DVB_DBG_PRINT(
+		"\n\n%s: read_offset=%u, fill_count=%u, tsp_size=%zu\n",
+		__func__, read_offset, fill_count, tsp_size);
+
+	while (fill_count >= tsp_size) {
 		todo = fill_count > limit ? limit : fill_count;
 		ret = mpq_sdmx_process_buffer(mpq_demux, input, todo,
 			read_offset);
@@ -4643,7 +4698,8 @@
 	}
 	read_offset = mpq_demux->demux.dmx.dvr_input.ringbuff->pread;
 
-	return mpq_sdmx_process(mpq_demux, &buf_desc, count, read_offset);
+	return mpq_sdmx_process(mpq_demux, &buf_desc, count,
+				read_offset, mpq_demux->demux.ts_packet_size);
 }
 
 int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count)
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
index 80b5428..ca7c15a 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
@@ -244,6 +244,8 @@
  * reported for this frame.
  * @last_framing_match_type: Used for saving the type of
  * the previous pattern match found in this video feed.
+ * @last_framing_match_stc: Used for saving the STC attached to the TS
+ * packet of the previous pattern match found in this video feed.
  * @found_sequence_header_pattern: Flag used to note that an MPEG-2
  * Sequence Header, H.264 SPS or VC-1 Sequence Header pattern
  * (whichever is relevant according to the video standard) had already
@@ -272,6 +274,7 @@
  * buffer space.
  * @last_pkt_index: used to save the last streambuffer packet index reported in
  * a new elementary stream data event.
+ * @prev_stc: STC attached to the previous video TS packet
  */
 struct mpq_video_feed_info {
 	struct mpq_streambuffer *video_buffer;
@@ -289,6 +292,7 @@
 	u32 last_pattern_offset;
 	u32 pending_pattern_len;
 	u64 last_framing_match_type;
+	u64 last_framing_match_stc;
 	int found_sequence_header_pattern;
 	struct dvb_dmx_video_prefix_size_masks prefix_size;
 	u32 first_prefix_size;
@@ -303,6 +307,7 @@
 	u32 ts_packets_num;
 	u32 ts_dropped_bytes;
 	int last_pkt_index;
+	u64 prev_stc;
 };
 
 /**
@@ -716,13 +721,15 @@
  * @input: input buffer descriptor
  * @fill_count: number of data bytes in input buffer that can be read
  * @read_offset: offset in buffer for reading
+ * @tsp_size: size of a single TS packet
  *
  * Return number of bytes read or error code
  */
 int mpq_sdmx_process(struct mpq_demux *mpq_demux,
 	struct sdmx_buff_descr *input,
 	u32 fill_count,
-	u32 read_offset);
+	u32 read_offset,
+	size_t tsp_size);
 
 /**
  * mpq_sdmx_loaded - Returns 1 if secure demux application is loaded,
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
index 193141a..43a65e9 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
@@ -382,7 +382,8 @@
 			buff_current_addr_phys - buff_start_addr_phys);
 
 		mpq_sdmx_process(mpq_demux, &input, aggregate_len,
-			buff_current_addr_phys - buff_start_addr_phys);
+			buff_current_addr_phys - buff_start_addr_phys,
+			TSPP_RAW_TTS_SIZE);
 	}
 
 	for (i = 0; i < aggregate_count; i++)
diff --git a/drivers/media/platform/msm/dvb/include/mpq_adapter.h b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
index 19abbbe..a2ade18 100644
--- a/drivers/media/platform/msm/dvb/include/mpq_adapter.h
+++ b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
@@ -64,11 +64,17 @@
 
 	/** PTS/DTS information */
 	struct dmx_pts_dts_info pts_dts_info;
+
+	/** STC value attached to first TS packet holding the pattern */
+	u64 stc;
 };
 
 struct dmx_pes_packet_info {
 	/** PTS/DTS information */
 	struct dmx_pts_dts_info pts_dts_info;
+
+	/** STC value attached to first TS packet holding the PES */
+	u64 stc;
 };
 
 struct dmx_marker_info {
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index ddf271e..7fc8810 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1091,6 +1091,17 @@
 			sizeof(struct hfi_quantization_range);
 		break;
 	}
+	case HAL_PARAM_VENC_MAX_NUM_B_FRAMES:
+	{
+		struct hfi_max_num_b_frames *hfi;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES;
+		hfi = (struct hfi_max_num_b_frames *) &pkt->rg_property_data[1];
+		memcpy(hfi, (struct hfi_max_num_b_frames *) pdata,
+				sizeof(struct hfi_max_num_b_frames));
+		pkt->size += sizeof(u32) + sizeof(struct hfi_max_num_b_frames);
+		break;
+	}
 	case HAL_CONFIG_VENC_INTRA_PERIOD:
 	{
 		struct hfi_intra_period *hfi;
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 91fb514..43a3dad 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -174,8 +174,8 @@
 
 	switch (pkt->event_id) {
 	case HFI_EVENT_SYS_ERROR:
-		dprintk(VIDC_ERR, "HFI_EVENT_SYS_ERROR: %d\n",
-			pkt->event_data1);
+		dprintk(VIDC_ERR, "HFI_EVENT_SYS_ERROR: %d, 0x%x\n",
+			pkt->event_data1, pkt->event_data2);
 		hfi_process_sys_error(callback, device_id);
 		break;
 	case HFI_EVENT_SESSION_ERROR:
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 464cb05..5f47ae1 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -35,6 +35,7 @@
 #define P_FRAME_QP 28
 #define B_FRAME_QP 30
 #define MAX_INTRA_REFRESH_MBS 300
+#define MAX_NUM_B_FRAMES 4
 #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
 #define CODING V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY
 
@@ -761,7 +762,7 @@
 	struct v4l2_ctrl *ctrl = NULL;
 	u32 extradata = 0;
 	if (!q || !q->drv_priv) {
-		dprintk(VIDC_ERR, "Invalid input, q = %p\n", q);
+		dprintk(VIDC_ERR, "Invalid input\n");
 		return -EINVAL;
 	}
 	inst = q->drv_priv;
@@ -1237,11 +1238,24 @@
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES:
 		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES);
-
-		property_id =
-			HAL_CONFIG_VENC_INTRA_PERIOD;
 		intra_period.bframes = ctrl->val;
 		intra_period.pframes = temp_ctrl->val;
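+		/*
+		 * When B frames are enabled, tell the firmware the maximum
+		 * B frame count before programming the intra period.
+		 */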
+		if (intra_period.bframes) {
+			u32 max_num_b_frames = MAX_NUM_B_FRAMES;
+			property_id =
+				HAL_PARAM_VENC_MAX_NUM_B_FRAMES;
+			pdata = &max_num_b_frames;
+			rc = call_hfi_op(hdev, session_set_property,
+				(void *)inst->session, property_id, pdata);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"Failed : Setprop MAX_NUM_B_FRAMES "
+					"%d\n", rc);
+				break;
+			}
+		}
+		property_id =
+			HAL_CONFIG_VENC_INTRA_PERIOD;
 		pdata = &intra_period;
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index d925de3..d880016 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -22,7 +22,7 @@
 #include "msm_smem.h"
 #include "msm_vidc_debug.h"
 
-#define HW_RESPONSE_TIMEOUT 200
+#define HW_RESPONSE_TIMEOUT 1000
 
 #define IS_ALREADY_IN_STATE(__p, __d) ({\
 	int __rc = (__p >= __d);\
@@ -1667,6 +1667,7 @@
 			core->state == VIDC_CORE_INVALID) {
 		dprintk(VIDC_ERR,
 				"Core is in bad state can't change the state");
+		rc = -EINVAL;
 		goto exit;
 	}
 	flipped_state = get_flipped_state(inst->state, state);
@@ -1900,8 +1901,14 @@
 		dprintk(VIDC_ERR, "%s invalid parameters", __func__);
 		return -EINVAL;
 	}
-	hdev = inst->core->device;
 
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+			inst->core->state == VIDC_CORE_INVALID) {
+		dprintk(VIDC_ERR,
+				"Core is in bad state, can't query get_bufreqs()");
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
 	mutex_lock(&inst->sync_lock);
 	if (inst->state < MSM_VIDC_OPEN_DONE || inst->state >= MSM_VIDC_CLOSE) {
 		dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index ddb3063..ed4f317 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1874,17 +1874,42 @@
 	mutex_unlock(&device->write_lock);
 	return rc;
 }
+static void venus_hfi_process_msg_event_notify(
+	struct venus_hfi_device *device, void *packet)
+{
+	struct hfi_sfr_struct *vsfr = NULL;
+	struct hfi_msg_event_notify_packet *event_pkt;
+	struct vidc_hal_msg_pkt_hdr *msg_hdr;
 
+	msg_hdr = (struct vidc_hal_msg_pkt_hdr *)packet;
+	event_pkt =
+		(struct hfi_msg_event_notify_packet *)msg_hdr;
+	if (event_pkt && event_pkt->event_id ==
+		HFI_EVENT_SYS_ERROR) {
+		vsfr = (struct hfi_sfr_struct *)
+				device->sfr.align_virtual_addr;
+		if (vsfr)
+			dprintk(VIDC_ERR, "SFR Message from FW : %s",
+				vsfr->rg_data);
+	}
+}
 static void venus_hfi_response_handler(struct venus_hfi_device *device)
 {
 	u8 packet[VIDC_IFACEQ_MED_PKT_SIZE];
 	u32 rc = 0;
+	struct hfi_sfr_struct *vsfr = NULL;
 	dprintk(VIDC_INFO, "#####venus_hfi_response_handler#####\n");
 	if (device) {
 		if ((device->intr_status &
 			VIDC_WRAPPER_INTR_CLEAR_A2HWD_BMSK)) {
 			dprintk(VIDC_ERR, "Received: Watchdog timeout %s",
 				__func__);
+			vsfr = (struct hfi_sfr_struct *)
+					device->sfr.align_virtual_addr;
+			if (vsfr)
+				dprintk(VIDC_ERR,
+					"SFR Message from FW : %s",
+						vsfr->rg_data);
 			venus_hfi_process_sys_watchdog_timeout(device);
 		}
 
@@ -1892,6 +1917,9 @@
 			rc = hfi_process_msg_packet(device->callback,
 				device->device_id,
 				(struct vidc_hal_msg_pkt_hdr *) packet);
+			if (rc == HFI_MSG_EVENT_NOTIFY)
+				venus_hfi_process_msg_event_notify(
+					device, (void *)packet);
 		}
 		while (!venus_hfi_iface_dbgq_read(device, packet)) {
 			struct hfi_msg_sys_debug_packet *pkt =
@@ -2752,9 +2780,10 @@
 		return;
 	}
 	if (device->resources.fw.cookie) {
+		flush_workqueue(device->vidc_workq);
 		venus_hfi_disable_clks(device);
-		venus_hfi_iommu_detach(device);
 		subsystem_put(device->resources.fw.cookie);
+		venus_hfi_iommu_detach(device);
 		device->resources.fw.cookie = NULL;
 	}
 }
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 5ba191f..3fbfec4 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -170,6 +170,7 @@
 	HAL_CONFIG_VENC_MAX_BITRATE,
 	HAL_PARAM_VENC_H264_VUI_TIMING_INFO,
 	HAL_PARAM_VENC_H264_GENERATE_AUDNAL,
+	HAL_PARAM_VENC_MAX_NUM_B_FRAMES,
 };
 
 enum hal_domain {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 2d0c3bd..66eade1 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -304,7 +304,8 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
 #define HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG		\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01F)
-
+#define HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x020)
+
 #define HFI_PROPERTY_CONFIG_VENC_COMMON_START				\
 	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
 #define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE				\
@@ -424,6 +425,10 @@
 	u32 idr_period;
 };
 
+struct hfi_max_num_b_frames {
+	u32 max_num_b_frames;
+};
+
 struct hfi_intra_period {
 	u32 pframes;
 	u32 bframes;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 55e3e4e..ac77bfb 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1648,7 +1648,6 @@
 	data->abort = 0;
 	data->type = QSEECOM_CLIENT_APP;
 	data->released = false;
-	data->client.app_id = ret;
 	data->client.sb_length = size;
 	data->client.user_virt_sb_base = 0;
 	data->client.ihandle = NULL;
@@ -1693,7 +1692,7 @@
 		*handle = NULL;
 		return -EINVAL;
 	}
-
+	data->client.app_id = ret;
 	if (ret > 0) {
 		pr_warn("App id %d for [%s] app exists\n", ret,
 			(char *)app_ireq.app_name);
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
index dbb4f5e..73cae32 100644
--- a/drivers/misc/tspp.c
+++ b/drivers/misc/tspp.c
@@ -1571,6 +1571,7 @@
 	int id;
 	int table_idx;
 	u32 val;
+	unsigned long flags;
 
 	struct sps_connect *config;
 	struct tspp_device *pdev;
@@ -1591,6 +1592,15 @@
 	if (!channel->used)
 		return 0;
 
+	/*
+	 * Need to protect access to used and waiting fields, as they are
+	 * used by the tasklet which is invoked from interrupt context
+	 */
+	spin_lock_irqsave(&pdev->spinlock, flags);
+	channel->used = 0;
+	channel->waiting = NULL;
+	spin_unlock_irqrestore(&pdev->spinlock, flags);
+
 	if (channel->expiration_period_ms)
 		del_timer(&channel->expiration_timer);
 
@@ -1644,9 +1654,7 @@
 	channel->buffer_count = 0;
 	channel->data = NULL;
 	channel->read = NULL;
-	channel->waiting = NULL;
 	channel->locked = NULL;
-	channel->used = 0;
 
 	if (tspp_channels_in_use(pdev) == 0) {
 		wake_unlock(&pdev->wake_lock);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 4e12bb7..147e378 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1127,6 +1127,33 @@
 }
 #endif
 
+static inline unsigned long get_vm_size(struct vm_area_struct *vma)
+{
+	return vma->vm_end - vma->vm_start;
+}
+
+static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
+{
+	return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
+}
+
+/*
+ * Set a new vm offset.
+ *
+ * Verify that the incoming offset really works as a page offset,
+ * and that the offset and size fit in a resource_size_t.
+ */
+static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
+{
+	pgoff_t pgoff = off >> PAGE_SHIFT;
+	if (off != (resource_size_t) pgoff << PAGE_SHIFT)
+		return -EINVAL;
+	if (off + get_vm_size(vma) - 1 < off)
+		return -EINVAL;
+	vma->vm_pgoff = pgoff;
+	return 0;
+}
+
 /*
  * set up a mapping for shared memory segments
  */
@@ -1136,20 +1163,29 @@
 	struct mtd_file_info *mfi = file->private_data;
 	struct mtd_info *mtd = mfi->mtd;
 	struct map_info *map = mtd->priv;
-	unsigned long start;
-	unsigned long off;
-	u32 len;
+	resource_size_t start, off;
+	unsigned long len, vma_len;
 
 	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
-		off = vma->vm_pgoff << PAGE_SHIFT;
+		off = get_vm_offset(vma);
 		start = map->phys;
 		len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
 		start &= PAGE_MASK;
-		if ((vma->vm_end - vma->vm_start + off) > len)
+		vma_len = get_vm_size(vma);
+
+		/* Overflow in off+len? */
+		if (vma_len + off < off)
+			return -EINVAL;
+		/* Does it fit in the mapping? */
+		if (vma_len + off > len)
 			return -EINVAL;
 
 		off += start;
-		vma->vm_pgoff = off >> PAGE_SHIFT;
+		/* Did that overflow? */
+		if (off < start)
+			return -EINVAL;
+		if (set_vm_offset(vma, off) < 0)
+			return -EINVAL;
 		vma->vm_flags |= VM_IO | VM_RESERVED;
 
 #ifdef pgprot_noncached
diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/net/ethernet/msm/ecm_ipa.c
index 114b23d..f6b8051 100644
--- a/drivers/net/ethernet/msm/ecm_ipa.c
+++ b/drivers/net/ethernet/msm/ecm_ipa.c
@@ -22,13 +22,13 @@
 #include <mach/ecm_ipa.h>
 
 #define DRIVER_NAME "ecm_ipa"
-#define DRIVER_VERSION "20-Mar-2013"
 #define ECM_IPA_IPV4_HDR_NAME "ecm_eth_ipv4"
 #define ECM_IPA_IPV6_HDR_NAME "ecm_eth_ipv6"
 #define IPA_TO_USB_CLIENT	IPA_CLIENT_USB_CONS
 #define INACTIVITY_MSEC_DELAY 100
 #define DEFAULT_OUTSTANDING_HIGH 64
 #define DEFAULT_OUTSTANDING_LOW 32
+#define DEBUGFS_TEMP_BUF_SIZE 4
 
 #define ECM_IPA_ERROR(fmt, args...) \
 	pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\
@@ -57,6 +57,7 @@
  * @tx_file: saved debugfs entry to allow cleanup
  * @rx_file: saved debugfs entry to allow cleanup
  * @rm_file: saved debugfs entry to allow cleanup
+ * @outstanding_file: saved debugfs entry to allow cleanup
  * @outstanding_high_file saved debugfs entry to allow cleanup
  * @outstanding_low_file saved debugfs entry to allow cleanup
  * @dma_file: saved debugfs entry to allow cleanup
@@ -82,6 +83,7 @@
 	struct dentry *outstanding_high_file;
 	struct dentry *outstanding_low_file;
 	struct dentry *dma_file;
+	struct dentry *outstanding_file;
 	uint32_t eth_ipv4_hdr_hdl;
 	uint32_t eth_ipv6_hdr_hdl;
 	u32 usb_to_ipa_hdl;
@@ -129,8 +131,11 @@
 static int ecm_ipa_debugfs_rx_open(struct inode *inode, struct file *file);
 static int ecm_ipa_debugfs_rm_open(struct inode *inode, struct file *file);
 static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file);
+static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file);
 static ssize_t ecm_ipa_debugfs_enable_read(struct file *file,
 		char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t ecm_ipa_debugfs_atomic_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos);
 static ssize_t ecm_ipa_debugfs_enable_write(struct file *file,
 		const char __user *buf, size_t count, loff_t *ppos);
 static ssize_t ecm_ipa_debugfs_enable_write_dma(struct file *file,
@@ -169,6 +174,11 @@
 	.write = ecm_ipa_debugfs_enable_write_dma,
 };
 
+const struct file_operations ecm_ipa_debugfs_atomic_ops = {
+	.open = ecm_ipa_debugfs_atomic_open,
+	.read = ecm_ipa_debugfs_atomic_read,
+};
+
 /**
  * ecm_ipa_init() - initializes internal data structures
  * @ecm_ipa_rx_dp_notify: supplied callback to be called by the IPA
@@ -195,7 +205,7 @@
 	struct net_device *net;
 	struct ecm_ipa_dev *dev;
 	ECM_IPA_LOG_ENTRY();
-	pr_debug("%s version %s\n", DRIVER_NAME, DRIVER_VERSION);
+	pr_debug("%s initializing\n", DRIVER_NAME);
 	NULL_CHECK(ecm_ipa_rx_dp_notify);
 	NULL_CHECK(ecm_ipa_tx_dp_notify);
 	NULL_CHECK(priv);
@@ -833,6 +843,15 @@
 	return 0;
 }
 
+static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
+{
+	struct ecm_ipa_dev *dev = inode->i_private;
+	ECM_IPA_LOG_ENTRY();
+	file->private_data = &(dev->outstanding_pkts);
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
 static ssize_t ecm_ipa_debugfs_enable_write_dma(struct file *file,
 		const char __user *buf, size_t count, loff_t *ppos)
 {
@@ -904,9 +923,22 @@
 	return size;
 }
 
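+/* "outstanding" debugfs read: report the current outstanding packet count */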
+static ssize_t ecm_ipa_debugfs_atomic_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	char atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0};
+	atomic_t *atomic_var = file->private_data;
+	nbytes = scnprintf(atomic_str, sizeof(atomic_str), "%d\n",
+			atomic_read(atomic_var));
+	return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes);
+}
+
 static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *dev)
 {
 	const mode_t flags = S_IRUGO | S_IWUGO;
+	const mode_t flags_read_only = S_IRUGO;
 
 	int ret = -EINVAL;
 	ECM_IPA_LOG_ENTRY();
@@ -961,6 +993,14 @@
 		ret = -EFAULT;
 		goto fail_file;
 	}
+	dev->outstanding_file = debugfs_create_file("outstanding",
+			flags_read_only, dev->folder, dev,
+			&ecm_ipa_debugfs_atomic_ops);
+	if (!dev->outstanding_file) {
+		ECM_IPA_ERROR("could not create outstanding file\n");
+		ret = -EFAULT;
+		goto fail_file;
+	}
 
 	ECM_IPA_LOG_EXIT();
 	return 0;
@@ -980,7 +1020,6 @@
 {
 	ECM_IPA_LOG_ENTRY();
 	strlcpy(drv_info->driver, DRIVER_NAME, sizeof(drv_info->driver));
-	strlcpy(drv_info->version, DRIVER_VERSION, sizeof(drv_info->version));
 	ECM_IPA_LOG_EXIT();
 }
 
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
index b7eca61..2b6ce75 100644
--- a/drivers/platform/msm/ipa/Makefile
+++ b/drivers/platform/msm/ipa/Makefile
@@ -1,4 +1,4 @@
 obj-$(CONFIG_IPA) += ipat.o
 ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
-	ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o ipa_intf.o teth_bridge.o \
+	ipa_utils.o ipa_nat.o a2_service.o ipa_bridge.o ipa_intf.o teth_bridge.o \
 	ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
index 4b5f0a2..1b33dc0 100644
--- a/drivers/platform/msm/ipa/a2_service.c
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -205,6 +205,7 @@
 	smsm_change_state(SMSM_APPS_STATE,
 				clear_bit & SMSM_A2_POWER_CONTROL_ACK,
 				~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
+	IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_apps_acks);
 	clear_bit = ~clear_bit;
 }
 
@@ -216,10 +217,13 @@
 	if (a2_mux_ctx->bam_dmux_uplink_vote == vote)
 		IPADBG("%s: warning - duplicate power vote\n", __func__);
 	a2_mux_ctx->bam_dmux_uplink_vote = vote;
-	if (vote)
+	if (vote) {
 		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
-	else
+		IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_on_reqs_out);
+	} else {
 		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
+		IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_off_reqs_out);
+	}
 }
 
 static inline void ul_powerdown(void)
@@ -634,12 +638,14 @@
 	last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
 	if (new_state & SMSM_A2_POWER_CONTROL) {
 		IPADBG("%s: MODEM PWR CTRL 1\n", __func__);
+		IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_on_reqs_in);
 		grab_wakelock();
 		(void) connect_to_bam();
 		queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
 			   &a2_mux_ctx->kickoff_ul_request_resource);
 	} else if (!(new_state & SMSM_A2_POWER_CONTROL)) {
 		IPADBG("%s: MODEM PWR CTRL 0\n", __func__);
+		IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_off_reqs_in);
 		(void) disconnect_to_bam();
 		release_wakelock();
 	} else {
@@ -653,6 +659,7 @@
 {
 	IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
 			new_state);
+	IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_modem_acks);
 	complete_all(&a2_mux_ctx->ul_wakeup_ack_completion);
 }
 
@@ -1492,6 +1499,7 @@
 	a2_props.options		= SPS_BAM_OPT_IRQ_WAKEUP;
 	a2_props.num_pipes		= A2_NUM_PIPES;
 	a2_props.summing_threshold	= A2_SUMMING_THRESHOLD;
+	a2_props.manage                 = SPS_BAM_MGR_DEVICE_REMOTE;
 	/* need to free on tear down */
 	rc = sps_register_bam_device(&a2_props, &h);
 	if (rc < 0) {
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index 16f722c..466e694 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -50,6 +50,13 @@
 #define IPA_AGGR_STR_IN_BYTES(str) \
 	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
 
+/*
+ * This equals a timer value of 162.56us. This value was
+ * determined empirically and shows good bi-directional
+ * WLAN throughputs
+ */
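+/* 0x7f = 127 units, which works out to about 1.28us per timer unit */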
+#define IPA_HOLB_TMR_DEFAULT_VAL 0x7f
+
 static struct ipa_plat_drv_res ipa_res = {0, };
 static struct of_device_id ipa_plat_drv_match[] = {
 	{
@@ -73,6 +80,18 @@
 		.ab = 0,
 		.ib = 0,
 	},
+	{
+		.src = MSM_BUS_MASTER_BAM_DMA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 0,
+		.ib = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_BAM_DMA,
+		.dst = MSM_BUS_SLAVE_OCIMEM,
+		.ab = 0,
+		.ib = 0,
+	},
 };
 
 static struct msm_bus_vectors ipa_max_perf_vectors[]  = {
@@ -82,6 +101,18 @@
 		.ab = 50000000,
 		.ib = 960000000,
 	},
+	{
+		.src = MSM_BUS_MASTER_BAM_DMA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 50000000,
+		.ib = 960000000,
+	},
+	{
+		.src = MSM_BUS_MASTER_BAM_DMA,
+		.dst = MSM_BUS_SLAVE_OCIMEM,
+		.ab = 50000000,
+		.ib = 960000000,
+	},
 };
 
 static struct msm_bus_paths ipa_usecases[]  = {
@@ -901,7 +932,7 @@
 	/* LAN-WAN OUT (A5->IPA) */
 	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
 	sys_in.client = IPA_CLIENT_A5_LAN_WAN_PROD;
-	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
 	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
 	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
 	if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) {
@@ -1123,7 +1154,7 @@
 	u32 producer_hdl = 0;
 	u32 consumer_hdl = 0;
 
-	rmnet_bridge_get_client_handles(&producer_hdl, &consumer_hdl);
+	teth_bridge_get_client_handles(&producer_hdl, &consumer_hdl);
 
 	/* configure aggregation on producer */
 	memset(&agg_params, 0, sizeof(struct ipa_ep_cfg_aggr));
@@ -1630,6 +1661,8 @@
 		result = -ENOMEM;
 		goto fail_mem;
 	}
+	ipa_ctx->hol_en = 0x1;
+	ipa_ctx->hol_timer = IPA_HOLB_TMR_DEFAULT_VAL;
 
 	IPADBG("polling_mode=%u delay_ms=%u\n", polling_mode, polling_delay_ms);
 	ipa_ctx->polling_mode = polling_mode;
@@ -1712,6 +1745,7 @@
 	bam_props.num_pipes = IPA_NUM_PIPES;
 	bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
 	bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+	bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
 
 	result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
 	if (result) {
@@ -1887,7 +1921,7 @@
 	if (result) {
 		IPAERR("ipa bridge init err.\n");
 		result = -ENODEV;
-		goto fail_bridge_init;
+		goto fail_a5_pipes;
 	}
 
 	/* setup the A5-IPA pipes */
@@ -2008,8 +2042,6 @@
 	ipa_cleanup_rx();
 	ipa_teardown_a5_pipes();
 fail_a5_pipes:
-	ipa_bridge_cleanup();
-fail_bridge_init:
 	destroy_workqueue(ipa_ctx->tx_wq);
 fail_tx_wq:
 	destroy_workqueue(ipa_ctx->rx_wq);
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
index 83b7175..3ff604c 100644
--- a/drivers/platform/msm/ipa/ipa_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -12,10 +12,52 @@
 
 #include <linux/delay.h>
 #include <linux/ratelimit.h>
+#include <mach/msm_smsm.h>
 #include "ipa_i.h"
 
-#define A2_EMBEDDED_PIPE_TX 4
-#define A2_EMBEDDED_PIPE_RX 5
+/*
+ * EP0 (teth)
+ * A2_BAM(1)->(12)DMA_BAM->DMA_BAM(13)->(6)IPA_BAM->IPA_BAM(10)->USB_BAM(0)
+ * A2_BAM(0)<-(15)DMA_BAM<-DMA_BAM(14)<-(7)IPA_BAM<-IPA_BAM(11)<-USB_BAM(1)
+ *
+ * EP2 (emb)
+ * A2_BAM(5)->(16)DMA_BAM->DMA_BAM(17)->(8)IPA_BAM->
+ * A2_BAM(4)<-(19)DMA_BAM<-DMA_BAM(18)<-(9)IPA_BAM<-
+ */
+
+#define A2_TETHERED_PIPE_UL      0
+#define DMA_A2_TETHERED_PIPE_UL  15
+#define DMA_IPA_TETHERED_PIPE_UL 14
+#define A2_TETHERED_PIPE_DL      1
+#define DMA_A2_TETHERED_PIPE_DL  12
+#define DMA_IPA_TETHERED_PIPE_DL 13
+
+#define A2_EMBEDDED_PIPE_UL      4
+#define DMA_A2_EMBEDDED_PIPE_UL  19
+#define DMA_IPA_EMBEDDED_PIPE_UL 18
+#define A2_EMBEDDED_PIPE_DL      5
+#define DMA_A2_EMBEDDED_PIPE_DL  16
+#define DMA_IPA_EMBEDDED_PIPE_DL 17
+
+#define IPA_SMEM_PIPE_MEM_SZ 32768
+
+#define IPA_UL_DATA_FIFO_SZ 0xc00
+#define IPA_UL_DESC_FIFO_SZ 0x530
+#define IPA_DL_DATA_FIFO_SZ 0x2400
+#define IPA_DL_DESC_FIFO_SZ 0x8a0
+
+#define IPA_SMEM_UL_DATA_FIFO_OFST 0x3dd0
+#define IPA_SMEM_UL_DESC_FIFO_OFST 0x49d0
+#define IPA_SMEM_DL_DATA_FIFO_OFST 0x4f00
+#define IPA_SMEM_DL_DESC_FIFO_OFST 0x7300
+
+#define IPA_OCIMEM_UL_DATA_FIFO_OFST 0
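+/* FIFOs for the tethered bridge are packed back to back in OCIMEM */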
+#define IPA_OCIMEM_UL_DESC_FIFO_OFST (IPA_OCIMEM_UL_DATA_FIFO_OFST + \
+		IPA_UL_DATA_FIFO_SZ)
+#define IPA_OCIMEM_DL_DATA_FIFO_OFST (IPA_OCIMEM_UL_DESC_FIFO_OFST + \
+		IPA_UL_DESC_FIFO_SZ)
+#define IPA_OCIMEM_DL_DESC_FIFO_OFST (IPA_OCIMEM_DL_DATA_FIFO_OFST + \
+		IPA_DL_DATA_FIFO_SZ)
 
 enum ipa_pipe_type {
 	IPA_DL_FROM_A2,
@@ -25,678 +67,383 @@
 	IPA_PIPE_TYPE_MAX
 };
 
-static int polling_min_sleep[IPA_BRIDGE_DIR_MAX] = { 950, 950 };
-static int polling_max_sleep[IPA_BRIDGE_DIR_MAX] = { 1050, 1050 };
-static int polling_inactivity[IPA_BRIDGE_DIR_MAX] = { 4, 4 };
-
-struct ipa_pkt_info {
-	void *buffer;
-	dma_addr_t dma_address;
-	uint32_t len;
-	struct list_head link;
-};
-
 struct ipa_bridge_pipe_context {
-	struct list_head head_desc_list;
 	struct sps_pipe *pipe;
-	struct sps_connect connection;
-	struct sps_mem_buffer desc_mem_buf;
-	struct sps_register_event register_event;
-	struct list_head free_desc_list;
+	bool ipa_facing;
 	bool valid;
 };
 
 struct ipa_bridge_context {
 	struct ipa_bridge_pipe_context pipe[IPA_PIPE_TYPE_MAX];
-	struct workqueue_struct *ul_wq;
-	struct workqueue_struct *dl_wq;
-	struct work_struct ul_work;
-	struct work_struct dl_work;
 	enum ipa_bridge_type type;
 };
 
 static struct ipa_bridge_context bridge[IPA_BRIDGE_TYPE_MAX];
 
-static void ipa_do_bridge_work(enum ipa_bridge_dir dir,
-		struct ipa_bridge_context *ctx);
-
-static void ul_work_func(struct work_struct *work)
+static void ipa_get_dma_pipe_num(enum ipa_bridge_dir dir,
+		enum ipa_bridge_type type, int *a2, int *ipa)
 {
-	struct ipa_bridge_context *ctx = container_of(work,
-			struct ipa_bridge_context, ul_work);
-	ipa_do_bridge_work(IPA_BRIDGE_DIR_UL, ctx);
+	if (type == IPA_BRIDGE_TYPE_TETHERED) {
+		if (dir == IPA_BRIDGE_DIR_UL) {
+			*a2 = DMA_A2_TETHERED_PIPE_UL;
+			*ipa = DMA_IPA_TETHERED_PIPE_UL;
+		} else {
+			*a2 = DMA_A2_TETHERED_PIPE_DL;
+			*ipa = DMA_IPA_TETHERED_PIPE_DL;
+		}
+	} else {
+		if (dir == IPA_BRIDGE_DIR_UL) {
+			*a2 = DMA_A2_EMBEDDED_PIPE_UL;
+			*ipa = DMA_IPA_EMBEDDED_PIPE_UL;
+		} else {
+			*a2 = DMA_A2_EMBEDDED_PIPE_DL;
+			*ipa = DMA_IPA_EMBEDDED_PIPE_DL;
+		}
+	}
 }
 
-static void dl_work_func(struct work_struct *work)
+static int ipa_get_desc_fifo_sz(enum ipa_bridge_dir dir,
+		enum ipa_bridge_type type)
 {
-	struct ipa_bridge_context *ctx = container_of(work,
-			struct ipa_bridge_context, dl_work);
-	ipa_do_bridge_work(IPA_BRIDGE_DIR_DL, ctx);
+	int sz;
+
+	if (type == IPA_BRIDGE_TYPE_TETHERED) {
+		if (dir == IPA_BRIDGE_DIR_UL)
+			sz = IPA_UL_DESC_FIFO_SZ;
+		else
+			sz = IPA_DL_DESC_FIFO_SZ;
+	} else {
+		if (dir == IPA_BRIDGE_DIR_UL)
+			sz = IPA_UL_DESC_FIFO_SZ;
+		else
+			sz = IPA_DL_DESC_FIFO_SZ;
+	}
+
+	return sz;
 }
 
-static int ipa_switch_to_intr_mode(enum ipa_bridge_dir dir,
-				    struct ipa_bridge_context *ctx)
+static int ipa_get_data_fifo_sz(enum ipa_bridge_dir dir,
+		enum ipa_bridge_type type)
+{
+	int sz;
+
+	if (type == IPA_BRIDGE_TYPE_TETHERED) {
+		if (dir == IPA_BRIDGE_DIR_UL)
+			sz = IPA_UL_DATA_FIFO_SZ;
+		else
+			sz = IPA_DL_DATA_FIFO_SZ;
+	} else {
+		if (dir == IPA_BRIDGE_DIR_UL)
+			sz = IPA_UL_DATA_FIFO_SZ;
+		else
+			sz = IPA_DL_DATA_FIFO_SZ;
+	}
+
+	return sz;
+}
+
+static int ipa_get_a2_pipe_num(enum ipa_bridge_dir dir,
+		enum ipa_bridge_type type)
+{
+	int ep;
+
+	if (type == IPA_BRIDGE_TYPE_TETHERED) {
+		if (dir == IPA_BRIDGE_DIR_UL)
+			ep = A2_TETHERED_PIPE_UL;
+		else
+			ep = A2_TETHERED_PIPE_DL;
+	} else {
+		if (dir == IPA_BRIDGE_DIR_UL)
+			ep = A2_EMBEDDED_PIPE_UL;
+		else
+			ep = A2_EMBEDDED_PIPE_DL;
+	}
+
+	return ep;
+}
+
+int ipa_setup_a2_dma_fifos(enum ipa_bridge_dir dir,
+		enum ipa_bridge_type type,
+		struct sps_mem_buffer *desc,
+		struct sps_mem_buffer *data)
 {
 	int ret;
-	struct ipa_bridge_pipe_context *sys = &ctx->pipe[2 * dir];
 
-	ret = sps_get_config(sys->pipe, &sys->connection);
-	if (ret) {
-		IPAERR("sps_get_config() failed %d type=%d dir=%d\n",
-				ret, ctx->type, dir);
-		goto fail;
-	}
-	sys->register_event.options = SPS_O_EOT;
-	ret = sps_register_event(sys->pipe, &sys->register_event);
-	if (ret) {
-		IPAERR("sps_register_event() failed %d type=%d dir=%d\n",
-				ret, ctx->type, dir);
-		goto fail;
-	}
-	sys->connection.options =
-	   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
-	ret = sps_set_config(sys->pipe, &sys->connection);
-	if (ret) {
-		IPAERR("sps_set_config() failed %d type=%d dir=%d\n",
-				ret, ctx->type, dir);
-		goto fail;
-	}
-	ret = 0;
-fail:
-	return ret;
-}
+	if (type == IPA_BRIDGE_TYPE_EMBEDDED) {
+		if (dir == IPA_BRIDGE_DIR_UL) {
+			desc->base = ipa_ctx->smem_pipe_mem +
+				IPA_SMEM_UL_DESC_FIFO_OFST;
+			desc->phys_base = smem_virt_to_phys(desc->base);
+			desc->size = ipa_get_desc_fifo_sz(dir, type);
+			data->base = ipa_ctx->smem_pipe_mem +
+				IPA_SMEM_UL_DATA_FIFO_OFST;
+			data->phys_base = smem_virt_to_phys(data->base);
+			data->size = ipa_get_data_fifo_sz(dir, type);
+		} else {
+			desc->base = ipa_ctx->smem_pipe_mem +
+				IPA_SMEM_DL_DESC_FIFO_OFST;
+			desc->phys_base = smem_virt_to_phys(desc->base);
+			desc->size = ipa_get_desc_fifo_sz(dir, type);
+			data->base = ipa_ctx->smem_pipe_mem +
+				IPA_SMEM_DL_DATA_FIFO_OFST;
+			data->phys_base = smem_virt_to_phys(data->base);
+			data->size = ipa_get_data_fifo_sz(dir, type);
+		}
+	} else {
+		if (dir == IPA_BRIDGE_DIR_UL) {
+			ret = sps_setup_bam2bam_fifo(data,
+					IPA_OCIMEM_UL_DATA_FIFO_OFST,
+					ipa_get_data_fifo_sz(dir, type), 1);
+			if (ret) {
+				IPAERR("DAFIFO setup fail %d dir %d type %d\n",
+						ret, dir, type);
+				return ret;
+			}
 
-static int ipa_switch_to_poll_mode(enum ipa_bridge_dir dir,
-				    enum ipa_bridge_type type)
-{
-	int ret;
-	struct ipa_bridge_pipe_context *sys = &bridge[type].pipe[2 * dir];
+			ret = sps_setup_bam2bam_fifo(desc,
+					IPA_OCIMEM_UL_DESC_FIFO_OFST,
+					ipa_get_desc_fifo_sz(dir, type), 1);
+			if (ret) {
+				IPAERR("DEFIFO setup fail %d dir %d type %d\n",
+						ret, dir, type);
+				return ret;
+			}
+		} else {
+			ret = sps_setup_bam2bam_fifo(data,
+					IPA_OCIMEM_DL_DATA_FIFO_OFST,
+					ipa_get_data_fifo_sz(dir, type), 1);
+			if (ret) {
+				IPAERR("DAFIFO setup fail %d dir %d type %d\n",
+						ret, dir, type);
+				return ret;
+			}
 
-	ret = sps_get_config(sys->pipe, &sys->connection);
-	if (ret) {
-		IPAERR("sps_get_config() failed %d type=%d dir=%d\n",
-				ret, type, dir);
-		goto fail;
-	}
-	sys->connection.options =
-	   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
-	ret = sps_set_config(sys->pipe, &sys->connection);
-	if (ret) {
-		IPAERR("sps_set_config() failed %d type=%d dir=%d\n",
-				ret, type, dir);
-		goto fail;
-	}
-	ret = 0;
-fail:
-	return ret;
-}
-
-static int queue_rx_single(enum ipa_bridge_dir dir, enum ipa_bridge_type type)
-{
-	struct ipa_bridge_pipe_context *sys_rx = &bridge[type].pipe[2 * dir];
-	struct ipa_pkt_info *info;
-	int ret;
-
-	info = kmalloc(sizeof(struct ipa_pkt_info), GFP_KERNEL);
-	if (!info) {
-		IPAERR("unable to alloc rx_pkt_info type=%d dir=%d\n",
-				type, dir);
-		goto fail_pkt;
+			ret = sps_setup_bam2bam_fifo(desc,
+					IPA_OCIMEM_DL_DESC_FIFO_OFST,
+					ipa_get_desc_fifo_sz(dir, type), 1);
+			if (ret) {
+				IPAERR("DEFIFO setup fail %d dir %d type %d\n",
+						ret, dir, type);
+				return ret;
+			}
+		}
 	}
 
-	info->buffer = kmalloc(IPA_RX_SKB_SIZE, GFP_KERNEL | GFP_DMA);
-	if (!info->buffer) {
-		IPAERR("unable to alloc rx_pkt_buffer type=%d dir=%d\n",
-				type, dir);
-		goto fail_buffer;
-	}
+	IPADBG("dir=%d type=%d Dpa=%x Dsz=%u Dva=%p dpa=%x dsz=%u dva=%p\n",
+			dir, type, data->phys_base, data->size, data->base,
+			desc->phys_base, desc->size, desc->base);
 
-	info->dma_address = dma_map_single(NULL, info->buffer, IPA_RX_SKB_SIZE,
-					   DMA_BIDIRECTIONAL);
-	if (info->dma_address == 0 || info->dma_address == ~0) {
-		IPAERR("dma_map_single failure %p for %p type=%d dir=%d\n",
-				(void *)info->dma_address, info->buffer,
-				type, dir);
-		goto fail_dma;
-	}
-
-	list_add_tail(&info->link, &sys_rx->head_desc_list);
-	ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
-			       IPA_RX_SKB_SIZE, info,
-			       SPS_IOVEC_FLAG_INT);
-	if (ret) {
-		list_del(&info->link);
-		dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
-				 DMA_BIDIRECTIONAL);
-		IPAERR("sps_transfer_one failed %d type=%d dir=%d\n", ret,
-				type, dir);
-		goto fail_dma;
-	}
 	return 0;
-
-fail_dma:
-	kfree(info->buffer);
-fail_buffer:
-	kfree(info);
-fail_pkt:
-	IPAERR("failed type=%d dir=%d\n", type, dir);
-	return -ENOMEM;
 }
 
-static int ipa_reclaim_tx(struct ipa_bridge_pipe_context *sys_tx, bool all)
-{
-	struct sps_iovec iov;
-	struct ipa_pkt_info *tx_pkt;
-	int cnt = 0;
-	int ret;
-
-	do {
-		iov.addr = 0;
-		ret = sps_get_iovec(sys_tx->pipe, &iov);
-		if (ret || iov.addr == 0) {
-			break;
-		} else {
-			tx_pkt = list_first_entry(&sys_tx->head_desc_list,
-						  struct ipa_pkt_info,
-						  link);
-			list_move_tail(&tx_pkt->link,
-					&sys_tx->free_desc_list);
-			cnt++;
-		}
-	} while (all);
-
-	return cnt;
-}
-
-static void ipa_do_bridge_work(enum ipa_bridge_dir dir,
-			       struct ipa_bridge_context *ctx)
-{
-	struct ipa_bridge_pipe_context *sys_rx = &ctx->pipe[2 * dir];
-	struct ipa_bridge_pipe_context *sys_tx = &ctx->pipe[2 * dir + 1];
-	struct ipa_pkt_info *tx_pkt;
-	struct ipa_pkt_info *rx_pkt;
-	struct ipa_pkt_info *tmp_pkt;
-	struct sps_iovec iov;
-	int ret;
-	int inactive_cycles = 0;
-
-	while (1) {
-		++inactive_cycles;
-
-		if (ipa_reclaim_tx(sys_tx, false))
-			inactive_cycles = 0;
-
-		iov.addr = 0;
-		ret = sps_get_iovec(sys_rx->pipe, &iov);
-		if (ret || iov.addr == 0) {
-			/* no-op */
-		} else {
-			inactive_cycles = 0;
-
-			rx_pkt = list_first_entry(&sys_rx->head_desc_list,
-						  struct ipa_pkt_info,
-						  link);
-			list_del(&rx_pkt->link);
-			rx_pkt->len = iov.size;
-
-retry_alloc_tx:
-			if (list_empty(&sys_tx->free_desc_list)) {
-				tmp_pkt = kmalloc(sizeof(struct ipa_pkt_info),
-						GFP_KERNEL);
-				if (!tmp_pkt) {
-					pr_debug_ratelimited("%s: unable to alloc tx_pkt_info type=%d dir=%d\n",
-					       __func__, ctx->type, dir);
-					usleep_range(polling_min_sleep[dir],
-							polling_max_sleep[dir]);
-					goto retry_alloc_tx;
-				}
-
-				tmp_pkt->buffer = kmalloc(IPA_RX_SKB_SIZE,
-						GFP_KERNEL | GFP_DMA);
-				if (!tmp_pkt->buffer) {
-					pr_debug_ratelimited("%s: unable to alloc tx_pkt_buffer type=%d dir=%d\n",
-					       __func__, ctx->type, dir);
-					kfree(tmp_pkt);
-					usleep_range(polling_min_sleep[dir],
-							polling_max_sleep[dir]);
-					goto retry_alloc_tx;
-				}
-
-				tmp_pkt->dma_address = dma_map_single(NULL,
-						tmp_pkt->buffer,
-						IPA_RX_SKB_SIZE,
-						DMA_BIDIRECTIONAL);
-				if (tmp_pkt->dma_address == 0 ||
-						tmp_pkt->dma_address == ~0) {
-					pr_debug_ratelimited("%s: dma_map_single failure %p for %p type=%d dir=%d\n",
-					       __func__,
-					       (void *)tmp_pkt->dma_address,
-					       tmp_pkt->buffer, ctx->type, dir);
-				}
-
-				list_add_tail(&tmp_pkt->link,
-						&sys_tx->free_desc_list);
-			}
-
-			tx_pkt = list_first_entry(&sys_tx->free_desc_list,
-						  struct ipa_pkt_info,
-						  link);
-			list_del(&tx_pkt->link);
-
-retry_add_rx:
-			list_add_tail(&tx_pkt->link,
-					&sys_rx->head_desc_list);
-			ret = sps_transfer_one(sys_rx->pipe,
-					tx_pkt->dma_address,
-					IPA_RX_SKB_SIZE,
-					tx_pkt,
-					SPS_IOVEC_FLAG_INT);
-			if (ret) {
-				list_del(&tx_pkt->link);
-				pr_debug_ratelimited("%s: sps_transfer_one failed %d type=%d dir=%d\n",
-						__func__, ret, ctx->type, dir);
-				usleep_range(polling_min_sleep[dir],
-						polling_max_sleep[dir]);
-				goto retry_add_rx;
-			}
-
-retry_add_tx:
-			list_add_tail(&rx_pkt->link,
-					&sys_tx->head_desc_list);
-			ret = sps_transfer_one(sys_tx->pipe,
-					       rx_pkt->dma_address,
-					       iov.size,
-					       rx_pkt,
-					       SPS_IOVEC_FLAG_INT |
-					       SPS_IOVEC_FLAG_EOT);
-			if (ret) {
-				pr_debug_ratelimited("%s: fail to add to TX type=%d dir=%d\n",
-						__func__, ctx->type, dir);
-				list_del(&rx_pkt->link);
-				ipa_reclaim_tx(sys_tx, true);
-				usleep_range(polling_min_sleep[dir],
-						polling_max_sleep[dir]);
-				goto retry_add_tx;
-			}
-			IPA_STATS_INC_BRIDGE_CNT(ctx->type, dir,
-					ipa_ctx->stats.bridged_pkts);
-		}
-
-		if (inactive_cycles >= polling_inactivity[dir]) {
-			ipa_switch_to_intr_mode(dir, ctx);
-			break;
-		}
-	}
-}
-
-static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
-{
-	enum ipa_bridge_type type = (enum ipa_bridge_type) notify->user;
-
-	switch (notify->event_id) {
-	case SPS_EVENT_EOT:
-		ipa_switch_to_poll_mode(IPA_BRIDGE_DIR_UL, type);
-		queue_work(bridge[type].ul_wq, &bridge[type].ul_work);
-		break;
-	default:
-		IPAERR("recieved unexpected event id %d type %d\n",
-				notify->event_id, type);
-	}
-}
-
-static int setup_bridge_to_ipa(enum ipa_bridge_dir dir,
+static int setup_dma_bam_bridge(enum ipa_bridge_dir dir,
 			       enum ipa_bridge_type type,
 			       struct ipa_sys_connect_params *props,
 			       u32 *clnt_hdl)
 {
-	struct ipa_bridge_pipe_context *sys;
-	dma_addr_t dma_addr;
-	enum ipa_pipe_type pipe_type;
-	int ipa_ep_idx;
-	int ret;
-	int i;
-
-	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, props->client);
-	if (ipa_ep_idx == -1) {
-		IPAERR("Invalid client=%d mode=%d type=%d dir=%d\n",
-				props->client, ipa_ctx->mode, type, dir);
-		ret = -EINVAL;
-		goto alloc_endpoint_failed;
-	}
-
-	if (ipa_ctx->ep[ipa_ep_idx].valid) {
-		IPAERR("EP %d already allocated type=%d dir=%d\n", ipa_ep_idx,
-				type, dir);
-		ret = -EINVAL;
-		goto alloc_endpoint_failed;
-	}
-
-	pipe_type = (dir == IPA_BRIDGE_DIR_DL) ? IPA_DL_TO_IPA :
-						 IPA_UL_FROM_IPA;
-
-	sys = &bridge[type].pipe[pipe_type];
-	sys->pipe = sps_alloc_endpoint();
-	if (sys->pipe == NULL) {
-		IPAERR("alloc endpoint failed type=%d dir=%d\n", type, dir);
-		ret = -ENOMEM;
-		goto alloc_endpoint_failed;
-	}
-	ret = sps_get_config(sys->pipe, &sys->connection);
-	if (ret) {
-		IPAERR("get config failed %d type=%d dir=%d\n", ret, type, dir);
-		ret = -EINVAL;
-		goto get_config_failed;
-	}
-
-	if (dir == IPA_BRIDGE_DIR_DL) {
-		sys->connection.source = SPS_DEV_HANDLE_MEM;
-		sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
-		sys->connection.destination = ipa_ctx->bam_handle;
-		sys->connection.dest_pipe_index = ipa_ep_idx;
-		sys->connection.mode = SPS_MODE_DEST;
-		sys->connection.options =
-		   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
-	} else {
-		sys->connection.source = ipa_ctx->bam_handle;
-		sys->connection.src_pipe_index = ipa_ep_idx;
-		sys->connection.destination = SPS_DEV_HANDLE_MEM;
-		sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
-		sys->connection.mode = SPS_MODE_SRC;
-		sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
-		      SPS_O_ACK_TRANSFERS | SPS_O_NO_DISABLE;
-	}
-
-	sys->desc_mem_buf.size = props->desc_fifo_sz;
-	sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
-						    sys->desc_mem_buf.size,
-						    &dma_addr,
-						    0);
-	if (sys->desc_mem_buf.base == NULL) {
-		IPAERR("memory alloc failed type=%d dir=%d\n", type, dir);
-		ret = -ENOMEM;
-		goto get_config_failed;
-	}
-	sys->desc_mem_buf.phys_base = dma_addr;
-	memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
-	sys->connection.desc = sys->desc_mem_buf;
-	sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
-
-	ret = sps_connect(sys->pipe, &sys->connection);
-	if (ret < 0) {
-		IPAERR("connect error %d type=%d dir=%d\n", ret, type, dir);
-		goto connect_failed;
-	}
-
-	INIT_LIST_HEAD(&sys->head_desc_list);
-	INIT_LIST_HEAD(&sys->free_desc_list);
-
-	memset(&ipa_ctx->ep[ipa_ep_idx], 0,
-	       sizeof(struct ipa_ep_context));
-
-	ipa_ctx->ep[ipa_ep_idx].valid = 1;
-	ipa_ctx->ep[ipa_ep_idx].client_notify = props->notify;
-	ipa_ctx->ep[ipa_ep_idx].priv = props->priv;
-
-	ret = ipa_cfg_ep(ipa_ep_idx, &props->ipa_ep_cfg);
-	if (ret < 0) {
-		IPAERR("ep cfg set error %d type=%d dir=%d\n", ret, type, dir);
-		ipa_ctx->ep[ipa_ep_idx].valid = 0;
-		goto event_reg_failed;
-	}
-
-	if (dir == IPA_BRIDGE_DIR_UL) {
-		sys->register_event.options = SPS_O_EOT;
-		sys->register_event.mode = SPS_TRIGGER_CALLBACK;
-		sys->register_event.xfer_done = NULL;
-		sys->register_event.callback = ipa_sps_irq_rx_notify;
-		sys->register_event.user = (void *)type;
-		ret = sps_register_event(sys->pipe, &sys->register_event);
-		if (ret < 0) {
-			IPAERR("register event error %d type=%d dir=%d\n", ret,
-					type, dir);
-			goto event_reg_failed;
-		}
-
-		for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
-			ret = queue_rx_single(dir, type);
-			if (ret < 0)
-				IPAERR("queue fail dir=%d type=%d iter=%d\n",
-				       dir, type, i);
-		}
-	}
-
-	*clnt_hdl = ipa_ep_idx;
-	sys->valid = true;
-
-	return 0;
-
-event_reg_failed:
-	sps_disconnect(sys->pipe);
-connect_failed:
-	dma_free_coherent(NULL,
-			  sys->desc_mem_buf.size,
-			  sys->desc_mem_buf.base,
-			  sys->desc_mem_buf.phys_base);
-get_config_failed:
-	sps_free_endpoint(sys->pipe);
-alloc_endpoint_failed:
-	return ret;
-}
-
-static void bam_mux_rx_notify(struct sps_event_notify *notify)
-{
-	enum ipa_bridge_type type = (enum ipa_bridge_type) notify->user;
-
-	switch (notify->event_id) {
-	case SPS_EVENT_EOT:
-		ipa_switch_to_poll_mode(IPA_BRIDGE_DIR_DL, type);
-		queue_work(bridge[type].dl_wq, &bridge[type].dl_work);
-		break;
-	default:
-		IPAERR("recieved unexpected event id %d type %d\n",
-				notify->event_id, type);
-	}
-}
-
-static int setup_bridge_to_a2(enum ipa_bridge_dir dir,
-			      enum ipa_bridge_type type,
-			      u32 desc_fifo_sz)
-{
-	struct ipa_bridge_pipe_context *sys;
-	struct a2_mux_pipe_connection pipe_conn = { 0 };
-	dma_addr_t dma_addr;
-	u32 a2_handle;
+	struct ipa_connect_params ipa_in_params;
+	struct ipa_sps_params sps_out_params;
+	int dma_a2_pipe;
+	int dma_ipa_pipe;
+	struct sps_pipe *pipe;
+	struct sps_pipe *pipe_a2;
+	struct sps_connect _connection;
+	struct sps_connect *connection = &_connection;
+	struct a2_mux_pipe_connection pipe_conn = {0};
 	enum a2_mux_pipe_direction pipe_dir;
-	enum ipa_pipe_type pipe_type;
+	u32 dma_hdl = sps_dma_get_bam_handle();
+	u32 a2_hdl;
 	u32 pa;
 	int ret;
-	int i;
+
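+	/*
+	 * Each bridge leg is made of two SPS connections chained through the
+	 * BAM-DMA device (dma_hdl): an IPA-facing pipe and an A2-facing pipe.
+	 */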
+	memset(&ipa_in_params, 0, sizeof(ipa_in_params));
+	memset(&sps_out_params, 0, sizeof(sps_out_params));
 
 	pipe_dir = (dir == IPA_BRIDGE_DIR_UL) ? IPA_TO_A2 : A2_TO_IPA;
 
 	ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_conn);
 	if (ret) {
-		IPAERR("ipa_get_a2_mux_pipe_info failed type=%d dir=%d\n",
-				type, dir);
-		ret = -EINVAL;
-		goto alloc_endpoint_failed;
+		IPAERR("ipa_get_a2_mux_pipe_info failed dir=%d type=%d\n",
+				dir, type);
+		goto fail_get_a2_prop;
 	}
 
 	pa = (dir == IPA_BRIDGE_DIR_UL) ? pipe_conn.dst_phy_addr :
 					  pipe_conn.src_phy_addr;
 
-	ret = sps_phy2h(pa, &a2_handle);
+	ret = sps_phy2h(pa, &a2_hdl);
 	if (ret) {
-		IPAERR("sps_phy2h failed (A2 BAM) %d type=%d dir=%d\n",
-				ret, type, dir);
-		ret = -EINVAL;
-		goto alloc_endpoint_failed;
+		IPAERR("sps_phy2h failed (A2 BAM) %d dir=%d type=%d\n",
+				ret, dir, type);
+		goto fail_get_a2_prop;
 	}
 
-	pipe_type = (dir == IPA_BRIDGE_DIR_UL) ? IPA_UL_TO_A2 : IPA_DL_FROM_A2;
+	ipa_get_dma_pipe_num(dir, type, &dma_a2_pipe, &dma_ipa_pipe);
 
-	sys = &bridge[type].pipe[pipe_type];
-	sys->pipe = sps_alloc_endpoint();
-	if (sys->pipe == NULL) {
-		IPAERR("alloc endpoint failed type=%d dir=%d\n", type, dir);
+	ipa_in_params.ipa_ep_cfg = props->ipa_ep_cfg;
+	ipa_in_params.client = props->client;
+	ipa_in_params.client_bam_hdl = dma_hdl;
+	ipa_in_params.client_ep_idx = dma_ipa_pipe;
+	ipa_in_params.priv = props->priv;
+	ipa_in_params.notify = props->notify;
+	ipa_in_params.desc_fifo_sz = ipa_get_desc_fifo_sz(dir, type);
+	ipa_in_params.data_fifo_sz = ipa_get_data_fifo_sz(dir, type);
+
+	ret = ipa_connect(&ipa_in_params, &sps_out_params, clnt_hdl);
+	if (ret) {
+		IPAERR("ipa connect failed dir=%d type=%d\n", dir, type);
+		goto fail_get_a2_prop;
+	}
+
+	pipe = sps_alloc_endpoint();
+	if (pipe == NULL) {
+		IPAERR("sps_alloc_endpoint failed dir=%d type=%d\n", dir, type);
 		ret = -ENOMEM;
-		goto alloc_endpoint_failed;
+		goto fail_sps_alloc;
 	}
-	ret = sps_get_config(sys->pipe, &sys->connection);
+
+	memset(&_connection, 0, sizeof(_connection));
+	ret = sps_get_config(pipe, connection);
 	if (ret) {
-		IPAERR("get config failed %d type=%d dir=%d\n", ret, type, dir);
-		ret = -EINVAL;
-		goto get_config_failed;
+		IPAERR("sps_get_config failed %d dir=%d type=%d\n", ret, dir,
+				type);
+		goto fail_sps_get_config;
 	}
 
-	if (dir == IPA_BRIDGE_DIR_UL) {
-		sys->connection.source = SPS_DEV_HANDLE_MEM;
-		sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
-		sys->connection.destination = a2_handle;
-		if (type == IPA_BRIDGE_TYPE_TETHERED)
-			sys->connection.dest_pipe_index =
-			   pipe_conn.dst_pipe_index;
-		else
-			sys->connection.dest_pipe_index = A2_EMBEDDED_PIPE_TX;
-		sys->connection.mode = SPS_MODE_DEST;
-		sys->connection.options =
-		   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
-	} else {
-		sys->connection.source = a2_handle;
-		if (type == IPA_BRIDGE_TYPE_TETHERED)
-			sys->connection.src_pipe_index =
-			   pipe_conn.src_pipe_index;
-		else
-			sys->connection.src_pipe_index = A2_EMBEDDED_PIPE_RX;
-		sys->connection.destination = SPS_DEV_HANDLE_MEM;
-		sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
-		sys->connection.mode = SPS_MODE_SRC;
-		sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
-		      SPS_O_ACK_TRANSFERS;
-	}
-
-	sys->desc_mem_buf.size = desc_fifo_sz;
-	sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
-						    sys->desc_mem_buf.size,
-						    &dma_addr,
-						    0);
-	if (sys->desc_mem_buf.base == NULL) {
-		IPAERR("memory alloc failed type=%d dir=%d\n", type, dir);
-		ret = -ENOMEM;
-		goto get_config_failed;
-	}
-	sys->desc_mem_buf.phys_base = dma_addr;
-	memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
-	sys->connection.desc = sys->desc_mem_buf;
-	sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
-
-	ret = sps_connect(sys->pipe, &sys->connection);
-	if (ret < 0) {
-		IPAERR("connect error %d type=%d dir=%d\n", ret, type, dir);
-		ret = -EINVAL;
-		goto connect_failed;
-	}
-
-	INIT_LIST_HEAD(&sys->head_desc_list);
-	INIT_LIST_HEAD(&sys->free_desc_list);
-
 	if (dir == IPA_BRIDGE_DIR_DL) {
-		sys->register_event.options = SPS_O_EOT;
-		sys->register_event.mode = SPS_TRIGGER_CALLBACK;
-		sys->register_event.xfer_done = NULL;
-		sys->register_event.callback = bam_mux_rx_notify;
-		sys->register_event.user = (void *)type;
-		ret = sps_register_event(sys->pipe, &sys->register_event);
-		if (ret < 0) {
-			IPAERR("register event error %d type=%d dir=%d\n",
-					ret, type, dir);
-			ret = -EINVAL;
-			goto event_reg_failed;
-		}
-
-		for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
-			ret = queue_rx_single(dir, type);
-			if (ret < 0)
-				IPAERR("queue fail dir=%d type=%d iter=%d\n",
-				       dir, type, i);
-		}
+		connection->mode = SPS_MODE_SRC;
+		connection->source = dma_hdl;
+		connection->destination = sps_out_params.ipa_bam_hdl;
+		connection->src_pipe_index = dma_ipa_pipe;
+		connection->dest_pipe_index = sps_out_params.ipa_ep_idx;
+	} else {
+		connection->mode = SPS_MODE_DEST;
+		connection->source = sps_out_params.ipa_bam_hdl;
+		connection->destination = dma_hdl;
+		connection->src_pipe_index = sps_out_params.ipa_ep_idx;
+		connection->dest_pipe_index = dma_ipa_pipe;
 	}
 
-	sys->valid = true;
+	connection->event_thresh = IPA_EVENT_THRESHOLD;
+	connection->data = sps_out_params.data;
+	connection->desc = sps_out_params.desc;
+	connection->options = SPS_O_AUTO_ENABLE;
+
+	ret = sps_connect(pipe, connection);
+	if (ret) {
+		IPAERR("sps_connect failed %d dir=%d type=%d\n", ret, dir,
+				type);
+		goto fail_sps_get_config;
+	}
+
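+	/*
+	 * Record the IPA-facing leg; ipa_bridge_teardown() calls
+	 * ipa_disconnect() on it in addition to tearing down the SPS pipe.
+	 */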
+	if (dir == IPA_BRIDGE_DIR_DL) {
+		bridge[type].pipe[IPA_DL_TO_IPA].pipe = pipe;
+		bridge[type].pipe[IPA_DL_TO_IPA].ipa_facing = true;
+		bridge[type].pipe[IPA_DL_TO_IPA].valid = true;
+	} else {
+		bridge[type].pipe[IPA_UL_FROM_IPA].pipe = pipe;
+		bridge[type].pipe[IPA_UL_FROM_IPA].ipa_facing = true;
+		bridge[type].pipe[IPA_UL_FROM_IPA].valid = true;
+	}
+
+	IPADBG("dir=%d type=%d (ipa) src(0x%x:%u)->dst(0x%x:%u)\n", dir, type,
+			connection->source, connection->src_pipe_index,
+			connection->destination, connection->dest_pipe_index);
+
+	pipe_a2 = sps_alloc_endpoint();
+	if (pipe_a2 == NULL) {
+		IPAERR("sps_alloc_endpoint failed2 dir=%d type=%d\n", dir,
+				type);
+		ret = -ENOMEM;
+		goto fail_sps_alloc_a2;
+	}
+
+	memset(&_connection, 0, sizeof(_connection));
+	ret = sps_get_config(pipe_a2, connection);
+	if (ret) {
+		IPAERR("sps_get_config failed2 %d dir=%d type=%d\n", ret, dir,
+				type);
+		goto fail_sps_get_config_a2;
+	}
+
+	if (dir == IPA_BRIDGE_DIR_DL) {
+		connection->mode = SPS_MODE_DEST;
+		connection->source = a2_hdl;
+		connection->destination = dma_hdl;
+		connection->src_pipe_index = ipa_get_a2_pipe_num(dir, type);
+		connection->dest_pipe_index = dma_a2_pipe;
+	} else {
+		connection->mode = SPS_MODE_SRC;
+		connection->source = dma_hdl;
+		connection->destination = a2_hdl;
+		connection->src_pipe_index = dma_a2_pipe;
+		connection->dest_pipe_index = ipa_get_a2_pipe_num(dir, type);
+	}
+
+	connection->event_thresh = IPA_EVENT_THRESHOLD;
+
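+	/* Set up the descriptor and data FIFOs for the A2-facing connection */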
+	ret = ipa_setup_a2_dma_fifos(dir, type, &connection->desc,
+				&connection->data);
+	if (ret) {
+		IPAERR("fail to setup A2-DMA FIFOs dir=%d type=%d\n",
+				dir, type);
+		goto fail_sps_get_config_a2;
+	}
+
+	connection->options = SPS_O_AUTO_ENABLE;
+
+	ret = sps_connect(pipe_a2, connection);
+	if (ret) {
+		IPAERR("sps_connect failed2 %d dir=%d type=%d\n", ret, dir,
+				type);
+		goto fail_sps_get_config_a2;
+	}
+
+	if (dir == IPA_BRIDGE_DIR_DL) {
+		bridge[type].pipe[IPA_DL_FROM_A2].pipe = pipe_a2;
+		bridge[type].pipe[IPA_DL_FROM_A2].valid = true;
+	} else {
+		bridge[type].pipe[IPA_UL_TO_A2].pipe = pipe_a2;
+		bridge[type].pipe[IPA_UL_TO_A2].valid = true;
+	}
+
+	IPADBG("dir=%d type=%d (a2) src(0x%x:%u)->dst(0x%x:%u)\n", dir, type,
+			connection->source, connection->src_pipe_index,
+			connection->destination, connection->dest_pipe_index);
 
 	return 0;
 
-event_reg_failed:
-	sps_disconnect(sys->pipe);
-connect_failed:
-	dma_free_coherent(NULL,
-			  sys->desc_mem_buf.size,
-			  sys->desc_mem_buf.base,
-			  sys->desc_mem_buf.phys_base);
-get_config_failed:
-	sps_free_endpoint(sys->pipe);
-alloc_endpoint_failed:
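+	/* Unwind in reverse order of setup */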
+fail_sps_get_config_a2:
+	sps_free_endpoint(pipe_a2);
+fail_sps_alloc_a2:
+	sps_disconnect(pipe);
+fail_sps_get_config:
+	sps_free_endpoint(pipe);
+fail_sps_alloc:
+	ipa_disconnect(*clnt_hdl);
+fail_get_a2_prop:
 	return ret;
 }
 
 /**
- * ipa_bridge_init() - create workqueues and work items serving SW bridges
+ * ipa_bridge_init() - allocate SMEM pipe memory and initialize bridge state
  *
  * Return codes: 0: success, -ENOMEM: failure
  */
 int ipa_bridge_init(void)
 {
-	int ret;
 	int i;
 
-	bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq =
-		create_singlethread_workqueue("ipa_ul_teth");
-	if (!bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq) {
-		IPAERR("ipa ul teth wq alloc failed\n");
-		ret = -ENOMEM;
-		goto fail_ul_teth;
+	ipa_ctx->smem_pipe_mem = smem_alloc(SMEM_BAM_PIPE_MEMORY,
+			IPA_SMEM_PIPE_MEM_SZ);
+	if (!ipa_ctx->smem_pipe_mem) {
+		IPAERR("smem alloc failed\n");
+		return -ENOMEM;
 	}
+	IPADBG("smem_pipe_mem = %p\n", ipa_ctx->smem_pipe_mem);
 
-	bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq =
-		create_singlethread_workqueue("ipa_dl_teth");
-	if (!bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq) {
-		IPAERR("ipa dl teth wq alloc failed\n");
-		ret = -ENOMEM;
-		goto fail_dl_teth;
-	}
-
-	bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq =
-		create_singlethread_workqueue("ipa_ul_emb");
-	if (!bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq) {
-		IPAERR("ipa ul emb wq alloc failed\n");
-		ret = -ENOMEM;
-		goto fail_ul_emb;
-	}
-
-	bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq =
-		create_singlethread_workqueue("ipa_dl_emb");
-	if (!bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq) {
-		IPAERR("ipa dl emb wq alloc failed\n");
-		ret = -ENOMEM;
-		goto fail_dl_emb;
-	}
-
-	for (i = 0; i < IPA_BRIDGE_TYPE_MAX; i++) {
-		INIT_WORK(&bridge[i].ul_work, ul_work_func);
-		INIT_WORK(&bridge[i].dl_work, dl_work_func);
+	for (i = 0; i < IPA_BRIDGE_TYPE_MAX; i++)
 		bridge[i].type = i;
-	}
 
 	return 0;
-
-fail_dl_emb:
-	destroy_workqueue(bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq);
-fail_ul_emb:
-	destroy_workqueue(bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq);
-fail_dl_teth:
-	destroy_workqueue(bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq);
-fail_ul_teth:
-	return ret;
 }
 
 /**
@@ -720,7 +467,7 @@
 
 	if (props == NULL || clnt_hdl == NULL ||
 	    type >= IPA_BRIDGE_TYPE_MAX || dir >= IPA_BRIDGE_DIR_MAX ||
-	    props->client >= IPA_CLIENT_MAX || props->desc_fifo_sz == 0) {
+	    props->client >= IPA_CLIENT_MAX) {
 		IPAERR("Bad param props=%p clnt_hdl=%p type=%d dir=%d\n",
 		       props, clnt_hdl, type, dir);
 		return -EINVAL;
@@ -728,52 +475,21 @@
 
 	ipa_inc_client_enable_clks();
 
-	if (setup_bridge_to_ipa(dir, type, props, clnt_hdl)) {
+	if (setup_dma_bam_bridge(dir, type, props, clnt_hdl)) {
 		IPAERR("fail to setup SYS pipe to IPA dir=%d type=%d\n",
 		       dir, type);
 		ret = -EINVAL;
 		goto bail_ipa;
 	}
 
-	if (setup_bridge_to_a2(dir, type, props->desc_fifo_sz)) {
-		IPAERR("fail to setup SYS pipe to A2 dir=%d type=%d\n",
-		       dir, type);
-		ret = -EINVAL;
-		goto bail_a2;
-	}
-
-
 	return 0;
 
-bail_a2:
-	ipa_bridge_teardown(dir, type, *clnt_hdl);
 bail_ipa:
 	ipa_dec_client_disable_clks();
 	return ret;
 }
 EXPORT_SYMBOL(ipa_bridge_setup);
 
-static void ipa_bridge_free_pkt(struct ipa_pkt_info *pkt)
-{
-	list_del(&pkt->link);
-	dma_unmap_single(NULL, pkt->dma_address, IPA_RX_SKB_SIZE,
-			 DMA_BIDIRECTIONAL);
-	kfree(pkt->buffer);
-	kfree(pkt);
-}
-
-static void ipa_bridge_free_resources(struct ipa_bridge_pipe_context *pipe)
-{
-	struct ipa_pkt_info *pkt;
-	struct ipa_pkt_info *n;
-
-	list_for_each_entry_safe(pkt, n, &pipe->head_desc_list, link)
-		ipa_bridge_free_pkt(pkt);
-
-	list_for_each_entry_safe(pkt, n, &pipe->free_desc_list, link)
-		ipa_bridge_free_pkt(pkt);
-}
-
 /**
  * ipa_bridge_teardown() - teardown SW bridge leg
  * @dir: downlink or uplink (from air interface perspective)
@@ -808,12 +524,10 @@
 	for (; lo <= hi; lo++) {
 		sys = &bridge[type].pipe[lo];
 		if (sys->valid) {
+			if (sys->ipa_facing)
+				ipa_disconnect(clnt_hdl);
 			sps_disconnect(sys->pipe);
-			dma_free_coherent(NULL, sys->desc_mem_buf.size,
-					  sys->desc_mem_buf.base,
-					  sys->desc_mem_buf.phys_base);
 			sps_free_endpoint(sys->pipe);
-			ipa_bridge_free_resources(sys);
 			sys->valid = false;
 		}
 	}
@@ -825,19 +539,3 @@
 	return 0;
 }
 EXPORT_SYMBOL(ipa_bridge_teardown);
-
-/**
- * ipa_bridge_cleanup() - destroy workqueues serving the SW bridges
- *
- * Return codes:
- * None
- */
-void ipa_bridge_cleanup(void)
-{
-	int i;
-
-	for (i = 0; i < IPA_BRIDGE_TYPE_MAX; i++) {
-		destroy_workqueue(bridge[i].dl_wq);
-		destroy_workqueue(bridge[i].ul_wq);
-	}
-}
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
index 6033510..a78879d 100644
--- a/drivers/platform/msm/ipa/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -13,8 +13,6 @@
 #include <linux/delay.h>
 #include "ipa_i.h"
 
-#define IPA_HOLB_TMR_VAL 0xff
-
 static void ipa_enable_data_path(u32 clnt_hdl)
 {
 	struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl];
@@ -302,13 +300,13 @@
 			in->client == IPA_CLIENT_HSIC3_CONS ||
 			in->client == IPA_CLIENT_HSIC4_CONS) {
 		IPADBG("disable holb for ep=%d tmr=%d\n", ipa_ep_idx,
-			IPA_HOLB_TMR_VAL);
+			ipa_ctx->hol_timer);
 		ipa_write_reg(ipa_ctx->mmio,
 			IPA_ENDP_INIT_HOL_BLOCK_EN_n_OFST(ipa_ep_idx),
-			0x1);
+			ipa_ctx->hol_en);
 		ipa_write_reg(ipa_ctx->mmio,
 			IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OFST(ipa_ep_idx),
-			IPA_HOLB_TMR_VAL);
+			ipa_ctx->hol_timer);
 	}
 
 	IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
index bc5aa6f..fb69817 100644
--- a/drivers/platform/msm/ipa/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -94,6 +94,8 @@
 static struct dentry *dent;
 static struct dentry *dfile_gen_reg;
 static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_ep_hol_en;
+static struct dentry *dfile_ep_hol_timer;
 static struct dentry *dfile_hdr;
 static struct dentry *dfile_ip4_rt;
 static struct dentry *dfile_ip6_rt;
@@ -144,6 +146,58 @@
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
 }
 
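+/*
+ * debugfs "hol_en" write handler: programs HOL-block enable for the endpoint
+ * selected via ep_reg_idx and caches the value in ipa_ctx->hol_en for reuse
+ * when HSIC consumer pipes are connected.
+ */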
+static ssize_t ipa_write_ep_hol_en_reg(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	u32 endp_reg_val;
+	unsigned long missing;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtou32(dbg_buff, 16, &endp_reg_val))
+		return -EFAULT;
+
+	ipa_write_reg(ipa_ctx->mmio,
+		IPA_ENDP_INIT_HOL_BLOCK_EN_n_OFST(ep_reg_idx),
+		endp_reg_val);
+
+	ipa_ctx->hol_en = endp_reg_val;
+
+	return count;
+}
+
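+/*
+ * debugfs "hol_timer" write handler: same flow as "hol_en", but for the
+ * HOL-block timer register (cached in ipa_ctx->hol_timer).
+ */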
+static ssize_t ipa_write_ep_hol_timer_reg(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	u32 endp_reg_val;
+	unsigned long missing;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtou32(dbg_buff, 16, &endp_reg_val))
+		return -EFAULT;
+
+	ipa_write_reg(ipa_ctx->mmio,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OFST(ep_reg_idx),
+		endp_reg_val);
+
+	ipa_ctx->hol_timer = endp_reg_val;
+
+	return count;
+}
+
 static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf,
 		size_t count, loff_t *ppos)
 {
@@ -556,7 +610,13 @@
 			"x_intr_repost=%u\n"
 			"rx_q_len=%u\n"
 			"act_clnt=%u\n"
-			"con_clnt_bmap=0x%x\n",
+			"con_clnt_bmap=0x%x\n"
+			"a2_power_on_reqs_in=%u\n"
+			"a2_power_on_reqs_out=%u\n"
+			"a2_power_off_reqs_in=%u\n"
+			"a2_power_off_reqs_out=%u\n"
+			"a2_power_modem_acks=%u\n"
+			"a2_power_apps_acks=%u\n",
 			ipa_ctx->stats.tx_sw_pkts,
 			ipa_ctx->stats.tx_hw_pkts,
 			ipa_ctx->stats.rx_pkts,
@@ -564,7 +624,13 @@
 			ipa_ctx->stats.x_intr_repost,
 			ipa_ctx->stats.rx_q_len,
 			ipa_ctx->ipa_active_clients,
-			connect);
+			connect,
+			ipa_ctx->stats.a2_power_on_reqs_in,
+			ipa_ctx->stats.a2_power_on_reqs_out,
+			ipa_ctx->stats.a2_power_off_reqs_in,
+			ipa_ctx->stats.a2_power_off_reqs_out,
+			ipa_ctx->stats.a2_power_modem_acks,
+			ipa_ctx->stats.a2_power_apps_acks);
 	cnt += nbytes;
 
 	for (i = 0; i < MAX_NUM_EXCP; i++) {
@@ -663,6 +729,13 @@
 	.write = ipa_write_ep_reg,
 };
 
+const struct file_operations ipa_ep_hol_en_ops = {
+	.write = ipa_write_ep_hol_en_reg,
+};
+const struct file_operations ipa_ep_hol_timer_ops = {
+	.write = ipa_write_ep_hol_timer_reg,
+};
+
 const struct file_operations ipa_hdr_ops = {
 	.read = ipa_read_hdr,
 };
@@ -695,6 +768,7 @@
 	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
 	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
 			S_IWUSR | S_IWGRP | S_IWOTH;
+	const mode_t write_only_mode = S_IWUSR | S_IWGRP | S_IWOTH;
 
 	dent = debugfs_create_dir("ipa", 0);
 	if (IS_ERR(dent)) {
@@ -716,6 +790,20 @@
 		goto fail;
 	}
 
+	dfile_ep_hol_en = debugfs_create_file("hol_en", write_only_mode, dent,
+			0, &ipa_ep_hol_en_ops);
+	if (!dfile_ep_hol_en || IS_ERR(dfile_ep_hol_en)) {
+		IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n");
+		goto fail;
+	}
+
+	dfile_ep_hol_timer = debugfs_create_file("hol_timer", write_only_mode,
+			dent, 0, &ipa_ep_hol_timer_ops);
+	if (!dfile_ep_hol_timer || IS_ERR(dfile_ep_hol_timer)) {
+		IPAERR("fail to create file for debug_fs dfile_ep_hol_timer\n");
+		goto fail;
+	}
+
 	dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
 			&ipa_hdr_ops);
 	if (!dfile_hdr || IS_ERR(dfile_hdr)) {
diff --git a/drivers/platform/msm/ipa/ipa_flt.c b/drivers/platform/msm/ipa/ipa_flt.c
index b63b939..edb9fb1 100644
--- a/drivers/platform/msm/ipa/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_flt.c
@@ -368,6 +368,7 @@
 	return 0;
 proc_err:
 	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	mem->base = NULL;
 error:
 
 	return -EPERM;
@@ -456,7 +457,7 @@
 
 	if (mem->size > avail) {
 		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
-		goto fail_hw_tbl_gen;
+		goto fail_send_cmd;
 	}
 
 	if (ip == IPA_IP_v4) {
diff --git a/drivers/platform/msm/ipa/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_hdr.c
index 7d0bc24..9618da2 100644
--- a/drivers/platform/msm/ipa/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_hdr.c
@@ -89,7 +89,7 @@
 	if (ipa_ctx->hdr_tbl_lcl && mem->size > IPA_RAM_HDR_SIZE) {
 		IPAERR("tbl too big, needed %d avail %d\n", mem->size,
 				IPA_RAM_HDR_SIZE);
-		goto fail_hw_tbl_gen;
+		goto fail_send_cmd;
 	}
 
 	cmd->hdr_table_addr = mem->phys_base;
@@ -126,7 +126,7 @@
 	return 0;
 
 fail_send_cmd:
-	if (mem->phys_base)
+	if (mem->base)
 		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
 fail_hw_tbl_gen:
 	kfree(cmd);
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
index a03ba16..cc3e630 100644
--- a/drivers/platform/msm/ipa/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -30,7 +30,8 @@
 #define IPA_COOKIE 0xfacefeed
 
 #define IPA_NUM_PIPES 0x14
-#define IPA_SYS_DESC_FIFO_SZ (0x800)
+#define IPA_SYS_DESC_FIFO_SZ 0x800
+#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
 
 #ifdef IPA_DEBUG
 #define IPADBG(fmt, args...) \
@@ -40,10 +41,11 @@
 #define IPADBG(fmt, args...)
 #endif
 
-#define WLAN_AMPDU_TX_EP (15)
-#define WLAN_PROD_TX_EP (19)
-#define MAX_NUM_EXCP	 (8)
-#define MAX_NUM_IMM_CMD	 (17)
+#define WLAN_AMPDU_TX_EP 15
+#define WLAN_PROD_TX_EP  19
+
+#define MAX_NUM_EXCP     8
+#define MAX_NUM_IMM_CMD 17
 
 #define IPA_STATS
 
@@ -531,6 +533,12 @@
 	u32 rx_q_len;
 	u32 msg_w[IPA_EVENT_MAX];
 	u32 msg_r[IPA_EVENT_MAX];
+	u32 a2_power_on_reqs_in;
+	u32 a2_power_on_reqs_out;
+	u32 a2_power_off_reqs_in;
+	u32 a2_power_off_reqs_out;
+	u32 a2_power_modem_acks;
+	u32 a2_power_apps_acks;
 };
 
 /**
@@ -659,6 +667,10 @@
 	enum ipa_hw_mode ipa_hw_mode;
 	/* featurize if memory footprint becomes a concern */
 	struct ipa_stats stats;
+	void *smem_pipe_mem;
+	/* store HOLB configuration for WLAN TX pipes */
+	u32 hol_en;
+	u32 hol_timer;
 };
 
 /**
@@ -743,7 +755,7 @@
 				struct a2_mux_pipe_connection *pipe_connect);
 int ipa_get_a2_mux_bam_info(u32 *a2_bam_mem_base, u32 *a2_bam_mem_size,
 			    u32 *a2_bam_irq);
-void rmnet_bridge_get_client_handles(u32 *producer_handle,
+void teth_bridge_get_client_handles(u32 *producer_handle,
 		u32 *consumer_handle);
 int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
 		bool in_atomic);
diff --git a/drivers/platform/msm/ipa/ipa_rt.c b/drivers/platform/msm/ipa/ipa_rt.c
index fc5f668..6430c07 100644
--- a/drivers/platform/msm/ipa/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_rt.c
@@ -305,6 +305,7 @@
 			  rt_tbl_mem.base, rt_tbl_mem.phys_base);
 proc_err:
 	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	mem->base = NULL;
 error:
 	return -EPERM;
 }
@@ -378,7 +379,7 @@
 
 	if (mem->size > avail) {
 		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
-		goto fail_hw_tbl_gen;
+		goto fail_send_cmd;
 	}
 
 	if (ip == IPA_IP_v4) {
@@ -413,7 +414,7 @@
 	return 0;
 
 fail_send_cmd:
-	if (mem->phys_base)
+	if (mem->base)
 		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
 fail_hw_tbl_gen:
 	kfree(cmd);
diff --git a/drivers/platform/msm/ipa/rmnet_bridge.c b/drivers/platform/msm/ipa/rmnet_bridge.c
deleted file mode 100644
index 696b363..0000000
--- a/drivers/platform/msm/ipa/rmnet_bridge.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <mach/bam_dmux.h>
-#include <mach/ipa.h>
-#include <mach/sps.h>
-
-static struct rmnet_bridge_cb_type {
-	u32 producer_handle;
-	u32 consumer_handle;
-	u32 ipa_producer_handle;
-	u32 ipa_consumer_handle;
-	bool is_connected;
-} rmnet_bridge_cb;
-
-/**
-* rmnet_bridge_init() - Initialize RmNet bridge module
-*
-* Return codes:
-* 0: success
-*/
-int rmnet_bridge_init(void)
-{
-	memset(&rmnet_bridge_cb, 0, sizeof(struct rmnet_bridge_cb_type));
-
-	return 0;
-}
-EXPORT_SYMBOL(rmnet_bridge_init);
-
-/**
-* rmnet_bridge_disconnect() - Disconnect RmNet bridge module
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int rmnet_bridge_disconnect(void)
-{
-	int ret = 0;
-	if (false == rmnet_bridge_cb.is_connected) {
-		pr_err("%s: trying to disconnect already disconnected RmNet bridge\n",
-		       __func__);
-		goto bail;
-	}
-
-	rmnet_bridge_cb.is_connected = false;
-
-	ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
-				  rmnet_bridge_cb.ipa_consumer_handle);
-	ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
-				  rmnet_bridge_cb.ipa_producer_handle);
-bail:
-	return ret;
-}
-EXPORT_SYMBOL(rmnet_bridge_disconnect);
-
-/**
-* rmnet_bridge_connect() - Connect RmNet bridge module
-* @producer_hdl:	IPA producer handle
-* @consumer_hdl:	IPA consumer handle
-* @wwan_logical_channel_id:	WWAN logical channel ID
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int rmnet_bridge_connect(u32 producer_hdl,
-			 u32 consumer_hdl,
-			 int wwan_logical_channel_id)
-{
-	struct ipa_sys_connect_params props;
-	int ret = 0;
-
-	if (true == rmnet_bridge_cb.is_connected) {
-		ret = 0;
-		pr_err("%s: trying to connect already connected RmNet bridge\n",
-		       __func__);
-		goto bail;
-	}
-
-	rmnet_bridge_cb.consumer_handle = consumer_hdl;
-	rmnet_bridge_cb.producer_handle = producer_hdl;
-	rmnet_bridge_cb.is_connected = true;
-
-	memset(&props, 0, sizeof(props));
-	props.ipa_ep_cfg.mode.mode = IPA_DMA;
-	props.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
-	props.client = IPA_CLIENT_A2_TETHERED_PROD;
-	props.desc_fifo_sz = 0x800;
-	/* setup notification callback if needed */
-
-	ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
-			&props, &rmnet_bridge_cb.ipa_consumer_handle);
-	if (ret) {
-		pr_err("%s: IPA DL bridge setup failure\n", __func__);
-		goto bail_dl;
-	}
-
-	memset(&props, 0, sizeof(props));
-	props.client = IPA_CLIENT_A2_TETHERED_CONS;
-	props.desc_fifo_sz = 0x800;
-	/* setup notification callback if needed */
-
-	ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
-			&props, &rmnet_bridge_cb.ipa_producer_handle);
-	if (ret) {
-		pr_err("%s: IPA UL bridge setup failure\n", __func__);
-		goto bail_ul;
-	}
-	return 0;
-bail_ul:
-	ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
-			    rmnet_bridge_cb.ipa_consumer_handle);
-bail_dl:
-	rmnet_bridge_cb.is_connected = false;
-bail:
-	return ret;
-}
-EXPORT_SYMBOL(rmnet_bridge_connect);
-
-void rmnet_bridge_get_client_handles(u32 *producer_handle,
-		u32 *consumer_handle)
-{
-	if (producer_handle == NULL || consumer_handle == NULL)
-		return;
-
-	*producer_handle = rmnet_bridge_cb.producer_handle;
-	*consumer_handle = rmnet_bridge_cb.consumer_handle;
-}
diff --git a/drivers/platform/msm/ipa/teth_bridge.c b/drivers/platform/msm/ipa/teth_bridge.c
index 774c0e6..40c8fc7 100644
--- a/drivers/platform/msm/ipa/teth_bridge.c
+++ b/drivers/platform/msm/ipa/teth_bridge.c
@@ -60,6 +60,15 @@
 
 #define TETH_MTU_BYTE 1500
 
+#define TETH_INACTIVITY_TIME_MSEC (1000)
+
+#define TETH_WORKQUEUE_NAME "tethering_bridge_wq"
+
+#define TETH_TOTAL_HDR_ENTRIES 6
+#define TETH_TOTAL_RT_ENTRIES_IP 3
+#define TETH_TOTAL_FLT_ENTRIES_IP 2
+#define TETH_IP_FAMILIES 2
+
 struct mac_addresses_type {
 	u8 host_pc_mac_addr[ETH_ALEN];
 	bool host_pc_mac_addr_known;
@@ -70,6 +79,7 @@
 struct stats {
 	u64 a2_to_usb_num_sw_tx_packets;
 	u64 usb_to_a2_num_sw_tx_packets;
+	u64 num_sw_tx_packets_during_resource_wakeup;
 };
 
 struct teth_bridge_ctx {
@@ -94,9 +104,24 @@
 	bool comp_hw_bridge_in_progress;
 	struct teth_aggr_capabilities *aggr_caps;
 	struct stats stats;
+	struct workqueue_struct *teth_wq;
+	u16 a2_ipa_hdr_len;
+	struct ipa_ioc_del_hdr *hdr_del;
+	struct ipa_ioc_del_rt_rule *routing_del[TETH_IP_FAMILIES];
+	struct ipa_ioc_del_flt_rule *filtering_del[TETH_IP_FAMILIES];
+};
+static struct teth_bridge_ctx *teth_ctx;
+
+enum teth_packet_direction {
+	TETH_USB_TO_A2,
+	TETH_A2_TO_USB,
 };
 
-static struct teth_bridge_ctx *teth_ctx;
+struct teth_work {
+	struct work_struct work;
+	struct sk_buff *skb;
+	enum teth_packet_direction dir;
+};
 
 #ifdef CONFIG_DEBUG_FS
 #define TETH_MAX_MSG_LEN 512
@@ -110,6 +135,7 @@
 	struct ipa_ioc_add_hdr *hdrs;
 	struct ethhdr hdr_ipv4;
 	struct ethhdr hdr_ipv6;
+	int idx1;
 
 	TETH_DBG_FUNC_ENTRY();
 	memcpy(hdr_ipv4.h_source, src_mac_addr, ETH_ALEN);
@@ -144,6 +170,13 @@
 	res = ipa_add_hdr(hdrs);
 	if (res || hdrs->hdr[0].status || hdrs->hdr[1].status)
 		TETH_ERR("Header insertion failed\n");
+
+	/* Save the header handles in order to delete them later */
+	for (idx1 = 0; idx1 < hdrs->num_hdrs; idx1++) {
+		int idx2 = teth_ctx->hdr_del->num_hdls++;
+		teth_ctx->hdr_del->hdl[idx2].hdl = hdrs->hdr[idx1].hdr_hdl;
+	}
+
 	kfree(hdrs);
 	TETH_DBG_FUNC_EXIT();
 
@@ -169,6 +202,7 @@
 	}
 
 	hdr_cfg.hdr_len = a2_ipa_hdr_len;
+	teth_ctx->a2_ipa_hdr_len = a2_ipa_hdr_len;
 	res = ipa_cfg_ep_hdr(teth_ctx->a2_ipa_pipe_hdl, &hdr_cfg);
 	if (res) {
 		TETH_ERR("Header removal config for A2->IPA pipe failed\n");
@@ -200,6 +234,7 @@
 	int res;
 	struct ipa_ioc_add_hdr *mbim_hdr;
 	u8 mbim_stream_id = 0;
+	int idx;
 
 	TETH_DBG_FUNC_ENTRY();
 	mbim_hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
@@ -223,6 +258,11 @@
 	} else {
 		TETH_DBG("Added MBIM stream ID header\n");
 	}
+
+	/* Save the header handle in order to delete it later */
+	idx = teth_ctx->hdr_del->num_hdls++;
+	teth_ctx->hdr_del->hdl[idx].hdl = mbim_hdr->hdr[0].hdr_hdl;
+
 	kfree(mbim_hdr);
 	TETH_DBG_FUNC_EXIT();
 
@@ -299,6 +339,7 @@
 	struct ipa_ioc_add_rt_rule *rt_rule;
 	struct ipa_ioc_get_hdr hdr_info;
 	int res;
+	int idx;
 
 	TETH_DBG_FUNC_ENTRY();
 	/* Get the header handle */
@@ -325,6 +366,12 @@
 	res = ipa_add_rt_rule(rt_rule);
 	if (res || rt_rule->rules[0].status)
 		TETH_ERR("Failed adding routing rule\n");
+
+	/* Save the routing rule handle in order to delete it later */
+	idx = teth_ctx->routing_del[ip_address_family]->num_hdls++;
+	teth_ctx->routing_del[ip_address_family]->hdl[idx].hdl =
+		rt_rule->rules[0].rt_rule_hdl;
+
 	kfree(rt_rule);
 	TETH_DBG_FUNC_EXIT();
 
@@ -432,6 +479,7 @@
 	struct ipa_ioc_add_flt_rule *flt_tbl;
 	struct ipa_ioc_get_rt_tbl rt_tbl_info;
 	int res;
+	int idx;
 
 	TETH_DBG_FUNC_ENTRY();
 	/* Get the needed routing table handle */
@@ -462,6 +510,12 @@
 	res = ipa_add_flt_rule(flt_tbl);
 	if (res || flt_tbl->rules[0].status)
 		TETH_ERR("Failed adding filtering table\n");
+
+	/* Save the filtering rule handle in order to delete it later */
+	idx = teth_ctx->filtering_del[ip_address_family]->num_hdls++;
+	teth_ctx->filtering_del[ip_address_family]->hdl[idx].hdl =
+		flt_tbl->rules[0].flt_rule_hdl;
+
 	kfree(flt_tbl);
 	TETH_DBG_FUNC_EXIT();
 
@@ -568,7 +622,6 @@
 	u32 pipe_hdl)
 {
 	struct ipa_ep_cfg_aggr agg_params;
-	struct ipa_ep_cfg_hdr hdr_params;
 	int res;
 
 	TETH_DBG_FUNC_ENTRY();
@@ -585,18 +638,7 @@
 		TETH_ERR("ipa_cfg_ep_aggr() failed\n");
 		goto bail;
 	}
-
-	if (!client_is_prod) {
-		memset(&hdr_params, 0, sizeof(hdr_params));
-		hdr_params.hdr_len = 1;
-		res = ipa_cfg_ep_hdr(pipe_hdl, &hdr_params);
-		if (res) {
-			TETH_ERR("ipa_cfg_ep_hdr() failed\n");
-			goto bail;
-		}
-	}
 	TETH_DBG_FUNC_EXIT();
-
 bail:
 	return res;
 }
@@ -627,6 +669,19 @@
 	char aggr_prot_str[20];
 
 	TETH_DBG_FUNC_ENTRY();
+	if (!teth_ctx->aggr_params_known) {
+		TETH_ERR("Aggregation parameters unknown.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Return 0 in case the pipe handles are 0 because the aggregation
+	 * params will be set later
+	 */
+	if ((teth_ctx->usb_ipa_pipe_hdl == 0) ||
+	    (teth_ctx->ipa_usb_pipe_hdl == 0))
+		return 0;
+
 	if (teth_ctx->aggr_params.ul.aggr_prot == TETH_AGGR_PROTOCOL_MBIM ||
 	    teth_ctx->aggr_params.dl.aggr_prot == TETH_AGGR_PROTOCOL_MBIM) {
 		res = ipa_set_aggr_mode(IPA_MBIM);
@@ -672,6 +727,23 @@
 	return res;
 }
 
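+/*
+ * Request the BRIDGE_PROD resource; if the grant is still in progress,
+ * block until the resource-granted callback signals is_bridge_prod_up.
+ */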
+static int teth_request_resource(void)
+{
+	int res;
+
+	INIT_COMPLETION(teth_ctx->is_bridge_prod_up);
+	res = ipa_rm_inactivity_timer_request_resource(
+		IPA_RM_RESOURCE_BRIDGE_PROD);
+	if (res < 0) {
+		if (res == -EINPROGRESS)
+			wait_for_completion(&teth_ctx->is_bridge_prod_up);
+		else
+			return res;
+	}
+
+	return 0;
+}
+
 static void complete_hw_bridge(struct work_struct *work)
 {
 	int res;
@@ -682,6 +754,12 @@
 		 "ETHERNET" :
 		 "IP");
 
+	res = teth_request_resource();
+	if (res) {
+		TETH_ERR("request_resource() failed.\n");
+		goto bail;
+	}
+
 	res = teth_set_aggregation();
 	if (res) {
 		TETH_ERR("Failed setting aggregation params\n");
@@ -719,6 +797,7 @@
 	teth_ctx->is_hw_bridge_complete = true;
 bail:
 	teth_ctx->comp_hw_bridge_in_progress = false;
+	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
 	TETH_DBG_FUNC_EXIT();
 
 	return;
@@ -758,10 +837,78 @@
 	    (teth_ctx->aggr_params_known)) {
 		INIT_WORK(&teth_ctx->comp_hw_bridge_work, complete_hw_bridge);
 		teth_ctx->comp_hw_bridge_in_progress = true;
-		schedule_work(&teth_ctx->comp_hw_bridge_work);
+		queue_work(teth_ctx->teth_wq, &teth_ctx->comp_hw_bridge_work);
 	}
 }
 
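+/*
+ * Deferred-send worker: runs on the tethering workqueue for packets that
+ * arrived while BRIDGE_PROD was still waking up; waits for the resource
+ * and then forwards the skb in its original direction.
+ */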
+static void teth_send_skb_work(struct work_struct *work)
+{
+	struct teth_work *work_data =
+		container_of(work, struct teth_work, work);
+	int res;
+
+	res = teth_request_resource();
+	if (res) {
+		TETH_ERR("Packet send failure, dropping packet !\n");
+		goto bail;
+	}
+
+	switch (work_data->dir) {
+	case TETH_USB_TO_A2:
+		res = a2_mux_write(A2_MUX_TETHERED_0, work_data->skb);
+		if (res) {
+			TETH_ERR("Packet send failure, dropping packet !\n");
+			goto bail;
+		}
+		teth_ctx->stats.usb_to_a2_num_sw_tx_packets++;
+		break;
+
+	case TETH_A2_TO_USB:
+		res = ipa_tx_dp(IPA_CLIENT_USB_CONS, work_data->skb, NULL);
+		if (res) {
+			TETH_ERR("Packet send failure, dropping packet !\n");
+			goto bail;
+		}
+		teth_ctx->stats.a2_to_usb_num_sw_tx_packets++;
+		break;
+
+	default:
+		TETH_ERR("Unsupported direction to send !\n");
+		WARN_ON(1);
+	}
+	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+	kfree(work_data);
+	teth_ctx->stats.num_sw_tx_packets_during_resource_wakeup++;
+
+	return;
+bail:
+	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+	dev_kfree_skb(work_data->skb);
+	kfree(work_data);
+}
+
+static void defer_skb_send(struct sk_buff *skb, enum teth_packet_direction dir)
+{
+	struct teth_work *work = kmalloc(sizeof(struct teth_work), GFP_KERNEL);
+
+	if (!work) {
+		TETH_ERR("No mem, dropping packet\n");
+		dev_kfree_skb(skb);
+		ipa_rm_inactivity_timer_release_resource
+			(IPA_RM_RESOURCE_BRIDGE_PROD);
+		return;
+	}
+
+	/*
+	 * Since IPA uses a single Rx thread, we don't
+	 * want to wait for completion here
+	 */
+	INIT_WORK(&work->work, teth_send_skb_work);
+	work->dir = dir;
+	work->skb = skb;
+	queue_work(teth_ctx->teth_wq, &work->work);
+}
+
 static void usb_notify_cb(void *priv,
 			  enum ipa_dp_evt_type evt,
 			  unsigned long data)
@@ -778,13 +925,36 @@
 				&teth_ctx->mac_addresses.host_pc_mac_addr_known,
 				&teth_ctx->mac_addresses.device_mac_addr_known);
 
-		/* Send the packet to A2, using a2_service driver API */
-		teth_ctx->stats.usb_to_a2_num_sw_tx_packets++;
+		/*
+		 * Request the BRIDGE_PROD resource, send the packet and release
+		 * the resource
+		 */
+		res = ipa_rm_inactivity_timer_request_resource(
+			IPA_RM_RESOURCE_BRIDGE_PROD);
+		if (res < 0) {
+			if (res == -EINPROGRESS) {
+				/* The resource is waking up */
+				defer_skb_send(skb, TETH_USB_TO_A2);
+			} else {
+				TETH_ERR(
+					"Packet send failure, dropping packet !\n");
+				dev_kfree_skb(skb);
+			}
+			ipa_rm_inactivity_timer_release_resource(
+				IPA_RM_RESOURCE_BRIDGE_PROD);
+			return;
+		}
 		res = a2_mux_write(A2_MUX_TETHERED_0, skb);
 		if (res) {
 			TETH_ERR("Packet send failure, dropping packet !\n");
 			dev_kfree_skb(skb);
+			ipa_rm_inactivity_timer_release_resource(
+				IPA_RM_RESOURCE_BRIDGE_PROD);
+			return;
 		}
+		teth_ctx->stats.usb_to_a2_num_sw_tx_packets++;
+		ipa_rm_inactivity_timer_release_resource(
+			IPA_RM_RESOURCE_BRIDGE_PROD);
 		break;
 
 	case IPA_WRITE_DONE:
@@ -816,13 +986,37 @@
 				&teth_ctx->
 				mac_addresses.host_pc_mac_addr_known);
 
-		/* Send the packet to USB */
-		teth_ctx->stats.a2_to_usb_num_sw_tx_packets++;
+		/*
+		 * Request the BRIDGE_PROD resource, send the packet and release
+		 * the resource
+		 */
+		res = ipa_rm_inactivity_timer_request_resource(
+			IPA_RM_RESOURCE_BRIDGE_PROD);
+		if (res < 0) {
+			if (res == -EINPROGRESS) {
+				/* The resource is waking up */
+				defer_skb_send(skb, TETH_A2_TO_USB);
+			} else {
+				TETH_ERR(
+					"Packet send failure, dropping packet !\n");
+				dev_kfree_skb(skb);
+			}
+			ipa_rm_inactivity_timer_release_resource(
+				IPA_RM_RESOURCE_BRIDGE_PROD);
+			return;
+		}
+
 		res = ipa_tx_dp(IPA_CLIENT_USB_CONS, skb, NULL);
 		if (res) {
 			TETH_ERR("Packet send failure, dropping packet !\n");
 			dev_kfree_skb(skb);
+			ipa_rm_inactivity_timer_release_resource(
+				IPA_RM_RESOURCE_BRIDGE_PROD);
+			return;
 		}
+		teth_ctx->stats.a2_to_usb_num_sw_tx_packets++;
+		ipa_rm_inactivity_timer_release_resource(
+			IPA_RM_RESOURCE_BRIDGE_PROD);
 		break;
 
 	case A2_MUX_WRITE_DONE:
@@ -841,8 +1035,28 @@
 				  enum ipa_rm_event event,
 				  unsigned long data)
 {
+	int res;
+	struct ipa_ep_cfg ipa_ep_cfg;
+
 	switch (event) {
 	case IPA_RM_RESOURCE_GRANTED:
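+		/*
+		 * On grant, re-fetch the tethered A2 pipe handles and restore
+		 * their endpoint configuration before completing
+		 * is_bridge_prod_up.
+		 */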
+		res = a2_mux_get_tethered_client_handles(
+			A2_MUX_TETHERED_0,
+			&teth_ctx->ipa_a2_pipe_hdl,
+			&teth_ctx->a2_ipa_pipe_hdl);
+		if (res) {
+			TETH_ERR(
+				"a2_mux_get_tethered_client_handles() failed, res = %d\n",
+				res);
+			return;
+		}
+
+		/* Reset the various endpoints configuration */
+		memset(&ipa_ep_cfg, 0, sizeof(ipa_ep_cfg));
+		ipa_cfg_ep(teth_ctx->ipa_a2_pipe_hdl, &ipa_ep_cfg);
+
+		ipa_ep_cfg.hdr.hdr_len = teth_ctx->a2_ipa_hdr_len;
+		ipa_cfg_ep(teth_ctx->a2_ipa_pipe_hdl, &ipa_ep_cfg);
 		complete(&teth_ctx->is_bridge_prod_up);
 		break;
 
@@ -886,32 +1100,34 @@
 	/* Build IPA Resource manager dependency graph */
 	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
 				    IPA_RM_RESOURCE_USB_CONS);
-	if (res && res != -EEXIST) {
+	if (res && res != -EINPROGRESS) {
 		TETH_ERR("ipa_rm_add_dependency() failed\n");
 		goto bail;
 	}
 
 	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
 				    IPA_RM_RESOURCE_A2_CONS);
-	if (res && res != -EEXIST) {
+	if (res && res != -EINPROGRESS) {
 		TETH_ERR("ipa_rm_add_dependency() failed\n");
 		goto fail_add_dependency_1;
 	}
 
 	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
 				    IPA_RM_RESOURCE_A2_CONS);
-	if (res && res != -EEXIST) {
+	if (res && res != -EINPROGRESS) {
 		TETH_ERR("ipa_rm_add_dependency() failed\n");
 		goto fail_add_dependency_2;
 	}
 
 	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_A2_PROD,
 				    IPA_RM_RESOURCE_USB_CONS);
-	if (res && res != -EEXIST) {
+	if (res && res != -EINPROGRESS) {
 		TETH_ERR("ipa_rm_add_dependency() failed\n");
 		goto fail_add_dependency_3;
 	}
 
+	/* Return 0 as EINPROGRESS is a valid return value at this point */
+	res = 0;
 	goto bail;
 
 fail_add_dependency_3:
@@ -929,6 +1145,57 @@
 }
 EXPORT_SYMBOL(teth_bridge_init);
 
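+/*
+ * Reset all per-connection bridge state; called at driver init and from
+ * teth_bridge_disconnect(). The pre-allocated deletion buffers (hdr_del,
+ * routing_del, filtering_del) are cleared, not freed.
+ */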
+static void initialize_context(void)
+{
+	TETH_DBG_FUNC_ENTRY();
+	/* Initialize context variables */
+	teth_ctx->usb_ipa_pipe_hdl = 0;
+	teth_ctx->ipa_a2_pipe_hdl = 0;
+	teth_ctx->a2_ipa_pipe_hdl = 0;
+	teth_ctx->ipa_usb_pipe_hdl = 0;
+	teth_ctx->is_connected = false;
+
+	/* The default link protocol is Ethernet */
+	teth_ctx->link_protocol = TETH_LINK_PROTOCOL_ETHERNET;
+
+	memset(&teth_ctx->mac_addresses, 0, sizeof(teth_ctx->mac_addresses));
+	teth_ctx->is_hw_bridge_complete = false;
+	memset(&teth_ctx->aggr_params, 0, sizeof(teth_ctx->aggr_params));
+	teth_ctx->aggr_params_known = false;
+	teth_ctx->tethering_mode = 0;
+	INIT_COMPLETION(teth_ctx->is_bridge_prod_up);
+	INIT_COMPLETION(teth_ctx->is_bridge_prod_down);
+	teth_ctx->comp_hw_bridge_in_progress = false;
+	memset(&teth_ctx->stats, 0, sizeof(teth_ctx->stats));
+	teth_ctx->a2_ipa_hdr_len = 0;
+	memset(teth_ctx->hdr_del,
+	       0,
+	       sizeof(struct ipa_ioc_del_hdr) + TETH_TOTAL_HDR_ENTRIES *
+	       sizeof(struct ipa_hdr_del));
+	memset(teth_ctx->routing_del[IPA_IP_v4],
+	       0,
+	       sizeof(struct ipa_ioc_del_rt_rule) +
+	       TETH_TOTAL_RT_ENTRIES_IP * sizeof(struct ipa_rt_rule_del));
+	teth_ctx->routing_del[IPA_IP_v4]->ip = IPA_IP_v4;
+	memset(teth_ctx->routing_del[IPA_IP_v6],
+	       0,
+	       sizeof(struct ipa_ioc_del_rt_rule) +
+	       TETH_TOTAL_RT_ENTRIES_IP * sizeof(struct ipa_rt_rule_del));
+	teth_ctx->routing_del[IPA_IP_v6]->ip = IPA_IP_v6;
+	memset(teth_ctx->filtering_del[IPA_IP_v4],
+	       0,
+	       sizeof(struct ipa_ioc_del_flt_rule) +
+	       TETH_TOTAL_FLT_ENTRIES_IP * sizeof(struct ipa_flt_rule_del));
+	teth_ctx->filtering_del[IPA_IP_v4]->ip = IPA_IP_v4;
+	memset(teth_ctx->filtering_del[IPA_IP_v6],
+	       0,
+	       sizeof(struct ipa_ioc_del_flt_rule) +
+	       TETH_TOTAL_FLT_ENTRIES_IP * sizeof(struct ipa_flt_rule_del));
+	teth_ctx->filtering_del[IPA_IP_v6]->ip = IPA_IP_v6;
+
+	TETH_DBG_FUNC_EXIT();
+}
+
 /**
 * teth_bridge_disconnect() - Disconnect tethering bridge module
 *
@@ -938,38 +1205,82 @@
 */
 int teth_bridge_disconnect(void)
 {
-	int res = -EPERM;
+	int res;
 
 	TETH_DBG_FUNC_ENTRY();
 	if (!teth_ctx->is_connected) {
 		TETH_ERR(
-		"Trying to disconnect an already disconnected bridge\n");
+			"Trying to disconnect an already disconnected bridge\n");
+		goto bail;
+	}
+
+	/* Request the BRIDGE_PROD resource */
+	res = teth_request_resource();
+	if (res) {
+		TETH_ERR("request_resource() failed.\n");
 		goto bail;
 	}
 
 	teth_ctx->is_connected = false;
 
-	res = ipa_rm_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
-	if (res == -EINPROGRESS)
-		wait_for_completion(&teth_ctx->is_bridge_prod_down);
+	/* Close the channel to A2 */
+	if (a2_mux_close_channel(A2_MUX_TETHERED_0))
+		TETH_ERR("a2_mux_close_channel() failed\n");
 
-	/* Initialize statistics */
-	memset(&teth_ctx->stats, 0, sizeof(teth_ctx->stats));
+	if (teth_ctx->is_hw_bridge_complete) {
+		/* Delete header entries */
+		if (ipa_del_hdr(teth_ctx->hdr_del))
+			TETH_ERR("ipa_del_hdr() failed\n");
+
+		/* Delete installed routing rules */
+		if (ipa_del_rt_rule(teth_ctx->routing_del[IPA_IP_v4]))
+			TETH_ERR("ipa_del_rt_rule() failed\n");
+		if (ipa_del_rt_rule(teth_ctx->routing_del[IPA_IP_v6]))
+			TETH_ERR("ipa_del_rt_rule() failed\n");
+
+		/* Delete installed filtering rules */
+		if (ipa_del_flt_rule(teth_ctx->filtering_del[IPA_IP_v4]))
+			TETH_ERR("ipa_del_flt_rule() failed\n");
+		if (ipa_del_flt_rule(teth_ctx->filtering_del[IPA_IP_v6]))
+			TETH_ERR("ipa_del_flt_rule() failed\n");
+
+		/*
+		 * Commit all the data to HW, including header, routing and
+		 * filtering blocks, IPv4 and IPv6
+		 */
+		if (ipa_commit_hdr())
+			TETH_ERR("Failed committing headers\n");
+	}
+
+	initialize_context();
+
+	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
 
 	/* Delete IPA Resource manager dependency graph */
 	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
 				       IPA_RM_RESOURCE_USB_CONS);
-	res |= ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
-					IPA_RM_RESOURCE_A2_CONS);
-	res |= ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
-					IPA_RM_RESOURCE_A2_CONS);
-	res |= ipa_rm_delete_dependency(IPA_RM_RESOURCE_A2_PROD,
-					IPA_RM_RESOURCE_USB_CONS);
-	if (res)
-		TETH_ERR("Failed deleting ipa_rm dependency.\n");
+	if ((res != 0) && (res != -EINPROGRESS))
+		TETH_ERR(
+			"Failed deleting ipa_rm dependency BRIDGE_PROD <-> USB_CONS\n");
+	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+				       IPA_RM_RESOURCE_A2_CONS);
+	if ((res != 0) && (res != -EINPROGRESS))
+		TETH_ERR(
+			"Failed deleting ipa_rm dependency BRIDGE_PROD <-> A2_CONS\n");
+	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+				       IPA_RM_RESOURCE_A2_CONS);
+	if ((res != 0) && (res != -EINPROGRESS))
+		TETH_ERR(
+			"Failed deleting ipa_rm dependency USB_PROD <-> A2_CONS\n");
+	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_A2_PROD,
+				       IPA_RM_RESOURCE_USB_CONS);
+	if ((res != 0) && (res != -EINPROGRESS))
+		TETH_ERR(
+			"Failed deleting ipa_rm dependency A2_PROD <-> USB_CONS\n");
 bail:
 	TETH_DBG_FUNC_EXIT();
-	return res;
+
+	return 0;
 }
 EXPORT_SYMBOL(teth_bridge_disconnect);
 
@@ -1003,12 +1314,10 @@
 	teth_ctx->usb_ipa_pipe_hdl = connect_params->usb_ipa_pipe_hdl;
 	teth_ctx->tethering_mode = connect_params->tethering_mode;
 
-	res = ipa_rm_request_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
-	if (res < 0) {
-		if (res == -EINPROGRESS)
-			wait_for_completion(&teth_ctx->is_bridge_prod_up);
-		else
-			goto bail;
+	res = teth_request_resource();
+	if (res) {
+		TETH_ERR("request_resource() failed.\n");
+		goto bail;
 	}
 
 	res = a2_mux_open_channel(A2_MUX_TETHERED_0,
@@ -1039,10 +1348,28 @@
 
 	if (teth_ctx->tethering_mode == TETH_TETHERING_MODE_MBIM)
 		teth_ctx->link_protocol = TETH_LINK_PROTOCOL_IP;
-	TETH_DBG_FUNC_EXIT();
+
+	if (teth_ctx->aggr_params_known) {
+		res = teth_set_aggregation();
+		if (res) {
+			TETH_ERR("Failed setting aggregation params\n");
+			goto bail;
+		}
+	}
+
+	/* In case of IP link protocol, complete HW bridge */
+	if ((teth_ctx->link_protocol == TETH_LINK_PROTOCOL_IP) &&
+	    (!teth_ctx->comp_hw_bridge_in_progress) &&
+	    (teth_ctx->aggr_params_known) &&
+	    (!teth_ctx->is_hw_bridge_complete)) {
+		INIT_WORK(&teth_ctx->comp_hw_bridge_work, complete_hw_bridge);
+		teth_ctx->comp_hw_bridge_in_progress = true;
+		queue_work(teth_ctx->teth_wq, &teth_ctx->comp_hw_bridge_work);
+	}
 bail:
-	if (res)
-		ipa_rm_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+	TETH_DBG_FUNC_EXIT();
+
 	return res;
 }
 EXPORT_SYMBOL(teth_bridge_connect);
@@ -1068,6 +1395,7 @@
 {
 	int res;
 
+	TETH_DBG_FUNC_ENTRY();
 	if (!aggr_params) {
 		TETH_ERR("Invalid parameter\n");
 		return -EINVAL;
@@ -1086,6 +1414,14 @@
 		aggr_params->ul.max_transfer_size_byte =
 			TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT;
 
+	/* MBIM aggregation is not supported with the Ethernet link protocol */
+	if (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET &&
+	    (aggr_params->dl.aggr_prot == TETH_AGGR_PROTOCOL_MBIM ||
+	     aggr_params->ul.aggr_prot == TETH_AGGR_PROTOCOL_MBIM)) {
+		TETH_ERR("Ethernet with MBIM is not supported.\n");
+		return -EINVAL;
+	}
+
 	memcpy(&teth_ctx->aggr_params,
 	       aggr_params,
 	       sizeof(struct teth_aggr_params));
@@ -1096,6 +1432,7 @@
 	res = teth_set_aggregation();
 	if (res)
 		TETH_ERR("Failed setting aggregation params\n");
+	TETH_DBG_FUNC_EXIT();
 
 	return res;
 }
@@ -1135,6 +1472,19 @@
 		}
 
 		res = teth_bridge_set_aggr_params(&aggr_params);
+		if (res)
+			break;
+
+		/* In case of IP link protocol, complete HW bridge */
+		if ((teth_ctx->link_protocol == TETH_LINK_PROTOCOL_IP) &&
+		    (!teth_ctx->comp_hw_bridge_in_progress) &&
+		    (!teth_ctx->is_hw_bridge_complete)) {
+			INIT_WORK(&teth_ctx->comp_hw_bridge_work,
+				  complete_hw_bridge);
+			teth_ctx->comp_hw_bridge_in_progress = true;
+			queue_work(teth_ctx->teth_wq,
+				   &teth_ctx->comp_hw_bridge_work);
+		}
 		break;
 
 	case TETH_BRIDGE_IOC_GET_AGGR_PARAMS:
@@ -1190,7 +1540,7 @@
 	return res;
 }
 
-static void set_aggr_capabilities(void)
+static int set_aggr_capabilities(void)
 {
 	u16 NUM_PROTOCOLS = 2;
 
@@ -1198,9 +1548,9 @@
 				      NUM_PROTOCOLS *
 				      sizeof(struct teth_aggr_params_link),
 				      GFP_KERNEL);
-	if (teth_ctx->aggr_caps == NULL) {
+	if (!teth_ctx->aggr_caps) {
 		TETH_ERR("Memory alloc failed for aggregation capabilities.\n");
-		return;
+		return -ENOMEM;
 	}
 
 	teth_ctx->aggr_caps->num_protocols = NUM_PROTOCOLS;
@@ -1210,8 +1560,15 @@
 
 	teth_ctx->aggr_caps->prot_caps[1].aggr_prot = TETH_AGGR_PROTOCOL_TLP;
 	set_aggr_default_params(&teth_ctx->aggr_caps->prot_caps[1]);
+
+	return 0;
 }
 
+/**
+* teth_bridge_get_client_handles() - Get USB <--> IPA pipe handles
+* @producer_handle:	USB --> IPA pipe handle
+* @consumer_handle:	IPA --> USB pipe handle
+*/
 void teth_bridge_get_client_handles(u32 *producer_handle,
 		u32 *consumer_handle)
 {
@@ -1379,6 +1736,11 @@
 			    TETH_MAX_MSG_LEN - nbytes,
 			   "A2 to USB SW Tx packets: %lld\n",
 			    teth_ctx->stats.a2_to_usb_num_sw_tx_packets);
+	nbytes += scnprintf(
+		&dbg_buff[nbytes],
+		TETH_MAX_MSG_LEN - nbytes,
+		"SW Tx packets sent during resource wakeup: %lld\n",
+		teth_ctx->stats.num_sw_tx_packets_during_resource_wakeup);
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
 }
 
@@ -1505,7 +1867,59 @@
 		return -ENOMEM;
 	}
 
-	set_aggr_capabilities();
+	res = set_aggr_capabilities();
+	if (res) {
+		TETH_ERR("kzalloc err.\n");
+		goto fail_alloc_aggr_caps;
+	}
+
+	res = -ENOMEM;
+	teth_ctx->hdr_del = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
+				    TETH_TOTAL_HDR_ENTRIES *
+				    sizeof(struct ipa_hdr_del),
+				    GFP_KERNEL);
+	if (!teth_ctx->hdr_del) {
+		TETH_ERR("kzalloc err.\n");
+		goto fail_alloc_hdr_del;
+	}
+
+	teth_ctx->routing_del[IPA_IP_v4] =
+		kzalloc(sizeof(struct ipa_ioc_del_rt_rule) +
+			TETH_TOTAL_RT_ENTRIES_IP *
+			sizeof(struct ipa_rt_rule_del),
+			GFP_KERNEL);
+	if (!teth_ctx->routing_del[IPA_IP_v4]) {
+		TETH_ERR("kzalloc err.\n");
+		goto fail_alloc_routing_del_ipv4;
+	}
+	teth_ctx->routing_del[IPA_IP_v6] =
+		kzalloc(sizeof(struct ipa_ioc_del_rt_rule) +
+			TETH_TOTAL_RT_ENTRIES_IP *
+			sizeof(struct ipa_rt_rule_del),
+			GFP_KERNEL);
+	if (!teth_ctx->routing_del[IPA_IP_v6]) {
+		TETH_ERR("kzalloc err.\n");
+		goto fail_alloc_routing_del_ipv6;
+	}
+
+	teth_ctx->filtering_del[IPA_IP_v4] =
+		kzalloc(sizeof(struct ipa_ioc_del_flt_rule) +
+			TETH_TOTAL_FLT_ENTRIES_IP *
+			sizeof(struct ipa_flt_rule_del),
+			GFP_KERNEL);
+	if (!teth_ctx->filtering_del[IPA_IP_v4]) {
+		TETH_ERR("kzalloc err.\n");
+		goto fail_alloc_filtering_del_ipv4;
+	}
+	teth_ctx->filtering_del[IPA_IP_v6] =
+		kzalloc(sizeof(struct ipa_ioc_del_flt_rule) +
+			TETH_TOTAL_FLT_ENTRIES_IP *
+			sizeof(struct ipa_flt_rule_del),
+			GFP_KERNEL);
+	if (!teth_ctx->filtering_del[IPA_IP_v6]) {
+		TETH_ERR("kzalloc err.\n");
+		goto fail_alloc_filtering_del_ipv6;
+	}
 
 	teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME);
 
@@ -1536,8 +1950,6 @@
 		goto fail_cdev_add;
 	}
 
-	teth_ctx->comp_hw_bridge_in_progress = false;
-
 	teth_debugfs_init();
 
 	/* Create BRIDGE_PROD entity in IPA Resource Manager */
@@ -1552,9 +1964,21 @@
 	init_completion(&teth_ctx->is_bridge_prod_up);
 	init_completion(&teth_ctx->is_bridge_prod_down);
 
-	/* The default link protocol is Ethernet */
-	teth_ctx->link_protocol = TETH_LINK_PROTOCOL_ETHERNET;
+	res = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_BRIDGE_PROD,
+					   TETH_INACTIVITY_TIME_MSEC);
+	if (res) {
+		TETH_ERR("ipa_rm_inactivity_timer_init() failed, res=%d\n",
+			 res);
+		goto fail_cdev_add;
+	}
 
+	teth_ctx->teth_wq = create_workqueue(TETH_WORKQUEUE_NAME);
+	if (!teth_ctx->teth_wq) {
+		TETH_ERR("workqueue creation failed\n");
+		goto fail_cdev_add;
+	}
+
+	initialize_context();
 	TETH_DBG("Tethering bridge driver init OK\n");
 
 	return 0;
@@ -1563,7 +1987,18 @@
 fail_device_create:
 	unregister_chrdev_region(teth_ctx->dev_num, 1);
 fail_alloc_chrdev_region:
+	kfree(teth_ctx->filtering_del[IPA_IP_v6]);
+fail_alloc_filtering_del_ipv6:
+	kfree(teth_ctx->filtering_del[IPA_IP_v4]);
+fail_alloc_filtering_del_ipv4:
+	kfree(teth_ctx->routing_del[IPA_IP_v6]);
+fail_alloc_routing_del_ipv6:
+	kfree(teth_ctx->routing_del[IPA_IP_v4]);
+fail_alloc_routing_del_ipv4:
+	kfree(teth_ctx->hdr_del);
+fail_alloc_hdr_del:
 	kfree(teth_ctx->aggr_caps);
+fail_alloc_aggr_caps:
 	kfree(teth_ctx);
 	teth_ctx = NULL;
 
diff --git a/drivers/power/battery_current_limit.c b/drivers/power/battery_current_limit.c
index ecda153..69fa4a8 100644
--- a/drivers/power/battery_current_limit.c
+++ b/drivers/power/battery_current_limit.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -120,8 +120,10 @@
 
 	if (psy == NULL) {
 		psy = power_supply_get_by_name("battery");
-		if (psy == NULL)
+		if (psy == NULL) {
+			pr_err("failed to get ps battery\n");
 			return;
+		}
 	}
 
 	if (psy->get_property(psy, POWER_SUPPLY_PROP_CURRENT_NOW, &ret))
@@ -143,6 +145,7 @@
 	gbcl->bcl_imax_ma = imax_ma;
 	gbcl->bcl_vbat_mv = vbatt_mv;
 
+	pr_debug("ibatt %d, imax %d, vbatt %d\n", ibatt_ma, imax_ma, vbatt_mv);
 	if (gbcl->bcl_threshold_mode[BCL_IBAT_IMAX_THRESHOLD_TYPE_HIGH]
 		== BCL_IBAT_IMAX_THRESHOLD_ENABLED) {
 		imax_high_threshold =
@@ -179,8 +182,7 @@
 		bcl_calculate_imax_trigger();
 		/* restart the delay work for caculating imax */
 		schedule_delayed_work(&bcl->bcl_imax_work,
-			round_jiffies_relative(msecs_to_jiffies
-				(bcl->bcl_poll_interval_msec)));
+			msecs_to_jiffies(bcl->bcl_poll_interval_msec));
 	}
 }
 
diff --git a/drivers/tty/smux_ctl.c b/drivers/tty/smux_ctl.c
index 2e091cc..1b3a7abe 100644
--- a/drivers/tty/smux_ctl.c
+++ b/drivers/tty/smux_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -940,6 +940,7 @@
 static int smux_ctl_remove(struct platform_device *pdev)
 {
 	int i;
+	int ret;
 
 	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
 
@@ -950,6 +951,13 @@
 		devp->abort_wait = 1;
 		wake_up(&devp->write_wait_queue);
 		wake_up(&devp->read_wait_queue);
+
+		if (atomic_read(&devp->ref_count)) {
+			ret = msm_smux_close(devp->id);
+			if (ret)
+				pr_err("%s: unable to close ch %d, ret %d\n",
+						__func__, devp->id, ret);
+		}
 		mutex_unlock(&devp->dev_lock);
 
 		/* Empty RX queue */
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index fc8f4b3..fab5219 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -488,7 +488,7 @@
 	if (!dev->dma_mask)
 		dev->dma_mask = &dwc3_dma_mask;
 	if (!dev->coherent_dma_mask)
-		dev->coherent_dma_mask = DMA_BIT_MASK(32);
+		dev->coherent_dma_mask = DMA_BIT_MASK(64);
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 93504eb..df95646 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -693,9 +693,10 @@
 	list_for_each(ptr, &dep->request_list) {
 		req = list_entry(ptr, struct dwc3_request, list);
 
-		seq_printf(s, "req:0x%p len: %d sts: %d dma:0x%x num_sgs: %d\n",
+		seq_printf(s,
+			"req:0x%p len: %d sts: %d dma:0x%pa num_sgs: %d\n",
 			req, req->request.length, req->request.status,
-			req->request.dma, req->request.num_sgs);
+			&req->request.dma, req->request.num_sgs);
 	}
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -731,9 +732,10 @@
 	list_for_each(ptr, &dep->req_queued) {
 		req = list_entry(ptr, struct dwc3_request, list);
 
-		seq_printf(s, "req:0x%p len:%d sts:%d dma:%x nsg:%d trb:0x%p\n",
+		seq_printf(s,
+			"req:0x%p len:%d sts:%d dma:%pa nsg:%d trb:0x%p\n",
 			req, req->request.length, req->request.status,
-			req->request.dma, req->request.num_sgs, req->trb);
+			&req->request.dma, req->request.num_sgs, req->trb);
 	}
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
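
Note: the two debugfs hunks above switch the dma field from a plain %x to the kernel's %pa specifier and pass the address of req->request.dma. A minimal sketch of the same convention, with an illustrative helper name that is not part of the patch: %pa takes a pointer to a dma_addr_t/phys_addr_t, so the format string stays correct whether the type is 32 or 64 bits wide (relevant now that core.c permits a 64-bit coherent DMA mask).

#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative helper only: %pa expects a pointer to the dma_addr_t, so the
 * same format works for both 32-bit and 64-bit DMA addresses. */
static void example_log_dma(const char *tag, dma_addr_t dma)
{
	pr_debug("%s: dma address %pa\n", tag, &dma);
}
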
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 6fca910..7101d56 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -445,7 +445,8 @@
 	int n = hw_ep_bit(num, dir);
 	struct ci13xxx_ep *mEp = &_udc->ci13xxx_ep[n];
 
-	if (_udc->skip_flush || list_empty(&mEp->qh.queue))
+	/* Flush ep0 even when queue is empty */
+	if (_udc->skip_flush || (num && list_empty(&mEp->qh.queue)))
 		return 0;
 
 	start = ktime_get();
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
index 3b1843b..3355e19 100644
--- a/drivers/usb/gadget/f_diag.c
+++ b/drivers/usb/gadget/f_diag.c
@@ -139,9 +139,6 @@
  * @out_desc: USB OUT endpoint descriptor struct
  * @read_pool: List of requests used for Rx (OUT ep)
  * @write_pool: List of requests used for Tx (IN ep)
- * @config_work: Work item schedule after interface is configured to notify
- *               CONNECT event to diag char driver and updating product id
- *               and serial number to MODEM/IMEM.
  * @lock: Spinlock to protect read_pool, write_pool lists
  * @cdev: USB composite device struct
  * @ch: USB diag channel
@@ -153,7 +150,6 @@
 	struct usb_ep *in;
 	struct list_head read_pool;
 	struct list_head write_pool;
-	struct work_struct config_work;
 	spinlock_t lock;
 	unsigned configured;
 	struct usb_composite_dev *cdev;
@@ -176,21 +172,20 @@
 	return container_of(f, struct diag_context, function);
 }
 
-static void usb_config_work_func(struct work_struct *work)
+static void diag_update_pid_and_serial_num(struct diag_context *ctxt)
 {
-	struct diag_context *ctxt = container_of(work,
-			struct diag_context, config_work);
 	struct usb_composite_dev *cdev = ctxt->cdev;
 	struct usb_gadget_strings *table;
 	struct usb_string *s;
 
-	if (!ctxt->ch)
+	if (!ctxt->update_pid_and_serial_num)
 		return;
 
-	if (ctxt->ch->notify)
-		ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_CONNECT, NULL);
-
-	if (!ctxt->update_pid_and_serial_num)
+	/*
+	 * Update the pid and serial number passed to dload only if the
+	 * diag interface is the zeroth interface.
+	 */
+	if (intf_desc.bInterfaceNumber)
 		return;
 
 	/* pass on product id and serial number to dload */
@@ -612,7 +607,6 @@
 		usb_ep_disable(dev->in);
 		return rc;
 	}
-	schedule_work(&dev->config_work);
 
 	dev->dpkts_tolaptop = 0;
 	dev->dpkts_tomodem = 0;
@@ -622,6 +616,9 @@
 	dev->configured = 1;
 	spin_unlock_irqrestore(&dev->lock, flags);
 
+	if (dev->ch->notify)
+		dev->ch->notify(dev->ch->priv, USB_DIAG_CONNECT, NULL);
+
 	return rc;
 }
 
@@ -699,6 +696,7 @@
 		if (!f->ss_descriptors)
 			goto fail;
 	}
+	diag_update_pid_and_serial_num(ctxt);
 	return 0;
 fail:
 	if (f->ss_descriptors)
@@ -761,7 +759,6 @@
 	spin_lock_init(&dev->lock);
 	INIT_LIST_HEAD(&dev->read_pool);
 	INIT_LIST_HEAD(&dev->write_pool);
-	INIT_WORK(&dev->config_work, usb_config_work_func);
 
 	ret = usb_add_function(c, &dev->function);
 	if (ret) {
diff --git a/drivers/usb/gadget/f_qc_ecm.c b/drivers/usb/gadget/f_qc_ecm.c
index 51f0e50..46a6bba 100644
--- a/drivers/usb/gadget/f_qc_ecm.c
+++ b/drivers/usb/gadget/f_qc_ecm.c
@@ -849,6 +849,10 @@
 	usb_ep_free_request(ecm->notify, ecm->notify_req);
 
 	ecm_qc_string_defs[1].s = NULL;
+
+	if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA)
+		ecm_ipa_cleanup(ipa_params.ipa_priv);
+
 	kfree(ecm);
 }
 
diff --git a/drivers/usb/gadget/u_rmnet_ctrl_qti.c b/drivers/usb/gadget/u_rmnet_ctrl_qti.c
index e92978f..182cd40 100644
--- a/drivers/usb/gadget/u_rmnet_ctrl_qti.c
+++ b/drivers/usb/gadget/u_rmnet_ctrl_qti.c
@@ -259,20 +259,6 @@
 		return -EBUSY;
 	}
 
-	/* block until online */
-	while (!(atomic_read(&port->connected))) {
-		pr_debug("Not connected. Wait.\n");
-		ret = wait_event_interruptible(port->read_wq,
-			atomic_read(&port->connected));
-		if (ret < 0) {
-			rmnet_ctrl_unlock(&port->read_excl);
-			if (ret == -ERESTARTSYS)
-				return -ERESTARTSYS;
-			else
-				return -EINTR;
-		}
-	}
-
 	/* block until a new packet is available */
 	do {
 		spin_lock_irqsave(&port->lock, flags);
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 4a085aa..7674d88 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -78,6 +78,7 @@
 	struct clk		*alt_core_clk;
 	struct clk		*phy_clk;
 	struct clk		*cal_clk;
+	struct clk		*inactivity_clk;
 	struct regulator	*hsic_vddcx;
 	struct regulator	*hsic_gdsc;
 	atomic_t		async_int;
@@ -575,6 +576,8 @@
 		clk_disable_unprepare(mehci->phy_clk);
 		clk_disable_unprepare(mehci->cal_clk);
 		clk_disable_unprepare(mehci->ahb_clk);
+		if (!IS_ERR(mehci->inactivity_clk))
+			clk_disable_unprepare(mehci->inactivity_clk);
 
 		ret = clk_reset(mehci->core_clk, CLK_RESET_ASSERT);
 		if (ret) {
@@ -596,6 +599,8 @@
 		clk_prepare_enable(mehci->phy_clk);
 		clk_prepare_enable(mehci->cal_clk);
 		clk_prepare_enable(mehci->ahb_clk);
+		if (!IS_ERR(mehci->inactivity_clk))
+			clk_prepare_enable(mehci->inactivity_clk);
 	}
 }
 
@@ -794,6 +799,8 @@
 	clk_disable_unprepare(mehci->phy_clk);
 	clk_disable_unprepare(mehci->cal_clk);
 	clk_disable_unprepare(mehci->ahb_clk);
+	if (!IS_ERR(mehci->inactivity_clk))
+		clk_disable_unprepare(mehci->inactivity_clk);
 
 	none_vol = vdd_val[mehci->vdd_type][VDD_NONE];
 	max_vol = vdd_val[mehci->vdd_type][VDD_MAX];
@@ -876,6 +883,8 @@
 	clk_prepare_enable(mehci->phy_clk);
 	clk_prepare_enable(mehci->cal_clk);
 	clk_prepare_enable(mehci->ahb_clk);
+	if (!IS_ERR(mehci->inactivity_clk))
+		clk_prepare_enable(mehci->inactivity_clk);
 
 	temp = readl_relaxed(USB_USBCMD);
 	temp &= ~ASYNC_INTR_CTRL;
@@ -1507,10 +1516,21 @@
 		goto put_cal_clk;
 	}
 
+	/*
+	 * The inactivity_clk is required for the HSIC BAM inactivity timer.
+	 * This clock is optional and is defined in the clock lookup only
+	 * for targets that use the inactivity timer feature.
+	 */
+	mehci->inactivity_clk = clk_get(mehci->dev, "inactivity_clk");
+	if (IS_ERR(mehci->inactivity_clk))
+		dev_dbg(mehci->dev, "failed to get inactivity_clk\n");
+
 	clk_prepare_enable(mehci->core_clk);
 	clk_prepare_enable(mehci->phy_clk);
 	clk_prepare_enable(mehci->cal_clk);
 	clk_prepare_enable(mehci->ahb_clk);
+	if (!IS_ERR(mehci->inactivity_clk))
+		clk_prepare_enable(mehci->inactivity_clk);
 
 	return 0;
 
@@ -1520,7 +1540,11 @@
 		clk_disable_unprepare(mehci->phy_clk);
 		clk_disable_unprepare(mehci->cal_clk);
 		clk_disable_unprepare(mehci->ahb_clk);
+		if (!IS_ERR(mehci->inactivity_clk))
+			clk_disable_unprepare(mehci->inactivity_clk);
 	}
+	if (!IS_ERR(mehci->inactivity_clk))
+		clk_put(mehci->inactivity_clk);
 	clk_put(mehci->ahb_clk);
 put_cal_clk:
 	clk_put(mehci->cal_clk);
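
Note: the inactivity_clk handling above follows the common optional-clock pattern: clk_get() is allowed to fail on targets whose clock lookup does not list the clock, and every later enable/disable/put is guarded with IS_ERR(). A minimal sketch of that pattern, using illustrative names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch only: the clock is optional, so a clk_get() failure is logged at
 * debug level and the error value is kept as a "not present" marker. */
static struct clk *example_get_optional_clk(struct device *dev)
{
	struct clk *clk = clk_get(dev, "inactivity_clk");

	if (IS_ERR(clk))
		dev_dbg(dev, "optional inactivity_clk not available\n");
	return clk;
}

static void example_enable_optional_clk(struct clk *clk)
{
	if (!IS_ERR(clk))
		clk_prepare_enable(clk);
}
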
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index 99b7604..929e5f8 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -434,11 +434,32 @@
 	mutex_unlock(&mdp3_session->lock);
 }
 
+static int mdp3_get_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata)
+{
+	int ret = 0;
+	switch (metadata->op) {
+	case metadata_op_frame_rate:
+		metadata->data.panel_frame_rate =
+			mfd->panel_info->mipi.frame_rate;
+		break;
+	case metadata_op_get_caps:
+		metadata->data.caps.mdp_rev = 304;
+		break;
+	default:
+		pr_warn("Unsupported request to MDP META IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
 static int mdp3_ctrl_ioctl_handler(struct msm_fb_data_type *mfd,
 					u32 cmd, void __user *argp)
 {
 	int rc = -EINVAL;
 	struct mdp3_session_data *mdp3_session;
+	struct msmfb_metadata metadata;
 	int val;
 
 	pr_debug("mdp3_ctrl_ioctl_handler\n");
@@ -464,6 +485,14 @@
 			rc = -EFAULT;
 		}
 		break;
+	case MSMFB_METADATA_GET:
+		rc = copy_from_user(&metadata, argp, sizeof(metadata));
+		if (rc)
+			return rc;
+		rc = mdp3_get_metadata(mfd, &metadata);
+		if (!rc)
+			rc = copy_to_user(argp, &metadata, sizeof(metadata));
+		break;
 	default:
 		break;
 	}
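
Note: the MSMFB_METADATA_GET handler added above copies a struct msmfb_metadata in, fills it according to metadata->op, and copies it back. A hedged userspace sketch of querying the MDP revision through this ioctl, assuming the usual msm_mdp.h UAPI definitions and an Android-style framebuffer node path:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp.h>

int main(void)
{
	struct msmfb_metadata meta;
	int fd = open("/dev/graphics/fb0", O_RDWR);	/* example node */

	if (fd < 0)
		return 1;

	memset(&meta, 0, sizeof(meta));
	meta.op = metadata_op_get_caps;
	if (ioctl(fd, MSMFB_METADATA_GET, &meta) == 0)
		printf("MDP revision: %u\n", meta.data.caps.mdp_rev);

	close(fd);
	return 0;
}
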
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index c3957c5..eb4eee2 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -1143,6 +1143,7 @@
 	struct mdp_display_commit disp_commit;
 	memset(&disp_commit, 0, sizeof(disp_commit));
 	disp_commit.wait_for_finish = true;
+	memcpy(&disp_commit.var, var, sizeof(struct fb_var_screeninfo));
 	return mdss_fb_pan_display_ex(info, &disp_commit);
 }
 
diff --git a/fs/file.c b/fs/file.c
index ba3f605..2f989c3 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -476,6 +476,7 @@
 	spin_unlock(&files->file_lock);
 	return error;
 }
+EXPORT_SYMBOL(alloc_fd);
 
 int get_unused_fd(void)
 {
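
Note: alloc_fd() becomes callable from loadable modules only because of the EXPORT_SYMBOL() added above; the same reasoning applies to the find_task_by_vpid (GPL-only) and io_schedule_timeout exports later in this patch. A generic sketch of the mechanism, with an illustrative helper rather than the real symbols:

#include <linux/kernel.h>
#include <linux/module.h>

/* Illustrative only: without the EXPORT_SYMBOL() line this helper is visible
 * to built-in code, but a loadable module calling it would fail to link at
 * insmod time with an "unknown symbol" error. */
int example_shared_helper(int val)
{
	return val * 2;
}
EXPORT_SYMBOL(example_shared_helper);
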
diff --git a/include/linux/bluetooth-power.h b/include/linux/bluetooth-power.h
new file mode 100644
index 0000000..ba53a40
--- /dev/null
+++ b/include/linux/bluetooth-power.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_BLUETOOTH_POWER_H
+#define __LINUX_BLUETOOTH_POWER_H
+
+/*
+ * voltage regulator information required for configuring the
+ * bluetooth chipset
+ */
+struct bt_power_vreg_data {
+	/* voltage regulator handle */
+	struct regulator *reg;
+	/* regulator name */
+	const char *name;
+	/* voltage levels to be set */
+	unsigned int low_vol_level;
+	unsigned int high_vol_level;
+	/*
+	 * is set voltage supported for this regulator?
+	 * false => set voltage is not supported
+	 * true  => set voltage is supported
+	 *
+	 * Some regulators (like gpio-regulators, LVS (low voltage switches)
+	 * PMIC regulators) don't have the capability to call
+	 * regulator_set_voltage or regulator_set_optimum_mode.
+	 * Use this variable to indicate whether it is such a regulator.
+	 */
+	bool set_voltage_sup;
+	/* is this regulator enabled? */
+	bool is_enabled;
+};
+
+/*
+ * Platform data for the bluetooth power driver.
+ */
+struct bluetooth_power_platform_data {
+	/* Bluetooth reset gpio */
+	int bt_gpio_sys_rst;
+	/* VDDIO voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_io;
+	/* VDD_PA voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_pa;
+	/* VDD_LDOIN voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_ldo;
+	/* Optional: chip power down gpio-regulator.
+	 * Chip power down data is required when the bluetooth module
+	 * and other modules like wifi co-exist in a single chip and
+	 * share a common gpio to bring the chip out of reset.
+	 */
+	struct bt_power_vreg_data *bt_chip_pwd;
+	/* Optional: Bluetooth power setup function */
+	int (*bt_power_setup) (int);
+};
+
+#endif /* __LINUX_BLUETOOTH_POWER_H */
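
Note: a hedged example of how a board file might populate the platform data declared above; the GPIO number and the 1.8 V I/O rail levels are placeholders, not values taken from any real board.

#include <linux/bluetooth-power.h>

static struct bt_power_vreg_data example_bt_vdd_io = {
	.name            = "bt_vdd_io",
	.low_vol_level   = 1800000,	/* uV */
	.high_vol_level  = 1800000,	/* uV */
	.set_voltage_sup = true,	/* regulator supports set_voltage */
};

static struct bluetooth_power_platform_data example_bt_power_pdata = {
	.bt_gpio_sys_rst = 34,		/* placeholder reset GPIO */
	.bt_vdd_io       = &example_bt_vdd_io,
};
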
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index c2b35c8..c219725 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -368,6 +368,9 @@
 	/* DTS value associated with the buffer */
 	__u64 dts;
 
+	/* STC value associated with the buffer in 27MHz */
+	__u64 stc;
+
 	/*
 	 * Number of TS packets with Transport Error Indicator (TEI) set
 	 * in the TS packet header since last reported event
diff --git a/include/linux/qpnp-misc.h b/include/linux/qpnp-misc.h
index b241e5d..ee614a4 100644
--- a/include/linux/qpnp-misc.h
+++ b/include/linux/qpnp-misc.h
@@ -30,7 +30,7 @@
 
 int qpnp_misc_irqs_available(struct device *consumer_dev);
 #else
-static int qpnp_misc_irq_available(struct device *consumer_dev)
+static int qpnp_misc_irqs_available(struct device *consumer_dev)
 {
 	return 0;
 }
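
Note: the one-line fix above renames the disabled-config stub so it matches the declaration used when the option is enabled; otherwise callers built without the option would hit an implicit-declaration or link error. The general pattern, sketched with illustrative names:

#include <linux/device.h>

#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_available(struct device *consumer_dev);
#else
/* Stub keeps the same name and signature so callers compile unchanged
 * when the feature is not built in. */
static inline int example_feature_available(struct device *consumer_dev)
{
	return 0;
}
#endif
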
diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h
index bce6af3..b71aa30 100644
--- a/include/media/msm_cam_sensor.h
+++ b/include/media/msm_cam_sensor.h
@@ -207,6 +207,7 @@
 	uint8_t settle_cnt;
 	uint16_t lane_mask;
 	uint8_t combo_mode;
+	uint8_t csid_core;
 };
 
 struct msm_camera_csi2_params {
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index afd5a42..b4b3bfc 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -1396,6 +1396,7 @@
 	uint8_t settle_cnt;
 	uint16_t lane_mask;
 	uint8_t combo_mode;
+	uint8_t csid_core;
 };
 
 struct msm_camera_csi2_params {
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 6fb1a65..bf6b23b 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -168,12 +168,6 @@
 	enum msm_vfe_frame_skip_pattern skip_pattern;
 };
 
-enum msm_vfe_stats_pipeline_policy {
-	STATS_COMP_ALL,
-	STATS_COMP_NONE,
-	MAX_STATS_POLICY,
-};
-
 enum msm_isp_stats_type {
 	MSM_ISP_STATS_AEC,   /* legacy based AEC */
 	MSM_ISP_STATS_AF,    /* legacy based AF */
@@ -193,11 +187,11 @@
 	uint32_t session_id;
 	uint32_t stream_id;
 	enum msm_isp_stats_type stats_type;
+	uint32_t composite_flag;
 	uint32_t framedrop_pattern;
 	uint32_t irq_subsample_pattern;
 	uint32_t buffer_offset;
 	uint32_t stream_handle;
-	uint8_t comp_flag;
 };
 
 struct msm_vfe_stats_stream_release_cmd {
@@ -209,12 +203,6 @@
 	uint8_t enable;
 };
 
-struct msm_vfe_stats_comp_policy_cfg {
-	enum msm_vfe_stats_pipeline_policy stats_pipeline_policy;
-	uint32_t comp_framedrop_pattern;
-	uint32_t comp_irq_subsample_pattern;
-};
-
 enum msm_vfe_reg_cfg_type {
 	VFE_WRITE,
 	VFE_WRITE_MB,
@@ -319,7 +307,7 @@
 #define ISP_EVENT_EOF             (ISP_EVENT_BASE + ISP_EOF)
 #define ISP_EVENT_BUF_DIVERT      (ISP_BUF_EVENT_BASE)
 #define ISP_EVENT_STATS_NOTIFY    (ISP_STATS_EVENT_BASE)
-
+#define ISP_EVENT_COMP_STATS_NOTIFY (ISP_EVENT_STATS_NOTIFY + MSM_ISP_STATS_MAX)
 /* The msm_v4l2_event_data structure should match the
  * v4l2_event.u.data field.
  * should not exceed 64 bytes */
@@ -412,10 +400,6 @@
 	_IOWR('V', BASE_VIDIOC_PRIVATE+11, \
 	struct msm_vfe_stats_stream_release_cmd)
 
-#define VIDIOC_MSM_ISP_CFG_STATS_COMP_POLICY \
-	_IOWR('V', BASE_VIDIOC_PRIVATE+12,   \
-	      struct msm_vfe_stats_comp_policy_cfg)
-
 #define VIDIOC_MSM_ISP_UPDATE_STREAM \
 	_IOWR('V', BASE_VIDIOC_PRIVATE+13, struct msm_vfe_axi_stream_update_cmd)
 
diff --git a/kernel/pid.c b/kernel/pid.c
index 9f08dfa..7acf590 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -430,6 +430,7 @@
 {
 	return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
 }
+EXPORT_SYMBOL_GPL(find_task_by_vpid);
 
 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
 {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a2bad88..862e172 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4757,6 +4757,7 @@
 	delayacct_blkio_end();
 	return ret;
 }
+EXPORT_SYMBOL(io_schedule_timeout);
 
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7f4a46c..7e31770 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2126,11 +2126,11 @@
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct cfs_rq *cfs_rq = &rq->cfs;
 
 	WARN_ON(task_rq(p) != rq);
 
-	if (cfs_rq->nr_running > 1) {
+	if (cfs_rq->h_nr_running > 1) {
 		u64 slice = sched_slice(cfs_rq, se);
 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
 		s64 delta = slice - ran;
@@ -2154,8 +2154,7 @@
 
 /*
  * called from enqueue/dequeue and updates the hrtick when the
- * current task is from our class and nr_running is low enough
- * to matter.
+ * current task is from our class.
  */
 static void hrtick_update(struct rq *rq)
 {
@@ -2164,8 +2163,7 @@
 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
 		return;
 
-	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
-		hrtick_start_fair(rq, curr);
+	hrtick_start_fair(rq, curr);
 }
 #else /* !CONFIG_SCHED_HRTICK */
 static inline void
diff --git a/scripts/build-all.py b/scripts/build-all.py
index 3cecbe2..4789af7 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -88,7 +88,6 @@
         r'[fm]sm[0-9]*_defconfig',
         r'apq*_defconfig',
         r'qsd*_defconfig',
-        r'msmzinc*_defconfig',
         )
     for p in arch_pats:
         for n in glob.glob('arch/arm/configs/' + p):
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index a7069a6..67674f3 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -4371,6 +4371,12 @@
 	return ret;
 }
 
+static void tapan_cleanup_irqs(struct tapan_priv *tapan)
+{
+	struct snd_soc_codec *codec = tapan->codec;
+	wcd9xxx_free_irq(codec->control_data, WCD9XXX_IRQ_SLIMBUS, tapan);
+}
+
 int tapan_hs_detect(struct snd_soc_codec *codec,
 		    struct wcd9xxx_mbhc_config *mbhc_cfg)
 {
@@ -4512,6 +4518,10 @@
 	mutex_unlock(&dapm->codec->mutex);
 
 	codec->ignore_pmdown_time = 1;
+
+	if (ret)
+		tapan_cleanup_irqs(tapan);
+
 	return ret;
 
 err_pdata:
@@ -4532,6 +4542,9 @@
 		wcd9xxx_resmgr_put_bandgap(&tapan->resmgr,
 					   WCD9XXX_BANDGAP_AUDIO_MODE);
 	WCD9XXX_BCL_UNLOCK(&tapan->resmgr);
+
+	tapan_cleanup_irqs(tapan);
+
 	/* cleanup MBHC */
 	wcd9xxx_mbhc_deinit(&tapan->mbhc);
 	/* cleanup resmgr */
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 7ba43c0..43a1042 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -5841,28 +5841,39 @@
 				taiko_codec_reg_init_val[i].val);
 }
 
-static int taiko_setup_irqs(struct taiko_priv *taiko)
+static void taiko_slim_interface_init_reg(struct snd_soc_codec *codec)
 {
 	int i;
-	int ret = 0;
-	struct snd_soc_codec *codec = taiko->codec;
-
-	ret = wcd9xxx_request_irq(codec->control_data, WCD9XXX_IRQ_SLIMBUS,
-				  taiko_slimbus_irq, "SLIMBUS Slave", taiko);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-		       WCD9XXX_IRQ_SLIMBUS);
-		goto exit;
-	}
 
 	for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++)
 		wcd9xxx_interface_reg_write(codec->control_data,
 					    TAIKO_SLIM_PGD_PORT_INT_EN0 + i,
 					    0xFF);
-exit:
+}
+
+static int taiko_setup_irqs(struct taiko_priv *taiko)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = taiko->codec;
+
+	ret = wcd9xxx_request_irq(codec->control_data, WCD9XXX_IRQ_SLIMBUS,
+				  taiko_slimbus_irq, "SLIMBUS Slave", taiko);
+	if (ret)
+		pr_err("%s: Failed to request irq %d\n", __func__,
+		       WCD9XXX_IRQ_SLIMBUS);
+	else
+		taiko_slim_interface_init_reg(codec);
+
 	return ret;
 }
 
+static void taiko_cleanup_irqs(struct taiko_priv *taiko)
+{
+	struct snd_soc_codec *codec = taiko->codec;
+
+	wcd9xxx_free_irq(codec->control_data, WCD9XXX_IRQ_SLIMBUS, taiko);
+}
+
 int taiko_hs_detect(struct snd_soc_codec *codec,
 		    struct wcd9xxx_mbhc_config *mbhc_cfg)
 {
@@ -5921,6 +5932,7 @@
 		pr_err("%s: bad pdata\n", __func__);
 
 	taiko_init_slim_slave_cfg(codec);
+	taiko_slim_interface_init_reg(codec);
 
 	wcd9xxx_mbhc_deinit(&taiko->mbhc);
 	ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec,
@@ -6063,10 +6075,8 @@
 			tx_hpf_corner_freq_callback);
 	}
 
-
 	snd_soc_codec_set_drvdata(codec, taiko);
 
-
 	/* codec resmgr module init */
 	wcd9xxx = codec->control_data;
 	pdata = dev_get_platdata(codec->dev->parent);
@@ -6074,7 +6084,7 @@
 				  &taiko_reg_address);
 	if (ret) {
 		pr_err("%s: wcd9xxx init failed %d\n", __func__, ret);
-		return ret;
+		goto err_init;
 	}
 
 	taiko->clsh_d.buck_mv = taiko_codec_get_buck_mv(codec);
@@ -6085,7 +6095,7 @@
 				WCD9XXX_MBHC_VERSION_TAIKO);
 	if (ret) {
 		pr_err("%s: mbhc init failed %d\n", __func__, ret);
-		return ret;
+		goto err_init;
 	}
 
 	taiko->codec = codec;
@@ -6179,7 +6189,11 @@
 
 	snd_soc_dapm_sync(dapm);
 
-	(void) taiko_setup_irqs(taiko);
+	ret = taiko_setup_irqs(taiko);
+	if (ret) {
+		pr_err("%s: taiko irq setup failed %d\n", __func__, ret);
+		goto err_irq;
+	}
 
 	atomic_set(&kp_taiko_priv, (unsigned long)taiko);
 	mutex_lock(&dapm->codec->mutex);
@@ -6194,10 +6208,13 @@
 	codec->ignore_pmdown_time = 1;
 	return ret;
 
+err_irq:
+	taiko_cleanup_irqs(taiko);
 err_pdata:
 	kfree(ptr);
 err_nomem_slimch:
 	kfree(taiko);
+err_init:
 	return ret;
 }
 static int taiko_codec_remove(struct snd_soc_codec *codec)
@@ -6212,6 +6229,8 @@
 					   WCD9XXX_BANDGAP_AUDIO_MODE);
 	WCD9XXX_BCL_UNLOCK(&taiko->resmgr);
 
+	taiko_cleanup_irqs(taiko);
+
 	/* cleanup MBHC */
 	wcd9xxx_mbhc_deinit(&taiko->mbhc);
 	/* cleanup resmgr */
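
Note: the probe error handling reworked above (the err_irq/err_init labels, plus the matching cleanup in remove) follows the usual goto-unwind idiom: each failure point jumps to a label that releases only what was already set up, in reverse order. A self-contained sketch with stand-in functions:

#include <linux/errno.h>

/* Stand-ins for the real setup/teardown steps; illustrative only. */
static int example_init_regs(void)   { return 0; }
static int example_request_irq(void) { return -EBUSY; }
static void example_cleanup_regs(void) { }

static int example_codec_probe(void)
{
	int ret;

	ret = example_init_regs();
	if (ret)
		goto err_init;

	ret = example_request_irq();
	if (ret)
		goto err_irq;		/* undo everything done so far */

	return 0;

err_irq:
	example_cleanup_regs();
err_init:
	return ret;
}
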
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index 1d01d2e..daba6d5 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -3534,7 +3534,6 @@
 {
 	void *cdata = mbhc->codec->control_data;
 
-	wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_SLIMBUS, mbhc);
 	wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_RELEASE, mbhc);
 	wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_POTENTIAL, mbhc);
 	wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_REMOVAL, mbhc);
@@ -3543,7 +3542,6 @@
 	wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_JACK_SWITCH, mbhc);
 	wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_HPH_PA_OCPL_FAULT, mbhc);
 	wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_HPH_PA_OCPR_FAULT, mbhc);
-	wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_RELEASE, mbhc);
 
 	if (mbhc->mbhc_fw)
 		release_firmware(mbhc->mbhc_fw);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
index a163f6a..27b3f56 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
@@ -33,6 +33,7 @@
 #include <sound/compress_offload.h>
 #include <sound/compress_driver.h>
 #include <sound/timer.h>
+#include <sound/pcm_params.h>
 
 #include "msm-pcm-q6-v2.h"
 #include "msm-pcm-routing-v2.h"
@@ -55,9 +56,10 @@
 				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
 	.formats =              SNDRV_PCM_FMTBIT_S16_LE |
 					SNDRV_PCM_FMTBIT_S24_LE,
-	.rates =                SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT,
+	.rates =                SNDRV_PCM_RATE_8000_192000 |
+					SNDRV_PCM_RATE_KNOT,
 	.rate_min =             8000,
-	.rate_max =             48000,
+	.rate_max =             192000,
 	.channels_min =         1,
 	.channels_max =         2,
 	.buffer_bytes_max =     1024 * 1024,
@@ -70,7 +72,8 @@
 
 /* Conventional and unconventional sample rate supported */
 static unsigned int supported_sample_rates[] = {
-	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
+	96000, 192000
 };
 
 static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
@@ -277,19 +280,7 @@
 static int msm_pcm_open(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
 	struct msm_audio *prtd;
-	struct asm_softpause_params softpause = {
-		.enable = SOFT_PAUSE_ENABLE,
-		.period = SOFT_PAUSE_PERIOD,
-		.step = SOFT_PAUSE_STEP,
-		.rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
-	};
-	struct asm_softvolume_params softvol = {
-		.period = SOFT_VOLUME_PERIOD,
-		.step = SOFT_VOLUME_STEP,
-		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
-	};
 	int ret = 0;
 
 	pr_debug("%s\n", __func__);
@@ -307,31 +298,9 @@
 		kfree(prtd);
 		return -ENOMEM;
 	}
-	prtd->audio_client->perf_mode = false;
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM);
-		if (ret < 0) {
-			pr_err("%s: pcm out open failed\n", __func__);
-			q6asm_audio_client_free(prtd->audio_client);
-			kfree(prtd);
-			return -ENOMEM;
-		}
-		ret = q6asm_set_io_mode(prtd->audio_client, ASYNC_IO_MODE);
-		if (ret < 0) {
-			pr_err("%s: Set IO mode failed\n", __func__);
-			q6asm_audio_client_free(prtd->audio_client);
-			kfree(prtd);
-			return -ENOMEM;
-		}
-	}
 	/* Capture path */
 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 		return -EPERM;
-	pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
-	prtd->session_id = prtd->audio_client->session;
-	msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
-		prtd->audio_client->perf_mode,
-		prtd->session_id, substream->stream);
 
 	ret = snd_pcm_hw_constraint_list(runtime, 0,
 				SNDRV_PCM_HW_PARAM_RATE,
@@ -350,15 +319,6 @@
 	atomic_set(&lpa_audio.audio_ocmem_req, 0);
 	runtime->private_data = prtd;
 	lpa_audio.prtd = prtd;
-	lpa_set_volume(0);
-	ret = q6asm_set_softpause(lpa_audio.prtd->audio_client, &softpause);
-	if (ret < 0)
-		pr_err("%s: Send SoftPause Param failed ret=%d\n",
-			__func__, ret);
-	ret = q6asm_set_softvolume(lpa_audio.prtd->audio_client, &softvol);
-	if (ret < 0)
-		pr_err("%s: Send SoftVolume Param failed ret=%d\n",
-			__func__, ret);
 
 	return 0;
 }
@@ -407,22 +367,24 @@
 		prtd->pcm_irq_pos = 0;
 	}
 
-	dir = IN;
-	atomic_set(&prtd->pending_buffer, 0);
+	if (prtd->audio_client) {
+		dir = IN;
+		atomic_set(&prtd->pending_buffer, 0);
 
-	if (atomic_cmpxchg(&lpa_audio.audio_ocmem_req, 1, 0))
-		audio_ocmem_process_req(AUDIO, false);
-	lpa_audio.prtd = NULL;
-	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
-	q6asm_audio_client_buf_free_contiguous(dir,
+		if (atomic_cmpxchg(&lpa_audio.audio_ocmem_req, 1, 0))
+			audio_ocmem_process_req(AUDIO, false);
+		lpa_audio.prtd = NULL;
+		q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+		q6asm_audio_client_buf_free_contiguous(dir,
 				prtd->audio_client);
 
-	atomic_set(&prtd->stop, 1);
-	pr_debug("%s\n", __func__);
+		atomic_set(&prtd->stop, 1);
+		q6asm_audio_client_free(prtd->audio_client);
+		pr_debug("%s\n", __func__);
+	}
 	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
 		SNDRV_PCM_STREAM_PLAYBACK);
 	pr_debug("%s\n", __func__);
-	q6asm_audio_client_free(prtd->audio_client);
 	kfree(prtd);
 
 	return 0;
@@ -484,9 +446,59 @@
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct msm_audio *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
 	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
 	struct audio_buffer *buf;
+	uint16_t bits_per_sample = 16;
 	int dir, ret;
+	struct asm_softpause_params softpause = {
+		.enable = SOFT_PAUSE_ENABLE,
+		.period = SOFT_PAUSE_PERIOD,
+		.step = SOFT_PAUSE_STEP,
+		.rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
+	};
+	struct asm_softvolume_params softvol = {
+		.period = SOFT_VOLUME_PERIOD,
+		.step = SOFT_VOLUME_STEP,
+		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
+	};
+
+	prtd->audio_client->perf_mode = false;
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		if (params_format(params) == SNDRV_PCM_FORMAT_S24_LE)
+			bits_per_sample = 24;
+		ret = q6asm_open_write_v2(prtd->audio_client,
+				FORMAT_LINEAR_PCM, bits_per_sample);
+		if (ret < 0) {
+			pr_err("%s: pcm out open failed\n", __func__);
+			q6asm_audio_client_free(prtd->audio_client);
+			prtd->audio_client = NULL;
+			return -ENOMEM;
+		}
+		ret = q6asm_set_io_mode(prtd->audio_client, ASYNC_IO_MODE);
+		if (ret < 0) {
+			pr_err("%s: Set IO mode failed\n", __func__);
+			q6asm_audio_client_free(prtd->audio_client);
+			prtd->audio_client = NULL;
+			return -ENOMEM;
+		}
+	}
+
+	pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
+	prtd->session_id = prtd->audio_client->session;
+	msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+		prtd->audio_client->perf_mode,
+		prtd->session_id, substream->stream);
+
+	lpa_set_volume(0);
+	ret = q6asm_set_softpause(lpa_audio.prtd->audio_client, &softpause);
+	if (ret < 0)
+		pr_err("%s: Send SoftPause Param failed ret=%d\n",
+			__func__, ret);
+	ret = q6asm_set_softvolume(lpa_audio.prtd->audio_client, &softvol);
+	if (ret < 0)
+		pr_err("%s: Send SoftVolume Param failed ret=%d\n",
+			__func__, ret);
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		dir = IN;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 17934eb..c651ec7 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -1618,6 +1618,12 @@
 	msm_routing_put_audio_mixer),
 };
 
+static const struct snd_kcontrol_new mmul4_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
 static const struct snd_kcontrol_new mmul5_mixer_controls[] = {
 	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
 	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
@@ -2472,6 +2478,7 @@
 	SND_SOC_DAPM_AIF_IN("VOIP_DL", "VoIP Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
@@ -2621,6 +2628,8 @@
 	mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)),
 	SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0,
 	mmul2_mixer_controls, ARRAY_SIZE(mmul2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia4 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)),
 	SND_SOC_DAPM_MIXER("MultiMedia5 Mixer", SND_SOC_NOPM, 0, 0,
 	mmul5_mixer_controls, ARRAY_SIZE(mmul5_mixer_controls)),
 	SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -2777,6 +2786,7 @@
 	{"MultiMedia1 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
 	{"MultiMedia1 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
 	{"MultiMedia1 Mixer", "SLIM_4_TX", "SLIMBUS_4_TX"},
+	{"MultiMedia4 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
 	{"MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
@@ -2847,6 +2857,7 @@
 	{"MM_UL1", NULL, "MultiMedia1 Mixer"},
 	{"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
 	{"MM_UL2", NULL, "MultiMedia2 Mixer"},
+	{"MM_UL4", NULL, "MultiMedia4 Mixer"},
 	{"MM_UL5", NULL, "MultiMedia5 Mixer"},
 
 	{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},