Merge "ARM: dts: msm: Update vbatdet-delta-mv on pm8110 and pm8226"
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
index e336429..39be71e 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
@@ -24,6 +24,12 @@
 Channel node
 NOTE: At least one Channel node is required.
 
+Client required property:
+- qcom,<consumer name>-iadc : The phandle to the corresponding iadc device.
+			The consumer name passed to the driver when calling
+			qpnp_get_iadc() is used to associate the client
+			with the corresponding device.
+
 Required properties:
 - label : Channel name used for sysfs entry.
 - reg : AMUX channel number.
@@ -113,3 +119,9 @@
                                 qcom,fast-avg-setup = <0>;
                         };
 	};
+
+Client device example:
+/* Add to the clients node that needs the IADC */
+client_node {
+	qcom,client-iadc = <&pm8941_iadc>;
+};
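+
+Informal client usage sketch (not part of the binding): a client driver
+typically looks up the IADC at probe time.  The fragment below assumes that
+qpnp_get_iadc() takes the client's struct device plus the consumer name
+("client" here, matching the qcom,client-iadc property above) and returns an
+ERR_PTR-encoded handle; the type and variable names are illustrative only.
+
+	struct qpnp_iadc_chip *iadc;
+
+	iadc = qpnp_get_iadc(&pdev->dev, "client");
+	if (IS_ERR(iadc))
+		/* commonly -EPROBE_DEFER while the IADC is still probing */
+		return PTR_ERR(iadc);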
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
index 3720172..0f35e73 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
@@ -20,7 +20,6 @@
  - synaptics,panel-x		: panel x dimension
  - synaptics,panel-y		: panel y dimension
  - synaptics,fw-image-name	: name of firmware .img file in /etc/firmware
- - synaptics,power-down		: fully power down regulators in suspend
 
 Example:
 	i2c@f9927000 { /* BLSP1 QUP5 */
diff --git a/Documentation/devicetree/bindings/media/video/msm-ispif.txt b/Documentation/devicetree/bindings/media/video/msm-ispif.txt
index ff33b17..dc1187a 100644
--- a/Documentation/devicetree/bindings/media/video/msm-ispif.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-ispif.txt
@@ -4,6 +4,7 @@
 - cell-index: ispif hardware core index
 - compatible :
     - "qcom,ispif"
+    - "qcom,ispif-v3.0"
 - reg : offset and length of the register set for the device
     for the ispif operating in compatible mode.
 - reg-names : should specify relevant names to each reg property defined.
@@ -11,9 +12,13 @@
 - interrupt-names : should specify relevant names to each interrupts
   property defined.
 
+Optional properties:
+- qcom,num-isps: The number of ISPs the ISPIF module is connected to. If not
+  set, the default value of 1 is used.
+
 Example:
 
-   qcom,ispif@0xfda0a000 {
+   qcom,ispif@fda0a000 {
        cell-index = <0>;
        compatible = "qcom,ispif";
        reg = <0xfda0a000 0x300>;
@@ -21,3 +26,16 @@
        interrupts = <0 55 0>;
        interrupt-names = "ispif";
    };
+
+or
+
+   qcom,ispif@fda0a000 {
+       cell-index = <0>;
+       compatible = "qcom,ispif-v3.0", "qcom,ispif";
+       reg = <0xfda0a000 0x300>;
+       reg-names = "ispif";
+       interrupts = <0 55 0>;
+       interrupt-names = "ispif";
+       qcom,num-isps = <2>;
+   };
+
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index ac60e38..cc2506d 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -33,15 +33,31 @@
   request by different video encoder usecases.
 - qcom,dec-ddr-ab-ib : list of bus vectors(ab, ib pair) for ddr bandwidth
   request by different video decoder usecases.
-- qcom,iommu-groups : list of IOMMU groups to be used.  Groups are defined as
-  phandles in <target>-iommu-domains.dtsi (e.g msm8974-v1-iommu-domains.dtsi)
-- qcom,iommu-group-buffer-types : bitmap of buffer types that can be mapped into
-  the corresponding IOMMU group. Buffer types are defined within the vidc driver
-  by "enum hal_buffer" in msm_smem.h
 - qcom,buffer-type-tz-usage-table : a key-value pair, mapping a buffer type
   (enum hal_buffer) to its corresponding TZ usage. The TZ usages are defined
   as "enum cp_mem_usage" in include/linux/msm_ion.h
 - qcom,has-ocmem: indicate the target has ocmem if this property exists
+- qcom,vidc-iommu-domains: node containing individual domain nodes, each with:
+	- a unique domain name for the domain node (e.g. vidc,domain-ns)
+	- qcom,vidc-domain-phandle: phandle for the domain as defined in
+	  <target>-iommu-domains.dtsi (e.g. msm8974-v1-iommu-domains.dtsi)
+	- qcom,vidc-partition-buffer-types: bitmap of buffer types that can
+	  be mapped into each IOMMU domain partition.  There must be exactly
+	  one buffer bitmap per partition in the domain, with the order of the
+	  bitmaps matching the order of the respective partitions (see the
+	  worked decomposition after this list).
+	- Buffer types are defined as follows:
+	  input = 0x1
+	  output = 0x2
+	  output2 = 0x2
+	  extradata input = 0x4
+	  extradata output = 0x8
+	  extradata output2 = 0x8
+	  internal scratch = 0x10
+	  internal scratch1 = 0x20
+	  internal scratch2 = 0x40
+	  internal persist = 0x80
+	  internal persist1 = 0x100
+	  internal cmd queue = 0x200
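+
+	As an informal worked example (mirroring the example node below), a
+	partition bitmap of 0x1ff is just the OR of every buffer type except
+	the internal command queue:
+	0x1 | 0x2 | 0x4 | 0x8 | 0x10 | 0x20 | 0x40 | 0x80 | 0x100 = 0x1ff,
+	while a second partition that carries only the internal command queue
+	uses 0x200.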
 
 Example:
 
@@ -71,9 +87,20 @@
 			<60000 664950>;
 		qcom,dec-ddr-ab-ib = <0 0>,
 			<110000 909000>;
-		qcom,iommu-groups = <&venus_domain_ns &venus_domain_cp>;
-		qcom,iommu-group-buffer-types = <0xfff 0x1ff>;
 		qcom,buffer-type-tz-usage-table = <0x1 0x1>,
 						<0x1fe 0x2>;
 		qcom,max-hw-load = <1224450>; /* 4k @ 30 + 1080p @ 30*/
+		qcom,vidc-iommu-domains {
+			qcom,domain-ns {
+				qcom,vidc-domain-phandle = <&venus_domain_ns>;
+				qcom,vidc-partition-buffer-types = <0x1ff>,
+							<0x200>;
+			};
+
+			qcom,domain-cp {
+				qcom,vidc-domain-phandle = <&venus_domain_cp>;
+				qcom,vidc-partition-buffer-types = <0x2>,
+							<0x1f1>;
+			};
+		};
 	};
diff --git a/Documentation/devicetree/bindings/power/qpnp-bms.txt b/Documentation/devicetree/bindings/power/qpnp-bms.txt
index 2da5c72..1162298 100644
--- a/Documentation/devicetree/bindings/power/qpnp-bms.txt
+++ b/Documentation/devicetree/bindings/power/qpnp-bms.txt
@@ -87,6 +87,7 @@
 			For example - A value of 10 indicates:
 			FCC value (in mAh) = (8-bit register value) * 10.
 - qcom,bms-vadc: Corresponding VADC device's phandle.
+- qcom,bms-iadc: Corresponding IADC device's phandle.
 
 Parent node optional properties:
 - qcom,ignore-shutdown-soc: A boolean that controls whether BMS will
@@ -102,6 +103,8 @@
 - qcom,use-ocv-thresholds : A boolean that controls whether BMS will take
 			new OCVs only between the defined thresholds.
 - qcom,enable-fcc-learning: A boolean that defines if FCC learning is enabled.
+- qcom,bms-adc_tm: Corresponding ADC_TM device's phandle to set recurring measurements
+		and receive notifications for die_temperature and vbatt.
 
 
 All sub node required properties:
@@ -150,6 +153,8 @@
 	qcom,tm-temp-margin = <5000>;
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,bms-vadc = <&pm8941_vadc>;
+	qcom,bms-iadc = <&pm8941_iadc>;
+	qcom,bms-adc_tm = <&pm8941_adc_tm>;
 
 	qcom,bms-iadc@3800 {
 		reg = <0x3800 0x100>;
diff --git a/Documentation/devicetree/bindings/power/qpnp-charger.txt b/Documentation/devicetree/bindings/power/qpnp-charger.txt
index 3150bbf..4a5e58c 100644
--- a/Documentation/devicetree/bindings/power/qpnp-charger.txt
+++ b/Documentation/devicetree/bindings/power/qpnp-charger.txt
@@ -89,6 +89,8 @@
 	to the collective charging device. For example USB detection
 	and the battery interface are each separate peripherals and
 	each should be their own subnode.
+- qcom,chg-adc_tm			Corresponding ADC TM device's phandle to set recurring
+					measurements and receive notifications for batt_therm.
 
 Sub node required properties:
 - compatible:		Must be "qcom,qpnp-charger".
@@ -103,6 +105,7 @@
 
 			qcom,usb-chgpth:
 			 - usbin-valid
+			 - usb-ocp (only for SMBBP and SMBCL)
 
 			qcom,chgr:
 			 - chg-done
@@ -149,6 +152,9 @@
 			 - coarse-det-usb:	Coarse detect interrupt triggers
 						at low voltage on USB_IN.
 			 - chg-gone:		Triggers on VCHG line.
+			 - usb-ocp:		Triggers on over-current conditions when
+						reverse boosting. (Only available on
+						SMBCL and SMBBP devices).
 
 			qcom,dc-chgpth:
 			 - dcin-valid:		Indicates a valid DC charger
@@ -198,6 +204,7 @@
 		qcom,batt-cold-percent = <85>;
 		qcom,btc-disabled = <0>;
 		qcom,chg-vadc = <&pm8941_vadc>;
+		qcom,chg-adc_tm = <&pm8941_adc_tm>;
 
 		qcom,chgr@1000 {
 			reg = <0x1000 0x100>;
diff --git a/Documentation/devicetree/bindings/regulator/krait-regulator-pmic.txt b/Documentation/devicetree/bindings/regulator/krait-regulator-pmic.txt
new file mode 100644
index 0000000..a0730f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/krait-regulator-pmic.txt
@@ -0,0 +1,47 @@
+Krait Voltage regulators in the PMIC
+
+In certain MSMs the CPUs are powered by a single supply composed of ganged PMIC regulators
+operating in different phases. The krait-regulator-pmic node represents the gang leader and its
+associated control, power stage and frequency peripherals.
+
+[First Level Nodes]
+Required properties:
+- compatible:			Must be "qcom,krait-regulator-pmic".
+- spmi-dev-container:	        Indicates that all the device nodes within this
+				node should have their resources coalesced into a
+				single spmi_device.  This groups all of the SPMI
+				peripherals that logically make up the gang leader.
+ - #address-cells:		The number of cells dedicated to represent an address.
+				This must be set to '1'.
+ - #size-cells:			The number of cells dedicated to represent the address
+				space range of a peripheral. This must be set to '1'.
+
+[Second Level Nodes]
+Required properties:
+- reg:             Specifies the SPMI address and size for this peripheral.
+There must be exactly three subnodes, qcom,ctl, qcom,ps and qcom,freq, representing the control,
+power stage and frequency SPMI peripherals of the gang leader, respectively.
+
+Example:
+		krait_regulator_pmic: qcom,krait-regulator-pmic {
+			spmi-dev-container;
+			compatible = "qcom,krait-regulator-pmic";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			status = "disabled";
+
+			qcom,ctl@2000 {
+				status = "disabled";
+				reg = <0x2000 0x100>;
+			};
+
+			qcom,ps@2100 {
+				status = "disabled";
+				reg = <0x2100 0x100>;
+			};
+
+			qcom,freq@2200 {
+				status = "disabled";
+				reg = <0x2200 0x100>;
+			};
+		};
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
index 64c44b5..e1681ca 100644
--- a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
+++ b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
@@ -27,6 +27,12 @@
 		crosses a set threshold, read temperature and enable/set trip types supported
 		by the thermal framework.
 
+Client required property:
+- qcom,<consumer name>-adc_tm : The phandle to the corresponding adc_tm device.
+			The consumer name passed to the driver when calling
+			qpnp_get_adc_tm() is used to associate the client
+			with the corresponding device.
+
 Channel nodes
 NOTE: At least one Channel node is required.
 
@@ -97,6 +103,12 @@
 			    allocated to the corresponding channel node.
 - qcom,adc_tm-vadc : phandle to the corresponding VADC device to read the ADC channels.
 
+Client device example:
+/* Add to the clients node that needs the ADC_TM channel A/D */
+client_node {
+	qcom,client-adc_tm = <&pm8941_adc_tm>;
+};
+
 Example:
 	/* Main Node */
 	qcom,vadc@3400 {
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index ff7b03d..f2a9940 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -49,6 +49,9 @@
   is not supported and attached host should always be assumed as SDP.
 - USB3_GDSC-supply : phandle to the globally distributed switch controller
   regulator node to the USB controller.
+- qcom,dwc_usb3-adc_tm: Corresponding ADC_TM device's phandle to set recurring
+		measurements on the USB_ID channel when using the ADC and to receive
+		notifications for the set thresholds.
 
 Sub nodes:
 - Sub node for "DWC3- USB3 controller".
@@ -72,6 +75,7 @@
 		qcom,dwc-usb3-msm-dbm-eps = <4>
 		qcom,vdd-voltage-level = <1 5 7>;
 		qcom,dwc-hsphy-init = <0x00D195A4>;
+		qcom,dwc_usb3-adc_tm = <&pm8941_adc_tm>;
 
 		qcom,msm_bus,name = "usb3";
 		qcom,msm_bus,num_cases = <2>;
diff --git a/arch/arm/boot/dts/apq8074-v2.2-cdp.dts b/arch/arm/boot/dts/apq8074-v2.2-cdp.dts
new file mode 100644
index 0000000..a3dc490
--- /dev/null
+++ b/arch/arm/boot/dts/apq8074-v2.2-cdp.dts
@@ -0,0 +1,34 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "apq8074-v2.2.dtsi"
+/include/ "msm8974-cdp.dtsi"
+
+/ {
+	model = "Qualcomm APQ 8074v2.2 CDP";
+	compatible = "qcom,apq8074-cdp", "qcom,apq8074", "qcom,cdp";
+	qcom,msm-id = <184 1 0x20002>;
+};
+
+&usb3 {
+	interrupt-parent = <&usb3>;
+	interrupts = <0 1>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0x0 0xffffffff>;
+	interrupt-map = <0x0 0 &intc 0 133 0
+			 0x0 1 &spmi_bus 0x0 0x0 0x9 0x0>;
+	interrupt-names = "hs_phy_irq", "pmic_id_irq";
+
+	qcom,misc-ref = <&pm8941_misc>;
+};
diff --git a/arch/arm/boot/dts/apq8074-v2.2-dragonboard.dts b/arch/arm/boot/dts/apq8074-v2.2-dragonboard.dts
new file mode 100644
index 0000000..6989088
--- /dev/null
+++ b/arch/arm/boot/dts/apq8074-v2.2-dragonboard.dts
@@ -0,0 +1,22 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "apq8074-v2.2.dtsi"
+/include/ "apq8074-dragonboard.dtsi"
+
+/ {
+	model = "Qualcomm APQ 8074v2.2 DRAGONBOARD";
+	compatible = "qcom,apq8074-dragonboard", "qcom,apq8074", "qcom,dragonboard";
+	qcom,msm-id = <184 10 0x20002>;
+};
diff --git a/arch/arm/boot/dts/apq8074-v2.2-liquid.dts b/arch/arm/boot/dts/apq8074-v2.2-liquid.dts
new file mode 100644
index 0000000..09d6e66
--- /dev/null
+++ b/arch/arm/boot/dts/apq8074-v2.2-liquid.dts
@@ -0,0 +1,22 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "apq8074-v2.2.dtsi"
+/include/ "msm8974-liquid.dtsi"
+
+/ {
+	model = "Qualcomm APQ 8074v2.2 LIQUID";
+	compatible = "qcom,apq8074-liquid", "qcom,apq8074", "qcom,liquid";
+	qcom,msm-id = <184 9 0x20002>;
+};
diff --git a/arch/arm/boot/dts/apq8074-v2.2.dtsi b/arch/arm/boot/dts/apq8074-v2.2.dtsi
new file mode 100644
index 0000000..ddf7ec8
--- /dev/null
+++ b/arch/arm/boot/dts/apq8074-v2.2.dtsi
@@ -0,0 +1,57 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file. However, device definitions should be placed inside the
+ * msm8974.dtsi file.
+ */
+
+/include/ "msm8974-v2.2.dtsi"
+
+&soc {
+	qcom,qseecom@a700000 {
+		compatible = "qcom,qseecom";
+		reg = <0x0a700000 0x500000>;
+		reg-names = "secapp-region";
+		qcom,disk-encrypt-pipe-pair = <2>;
+		qcom,hlos-ce-hw-instance = <1>;
+		qcom,qsee-ce-hw-instance = <0>;
+		qcom,msm-bus,name = "qseecom-noc";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,active-only = <0>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<55 512 0 0>,
+				<55 512 3936000 393600>,
+				<55 512 3936000 393600>,
+				<55 512 3936000 393600>;
+	};
+
+	sound {
+		compatible = "qcom,apq8074-audio-taiko";
+	};
+};
+
+&memory_hole {
+	qcom,memblock-remove = <0x0a700000 0x5800000>; /* Address and size of the hole */
+};
+
+&qseecom {
+	status = "disabled";
+};
+
+&sdcc3 {
+	qcom,sup-voltages = <2000 2000>;
+	status = "ok";
+};
+
diff --git a/arch/arm/boot/dts/apq8084-regulator.dtsi b/arch/arm/boot/dts/apq8084-regulator.dtsi
index 0c9ca7d..3d5fc7c 100644
--- a/arch/arm/boot/dts/apq8084-regulator.dtsi
+++ b/arch/arm/boot/dts/apq8084-regulator.dtsi
@@ -326,9 +326,9 @@
 };
 
 &rpm_bus {
-	rpm-regulator-smpb1 {
+	rpm-regulator-smpa1 {
 		compatible = "qcom,rpm-regulator-smd-resource";
-		qcom,resource-name = "smpb";
+		qcom,resource-name = "smpa";
 		qcom,resource-id = <1>;
 		qcom,regulator-type = <1>;
 		qcom,hpm-min-load = <100000>;
@@ -342,9 +342,9 @@
 		};
 	};
 
-	rpm-regulator-smpb2 {
+	rpm-regulator-smpa2 {
 		compatible = "qcom,rpm-regulator-smd-resource";
-		qcom,resource-name = "smpb";
+		qcom,resource-name = "smpa";
 		qcom,resource-id = <2>;
 		qcom,regulator-type = <1>;
 		qcom,hpm-min-load = <100000>;
diff --git a/arch/arm/boot/dts/mpq8092-iommu.dtsi b/arch/arm/boot/dts/mpq8092-iommu.dtsi
index baec2d5..af8e015 100644
--- a/arch/arm/boot/dts/mpq8092-iommu.dtsi
+++ b/arch/arm/boot/dts/mpq8092-iommu.dtsi
@@ -229,3 +229,7 @@
 		interrupts = <0 302 0>;
 	};
 };
+
+&vcap_iommu {
+	status = "ok";
+};
diff --git a/arch/arm/boot/dts/mpq8092-rumi.dtsi b/arch/arm/boot/dts/mpq8092-rumi.dtsi
index cc345d8..af49eaf 100644
--- a/arch/arm/boot/dts/mpq8092-rumi.dtsi
+++ b/arch/arm/boot/dts/mpq8092-rumi.dtsi
@@ -10,6 +10,12 @@
  * GNU General Public License for more details.
  */
 
+/ {
+	aliases {
+		spi0 = &spi_0;
+	};
+};
+
 &soc {
 	timer {
 		clock-frequency = <5000000>;
@@ -41,17 +47,28 @@
 		status = "disable";
 	};
 
-	spi@f9923000 {
+	spi_0: spi@f9923000 { /* BLSP1 QUP1 */
 		compatible = "qcom,spi-qup-v2";
-		reg = <0xf9923000 0x1000>;
-		interrupts = <0 95 0>;
-		spi-max-frequency = <24000000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		gpios = <&msmgpio 3 0>, /* CLK  */
-			<&msmgpio 1 0>, /* MISO */
-			<&msmgpio 0 0>; /* MOSI */
-		cs-gpios = <&msmgpio 9 0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9923000 0x1000>,
+		      <0xf9904000 0xf000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 95 0>, <0 238 0>;
+		spi-max-frequency = <19200000>;
+
+		qcom,gpio-mosi = <&msmgpio 0  0>;
+		qcom,gpio-miso = <&msmgpio 1  0>;
+		qcom,gpio-clk  = <&msmgpio 3  0>;
+		qcom,gpio-cs2  = <&msmgpio 11 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <86>;
 
 		ethernet-switch@2 {
 			compatible = "simtec,ks8851";
diff --git a/arch/arm/boot/dts/msm-iommu-v1.dtsi b/arch/arm/boot/dts/msm-iommu-v1.dtsi
index ef8677d..cd5adaa 100644
--- a/arch/arm/boot/dts/msm-iommu-v1.dtsi
+++ b/arch/arm/boot/dts/msm-iommu-v1.dtsi
@@ -1056,4 +1056,98 @@
 			label = "lpass_core_cb_2";
 		};
 	};
+
+	vcap_iommu: qcom,iommu@fdfb6000 {
+		compatible = "qcom,msm-smmu-v1";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		reg = <0xfdfb6000 0x10000>;
+		reg-names = "iommu_base";
+		interrupts = <0 315 0>;
+		qcom,needs-alt-core-clk;
+		label = "vcap_iommu";
+		status = "disabled";
+		qcom,msm-bus,name = "vcap_ebi";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<48 512 0 0>,
+			<48 512 0 1000>;
+
+		qcom,iommu-pmu-ngroups = <1>;
+		qcom,iommu-pmu-ncounters = <8>;
+		qcom,iommu-pmu-event-classes = <0x00
+						0x01
+						0x08
+						0x09
+						0x0A
+						0x10
+						0x11
+						0x12
+						0x80
+						0x81
+						0x82
+						0x83
+						0x90
+						0x91
+						0x92
+						0xb0
+						0xb1>;
+
+		qcom,iommu-bfb-regs =  <0x204c
+					0x2514
+					0x2540
+					0x256c
+					0x2314
+					0x2394
+					0x2414
+					0x2494
+					0x20ac
+					0x215c
+					0x220c
+					0x22bc
+					0x2008
+					0x200c>;
+
+		qcom,iommu-bfb-data = <0x0ff
+					0x00000004
+					0x00000008
+					0x0
+					0x0
+					0x00000008
+					0x00000028
+					0x0
+					0x001000
+					0x001000
+					0x003008
+					0x0
+					0x0
+					0x0>;
+
+		qcom,iommu-ctx@fdfbe000 {
+			compatible = "qcom,msm-smmu-v1-ctx";
+			reg = <0xfdfbe000 0x1000>;
+			interrupts = <0 313 0>;
+			qcom,iommu-ctx-sids = <0>;
+			label = "vcap_cb0";
+		};
+
+		qcom,iommu-ctx@fdfbf000 {
+			compatible = "qcom,msm-smmu-v1-ctx";
+			reg = <0xfdfbf000 0x1000>;
+			interrupts = <0 313 0>;
+			qcom,iommu-ctx-sids = <1>;
+			label = "vcap_cb1";
+		};
+
+		qcom,iommu-ctx@fdfc0000 {
+			compatible = "qcom,msm-smmu-v1-ctx";
+			reg = <0xfdfc0000 0x1000>;
+			interrupts = <0 313 0>;
+			qcom,iommu-ctx-sids = <>;
+			label = "vcap_cb2";
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/msm-pm8110.dtsi b/arch/arm/boot/dts/msm-pm8110.dtsi
index 1584100..f8ccc93 100644
--- a/arch/arm/boot/dts/msm-pm8110.dtsi
+++ b/arch/arm/boot/dts/msm-pm8110.dtsi
@@ -69,6 +69,7 @@
 			qcom,resume-soc = <99>;
 			qcom,tchg-mins = <150>;
 			qcom,chg-vadc = <&pm8110_vadc>;
+			qcom,chg-adc_tm = <&pm8110_adc_tm>;
 
 			qcom,chgr@1000 {
 				status = "disabled";
@@ -133,11 +134,13 @@
 				reg = <0x1300 0x100>;
 				interrupts =	<0 0x13 0x0>,
 						<0 0x13 0x1>,
-						<0x0 0x13 0x2>;
+						<0x0 0x13 0x2>,
+						<0x0 0x13 0x3>;
 
 				interrupt-names =	"coarse-det-usb",
 							"usbin-valid",
-							"chg-gone";
+							"chg-gone",
+							"usb-ocp";
 			};
 
 			qcom,chg-misc@1600 {
@@ -250,7 +253,7 @@
 			};
 		};
 
-		iadc@3600 {
+		pm8110_iadc: iadc@3600 {
 			compatible = "qcom,qpnp-iadc";
 			reg = <0x3600 0x100>;
 			#address-cells = <1>;
@@ -325,6 +328,8 @@
 			qcom,high-ocv-correction-limit-uv = <50>;
 			qcom,hold-soc-est = <3>;
 			qcom,bms-vadc = <&pm8110_vadc>;
+			qcom,bms-iadc = <&pm8110_iadc>;
+			qcom,bms-adc_tm = <&pm8110_adc_tm>;
 
 			qcom,bms-iadc@3800 {
 				reg = <0x3800 0x100>;
diff --git a/arch/arm/boot/dts/msm-pm8226.dtsi b/arch/arm/boot/dts/msm-pm8226.dtsi
index a74b663..5a9f60f 100644
--- a/arch/arm/boot/dts/msm-pm8226.dtsi
+++ b/arch/arm/boot/dts/msm-pm8226.dtsi
@@ -84,6 +84,7 @@
 			qcom,resume-soc = <99>;
 			qcom,tchg-mins = <150>;
 			qcom,chg-vadc = <&pm8226_vadc>;
+			qcom,chg-adc_tm = <&pm8226_adc_tm>;
 
 			qcom,chgr@1000 {
 				status = "disabled";
@@ -141,19 +142,6 @@
 							"bat-fet-on",
 							"vcp-on",
 							"psi";
-
-			};
-
-			pm8226_chg_otg: qcom,usb-chgpth@1300 {
-				status = "disabled";
-				reg = <0x1300 0x100>;
-				interrupts =	<0 0x13 0x0>,
-						<0 0x13 0x1>,
-						<0x0 0x13 0x2>;
-
-				interrupt-names =	"coarse-det-usb",
-							"usbin-valid",
-							"chg-gone";
 			};
 
 			pm8226_chg_boost: qcom,boost@1500 {
@@ -166,6 +154,21 @@
 							"limit-error";
 			};
 
+
+			pm8226_chg_otg: qcom,usb-chgpth@1300 {
+				status = "disabled";
+				reg = <0x1300 0x100>;
+				interrupts =	<0 0x13 0x0>,
+						<0 0x13 0x1>,
+						<0x0 0x13 0x2>,
+						<0x0 0x13 0x3>;
+
+				interrupt-names =	"coarse-det-usb",
+							"usbin-valid",
+							"chg-gone",
+							"usb-ocp";
+			};
+
 			qcom,chg-misc@1600 {
 				status = "disabled";
 				reg = <0x1600 0x100>;
@@ -198,6 +201,8 @@
 			qcom,hold-soc-est = <3>;
 			qcom,low-voltage-threshold = <3420000>;
 			qcom,bms-vadc = <&pm8226_vadc>;
+			qcom,bms-iadc = <&pm8226_iadc>;
+			qcom,bms-adc_tm = <&pm8226_adc_tm>;
 
 			qcom,bms-iadc@3800 {
 				reg = <0x3800 0x100>;
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index fad57a6..12af463 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -135,6 +135,8 @@
 		qcom,high-ocv-correction-limit-uv = <50>;
 		qcom,hold-soc-est = <3>;
 		qcom,bms-vadc = <&pm8941_vadc>;
+		qcom,bms-iadc = <&pm8941_iadc>;
+		qcom,bms-adc_tm = <&pm8941_adc_tm>;
 
 		qcom,bms-iadc@3800 {
 			reg = <0x3800 0x100>;
@@ -204,6 +206,7 @@
 		qcom,resume-soc = <99>;
 		qcom,tchg-mins = <150>;
 		qcom,chg-vadc = <&pm8941_vadc>;
+		qcom,chg-adc_tm = <&pm8941_adc_tm>;
 
 		qcom,chgr@1000 {
 			status = "disabled";
diff --git a/arch/arm/boot/dts/msm-pma8084.dtsi b/arch/arm/boot/dts/msm-pma8084.dtsi
index 3368b36..808f0f6 100644
--- a/arch/arm/boot/dts/msm-pma8084.dtsi
+++ b/arch/arm/boot/dts/msm-pma8084.dtsi
@@ -323,6 +323,7 @@
 						"low-thr-en-set";
 			qcom,adc-bit-resolution = <15>;
 			qcom,adc-vdd-reference = <1800>;
+			qcom,adc_tm-vadc = <&pma8084_vadc>;
 		};
 
 		qcom,rtc {
diff --git a/arch/arm/boot/dts/msm8226-cdp.dtsi b/arch/arm/boot/dts/msm8226-cdp.dtsi
index ef4b236..104cb4c 100644
--- a/arch/arm/boot/dts/msm8226-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8226-cdp.dtsi
@@ -416,10 +416,6 @@
 	};
 };
 
-&usb_otg_sw {
-	status = "okay";
-};
-
 &pm8226_vadc {
 	chan@14 {
 		label = "pa_therm0";
diff --git a/arch/arm/boot/dts/msm8226-iommu-domains.dtsi b/arch/arm/boot/dts/msm8226-iommu-domains.dtsi
index 25fca2a..769e4df 100644
--- a/arch/arm/boot/dts/msm8226-iommu-domains.dtsi
+++ b/arch/arm/boot/dts/msm8226-iommu-domains.dtsi
@@ -24,7 +24,8 @@
 		venus_domain_cp: qcom,iommu-domain2 {
 			label = "venus_cp";
 			qcom,iommu-contexts = <&venus_cp>;
-			qcom,virtual-addr-pool = <0x1000000 0x3f000000>;
+			qcom,virtual-addr-pool = <0x1000000 0x1f800000
+						  0x20800000 0x1f800000>;
 			qcom,secure-domain;
 		};
 	};
diff --git a/arch/arm/boot/dts/msm8226-mtp.dtsi b/arch/arm/boot/dts/msm8226-mtp.dtsi
index ce7f9e9..977c772 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8226-mtp.dtsi
@@ -127,7 +127,7 @@
 	interrupt-names = "core_irq", "async_irq", "pmic_id_irq";
 
 	qcom,hsusb-otg-mode = <3>;
-	vbus_otg-supply = <&usb_otg_sw>;
+	vbus_otg-supply = <&pm8226_chg_otg>;
 };
 
 &sdcc1 {
@@ -452,10 +452,6 @@
 	qcom,charging-disabled;
 };
 
-&usb_otg_sw {
-	status = "okay";
-};
-
 &slim_msm {
 	tapan_codec {
 		qcom,cdc-micbias1-ext-cap;
diff --git a/arch/arm/boot/dts/msm8226-regulator.dtsi b/arch/arm/boot/dts/msm8226-regulator.dtsi
index d587b77..5b3da9b 100644
--- a/arch/arm/boot/dts/msm8226-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8226-regulator.dtsi
@@ -482,14 +482,10 @@
 	regulator-name = "8226_smbbp_boost";
 };
 
-&soc {
-	usb_otg_sw: regulator-ncp380 {
-		compatible = "regulator-fixed";
-		regulator-name = "usb_otg_sw";
-		gpio = <&msmgpio 67 0>;
-		parent-supply = <&pm8226_chg_boost>;
-		startup-delay-us = <4000>;
-		enable-active-high;
-		status = "disabled";
-	};
+&pm8226_chg {
+	otg-parent-supply = <&pm8226_chg_boost>;
+};
+
+&pm8226_chg_otg {
+	regulator-name = "8226_smbbp_otg";
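+	/* vbus_otg supply; parented to the SMBBP boost via otg-parent-supply above */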
 };
diff --git a/arch/arm/boot/dts/msm8226-v2-qrd-evt.dts b/arch/arm/boot/dts/msm8226-v2-qrd-evt.dts
index a2ad682..cb6c1af 100644
--- a/arch/arm/boot/dts/msm8226-v2-qrd-evt.dts
+++ b/arch/arm/boot/dts/msm8226-v2-qrd-evt.dts
@@ -13,7 +13,6 @@
 /dts-v1/;
 /include/ "msm8226-v2.dtsi"
 /include/ "msm8226-qrd.dtsi"
-/include/ "msm8226-camera-sensor-cdp.dtsi"
 /include/ "dsi-panel-nt35590-720p-video.dtsi"
 
 / {
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index b836100..dd4f1f8 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -182,11 +182,23 @@
 			<103000 134000>,
 			<268000 348000>,
 			<505000 657000>;
-		qcom,iommu-groups = <&venus_domain_ns &venus_domain_cp>;
-		qcom,iommu-group-buffer-types = <0xfff 0x1ff>;
 		qcom,buffer-type-tz-usage-table = <0x1 0x1>,
-			<0x1fe 0x2>;
+			<0x2 0x2>,
+			<0x1f0 0x3>;
 		qcom,max-hw-load = <352800>; /* 720p @ 30 + 1080p @ 30 */
+		qcom,vidc-iommu-domains {
+			qcom,domain-ns {
+				qcom,vidc-domain-phandle = <&venus_domain_ns>;
+				qcom,vidc-partition-buffer-types = <0x1ff>,
+							<0x200>;
+			};
+
+			qcom,domain-cp {
+				qcom,vidc-domain-phandle = <&venus_domain_cp>;
+				qcom,vidc-partition-buffer-types = <0x2>,
+							<0x1f1>;
+			};
+		};
 	};
 
 	qcom,wfd {
@@ -1072,16 +1084,17 @@
 		interrupts = <0 95 0>, <0 238 0>;
 		spi-max-frequency = <19200000>;
 
-		gpios = <&msmgpio 3 0>, /* CLK  */
-			<&msmgpio 1 0>, /* MISO */
-			<&msmgpio 0 0>; /* MOSI */
-		cs-gpios = <&msmgpio 22 0>;
+		qcom,gpio-mosi = <&msmgpio 0 0>;
+		qcom,gpio-miso = <&msmgpio 1 0>;
+		qcom,gpio-clk  = <&msmgpio 3 0>;
+		qcom,gpio-cs0  = <&msmgpio 22 0>;
 
 		qcom,infinite-mode = <0>;
 		qcom,use-bam;
 		qcom,ver-reg-exists;
 		qcom,bam-consumer-pipe-index = <12>;
 		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <86>;
 	};
 
 	qcom,bam_dmux@fc834000 {
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index 0078861..60528ee 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -173,13 +173,17 @@
 	qcom,vidc@fdc00000 {
 		compatible = "qcom,msm-vidc";
 		qcom,vidc-ns-map = <0x40000000 0x40000000>;
-		qcom,iommu-groups = <&q6_domain_ns>;
-		qcom,iommu-group-buffer-types = <0xfff>;
 		qcom,buffer-type-tz-usage-map = <0x1 0x1>,
 						<0x1fe 0x2>;
 		qcom,hfi = "q6";
 		qcom,max-hw-load = <108000>; /* 720p @ 30 * 1 */
-	};
+		qcom,vidc-iommu-domains {
+			qcom,domain-ns {
+				qcom,vidc-domain-phandle = <&q6_domain_ns>;
+				qcom,vidc-partition-buffer-types = <0xfff>;
+			};
+		};
+	};
 
 	qcom,usbbam@f9a44000 {
 		compatible = "qcom,usb-bam-msm";
@@ -518,16 +522,17 @@
 		interrupts = <0 98 0>, <0 238 0>;
 		spi-max-frequency = <50000000>;
 
-		gpios = <&msmgpio 89 0>, /* CLK  */
-			<&msmgpio 87 0>, /* MISO */
-			<&msmgpio 86 0>; /* MOSI */
-		cs-gpios = <&msmgpio 88 0>;
+		qcom,gpio-mosi = <&msmgpio 86 0>;
+		qcom,gpio-miso = <&msmgpio 87 0>;
+		qcom,gpio-clk  = <&msmgpio 89 0>;
+		qcom,gpio-cs0  = <&msmgpio 88 0>;
 
 		qcom,infinite-mode = <0>;
 		qcom,use-bam;
 		qcom,ver-reg-exists;
 		qcom,bam-consumer-pipe-index = <18>;
 		qcom,bam-producer-pipe-index = <19>;
+		qcom,master-id = <86>;
 	};
 
 	qcom,pronto@fb21b000 {
diff --git a/arch/arm/boot/dts/msm8926-qrd.dts b/arch/arm/boot/dts/msm8926-qrd.dts
index 8497ba2..e056b7e 100644
--- a/arch/arm/boot/dts/msm8926-qrd.dts
+++ b/arch/arm/boot/dts/msm8926-qrd.dts
@@ -13,7 +13,6 @@
 /dts-v1/;
 /include/ "msm8926.dtsi"
 /include/ "msm8226-qrd.dtsi"
-/include/ "msm8226-camera-sensor-cdp.dtsi"
 
 / {
 	model = "Qualcomm MSM 8926 QRD";
diff --git a/arch/arm/boot/dts/msm8974-camera.dtsi b/arch/arm/boot/dts/msm8974-camera.dtsi
index 4be2b38..1413e0c 100644
--- a/arch/arm/boot/dts/msm8974-camera.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera.dtsi
@@ -94,12 +94,13 @@
 
 	qcom,ispif@fda0A000 {
 		cell-index = <0>;
-		compatible = "qcom,ispif";
+		compatible = "qcom,ispif-v3.0", "qcom,ispif";
 		reg = <0xfda0A000 0x500>,
                       <0xfda00020 0x10>;
 		reg-names = "ispif", "csi_clk_mux";
 		interrupts = <0 55 0>;
 		interrupt-names = "ispif";
+		qcom,num-isps = <0x2>;
 	};
 
 	qcom,vfe@fda10000 {
diff --git a/arch/arm/boot/dts/msm8974-rumi.dtsi b/arch/arm/boot/dts/msm8974-rumi.dtsi
index c01a4e5..152ac4d 100644
--- a/arch/arm/boot/dts/msm8974-rumi.dtsi
+++ b/arch/arm/boot/dts/msm8974-rumi.dtsi
@@ -13,6 +13,12 @@
 /include/ "msm8974-leds.dtsi"
 /include/ "msm8974-camera-sensor-cdp.dtsi"
 
+/ {
+	aliases {
+		spi0 = &spi_0;
+	};
+};
+
 &soc {
 	timer {
 		clock-frequency = <5000000>;
@@ -42,17 +48,28 @@
 		status = "disable";
 	};
 
-	spi@f9923000 {
+	spi_0: spi@f9923000 { /* BLSP1 QUP1 */
 		compatible = "qcom,spi-qup-v2";
-		reg = <0xf9923000 0x1000>;
-		interrupts = <0 95 0>;
-		spi-max-frequency = <24000000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		gpios = <&msmgpio 3 0>, /* CLK  */
-			<&msmgpio 1 0>, /* MISO */
-			<&msmgpio 0 0>; /* MOSI */
-		cs-gpios = <&msmgpio 9 0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9923000 0x1000>,
+		      <0xf9904000 0xf000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 95 0>, <0 238 0>;
+		spi-max-frequency = <19200000>;
+
+		qcom,gpio-mosi = <&msmgpio 0 0>;
+		qcom,gpio-miso = <&msmgpio 1 0>;
+		qcom,gpio-clk  = <&msmgpio 3 0>;
+		qcom,gpio-cs0  = <&msmgpio 9 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <86>;
 
 		ethernet-switch@2 {
 			compatible = "simtec,ks8851";
diff --git a/arch/arm/boot/dts/msm8974-v2.dtsi b/arch/arm/boot/dts/msm8974-v2.dtsi
index 0b3b60f..9d5e50b 100644
--- a/arch/arm/boot/dts/msm8974-v2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.dtsi
@@ -136,12 +136,29 @@
 		<3240000 1600000>,
 		<4048000 1600000>,
 		<4264000 1600000>;
-	qcom,iommu-groups = <&venus_domain_ns &venus_domain_sec_bitstream
-			&venus_domain_sec_pixel &venus_domain_sec_non_pixel>;
-	qcom,iommu-group-buffer-types = <0xfff 0x91 0x42 0x120>;
 	qcom,buffer-type-tz-usage-table = <0x91 0x1>,
 					<0x42 0x2>,
 					<0x120 0x3>;
+	qcom,vidc-iommu-domains {
+		qcom,domain-ns {
+			qcom,vidc-domain-phandle = <&venus_domain_ns>;
+			qcom,vidc-partition-buffer-types = <0x1ff>,
+							<0x200>;
+		};
+		qcom,domain-sec-bs {
+			qcom,vidc-domain-phandle = <&venus_domain_sec_bitstream>;
+			qcom,vidc-partition-buffer-types = <0x91>;
+		};
+		qcom,domain-sec-px {
+			qcom,vidc-domain-phandle = <&venus_domain_sec_pixel>;
+			qcom,vidc-partition-buffer-types = <0x42>;
+		};
+		qcom,domain-sec-np {
+			qcom,vidc-domain-phandle = <&venus_domain_sec_non_pixel>;
+			qcom,vidc-partition-buffer-types = <0x120>;
+		};
+	};
+
 };
 
 &krait_pdn {
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 4360fe0..c8e3d96 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -580,18 +580,28 @@
 		qcom,bam-dma-res-pipes = <6>;
 	};
 
-	spi_7: spi_epm: spi@f9966000 {
+	spi_7: spi_epm: spi@f9966000 { /* BLSP2 QUP4 */
 		compatible = "qcom,spi-qup-v2";
-		reg = <0xf9966000 0x1000>;
-		interrupts = <0 104 0>;
-		spi-max-frequency = <19200000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		qcom,master-id = <84>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9966000 0x1000>,
+		      <0xf9944000 0x15000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 104 0>, <0 239 0>;
+		spi-max-frequency = <19200000>;
+
 		qcom,gpio-mosi = <&msmgpio 53 0>;
 		qcom,gpio-miso = <&msmgpio 54 0>;
 		qcom,gpio-clk  = <&msmgpio 56 0>;
 		qcom,gpio-cs0  = <&msmgpio 55 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <18>;
+		qcom,bam-producer-pipe-index = <19>;
+		qcom,master-id = <84>;
 	};
 
 	tspp: msm_tspp@f99d8000 {
@@ -816,18 +826,28 @@
 		qcom,master-id = <86>;
 	};
 
-	spi_0: spi@f9923000 {
+	spi_0: spi@f9923000 { /* BLSP1 QUP1 */
 		compatible = "qcom,spi-qup-v2";
-		reg = <0xf9923000 0x1000>;
-		interrupts = <0 95 0>;
-		spi-max-frequency = <19200000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		qcom,master-id = <86>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9923000 0x1000>,
+		      <0xf9904000 0xf000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 95 0>, <0 238 0>;
+		spi-max-frequency = <19200000>;
+
 		qcom,gpio-mosi = <&msmgpio 0 0>;
 		qcom,gpio-miso = <&msmgpio 1 0>;
 		qcom,gpio-clk  = <&msmgpio 3 0>;
-		qcom,gpio-cs2  = <&msmgpio 9 0>;
+		qcom,gpio-cs0  = <&msmgpio 9 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <86>;
 	};
 
 	qcom,acpuclk@f9000000 {
@@ -877,6 +897,7 @@
 		qcom,vdd-voltage-level = <1 5 7>;
 		qcom,dwc-hsphy-init = <0x00D191A4>;
 		qcom,misc-ref = <&pm8941_misc>;
+		qcom,dwc_usb3-adc_tm = <&pm8941_adc_tm>;
 
 		qcom,msm-bus,name = "usb3";
 		qcom,msm-bus,num-cases = <2>;
diff --git a/arch/arm/boot/dts/msm8974pro-ac-mtp.dts b/arch/arm/boot/dts/msm8974pro-ac-mtp.dts
index b8df576..9ac9aaf 100644
--- a/arch/arm/boot/dts/msm8974pro-ac-mtp.dts
+++ b/arch/arm/boot/dts/msm8974pro-ac-mtp.dts
@@ -27,3 +27,103 @@
 &sdhc_1 {
 	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000>;
 };
+
+&pma8084_vadc {
+	chan@73 {
+		label = "msm_therm";
+		reg = <0x73>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@75 {
+		label = "pa_therm0";
+		reg = <0x75>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@77 {
+		label = "pa_therm1";
+		reg = <0x77>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@78 {
+		label = "quiet_therm";
+		reg = <0x78>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
+&pma8084_adc_tm {
+	chan@73 {
+		label = "msm_therm";
+		reg = <0x73>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,btm-channel-number = <0x48>;
+		qcom,thermal-node;
+	};
+
+	chan@75 {
+		label = "pa_therm0";
+		reg = <0x75>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@77 {
+		label = "pa_therm1";
+		reg = <0x77>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@78 {
+		label = "quiet_therm";
+		reg = <0x78>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ac-pm8941.dtsi b/arch/arm/boot/dts/msm8974pro-ac-pm8941.dtsi
index 4d10ded..0989c34 100644
--- a/arch/arm/boot/dts/msm8974pro-ac-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-ac-pm8941.dtsi
@@ -21,14 +21,6 @@
 };
 /include/ "msm-pm8941.dtsi"
 
-&pma8084_vadc {
-	status = "disabled";
-};
-
-&pma8084_adc_tm {
-	status = "disabled";
-};
-
 &pm8941_lsid0 {
 	qcom,power-on@800 {
 		status = "disabled";
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index 61478b8..0e7baf1 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -328,18 +328,26 @@
 		interrupt-names = "bam_irq";
 	};
 
-	spi_0: spi@f9924000 {
+	spi_0: spi@f9924000 { /* BLSP1 QUP2 */
 		compatible = "qcom,spi-qup-v2";
-		reg = <0xf9924000 0x1000>;
-		interrupts = <0 96 0>;
-		spi-max-frequency = <25000000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		gpios = <&msmgpio 7 0>, /* CLK  */
-			<&msmgpio 5 0>, /* MISO */
-			<&msmgpio 4 0>; /* MOSI */
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9924000 0x1000>,
+		      <0xf9904000 0x11000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 96 0>, <0 238 0>;
+		spi-max-frequency = <19200000>;
+		qcom,gpio-mosi = <&msmgpio 4 0>;
+		qcom,gpio-miso = <&msmgpio 5 0>;
+		qcom,gpio-clk  = <&msmgpio 7 0>;
+		qcom,gpio-cs0  = <&msmgpio 6 0>;
 
-		cs-gpios = <&msmgpio 6 0>;
+		qcom,infinite-mode = <0>;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <14>;
+		qcom,bam-producer-pipe-index = <15>;
+		qcom,master-id = <86>;
 
 		ethernet-switch@0 {
 			compatible = "simtec,ks8851";
diff --git a/arch/arm/boot/dts/msmkrypton.dtsi b/arch/arm/boot/dts/msmkrypton.dtsi
index cdaf964..f3e9dbb 100644
--- a/arch/arm/boot/dts/msmkrypton.dtsi
+++ b/arch/arm/boot/dts/msmkrypton.dtsi
@@ -17,6 +17,10 @@
 	compatible = "qcom,msmkrypton";
 	interrupt-parent = <&intc>;
 
+	aliases {
+		spi6 = &spi_6;
+	};
+
 	soc: soc { };
 };
 
@@ -140,18 +144,27 @@
 	};
 
 	spi_6: spi@f9928000 { /* BLSP1 QUP6 */
-		cell-index = <0>;
 		compatible = "qcom,spi-qup-v2";
 		#address-cells = <1>;
 		#size-cells = <0>;
-		reg = <0xf9928000 0x1000>;
-		interrupts = <0 100 0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9928000 0x1000>,
+		      <0xf9904000 0x19000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 100 0>, <0 238 0>;
 		spi-max-frequency = <19200000>;
 
-		gpios = <&msmgpio 23 0>, /* CLK  */
-		<&msmgpio 21 0>, /* MISO */
-		<&msmgpio 20 0>; /* MOSI */
-		cs-gpios = <&msmgpio 22 0>;
+		qcom,gpio-mosi = <&msmgpio 20 0>;
+		qcom,gpio-miso = <&msmgpio 21 0>;
+		qcom,gpio-clk  = <&msmgpio 23 0>;
+		qcom,gpio-cs0  = <&msmgpio 22 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <22>;
+		qcom,bam-producer-pipe-index = <23>;
+		qcom,master-id = <86>;
 	};
 
 	qcom,ipc-spinlock@fd484000 {
diff --git a/arch/arm/boot/dts/msmsamarium.dtsi b/arch/arm/boot/dts/msmsamarium.dtsi
index 31ab623..20954f9 100644
--- a/arch/arm/boot/dts/msmsamarium.dtsi
+++ b/arch/arm/boot/dts/msmsamarium.dtsi
@@ -42,7 +42,7 @@
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		reg = <0xfd510000 0x4000>;
-		ngpio = <145>;
+		ngpio = <146>;
 		interrupts = <0 208 0>;
 		qcom,direct-connect-irqs = <8>;
 	};
@@ -243,3 +243,5 @@
 		qcom,not-wakeup;     /* Needed until MPM is fully configured. */
 	};
 };
+
+/include/ "msm-pma8084.dtsi"
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 72472f9..9296515 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -60,6 +60,18 @@
         dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.0-1-cdp.dtb
         dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.0-1-liquid.dtb
         dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.0-1-dragonboard.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.2-cdp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.2-liquid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.2-dragonboard.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.2-cdp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.2-fluid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.2-liquid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.2-mtp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974pro-ab-cdp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974pro-ab-fluid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974pro-ab-liquid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974pro-ab-mtp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974pro-ac-mtp.dtb
 
 # APQ8084
    zreladdr-$(CONFIG_ARCH_APQ8084)	:= 0x00008000
@@ -85,6 +97,7 @@
 # MSM8226
    zreladdr-$(CONFIG_ARCH_MSM8226)	:= 0x00008000
         dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-sim.dtb
+        dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-fluid.dtb
         dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-v1-cdp.dtb
         dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-v1-mtp.dtb
         dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-v1-qrd-evt.dtb
@@ -120,6 +133,8 @@
 
 # MSM8610
    zreladdr-$(CONFIG_ARCH_MSM8610)	:= 0x00008000
+        dtb-$(CONFIG_ARCH_MSM8610)	+= msm8610-cdp.dtb
+        dtb-$(CONFIG_ARCH_MSM8610)	+= msm8610-mtp.dtb
         dtb-$(CONFIG_ARCH_MSM8610)	+= msm8610-rumi.dtb
         dtb-$(CONFIG_ARCH_MSM8610)	+= msm8610-sim.dtb
         dtb-$(CONFIG_ARCH_MSM8610)	+= msm8610-qrd-skuaa.dtb
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index 21a940d..1460f94 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -233,6 +233,7 @@
 #define UL_TIMEOUT_DELAY 1000	/* in ms */
 #define ENABLE_DISCONNECT_ACK	0x1
 #define SHUTDOWN_TIMEOUT_MS	500
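+/*
+ * Timeout for the uplink wakeup handshake waits below; replaces the
+ * previous fixed HZ (1 second) timeout.
+ */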
+#define UL_WAKEUP_TIMEOUT_MS	2000
 static void toggle_apps_ack(void);
 static void reconnect_to_bam(void);
 static void disconnect_to_bam(void);
@@ -1663,7 +1664,8 @@
 	if (wait_for_ack) {
 		BAM_DMUX_LOG("%s waiting for previous ack\n", __func__);
 		ret = wait_for_completion_timeout(
-					&ul_wakeup_ack_completion, HZ);
+					&ul_wakeup_ack_completion,
+					msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
 		wait_for_ack = 0;
 		if (unlikely(ret == 0) && ssrestart_check()) {
 			mutex_unlock(&wakeup_lock);
@@ -1674,14 +1676,16 @@
 	INIT_COMPLETION(ul_wakeup_ack_completion);
 	power_vote(1);
 	BAM_DMUX_LOG("%s waiting for wakeup ack\n", __func__);
-	ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
+	ret = wait_for_completion_timeout(&ul_wakeup_ack_completion,
+					msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
 	if (unlikely(ret == 0) && ssrestart_check()) {
 		mutex_unlock(&wakeup_lock);
 		BAM_DMUX_LOG("%s timeout wakeup ack\n", __func__);
 		return;
 	}
 	BAM_DMUX_LOG("%s waiting completion\n", __func__);
-	ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
+	ret = wait_for_completion_timeout(&bam_connection_completion,
+					msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
 	if (unlikely(ret == 0) && ssrestart_check()) {
 		mutex_unlock(&wakeup_lock);
 		BAM_DMUX_LOG("%s timeout power on\n", __func__);
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index a93cfe5..a29bb88 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -630,16 +630,27 @@
 	static struct rpm_regulator_init_data *rpm_data;
 	int i;
 
-	if (machine_is_msm8960_cdp()) {
+	if (machine_is_msm8960_cdp() || cpu_is_msm8960ab()) {
 		/* Only modify LVS6 consumers for CDP targets. */
 		for (i = 0; i < ARRAY_SIZE(msm_rpm_regulator_init_data); i++) {
 			rpm_data = &msm_rpm_regulator_init_data[i];
-			if (rpm_data->id == RPM_VREG_ID_PM8921_LVS6) {
+			if (machine_is_msm8960_cdp() &&
+				rpm_data->id == RPM_VREG_ID_PM8921_LVS6) {
 				rpm_data->init_data.consumer_supplies
 					= vreg_consumers_CDP_LVS6;
 				rpm_data->init_data.num_consumer_supplies
 					= ARRAY_SIZE(vreg_consumers_CDP_LVS6);
 			}
+			if (cpu_is_msm8960ab() &&
+				rpm_data->id == RPM_VREG_ID_PM8921_S7) {
+				rpm_data->init_data.constraints.min_uV =
+								1275000;
+				rpm_data->init_data.constraints.max_uV =
+								1275000;
+				rpm_data->init_data.constraints.input_uV =
+								1275000;
+				rpm_data->default_uV = 1275000;
+			}
 		}
 	}
 }
diff --git a/arch/arm/mach-msm/clock-8092.c b/arch/arm/mach-msm/clock-8092.c
index ec7a4b0..802cd1b 100644
--- a/arch/arm/mach-msm/clock-8092.c
+++ b/arch/arm/mach-msm/clock-8092.c
@@ -304,6 +304,9 @@
 	CLK_DUMMY("core_clk", NULL, "fdc84000.qcom,iommu", OFF),
 	CLK_DUMMY("iface_clk", NULL, "fdee4000.qcom,iommu", OFF),
 	CLK_DUMMY("core_clk", NULL, "fdee4000.qcom,iommu", OFF),
+	CLK_DUMMY("iface_clk", NULL, "fdfb6000.qcom,iommu", OFF),
+	CLK_DUMMY("core_clk", NULL, "fdfb6000.qcom,iommu", OFF),
+	CLK_DUMMY("alt_core_clk", NULL, "fdfb6000.qcom,iommu", OFF),
 	/* BCSS broadcast */
 	CLK_DUMMY("",	bcc_dem_core_b_clk_src.c,	"", OFF),
 	CLK_DUMMY("",	adc_01_clk_src.c,	"", OFF),
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index a8cb46e..cd6d582 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -718,17 +718,38 @@
 DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a1_pin, cxo_a1_a_pin, A1_ID);
 DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a2_pin, cxo_a2_a_pin, A2_ID);
 
+static unsigned int soft_vote_gpll0;
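+/*
+ * Shared by gpll0_clk_src (PLL_SOFT_VOTE_PRIMARY) and gpll0_ao_clk_src
+ * (PLL_SOFT_VOTE_ACPU) so the two soft votes are tracked independently.
+ */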
+
+static struct pll_vote_clk gpll0_ao_clk_src = {
+	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
+	.en_mask = BIT(0),
+	.status_reg = (void __iomem *)GPLL0_STATUS_REG,
+	.status_mask = BIT(17),
+	.soft_vote = &soft_vote_gpll0,
+	.soft_vote_mask = PLL_SOFT_VOTE_ACPU,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.parent = &cxo_a_clk_src.c,
+		.rate = 600000000,
+		.dbg_name = "gpll0_ao_clk_src",
+		.ops = &clk_ops_pll_acpu_vote,
+		CLK_INIT(gpll0_ao_clk_src.c),
+	},
+};
+
 static struct pll_vote_clk gpll0_clk_src = {
 	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
 	.en_mask = BIT(0),
 	.status_reg = (void __iomem *)GPLL0_STATUS_REG,
 	.status_mask = BIT(17),
+	.soft_vote = &soft_vote_gpll0,
+	.soft_vote_mask = PLL_SOFT_VOTE_PRIMARY,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
 		.parent = &cxo_clk_src.c,
 		.rate = 600000000,
 		.dbg_name = "gpll0_clk_src",
-		.ops = &clk_ops_pll_vote,
+		.ops = &clk_ops_pll_acpu_vote,
 		CLK_INIT(gpll0_clk_src.c),
 	},
 };
@@ -4895,6 +4916,9 @@
 
 	CLK_LOOKUP("measure",	measure_clk.c,	"debug"),
 
+	CLK_LOOKUP("gpll0", gpll0_clk_src.c, ""),
+	CLK_LOOKUP("gpll0_ao", gpll0_ao_clk_src.c, ""),
+
 	CLK_LOOKUP("dma_bam_pclk", gcc_bam_dma_ahb_clk.c, "msm_sps"),
 	CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f991f000.serial"),
 	CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f9924000.i2c"),
@@ -5173,6 +5197,25 @@
 	CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
 		"fda0a000.qcom,ispif"),
 
+	CLK_LOOKUP("vfe0_clk_src", vfe0_clk_src.c, "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("camss_vfe_vfe0_clk", camss_vfe_vfe0_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("camss_csi_vfe0_clk", camss_csi_vfe0_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("vfe1_clk_src", vfe1_clk_src.c, "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("camss_vfe_vfe1_clk", camss_vfe_vfe1_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("camss_csi_vfe1_clk", camss_csi_vfe1_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_src_clk", csi0_clk_src.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_clk", camss_csi0_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_pix_clk", camss_csi0pix_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_rdi_clk", camss_csi0rdi_clk.c,
+			   "fda0a000.qcom,ispif"),
+
 	/*VFE clocks*/
 	CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
 					"fda10000.qcom,vfe"),
diff --git a/arch/arm/mach-msm/ipc_router.c b/arch/arm/mach-msm/ipc_router.c
index c0cca1d..64986d0 100644
--- a/arch/arm/mach-msm/ipc_router.c
+++ b/arch/arm/mach-msm/ipc_router.c
@@ -1013,6 +1013,7 @@
 		return -EINVAL;
 	}
 
+	memset(&ctl, 0, sizeof(ctl));
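+	/* Zeroed so unused fields never carry stale stack data onto the wire */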
 	ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
 
 	for (i = 0; i < SRV_HASH_SIZE; i++) {
@@ -1241,6 +1242,7 @@
 	mode = mode_info->mode;
 	xprt_info = mode_info->xprt_info;
 
+	memset(&msg, 0, sizeof(msg));
 	msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
 	msg.cli.node_id = node_id;
 	msg.cli.port_id = port_id;
@@ -1295,6 +1297,7 @@
 	D("Remove server %08x:%08x - %08x:%08x",
 	   server->name.service, server->name.instance,
 	   rport_ptr->node_id, rport_ptr->port_id);
+	memset(&ctl, 0, sizeof(ctl));
 	ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
 	ctl.srv.service = server->name.service;
 	ctl.srv.instance = server->name.instance;
@@ -1313,6 +1316,7 @@
 	union rr_control_msg ctl;
 	int j;
 
+	memset(&ctl, 0, sizeof(ctl));
 	for (j = 0; j < RP_HASH_SIZE; j++) {
 		list_for_each_entry_safe(rport_ptr, tmp_rport_ptr,
 				&rt_entry->remote_port_list[j], list) {
@@ -1847,7 +1851,7 @@
 process_done:
 		if (resume_tx) {
 			union rr_control_msg msg;
-
+			memset(&msg, 0, sizeof(msg));
 			msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
 			msg.cli.node_id = resume_tx_node_id;
 			msg.cli.port_id = resume_tx_port_id;
@@ -1900,6 +1904,7 @@
 		return -EINVAL;
 	}
 
+	memset(&ctl, 0, sizeof(ctl));
 	ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
 	ctl.srv.service = server->name.service;
 	ctl.srv.instance = server->name.instance;
@@ -1948,6 +1953,7 @@
 		return -ENODEV;
 	}
 
+	memset(&ctl, 0, sizeof(ctl));
 	ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
 	ctl.srv.service = server->name.service;
 	ctl.srv.instance = server->name.instance;
@@ -2433,6 +2439,7 @@
 		up_write(&local_ports_lock_lha2);
 
 		if (port_ptr->type == SERVER_PORT) {
+			memset(&msg, 0, sizeof(msg));
 			msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
 			msg.srv.service = port_ptr->port_name.service;
 			msg.srv.instance = port_ptr->port_name.instance;
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index 8a3ecb1..94281b1 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -573,7 +573,8 @@
 {
 	int clk_ready = 0;
 
-	if (of_find_property(desc->dev->of_node,
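+	/*
+	 * Only look for the proxy-unvote GPIO when a proxy unvote callback
+	 * is actually registered.
+	 */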
+	if (desc->ops->proxy_unvote &&
+		of_find_property(desc->dev->of_node,
 				"qcom,gpio-proxy-unvote",
 				NULL)) {
 		clk_ready = of_get_named_gpio(desc->dev->of_node,
@@ -780,7 +781,7 @@
 		 "Invalid proxy unvote callback or a proxy timeout of 0"
 		 " was specified or no proxy unvote IRQ was specified.\n");
 
-	if (desc->proxy_unvote_irq > 0 && desc->ops->proxy_unvote) {
+	if (desc->proxy_unvote_irq) {
 		ret = request_threaded_irq(desc->proxy_unvote_irq,
 				  NULL,
 				  proxy_unvote_intr_handler,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 184dd982..a28bf33 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -114,18 +114,6 @@
 	.long_ib_detect = 1,
 };
 
-/* This set of registers are used for Hang detection
- * If the values of these registers are same after
- * KGSL_TIMEOUT_PART time, GPU hang is reported in
- * kernel log.
- * *****ALERT******ALERT********ALERT*************
- * Order of registers below is important, registers
- * from LONG_IB_DETECT_REG_INDEX_START to
- * LONG_IB_DETECT_REG_INDEX_END are used in long ib detection.
- */
-#define LONG_IB_DETECT_REG_INDEX_START 1
-#define LONG_IB_DETECT_REG_INDEX_END 5
-
 unsigned int ft_detect_regs[FT_DETECT_REGS_COUNT];
 
 /*
@@ -1722,6 +1710,9 @@
 				(device->pwrctrl.gpu_cx &&
 				regulator_is_enabled(device->pwrctrl.gpu_cx)));
 
+	/* Clear any GPU faults that might have been left over */
+	adreno_set_gpu_fault(adreno_dev, 0);
+
 	/* Power up the device */
 	kgsl_pwrctrl_enable(device);
 
@@ -1830,29 +1821,35 @@
  */
 int adreno_reset(struct kgsl_device *device)
 {
-	int ret;
+	int ret = 0;
 
 	/* Try soft reset first */
-	if (adreno_soft_reset(device) == 0)
-		return 0;
+	if (adreno_soft_reset(device) != 0) {
+		KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
 
-	/* If it failed, then pull the power */
-	ret = adreno_stop(device);
-	if (ret)
-		return ret;
+		/* If it failed, then pull the power */
+		ret = adreno_stop(device);
+		if (ret)
+			return ret;
 
-	ret = adreno_start(device);
+		ret = adreno_start(device);
 
-	if (ret == 0) {
-		/*
-		 * If active_cnt is non-zero then the system was active before
-		 * going into a reset - put it back in that state
-		 */
-
-		if (atomic_read(&device->active_cnt))
-			kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+		if (ret)
+			return ret;
 	}
 
+	/*
+	 * If active_cnt is non-zero then the system was active before
+	 * going into a reset - put it back in that state
+	 */
+
+	if (atomic_read(&device->active_cnt))
+		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+
+	/* Set the page table back to the default page table */
+	kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+			KGSL_MEMSTORE_GLOBAL);
+
 	return ret;
 }
 
@@ -2310,6 +2307,13 @@
 	/* Stop the ringbuffer */
 	adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
 
+	if (kgsl_pwrctrl_isenabled(device))
+		device->ftbl->irqctrl(device, 0);
+
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+	adreno_set_gpu_fault(adreno_dev, 0);
+
 	/* Delete the idle timer */
 	del_timer_sync(&device->idle_timer);
 
@@ -2391,12 +2395,20 @@
 			0x110, 0x110);
 
 	while (time_before(jiffies, wait)) {
+		/*
+		 * If we fault, stop waiting and return an error. The dispatcher
+		 * will clean up the fault from the work queue, but we need to
+		 * make sure we don't block it by waiting for an idle that
+		 * will never come.
+		 */
+
+		if (adreno_gpu_fault(adreno_dev) != 0)
+			return -EDEADLK;
+
 		if (adreno_isidle(device))
 			return 0;
 	}
 
-	kgsl_postmortem_dump(device, 0);
-
 	return -ETIMEDOUT;
 }
 
@@ -2819,6 +2831,7 @@
 	.drawctxt_destroy = adreno_drawctxt_destroy,
 	.setproperty = adreno_setproperty,
 	.postmortem_dump = adreno_dump,
+	.drawctxt_sched = adreno_drawctxt_sched,
 };
 
 static struct platform_driver adreno_platform_driver = {
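
The reworked adreno_reset() above attempts a soft reset first and only falls back to a full stop/start when that fails; in either case it then restores the active power state and the default page table. A compressed standalone sketch of that control flow, using hypothetical helper names in place of the driver calls:

	#include <stdio.h>

	struct device_state { int active_count; int active; };

	/* Stubs standing in for the real reset paths; soft reset fails here */
	static int soft_reset(struct device_state *d)  { (void)d; return -1; }
	static int hard_stop(struct device_state *d)   { (void)d; return 0; }
	static int hard_start(struct device_state *d)  { (void)d; return 0; }
	static void set_state_active(struct device_state *d) { d->active = 1; }
	static void restore_default_pagetable(struct device_state *d) { (void)d; }

	/* Mirrors the shape of the patched reset: soft first, hard on failure */
	static int reset_device(struct device_state *dev)
	{
		int ret = 0;

		if (soft_reset(dev) != 0) {
			ret = hard_stop(dev);		/* pull the power... */
			if (ret)
				return ret;
			ret = hard_start(dev);		/* ...and bring it back */
			if (ret)
				return ret;
		}

		/* Common tail, run after either flavor of reset */
		if (dev->active_count)
			set_state_active(dev);
		restore_default_pagetable(dev);

		return ret;
	}

	int main(void)
	{
		struct device_state dev = { 1, 0 };

		printf("reset=%d active=%d\n", reset_device(&dev), dev.active);
		return 0;
	}
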
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 32e43b2..0363739 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -35,12 +35,11 @@
 #define ADRENO_CHIPID_PATCH(_id) ((_id) & 0xFF)
 
 /* Flags to control command packet settings */
-#define KGSL_CMD_FLAGS_NONE             0x00000000
-#define KGSL_CMD_FLAGS_PMODE		0x00000001
-#define KGSL_CMD_FLAGS_INTERNAL_ISSUE	0x00000002
-#define KGSL_CMD_FLAGS_GET_INT		0x00000004
-#define KGSL_CMD_FLAGS_PROFILE		0x00000008
-#define KGSL_CMD_FLAGS_EOF	        0x00000100
+#define KGSL_CMD_FLAGS_NONE             0
+#define KGSL_CMD_FLAGS_PMODE		BIT(0)
+#define KGSL_CMD_FLAGS_INTERNAL_ISSUE   BIT(1)
+#define KGSL_CMD_FLAGS_WFI              BIT(2)
+#define KGSL_CMD_FLAGS_PROFILE		BIT(3)
 
 /* Command identifiers */
 #define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0x2EADBEEF
@@ -96,6 +95,10 @@
 	TRACE_BUS_CTL,
 };
 
+#define ADRENO_SOFT_FAULT 1
+#define ADRENO_HARD_FAULT 2
+#define ADRENO_TIMEOUT_FAULT 3
+
 /*
  * Maximum size of the dispatcher ringbuffer - the actual inflight size will be
  * smaller then this but this size will allow for a larger range of inflight
@@ -110,7 +113,7 @@
  * @state: Current state of the dispatcher (active or paused)
  * @timer: Timer to monitor the progress of the command batches
  * @inflight: Number of command batch operations pending in the ringbuffer
- * @fault: True if a HW fault was detected
+ * @fault: Non-zero if a fault was detected.
  * @pending: Priority list of contexts waiting to submit command batches
  * @plist_lock: Spin lock to protect the pending queue
  * @cmdqueue: Queue of command batches currently flight
@@ -125,8 +128,9 @@
 	struct mutex mutex;
 	unsigned int state;
 	struct timer_list timer;
+	struct timer_list fault_timer;
 	unsigned int inflight;
-	int fault;
+	atomic_t fault;
 	struct plist_head pending;
 	spinlock_t plist_lock;
 	struct kgsl_cmdbatch *cmdqueue[ADRENO_DISPATCH_CMDQUEUE_SIZE];
@@ -340,13 +344,16 @@
 };
 
 /* Fault Tolerance policy flags */
-#define  KGSL_FT_OFF                      BIT(0)
-#define  KGSL_FT_REPLAY                   BIT(1)
-#define  KGSL_FT_SKIPIB                   BIT(2)
-#define  KGSL_FT_SKIPFRAME                BIT(3)
-#define  KGSL_FT_DISABLE                  BIT(4)
-#define  KGSL_FT_TEMP_DISABLE             BIT(5)
-#define  KGSL_FT_DEFAULT_POLICY           (KGSL_FT_REPLAY + KGSL_FT_SKIPIB)
+#define  KGSL_FT_OFF                      0
+#define  KGSL_FT_REPLAY                   1
+#define  KGSL_FT_SKIPIB                   2
+#define  KGSL_FT_SKIPFRAME                3
+#define  KGSL_FT_DISABLE                  4
+#define  KGSL_FT_TEMP_DISABLE             5
+#define  KGSL_FT_DEFAULT_POLICY (BIT(KGSL_FT_REPLAY) + BIT(KGSL_FT_SKIPIB))
+
+/* This internal bit is used to skip the PM dump on replayed command batches */
+#define  KGSL_FT_SKIP_PMDUMP              31
 
 /* Pagefault policy flags */
 #define KGSL_FT_PAGEFAULT_INT_ENABLE         BIT(0)
@@ -356,6 +363,14 @@
 #define KGSL_FT_PAGEFAULT_DEFAULT_POLICY     (KGSL_FT_PAGEFAULT_INT_ENABLE + \
 					KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
 
+#define ADRENO_FT_TYPES \
+	{ BIT(KGSL_FT_OFF), "off" }, \
+	{ BIT(KGSL_FT_REPLAY), "replay" }, \
+	{ BIT(KGSL_FT_SKIPIB), "skipib" }, \
+	{ BIT(KGSL_FT_SKIPFRAME), "skipframe" }, \
+	{ BIT(KGSL_FT_DISABLE), "disable" }, \
+	{ BIT(KGSL_FT_TEMP_DISABLE), "temp" }
+
 extern struct adreno_gpudev adreno_a2xx_gpudev;
 extern struct adreno_gpudev adreno_a3xx_gpudev;
 
@@ -425,6 +440,8 @@
 
 void adreno_dispatcher_schedule(struct kgsl_device *device);
 void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
+void adreno_dispatcher_queue_context(struct kgsl_device *device,
+	struct adreno_context *drawctxt);
 int adreno_reset(struct kgsl_device *device);
 
 int adreno_ft_init_sysfs(struct kgsl_device *device);
@@ -741,4 +758,31 @@
 		return ADRENO_REG_REGISTER_MAX;
 	return adreno_dev->gpudev->reg_offsets->offsets[offset_name];
 }
+
+/**
+ * adreno_gpu_fault() - Return the current state of the GPU
+ * @adreno_dev: A pointer to the adreno_device to query
+ *
+ * Return 0 if there is no fault, or a positive value identifying the last
+ * type of fault that occurred
+ */
+static inline unsigned int adreno_gpu_fault(struct adreno_device *adreno_dev)
+{
+	smp_rmb();
+	return atomic_read(&adreno_dev->dispatcher.fault);
+}
+
+/**
+ * adreno_set_gpu_fault() - Set the current fault status of the GPU
+ * @adreno_dev: A pointer to the adreno_device to set
+ * @state: fault state to set
+ *
+ */
+static inline void adreno_set_gpu_fault(struct adreno_device *adreno_dev,
+	int state)
+{
+	atomic_set(&adreno_dev->dispatcher.fault, state);
+	smp_wmb();
+}
+
 #endif /*__ADRENO_H */
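
The two inline helpers added above publish the fault type through an atomic with explicit barriers so that the interrupt path, the fault timer and the dispatcher worker all agree on the last fault seen. Outside the kernel the same idea can be expressed with C11 atomics; a hypothetical standalone sketch:

	#include <stdatomic.h>
	#include <stdio.h>

	#define SOFT_FAULT	1
	#define HARD_FAULT	2
	#define TIMEOUT_FAULT	3

	static atomic_int gpu_fault;	/* zero means no fault */

	/* Producer side (e.g. an interrupt handler): publish the fault type */
	static void set_gpu_fault(int state)
	{
		atomic_store_explicit(&gpu_fault, state, memory_order_release);
	}

	/* Consumer side (e.g. a worker): observe the last published fault */
	static int get_gpu_fault(void)
	{
		return atomic_load_explicit(&gpu_fault, memory_order_acquire);
	}

	int main(void)
	{
		set_gpu_fault(HARD_FAULT);
		printf("fault=%d\n", get_gpu_fault());	/* prints 2 */
		return 0;
	}
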
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 8b75c4e..c4f81fa 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2596,7 +2596,7 @@
 
 		/* Clear the error */
 		kgsl_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));
-		return;
+		goto done;
 	}
 	case A3XX_INT_RBBM_REG_TIMEOUT:
 		err = "RBBM: AHB register timeout";
@@ -2637,10 +2637,15 @@
 	case A3XX_INT_UCHE_OOB_ACCESS:
 		err = "UCHE:  Out of bounds access";
 		break;
+	default:
+		return;
 	}
-
 	KGSL_DRV_CRIT(device, "%s\n", err);
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+done:
+	/* Trigger a fault in the dispatcher - this will effect a restart */
+	adreno_dispatcher_irq_fault(device);
 }
 
 static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
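
The a3xx interrupt hunk above routes every decoded error, including the AHB error that used to return early, to one common fault-reporting tail. A minimal standalone sketch of that shape, with made-up interrupt causes and plain printing instead of the driver's logging and dispatcher call:

	#include <stdio.h>

	enum irq_cause { IRQ_AHB_ERROR = 1, IRQ_REG_TIMEOUT, IRQ_SPURIOUS };

	static void handle_error_irq(enum irq_cause cause)
	{
		const char *err;

		switch (cause) {
		case IRQ_AHB_ERROR:
			/* Error already cleared at the source, just report it */
			goto done;
		case IRQ_REG_TIMEOUT:
			err = "register timeout";
			break;
		default:
			return;		/* unknown cause, nothing to report */
		}

		fprintf(stderr, "GPU error: %s\n", err);
	done:
		/* Common tail: trigger a fault so the dispatcher can recover */
		fprintf(stderr, "fault raised for the dispatcher\n");
	}

	int main(void)
	{
		handle_error_irq(IRQ_REG_TIMEOUT);
		handle_error_irq(IRQ_AHB_ERROR);
		return 0;
	}
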
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index e429934..13b4ba3 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/jiffies.h>
+#include <linux/err.h>
 
 #include "kgsl.h"
 #include "adreno.h"
@@ -41,6 +42,71 @@
 /* Command batch timeout (in milliseconds) */
 static unsigned int _cmdbatch_timeout = 2000;
 
+/* Interval for reading and comparing fault detection registers */
+static unsigned int _fault_timer_interval = 50;
+
+/* Local array for the current set of fault detect registers */
+static unsigned int fault_detect_regs[FT_DETECT_REGS_COUNT];
+
+/* The last retired global timestamp read during fault detect */
+static unsigned int fault_detect_ts;
+
+/**
+ * fault_detect_read() - Read the set of fault detect registers
+ * @device: Pointer to the KGSL device struct
+ *
+ * Read the set of fault detect registers and store them in the local array.
+ * This is for the initial values that are compared later with
+ * fault_detect_read_compare
+ */
+static void fault_detect_read(struct kgsl_device *device)
+{
+	int i;
+
+	fault_detect_ts = kgsl_readtimestamp(device, NULL,
+		KGSL_TIMESTAMP_RETIRED);
+
+	for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
+		if (ft_detect_regs[i] == 0)
+			continue;
+		kgsl_regread(device, ft_detect_regs[i],
+			&fault_detect_regs[i]);
+	}
+}
+
+/**
+ * fault_detect_read_compare() - Read the fault detect registers and compare
+ * them to the current value
+ * @device: Pointer to the KGSL device struct
+ *
+ * Read the set of fault detect registers and compare them to the current set
+ * of registers.  Return 1 if any of the register values changed
+ */
+static int fault_detect_read_compare(struct kgsl_device *device)
+{
+	int i, ret = 0;
+	unsigned int ts;
+
+	for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
+		unsigned int val;
+
+		if (ft_detect_regs[i] == 0)
+			continue;
+		kgsl_regread(device, ft_detect_regs[i], &val);
+		if (val != fault_detect_regs[i])
+			ret = 1;
+		fault_detect_regs[i] = val;
+	}
+
+	ts = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+	if (ts != fault_detect_ts)
+		ret = 1;
+
+	fault_detect_ts = ts;
+
+	return ret;
+}
+
 /**
  * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
  * @drawctxt: Pointer to the adreno draw context
@@ -55,12 +121,23 @@
 	mutex_lock(&drawctxt->mutex);
 	if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
 		cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+		/*
+		 * Don't dequeue a cmdbatch that is still waiting for other
+		 * events
+		 */
+		if (kgsl_cmdbatch_sync_pending(cmdbatch)) {
+			cmdbatch = ERR_PTR(-EAGAIN);
+			goto done;
+		}
+
 		drawctxt->cmdqueue_head =
 			CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
 			ADRENO_CONTEXT_CMDQUEUE_SIZE);
 		drawctxt->queued--;
 	}
 
+done:
 	mutex_unlock(&drawctxt->mutex);
 
 	return cmdbatch;
@@ -162,9 +239,17 @@
 
 	ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch);
 
-	/* Turn the GPU back off on failure.  Sad face. */
-	if (ret && dispatcher->inflight == 1)
-		kgsl_active_count_put(device);
+	/*
+	 * On the first command, if the submission was successful, then read the
+	 * fault registers.  If it failed then turn off the GPU. Sad face.
+	 */
+
+	if (dispatcher->inflight == 1) {
+		if (ret == 0)
+			fault_detect_read(device);
+		else
+			kgsl_active_count_put(device);
+	}
 
 	mutex_unlock(&device->mutex);
 
@@ -191,6 +276,12 @@
 		cmdbatch->expires = jiffies +
 			msecs_to_jiffies(_cmdbatch_timeout);
 		mod_timer(&dispatcher->timer, cmdbatch->expires);
+
+		/* Start the fault detection timer */
+		if (adreno_dev->fast_hang_detect)
+			mod_timer(&dispatcher->fault_timer,
+				jiffies +
+				msecs_to_jiffies(_fault_timer_interval));
 	}
 
 	return 0;
@@ -208,19 +299,51 @@
 {
 	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
 	int count = 0;
+	int requeued = 0;
 
 	/*
 	 * Each context can send a specific number of command batches per cycle
 	 */
-	for ( ; count < _context_cmdbatch_burst &&
-		dispatcher->inflight < _dispatcher_inflight; count++) {
+	while ((count < _context_cmdbatch_burst) &&
+		(dispatcher->inflight < _dispatcher_inflight)) {
 		int ret;
-		struct kgsl_cmdbatch *cmdbatch =
-			adreno_dispatcher_get_cmdbatch(drawctxt);
+		struct kgsl_cmdbatch *cmdbatch;
+
+		if (dispatcher->state != ADRENO_DISPATCHER_ACTIVE)
+			break;
+
+		if (adreno_gpu_fault(adreno_dev) != 0)
+			break;
+
+		cmdbatch = adreno_dispatcher_get_cmdbatch(drawctxt);
 
 		if (cmdbatch == NULL)
 			break;
 
+		/*
+		 * adreno_dispatcher_get_cmdbatch returns -EAGAIN if the current
+		 * cmdbatch has pending sync points, so there is nothing more to
+		 * do here.  When the sync points are satisfied the context will
+		 * get requeued
+		 */
+
+		if (IS_ERR(cmdbatch) && PTR_ERR(cmdbatch) == -EAGAIN) {
+			requeued = 1;
+			break;
+		}
+
+		/*
+		 * If this is a synchronization submission then there are no
+		 * commands to submit.  Discard it and get the next item from
+		 * the queue.  Since count is only incremented on a successful
+		 * send, this packet doesn't count against the context's burst
+		 */
+
+		if (cmdbatch->flags & KGSL_CONTEXT_SYNC) {
+			kgsl_cmdbatch_destroy(cmdbatch);
+			continue;
+		}
+
 		ret = sendcmd(adreno_dev, cmdbatch);
 
 		/*
@@ -232,8 +355,10 @@
 		 */
 		if (ret) {
 			adreno_dispatcher_requeue_cmdbatch(drawctxt, cmdbatch);
+			requeued = 1;
 			break;
 		}
+		count++;
 	}
 
 	/*
@@ -243,7 +368,7 @@
 	 * reasonable to think that a busy context will stay busy.
 	 */
 
-	if (count) {
+	if (count || requeued) {
 		dispatcher_queue_context(adreno_dev, drawctxt);
 
 		/*
@@ -255,7 +380,17 @@
 		wake_up_interruptible_all(&drawctxt->wq);
 	}
 
-	return count;
+	/* Return the number of command batches processed */
+	if (count > 0)
+		return count;
+
+	/*
+	 * If we didn't process anything because of a stall or an error
+	 * return -1 so the issuecmds loop knows that we shouldn't
+	 * keep trying to process it
+	 */
+
+	return requeued ? -1 : 0;
 }
 
 /**
@@ -268,14 +403,18 @@
 static int _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
 {
 	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
-	/* Don't do anything if the dispatcher is paused */
-	if (dispatcher->state != ADRENO_DISPATCHER_ACTIVE)
-		return 0;
+	int last_id = 0;
 
 	while (dispatcher->inflight < _dispatcher_inflight) {
 		struct adreno_context *drawctxt = NULL;
 
+		/* Don't do anything if the dispatcher is paused */
+		if (dispatcher->state != ADRENO_DISPATCHER_ACTIVE)
+			break;
+
+		if (adreno_gpu_fault(adreno_dev) != 0)
+			break;
+
 		spin_lock(&dispatcher->plist_lock);
 
 		if (!plist_head_empty(&dispatcher->pending)) {
@@ -296,6 +435,21 @@
 			continue;
 		}
 
+		/*
+		 * Don't loop endlessly on the same stalled context - if we see
+		 * the same context twice in a row then assume that there is
+		 * nothing else to do and bail.  But don't forget to put the
+		 * stalled context back on the queue otherwise it will get
+		 * forgotten
+		 */
+
+		if (last_id == drawctxt->base.id) {
+			dispatcher_queue_context(adreno_dev, drawctxt);
+			kgsl_context_put(&drawctxt->base);
+			break;
+		}
+
+		last_id = drawctxt->base.id;
 		dispatcher_context_sendcmds(adreno_dev, drawctxt);
 		kgsl_context_put(&drawctxt->base);
 	}
@@ -343,131 +497,41 @@
 }
 
 /**
- * adreno_dispatcher_replay() - Replay commands from the dispatcher queue
- * @adreno_dev: Pointer to the adreno device struct
+ * get_timestamp() - Return the next timestamp for the context
+ * @drawctxt: Pointer to an adreno draw context struct
+ * @cmdbatch: Pointer to a command batch
+ * @timestamp: Pointer to a timestamp value possibly passed from the user
  *
- * Replay the commands from the dispatcher inflight queue.  This is called after
- * a power down/up to recover from a fault
+ * Assign a timestamp based on the settings of the draw context and the command
+ * batch.
  */
-int adreno_dispatcher_replay(struct adreno_device *adreno_dev)
+static int get_timestamp(struct adreno_context *drawctxt,
+		struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp)
 {
-	struct kgsl_device *device = &adreno_dev->dev;
-	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-	struct kgsl_cmdbatch **replay;
-	int i, ptr, count = 0;
-
-	BUG_ON(!mutex_is_locked(&dispatcher->mutex));
-
-	replay = kzalloc(sizeof(*replay) * dispatcher->inflight, GFP_KERNEL);
-
-	/*
-	 * If we can't allocate enough memory for the replay commands then we
-	 * are in a bad way.  Invalidate everything, reset the GPU and see ya
-	 * later alligator
-	 */
-
-	if (replay == NULL) {
-
-		ptr = dispatcher->head;
-
-		while (ptr != dispatcher->tail) {
-			struct kgsl_context *context =
-				dispatcher->cmdqueue[ptr]->context;
-
-			adreno_drawctxt_invalidate(device, context);
-			ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
-		}
-
-		/* Reset the dispatcher queue */
-		dispatcher->inflight = 0;
-		dispatcher->head = dispatcher->tail = 0;
-
-		/* Reset the hardware */
-		mutex_lock(&device->mutex);
-
-		/*
-		 * If adreno_reset fails then the GPU is not alive and there
-		 * isn't anything we can do to recover at this point
-		 */
-
-		BUG_ON(adreno_reset(device));
-		mutex_unlock(&device->mutex);
-
+	/* Synchronization commands don't get a timestamp */
+	if (cmdbatch->flags & KGSL_CONTEXT_SYNC) {
+		*timestamp = 0;
 		return 0;
 	}
 
-	ptr = dispatcher->head;
-
-	while (ptr != dispatcher->tail) {
-		struct kgsl_cmdbatch *cmdbatch = dispatcher->cmdqueue[ptr];
-		struct adreno_context *drawctxt =
-			ADRENO_CONTEXT(cmdbatch->context);
-
-		if (cmdbatch->invalid)
-			adreno_drawctxt_invalidate(device, cmdbatch->context);
-
-		if (!kgsl_context_detached(cmdbatch->context) &&
-			drawctxt->state == ADRENO_CONTEXT_STATE_ACTIVE) {
-			/*
-			 * The context for the command batch is still valid -
-			 * add it to the replay list
-			 */
-			replay[count++] = dispatcher->cmdqueue[ptr];
-		} else {
-			/*
-			 * Skip over invaliated or detached contexts - cancel
-			 * any pending events for the timestamp and destroy the
-			 * command batch
-			 */
-			mutex_lock(&device->mutex);
-			kgsl_cancel_events_timestamp(device, cmdbatch->context,
-				cmdbatch->timestamp);
-			mutex_unlock(&device->mutex);
-
-			kgsl_cmdbatch_destroy(cmdbatch);
-		}
-
-		ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
-	}
-
-	/* Reset the dispatcher queue */
-	dispatcher->inflight = 0;
-	dispatcher->head = dispatcher->tail = 0;
-
-	mutex_lock(&device->mutex);
-	BUG_ON(adreno_reset(device));
-	mutex_unlock(&device->mutex);
-
-	/* Replay the pending command buffers */
-	for (i = 0; i < count; i++) {
-		int ret = sendcmd(adreno_dev, replay[i]);
-
+	if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
 		/*
-		 * I'm afraid that if we get an error during replay we
-		 * are not going to space today
+		 * User specified timestamps need to be greater than the last
+		 * issued timestamp in the context
 		 */
+		if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0)
+			return -ERANGE;
 
-		BUG_ON(ret);
-	}
+		drawctxt->timestamp = *timestamp;
+	} else
+		drawctxt->timestamp++;
 
-	/*
-	 * active_count will be set when we come into this function because
-	 * there were inflight commands.  By virtue of setting ->inflight back
-	 * to 0 sendcmd() will increase the active count again on the first
-	 * submission.  This active_count_put is needed to put the universe back
-	 * in balance and as a bonus it ensures that the hardware stays up for
-	 * the entire reset process
-	 */
-	mutex_lock(&device->mutex);
-	kgsl_active_count_put(device);
-	mutex_unlock(&device->mutex);
-
-	kfree(replay);
+	*timestamp = drawctxt->timestamp;
 	return 0;
 }
 
 /**
- * adreno_dispatcher_queue_cmd() - Queue a new command in the context
+ * adreno_dispatcher_queue_cmd() - Queue a new command in the context
  * @adreno_dev: Pointer to the adreno device struct
  * @drawctxt: Pointer to the adreno draw context
  * @cmdbatch: Pointer to the command batch being submitted
@@ -489,6 +553,42 @@
 		return -EINVAL;
 	}
 
+	/*
+	 * After skipping to the end of the frame we need to force the preamble
+	 * to run (if it exists) regardless of the context state.
+	 */
+
+	if (drawctxt->flags & CTXT_FLAGS_FORCE_PREAMBLE) {
+		set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+		drawctxt->flags &= ~CTXT_FLAGS_FORCE_PREAMBLE;
+	}
+
+	/*
+	 * If we are waiting for the end of frame and it hasn't appeared yet,
+	 * then mark the command batch as skipped.  It will still progress
+	 * through the pipeline but it won't actually send any commands
+	 */
+
+	if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
+		set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+
+		/*
+		 * If this command batch represents the EOF then clear the way
+		 * for the dispatcher to continue submitting
+		 */
+
+		if (cmdbatch->flags & KGSL_CONTEXT_END_OF_FRAME) {
+			drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
+
+			/*
+			 * Force the preamble on the next command to ensure that
+			 * the state is correct
+			 */
+
+			drawctxt->flags |= CTXT_FLAGS_FORCE_PREAMBLE;
+		}
+	}
+
 	/* Wait for room in the context queue */
 
 	while (drawctxt->queued >= _context_cmdqueue_size) {
@@ -518,23 +618,23 @@
 		}
 	}
 
+	ret = get_timestamp(drawctxt, cmdbatch, timestamp);
+	if (ret) {
+		mutex_unlock(&drawctxt->mutex);
+		return ret;
+	}
+
+	cmdbatch->timestamp = *timestamp;
+
 	/*
-	 * If the UMD specified a timestamp then use that under the condition
-	 * that it is greater then the last queued timestamp in the context.
+	 * Set the fault tolerance policy for the command batch - assuming the
+	 * context hasn't disabled FT, use the current device policy
 	 */
 
-	if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
-		if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0) {
-			mutex_unlock(&drawctxt->mutex);
-			return -ERANGE;
-		}
-
-		drawctxt->timestamp = *timestamp;
-	} else
-		drawctxt->timestamp++;
-
-	cmdbatch->timestamp = drawctxt->timestamp;
-	*timestamp = drawctxt->timestamp;
+	if (drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
+		set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy);
+	else
+		cmdbatch->fault_policy = adreno_dev->ft_policy;
 
 	/* Put the command into the queue */
 	drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
@@ -565,58 +665,442 @@
 	return 0;
 }
 
-/**
- * dispatcher_do_fault() - Handle a GPU fault and reset the GPU
- * @device: Pointer to the KGSL device
- * @cmdbatch: Pointer to the command batch believed to be responsible for the
- * fault
- * @invalidate: Non zero if the current command should be invalidated
- *
- * Trigger a fault in the dispatcher and start the replay process
+/*
+ * If an IB inside the command batch has a gpuaddr that matches the base
+ * passed in, then zero its size, which effectively skips it when it is
+ * submitted in the ringbuffer.
  */
-static void dispatcher_do_fault(struct kgsl_device *device,
-		struct kgsl_cmdbatch *cmdbatch, int invalidate)
+static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, unsigned int base)
+{
+	int i;
+
+	for (i = 0; i < cmdbatch->ibcount; i++) {
+		if (cmdbatch->ibdesc[i].gpuaddr == base) {
+			cmdbatch->ibdesc[i].sizedwords = 0;
+			return;
+		}
+	}
+}
+
+static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
+	struct kgsl_cmdbatch **replay, int count)
+{
+	struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+	int skip = 1;
+	int i;
+
+	for (i = 0; i < count; i++) {
+
+		/*
+		 * Only operate on command batches that belong to the
+		 * faulting context
+		 */
+
+		if (replay[i]->context->id != cmdbatch->context->id)
+			continue;
+
+		/*
+		 * Skip all the command batches in this context until
+		 * the EOF flag is seen.  If the EOF flag is seen then
+		 * force the preamble for the next command.
+		 */
+
+		if (skip) {
+			set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv);
+
+			if (replay[i]->flags & KGSL_CONTEXT_END_OF_FRAME)
+				skip = 0;
+		} else {
+			set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+			return;
+		}
+	}
+
+	/*
+	 * If the EOF flag hasn't been seen yet then set the flag in the
+	 * drawctxt to keep looking for it
+	 */
+
+	if (skip && drawctxt)
+		drawctxt->flags |= CTXT_FLAGS_SKIP_EOF;
+
+	/*
+	 * If we did see the EOF flag then force the preamble on for the
+	 * next command issued on this context
+	 */
+
+	if (!skip && drawctxt)
+		drawctxt->flags |= CTXT_FLAGS_FORCE_PREAMBLE;
+}
+
+static void remove_invalidated_cmdbatches(struct kgsl_device *device,
+		struct kgsl_cmdbatch **replay, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		struct kgsl_cmdbatch *cmd = replay[i];
+		struct adreno_context *drawctxt;
+
+		if (cmd == NULL)
+			continue;
+
+		drawctxt = ADRENO_CONTEXT(cmd->context);
+
+		if (kgsl_context_detached(cmd->context) ||
+			drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+			replay[i] = NULL;
+
+			mutex_lock(&device->mutex);
+			kgsl_cancel_events_timestamp(device, cmd->context,
+				cmd->timestamp);
+			mutex_unlock(&device->mutex);
+
+			kgsl_cmdbatch_destroy(cmd);
+		}
+	}
+}
+
+static char _pidname[TASK_COMM_LEN];
+
+static inline const char *_kgsl_context_comm(struct kgsl_context *context)
+{
+	struct task_struct *task = NULL;
+
+	if (context)
+		task = find_task_by_vpid(context->pid);
+
+	if (task)
+		get_task_comm(_pidname, task);
+	else
+		snprintf(_pidname, TASK_COMM_LEN, "unknown");
+
+	return _pidname;
+}
+
+#define pr_fault(_d, _c, fmt, args...) \
+		dev_err((_d)->dev, "%s[%d]: " fmt, \
+		_kgsl_context_comm((_c)->context), \
+		(_c)->context->pid, ##args)
+
+
+static void adreno_fault_header(struct kgsl_device *device,
+	struct kgsl_cmdbatch *cmdbatch)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int status, base, rptr, wptr, ib1base, ib2base, ib1sz, ib2sz;
+
+	kgsl_regread(device,
+			adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS),
+			&status);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_BASE),
+		&base);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_RPTR),
+		&rptr);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_WPTR),
+		&wptr);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BASE),
+		&ib1base);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ),
+		&ib1sz);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BASE),
+		&ib2base);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ),
+		&ib2sz);
+
+	trace_adreno_gpu_fault(cmdbatch->context->id, cmdbatch->timestamp,
+		status, rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
+
+	pr_fault(device, cmdbatch,
+		"gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %8.8x/%4.4x ib2 %8.8x/%4.4x\n",
+		cmdbatch->context->id, cmdbatch->timestamp, status,
+		rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
+}
+
+static int dispatcher_do_fault(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-	unsigned int reg;
+	unsigned int ptr;
+	unsigned int reg, base;
+	struct kgsl_cmdbatch **replay = NULL;
+	struct kgsl_cmdbatch *cmdbatch;
+	int ret, i, count = 0;
+	int fault, first = 0;
+	bool pagefault = false;
+	BUG_ON(dispatcher->inflight == 0);
 
-	/* Stop the timers */
+	fault = atomic_xchg(&dispatcher->fault, 0);
+	if (fault == 0)
+		return 0;
+
+	/* Turn off all the timers */
 	del_timer_sync(&dispatcher->timer);
+	del_timer_sync(&dispatcher->fault_timer);
 
 	mutex_lock(&device->mutex);
 
-	/*
-	 * There is an interesting race condition here - when a command batch
-	 * expires and we invaliate before we recover we run the risk of having
-	 * the UMD clean up the context and free memory that the GPU is still
-	 * using.  Not that it is dangerous because we are a few microseconds
-	 * away from resetting, but it still ends up in pagefaults and log
-	 * messages and so on and so forth. To avoid this we mark the command
-	 * batch itself as invalid and then reset - the context will get
-	 * invalidated in the replay.
-	 */
+	cmdbatch = dispatcher->cmdqueue[dispatcher->head];
 
-	if (invalidate)
-		cmdbatch->invalid = 1;
+	trace_adreno_cmdbatch_fault(cmdbatch, fault);
 
 	/*
-	 * Stop the CP in its tracks - this ensures that we don't get activity
-	 * while we are trying to dump the state of the system
+	 * If the fault was due to a timeout then stop the CP to ensure we don't
+	 * get activity while we are trying to dump the state of the system
 	 */
 
+	if (fault == ADRENO_TIMEOUT_FAULT) {
+		adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, &reg);
+		reg |= (1 << 27) | (1 << 28);
+		adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
 
-	adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, &reg);
-	reg |= (1 << 27) | (1 << 28);
-	adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
+		/* Skip the PM dump for a timeout because it confuses people */
+		set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
+	}
 
-	kgsl_postmortem_dump(device, 0);
-	kgsl_device_snapshot(device, 1);
+	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &base);
+
+	/*
+	 * Dump the postmortem and snapshot information if this is the first
+	 * detected fault for the oldest active command batch
+	 */
+
+	if (!test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy)) {
+		adreno_fault_header(device, cmdbatch);
+
+		if (device->pm_dump_enable)
+			kgsl_postmortem_dump(device, 0);
+
+		kgsl_device_snapshot(device, 1);
+	}
+
 	mutex_unlock(&device->mutex);
 
-	/* If we can't replay then bravely run away and die */
-	if (adreno_dispatcher_replay(adreno_dev))
-		BUG();
+	/* Allocate memory to store the inflight commands */
+	replay = kzalloc(sizeof(*replay) * dispatcher->inflight, GFP_KERNEL);
+
+	if (replay == NULL) {
+		unsigned int ptr = dispatcher->head;
+
+		while (ptr != dispatcher->tail) {
+			struct kgsl_context *context =
+				dispatcher->cmdqueue[ptr]->context;
+
+			adreno_drawctxt_invalidate(device, context);
+			kgsl_cmdbatch_destroy(dispatcher->cmdqueue[ptr]);
+
+			ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+		}
+
+		/*
+		 * Set the replay count to zero - this will ensure that the
+		 * hardware gets reset but nothing else gets replayed
+		 */
+
+		count = 0;
+		goto replay;
+	}
+
+	/* Copy the inflight command batches into the temporary storage */
+	ptr = dispatcher->head;
+
+	while (ptr != dispatcher->tail) {
+		replay[count++] = dispatcher->cmdqueue[ptr];
+		ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+	}
+
+	/*
+	 * For the purposes of replay, we assume that the oldest command batch
+	 * that hasn't retired a timestamp is "hung".
+	 */
+
+	cmdbatch = replay[0];
+
+	/*
+	 * If FT is disabled for this cmdbatch invalidate immediately
+	 */
+
+	if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) ||
+		test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) {
+		pr_fault(device, cmdbatch, "gpu skipped ctx %d ts %d\n",
+			cmdbatch->context->id, cmdbatch->timestamp);
+
+		adreno_drawctxt_invalidate(device, cmdbatch->context);
+	}
+
+	/*
+	 * Set a flag so we don't print another PM dump if the cmdbatch fails
+	 * again on replay
+	 */
+
+	set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
+
+	/*
+	 * A hardware fault generally means something was deterministically
+	 * wrong with the command batch - no point in trying to replay it.
+	 * Clear the replay bit and move on to the next policy level
+	 */
+
+	if (fault == ADRENO_HARD_FAULT)
+		clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy));
+
+	/*
+	 * A timeout fault means the IB timed out - clear the policy and
+	 * invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay
+	 * because we won't see this cmdbatch again
+	 */
+
+	if (fault == ADRENO_TIMEOUT_FAULT)
+		bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+
+	/*
+	 * If the context had a GPU page fault then it is likely it would fault
+	 * again if replayed
+	 */
+
+	if (test_bit(KGSL_CONTEXT_PAGEFAULT, &cmdbatch->context->priv)) {
+		/* we'll need to resume the mmu later... */
+		pagefault = true;
+		clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy);
+		clear_bit(KGSL_CONTEXT_PAGEFAULT, &cmdbatch->context->priv);
+	}
+
+	/*
+	 * Execute the fault tolerance policy. Each command batch stores the
+	 * current fault policy that was set when it was queued.
+	 * As the options are tried in descending priority
+	 * (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared
+	 * from the cmdbatch policy so the next option can be tried if the
+	 * chance comes around again
+	 */
+
+	/* Replay the hanging command batch again */
+	if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) {
+		trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY));
+		set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery);
+		goto replay;
+	}
+
+	/*
+	 * Skip the last IB1 that was played but replay everything else.
+	 * Note that the last IB1 might not be in the "hung" command batch
+	 * because the CP may have caused a page-fault while it was prefetching
+	 * the next IB1/IB2.  Walk all outstanding commands and zap the
+	 * supposedly bad IB1 wherever it lurks.
+	 */
+
+	if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) {
+		trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB));
+		set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery);
+
+		for (i = 0; i < count; i++) {
+			if (replay[i] != NULL)
+				cmdbatch_skip_ib(replay[i], base);
+		}
+
+		goto replay;
+	}
+
+	if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) {
+		trace_adreno_cmdbatch_recovery(cmdbatch,
+			BIT(KGSL_FT_SKIPFRAME));
+		set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery);
+
+		/*
+		 * Skip all the pending command batches for this context until
+		 * the EOF frame is seen
+		 */
+		cmdbatch_skip_frame(cmdbatch, replay, count);
+		goto replay;
+	}
+
+	/* If we get here then all the policies failed */
+
+	pr_fault(device, cmdbatch, "gpu failed ctx %d ts %d\n",
+		cmdbatch->context->id, cmdbatch->timestamp);
+
+	/* Invalidate the context */
+	adreno_drawctxt_invalidate(device, cmdbatch->context);
+
+
+replay:
+	/* Reset the dispatcher queue */
+	dispatcher->inflight = 0;
+	dispatcher->head = dispatcher->tail = 0;
+
+	/* Reset the GPU */
+	mutex_lock(&device->mutex);
+
+	/* resume the MMU if it is stalled */
+	if (pagefault && device->mmu.mmu_ops->mmu_pagefault_resume != NULL)
+		device->mmu.mmu_ops->mmu_pagefault_resume(&device->mmu);
+
+	ret = adreno_reset(device);
+	mutex_unlock(&device->mutex);
+
+	/* If adreno_reset() fails then what hope do we have for the future? */
+	BUG_ON(ret);
+
+	/* Remove any pending command batches that have been invalidated */
+	remove_invalidated_cmdbatches(device, replay, count);
+
+	/* Replay the pending command buffers */
+	for (i = 0; i < count; i++) {
+
+		int ret;
+
+		if (replay[i] == NULL)
+			continue;
+
+		/*
+		 * Force the preamble on the first command (if applicable) to
+		 * avoid any strange stage issues
+		 */
+
+		if (first == 0) {
+			set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+			first = 1;
+		}
+
+		/*
+		 * Force each command batch to wait for idle - this avoids weird
+		 * CP parse issues
+		 */
+
+		set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv);
+
+		ret = sendcmd(adreno_dev, replay[i]);
+
+		/*
+		 * If sending the command fails, then try to recover by
+		 * invalidating the context
+		 */
+
+		if (ret) {
+			pr_fault(device, replay[i],
+				"gpu reset failed ctx %d ts %d\n",
+				replay[i]->context->id, replay[i]->timestamp);
+
+			adreno_drawctxt_invalidate(device, replay[i]->context);
+			remove_invalidated_cmdbatches(device, &replay[i],
+				count - i);
+		}
+	}
+
+	mutex_lock(&device->mutex);
+	kgsl_active_count_put(device);
+	mutex_unlock(&device->mutex);
+
+	kfree(replay);
+
+	return 1;
 }
 
 static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
@@ -626,6 +1110,30 @@
 		(timestamp_cmp(retired, cmdbatch->timestamp) < 0));
 }
 
+static void _print_recovery(struct kgsl_device *device,
+		struct kgsl_cmdbatch *cmdbatch)
+{
+	static struct {
+		unsigned int mask;
+		const char *str;
+	} flags[] = { ADRENO_FT_TYPES };
+
+	int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG);
+	char *result = "unknown";
+
+	for (i = 0; i < ARRAY_SIZE(flags); i++) {
+		if (flags[i].mask == BIT(nr)) {
+			result = (char *) flags[i].str;
+			break;
+		}
+	}
+
+	pr_fault(device, cmdbatch,
+		"gpu %s ctx %d ts %d policy %lX\n",
+		result, cmdbatch->context->id, cmdbatch->timestamp,
+		cmdbatch->fault_recovery);
+}
+
 /**
  * adreno_dispatcher_work() - Master work handler for the dispatcher
  * @work: Pointer to the work struct for the current work queue
@@ -639,7 +1147,7 @@
 	struct adreno_device *adreno_dev =
 		container_of(dispatcher, struct adreno_device, dispatcher);
 	struct kgsl_device *device = &adreno_dev->dev;
-	int inv, count = 0;
+	int count = 0;
 
 	mutex_lock(&dispatcher->mutex);
 
@@ -667,6 +1175,14 @@
 		if (kgsl_context_detached(cmdbatch->context) ||
 			(timestamp_cmp(cmdbatch->timestamp, retired) <= 0)) {
 
+			/*
+			 * If the cmdbatch in question had faulted, announce its
+			 * successful completion to the world
+			 */
+
+			if (cmdbatch->fault_recovery != 0)
+				_print_recovery(device, cmdbatch);
+
 			trace_adreno_cmdbatch_retired(cmdbatch,
 				dispatcher->inflight - 1);
 
@@ -693,8 +1209,6 @@
 			}
 
 			count++;
-
-			BUG_ON(dispatcher->inflight == 0 && dispatcher->fault);
 			continue;
 		}
 
@@ -703,17 +1217,23 @@
 		 * is to blame.  Invalidate it, reset and replay
 		 */
 
-		if (dispatcher->fault) {
-			dispatcher_do_fault(device, cmdbatch, 1);
+		if (dispatcher_do_fault(device))
 			goto done;
-		}
 
 		/* Get the last consumed timestamp */
 		consumed = kgsl_readtimestamp(device, cmdbatch->context,
 			KGSL_TIMESTAMP_CONSUMED);
 
-		/* Break here if fault detection is disabled for the context */
-		if (drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
+		/*
+		 * Break here if fault detection is disabled for the context or
+		 * if the long running IB detection is disabled device wide.
+		 * Long running command buffers will be allowed to run to
+		 * completion - but badly behaving command buffers (infinite
+		 * shaders etc) can end up running forever.
+		 */
+
+		if (!adreno_dev->long_ib_detect ||
+			drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
 			break;
 
 		/*
@@ -727,23 +1247,13 @@
 
 		/* Boom goes the dynamite */
 
-		pr_err("-----------------------\n");
+		pr_fault(device, cmdbatch,
+			"gpu timeout ctx %d ts %d\n",
+			cmdbatch->context->id, cmdbatch->timestamp);
 
-		pr_err("dispatcher: expired ctx=%d ts=%d consumed=%d retired=%d\n",
-			cmdbatch->context->id, cmdbatch->timestamp, consumed,
-			retired);
-		pr_err("dispatcher: jiffies=%lu expired=%lu\n", jiffies,
-				cmdbatch->expires);
+		adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
 
-		/*
-		 * If execution stopped after the current command batch was
-		 * consumed then invalidate the context for the current command
-		 * batch
-		 */
-
-		inv = cmdbatch_consumed(cmdbatch, consumed, retired);
-
-		dispatcher_do_fault(device, cmdbatch, inv);
+		dispatcher_do_fault(device);
 		break;
 	}
 
@@ -768,9 +1278,12 @@
 		struct kgsl_cmdbatch *cmdbatch
 			= dispatcher->cmdqueue[dispatcher->head];
 
+		/* Update the timeout timer for the next command batch */
 		mod_timer(&dispatcher->timer, cmdbatch->expires);
-	} else
+	} else {
 		del_timer_sync(&dispatcher->timer);
+		del_timer_sync(&dispatcher->fault_timer);
+	}
 
 	/* Before leaving update the pwrscale information */
 	mutex_lock(&device->mutex);
@@ -788,6 +1301,60 @@
 	queue_work(device->work_queue, &dispatcher->work);
 }
 
+/**
+ * adreno_dispatcher_queue_context() - schedule a drawctxt in the dispatcher
+ * @device: pointer to the KGSL device
+ * @drawctxt: pointer to the drawctxt to schedule
+ *
+ * Put a draw context on the dispatcher pending queue and schedule the
+ * dispatcher. This is used to reschedule contexts that might have been blocked
+ * waiting for sync points or other concerns
+ */
+void adreno_dispatcher_queue_context(struct kgsl_device *device,
+	struct adreno_context *drawctxt)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	dispatcher_queue_context(adreno_dev, drawctxt);
+	adreno_dispatcher_schedule(device);
+}
+
+/*
+ * This is called on a regular basis while command batches are inflight.  Fault
+ * detection registers are read and compared to the existing values - if they
+ * changed then the GPU is still running.  If they are the same between
+ * subsequent calls then the GPU may have faulted
+ */
+
+void adreno_dispatcher_fault_timer(unsigned long data)
+{
+	struct adreno_device *adreno_dev = (struct adreno_device *) data;
+	struct kgsl_device *device = &adreno_dev->dev;
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+	/* Leave if the user decided to turn off fast hang detection */
+	if (adreno_dev->fast_hang_detect == 0)
+		return;
+
+	if (adreno_gpu_fault(adreno_dev)) {
+		adreno_dispatcher_schedule(device);
+		return;
+	}
+
+	/*
+	 * Read and compare the fault registers - if they haven't changed then
+	 * mark the dispatcher as faulted and schedule the work loop.
+	 */
+
+	if (!fault_detect_read_compare(device)) {
+		adreno_set_gpu_fault(adreno_dev, ADRENO_SOFT_FAULT);
+		adreno_dispatcher_schedule(device);
+	} else {
+		mod_timer(&dispatcher->fault_timer,
+			jiffies + msecs_to_jiffies(_fault_timer_interval));
+	}
+}
+
 /*
  * This is called when the timer expires - it either means the GPU is hung or
  * the IB is taking too long to execute
@@ -800,18 +1367,15 @@
 	adreno_dispatcher_schedule(device);
 }
 /**
- * adreno_dispatcher_fault_irq() - Trigger a fault in the dispatcher
+ * adreno_dispatcher_irq_fault() - Trigger a fault in the dispatcher
  * @device: Pointer to the KGSL device
  *
  * Called from an interrupt context this will trigger a fault in the
- * dispatcher
+ * dispatcher for the oldest pending command batch
  */
-void adreno_dispatcher_fault_irq(struct kgsl_device *device)
+void adreno_dispatcher_irq_fault(struct kgsl_device *device)
 {
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
-	dispatcher->fault = 1;
+	adreno_set_gpu_fault(ADRENO_DEVICE(device), ADRENO_HARD_FAULT);
 	adreno_dispatcher_schedule(device);
 }
 
@@ -863,6 +1427,8 @@
 	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
 
 	del_timer_sync(&dispatcher->timer);
+	del_timer_sync(&dispatcher->fault_timer);
+
 	dispatcher->state = ADRENO_DISPATCHER_PAUSE;
 }
 
@@ -878,6 +1444,7 @@
 
 	mutex_lock(&dispatcher->mutex);
 	del_timer_sync(&dispatcher->timer);
+	del_timer_sync(&dispatcher->fault_timer);
 
 	while (dispatcher->head != dispatcher->tail) {
 		kgsl_cmdbatch_destroy(dispatcher->cmdqueue[dispatcher->head]);
@@ -953,6 +1520,8 @@
 	_context_cmdbatch_burst);
 static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0, _cmdbatch_timeout);
 static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
+static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0,
+	_fault_timer_interval);
 
 static struct attribute *dispatcher_attrs[] = {
 	&dispatcher_attr_inflight.attr,
@@ -960,6 +1529,7 @@
 	&dispatcher_attr_context_burst_count.attr,
 	&dispatcher_attr_cmdbatch_timeout.attr,
 	&dispatcher_attr_context_queue_wait.attr,
+	&dispatcher_attr_fault_detect_interval.attr,
 	NULL,
 };
 
@@ -1024,6 +1594,9 @@
 	setup_timer(&dispatcher->timer, adreno_dispatcher_timer,
 		(unsigned long) adreno_dev);
 
+	setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
+		(unsigned long) adreno_dev);
+
 	INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
 
 	plist_head_init(&dispatcher->pending);
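
dispatcher_do_fault() above consumes the per-cmdbatch policy bits in descending priority (replay, then skip-IB, then skip-frame, then context invalidation), clearing each bit as it is tried so a repeated fault falls through to the next option. A standalone sketch of that selection logic, reusing the patch's bit ordering but hypothetical names:

	#include <stdio.h>

	#define FT_REPLAY	(1u << 1)
	#define FT_SKIPIB	(1u << 2)
	#define FT_SKIPFRAME	(1u << 3)

	/*
	 * Pick the highest-priority recovery action still allowed by the
	 * policy, clear it so it will not be retried, and name it.
	 */
	static const char *next_recovery(unsigned int *policy)
	{
		if (*policy & FT_REPLAY) {
			*policy &= ~FT_REPLAY;
			return "replay";
		}
		if (*policy & FT_SKIPIB) {
			*policy &= ~FT_SKIPIB;
			return "skip IB";
		}
		if (*policy & FT_SKIPFRAME) {
			*policy &= ~FT_SKIPFRAME;
			return "skip frame";
		}
		return "invalidate context";	/* every option exhausted */
	}

	int main(void)
	{
		unsigned int policy = FT_REPLAY | FT_SKIPIB;
		int i;

		for (i = 0; i < 3; i++)
			printf("fault %d -> %s\n", i, next_recovery(&policy));
		return 0;
	}
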
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 1a4310e..907d41f 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -461,6 +461,21 @@
 }
 
 /**
+ * adreno_drawctxt_sched() - Schedule a previously blocked context
+ * @device: pointer to a KGSL device
+ * @context: the kgsl context to reschedule
+ *
+ * This function is called by the core when it knows that a previously blocked
+ * context has been unblocked.  The default adreno response is to reschedule the
+ * context on the dispatcher
+ */
+void adreno_drawctxt_sched(struct kgsl_device *device,
+		struct kgsl_context *context)
+{
+	adreno_dispatcher_queue_context(device, ADRENO_CONTEXT(context));
+}
+
+/**
  * adreno_drawctxt_detach(): detach a context from the GPU
  * @context: Generic KGSL context container for the context
  *
@@ -596,10 +611,8 @@
 		return ret;
 	}
 
-	KGSL_CTXT_INFO(device, "from %d to %d flags %d\n",
-		adreno_dev->drawctxt_active ?
-		adreno_dev->drawctxt_active->base.id : 0,
-		drawctxt ? drawctxt->base.id : 0, flags);
+	trace_adreno_drawctxt_switch(adreno_dev->drawctxt_active,
+		drawctxt, flags);
 
 	/* Save the old context */
 	ret = adreno_dev->gpudev->ctxt_save(adreno_dev,
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index f8469e2..5c12676 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -54,6 +54,8 @@
 #define CTXT_FLAGS_SKIP_EOF             BIT(15)
 /* Context no fault tolerance */
 #define CTXT_FLAGS_NO_FAULT_TOLERANCE  BIT(16)
+/* Force the preamble for the next submission */
+#define CTXT_FLAGS_FORCE_PREAMBLE      BIT(17)
 
 /* Symbolic table for the adreno draw context type */
 #define ADRENO_DRAWCTXT_TYPES \
@@ -199,6 +201,9 @@
 
 void adreno_drawctxt_destroy(struct kgsl_context *context);
 
+void adreno_drawctxt_sched(struct kgsl_device *device,
+		struct kgsl_context *context);
+
 int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
 				struct adreno_context *drawctxt,
 				unsigned int flags);
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 294ae76..8fb2830 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -396,8 +396,8 @@
 
 int adreno_dump(struct kgsl_device *device, int manual)
 {
-	unsigned int cp_ib1_base, cp_ib1_bufsz;
-	unsigned int cp_ib2_base, cp_ib2_bufsz;
+	unsigned int cp_ib1_base;
+	unsigned int cp_ib2_base;
 	phys_addr_t pt_base, cur_pt_base;
 	unsigned int cp_rb_base, cp_rb_ctrl, rb_count;
 	unsigned int cp_rb_wptr, cp_rb_rptr;
@@ -410,7 +410,6 @@
 	unsigned int ts_processed = 0xdeaddead;
 	struct kgsl_context *context;
 	unsigned int context_id;
-	unsigned int rbbm_status;
 
 	static struct ib_list ib_list;
 
@@ -420,16 +419,10 @@
 
 	mb();
 
-	if (device->pm_dump_enable) {
-		msm_clk_dump_debug_info();
+	msm_clk_dump_debug_info();
 
-		if (adreno_dev->gpudev->postmortem_dump)
-			adreno_dev->gpudev->postmortem_dump(adreno_dev);
-	}
-
-	kgsl_regread(device,
-			adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS),
-			&rbbm_status);
+	if (adreno_dev->gpudev->postmortem_dump)
+		adreno_dev->gpudev->postmortem_dump(adreno_dev);
 
 	pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
 	cur_pt_base = pt_base;
@@ -451,29 +444,8 @@
 		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BASE),
 		&cp_ib1_base);
 	kgsl_regread(device,
-		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ),
-		&cp_ib1_bufsz);
-	kgsl_regread(device,
 		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BASE),
 		&cp_ib2_base);
-	kgsl_regread(device,
-		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ),
-		&cp_ib2_bufsz);
-
-	trace_adreno_gpu_fault(rbbm_status, cp_rb_rptr, cp_rb_wptr,
-			cp_ib1_base, cp_ib1_bufsz, cp_ib2_base, cp_ib2_bufsz);
-
-	/* If postmortem dump is not enabled, dump minimal set and return */
-	if (!device->pm_dump_enable) {
-
-		KGSL_LOG_DUMP(device,
-			"STATUS %08X | IB1:%08X/%08X | IB2: %08X/%08X"
-			" | RPTR: %04X | WPTR: %04X\n",
-			rbbm_status,  cp_ib1_base, cp_ib1_bufsz, cp_ib2_base,
-			cp_ib2_bufsz, cp_rb_rptr, cp_rb_wptr);
-
-		return 0;
-	}
 
 	kgsl_sharedmem_readl(&device->memstore,
 			(unsigned int *) &context_id,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index dc1530a..1ad90fd 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -606,8 +606,8 @@
 	if (adreno_is_a20x(adreno_dev))
 		total_sizedwords += 2; /* CACHE_FLUSH */
 
-	if (flags & KGSL_CMD_FLAGS_EOF)
-		total_sizedwords += 2;
+	if (flags & KGSL_CMD_FLAGS_WFI)
+		total_sizedwords += 2; /* WFI */
 
 	if (profile_ready)
 		total_sizedwords += 6;   /* space for pre_ib and post_ib */
@@ -737,6 +737,12 @@
 		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0);
 	}
 
+	if (flags & KGSL_CMD_FLAGS_WFI) {
+		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+			cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
+		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x00000000);
+	}
+
 	adreno_ringbuffer_submit(rb);
 
 	return 0;
@@ -1024,6 +1030,7 @@
 	struct kgsl_context *context;
 	struct adreno_context *drawctxt;
 	unsigned int start_index = 0;
+	int flags = KGSL_CMD_FLAGS_NONE;
 	int ret;
 
 	context = cmdbatch->context;
@@ -1039,10 +1046,23 @@
 	commands are stored in the first node of the IB chain. We can skip that
 	if a context switch hasn't occured */
 
-	if (drawctxt->flags & CTXT_FLAGS_PREAMBLE &&
-		adreno_dev->drawctxt_active == drawctxt)
+	if ((drawctxt->flags & CTXT_FLAGS_PREAMBLE) &&
+		!test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) &&
+		(adreno_dev->drawctxt_active == drawctxt))
 		start_index = 1;
 
+	/*
+	 * In skip mode don't issue the draw IBs but keep all the other
+	 * accoutrements of a submision (including the interrupt) to keep
+	 * the accounting sane. Set start_index and numibs to 0 to just
+	 * generate the start and end markers and skip everything else
+	 */
+
+	if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) {
+		start_index = 0;
+		numibs = 0;
+	}
+
 	cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
 				GFP_KERNEL);
 	if (!link) {
@@ -1061,7 +1081,17 @@
 		*cmds++ = ibdesc[0].sizedwords;
 	}
 	for (i = start_index; i < numibs; i++) {
-		*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
+
+		/*
+		 * Skip 0 sized IBs - these are presumed to have been removed
+		 * from consideration by the FT policy
+		 */
+
+		if (ibdesc[i].sizedwords == 0)
+			*cmds++ = cp_nop_packet(2);
+		else
+			*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
+
 		*cmds++ = ibdesc[i].gpuaddr;
 		*cmds++ = ibdesc[i].sizedwords;
 	}
@@ -1085,9 +1115,12 @@
 	if (ret)
 		goto done;
 
+	if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv))
+		flags = KGSL_CMD_FLAGS_WFI;
+
 	ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
 					drawctxt,
-					cmdbatch->flags,
+					flags,
 					&link[0], (cmds - link),
 					cmdbatch->timestamp);
 
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index 59aca2e..6079b61 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -29,15 +29,21 @@
 		__field(unsigned int, id)
 		__field(unsigned int, timestamp)
 		__field(unsigned int, queued)
+		__field(unsigned int, flags)
 	),
 	TP_fast_assign(
 		__entry->id = cmdbatch->context->id;
 		__entry->timestamp = cmdbatch->timestamp;
 		__entry->queued = queued;
+		__entry->flags = cmdbatch->flags;
 	),
 	TP_printk(
-		"ctx=%u ts=%u queued=%u",
-			__entry->id, __entry->timestamp, __entry->queued
+		"ctx=%u ts=%u queued=%u flags=%s",
+			__entry->id, __entry->timestamp, __entry->queued,
+			__entry->flags ? __print_flags(__entry->flags, "|",
+				{ KGSL_CONTEXT_SYNC, "SYNC" },
+				{ KGSL_CONTEXT_END_OF_FRAME, "EOF" })
+				: "none"
 	)
 );
 
@@ -61,14 +67,78 @@
 	)
 );
 
-DEFINE_EVENT(adreno_cmdbatch_template, adreno_cmdbatch_retired,
+DEFINE_EVENT(adreno_cmdbatch_template, adreno_cmdbatch_submitted,
 	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
 	TP_ARGS(cmdbatch, inflight)
 );
 
-DEFINE_EVENT(adreno_cmdbatch_template, adreno_cmdbatch_submitted,
+TRACE_EVENT(adreno_cmdbatch_retired,
 	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
-	TP_ARGS(cmdbatch, inflight)
+	TP_ARGS(cmdbatch, inflight),
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(unsigned int, timestamp)
+		__field(unsigned int, inflight)
+		__field(unsigned int, recovery)
+	),
+	TP_fast_assign(
+		__entry->id = cmdbatch->context->id;
+		__entry->timestamp = cmdbatch->timestamp;
+		__entry->inflight = inflight;
+		__entry->recovery = cmdbatch->fault_recovery;
+	),
+	TP_printk(
+		"ctx=%u ts=%u inflight=%u recovery=%s",
+			__entry->id, __entry->timestamp,
+			__entry->inflight,
+			__entry->recovery ?
+				__print_flags(__entry->recovery, "|",
+				ADRENO_FT_TYPES) : "none"
+	)
+);
+
+TRACE_EVENT(adreno_cmdbatch_fault,
+	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int fault),
+	TP_ARGS(cmdbatch, fault),
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(unsigned int, timestamp)
+		__field(unsigned int, fault)
+	),
+	TP_fast_assign(
+		__entry->id = cmdbatch->context->id;
+		__entry->timestamp = cmdbatch->timestamp;
+		__entry->fault = fault;
+	),
+	TP_printk(
+		"ctx=%u ts=%u type=%s",
+			__entry->id, __entry->timestamp,
+			__print_symbolic(__entry->fault,
+				{ 0, "none" },
+				{ ADRENO_SOFT_FAULT, "soft" },
+				{ ADRENO_HARD_FAULT, "hard" },
+				{ ADRENO_TIMEOUT_FAULT, "timeout" })
+	)
+);
+
+TRACE_EVENT(adreno_cmdbatch_recovery,
+	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int action),
+	TP_ARGS(cmdbatch, action),
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(unsigned int, timestamp)
+		__field(unsigned int, action)
+	),
+	TP_fast_assign(
+		__entry->id = cmdbatch->context->id;
+		__entry->timestamp = cmdbatch->timestamp;
+		__entry->action = action;
+	),
+	TP_printk(
+		"ctx=%u ts=%u action=%s",
+			__entry->id, __entry->timestamp,
+			__print_symbolic(__entry->action, ADRENO_FT_TYPES)
+	)
 );
 
 DECLARE_EVENT_CLASS(adreno_drawctxt_template,
@@ -139,12 +209,36 @@
 	)
 );
 
+TRACE_EVENT(adreno_drawctxt_switch,
+	TP_PROTO(struct adreno_context *oldctx,
+		struct adreno_context *newctx,
+		unsigned int flags),
+	TP_ARGS(oldctx, newctx, flags),
+	TP_STRUCT__entry(
+		__field(unsigned int, oldctx)
+		__field(unsigned int, newctx)
+		__field(unsigned int, flags)
+	),
+	TP_fast_assign(
+		__entry->oldctx = oldctx ? oldctx->base.id : 0;
+		__entry->newctx = newctx ? newctx->base.id : 0;
+	),
+	TP_printk(
+		"oldctx=%u newctx=%u flags=%X",
+			__entry->oldctx, __entry->newctx, flags
+	)
+);
+
 TRACE_EVENT(adreno_gpu_fault,
-	TP_PROTO(unsigned int status, unsigned int rptr, unsigned int wptr,
+	TP_PROTO(unsigned int ctx, unsigned int ts,
+		unsigned int status, unsigned int rptr, unsigned int wptr,
 		unsigned int ib1base, unsigned int ib1size,
 		unsigned int ib2base, unsigned int ib2size),
-	TP_ARGS(status, rptr, wptr, ib1base, ib1size, ib2base, ib2size),
+	TP_ARGS(ctx, ts, status, rptr, wptr, ib1base, ib1size, ib2base,
+		ib2size),
 	TP_STRUCT__entry(
+		__field(unsigned int, ctx)
+		__field(unsigned int, ts)
 		__field(unsigned int, status)
 		__field(unsigned int, rptr)
 		__field(unsigned int, wptr)
@@ -154,6 +248,8 @@
 		__field(unsigned int, ib2size)
 	),
 	TP_fast_assign(
+		__entry->ctx = ctx;
+		__entry->ts = ts;
 		__entry->status = status;
 		__entry->rptr = rptr;
 		__entry->wptr = wptr;
@@ -162,10 +258,10 @@
 		__entry->ib2base = ib2base;
 		__entry->ib2size = ib2size;
 	),
-	TP_printk("status=%X RB=%X/%X IB1=%X/%X IB2=%X/%X",
-		__entry->status, __entry->wptr, __entry->rptr,
-		__entry->ib1base, __entry->ib1size, __entry->ib2base,
-		__entry->ib2size)
+	TP_printk("ctx=%d ts=%d status=%X RB=%X/%X IB1=%X/%X IB2=%X/%X",
+		__entry->ctx, __entry->ts, __entry->status, __entry->wptr,
+		__entry->rptr, __entry->ib1base, __entry->ib1size,
+		__entry->ib2base, __entry->ib2size)
 );
 
 #endif /* _ADRENO_TRACE_H */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2624c16..35a03de 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -14,6 +14,7 @@
 #include <linux/fb.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/list.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/interrupt.h>
@@ -1398,30 +1399,372 @@
 	return result;
 }
 
+/*
+ * KGSL command batch management
+ * A command batch is a single submission from userland.  The cmdbatch
+ * encapsulates everything about the submission: command buffers, flags and
+ * sync points.
+ *
+ * Sync points are events that need to expire before the
+ * cmdbatch can be queued to the hardware. For each sync point a
+ * kgsl_cmdbatch_sync_event struct is created and added to a list in the
+ * cmdbatch. There can be multiple types of events, both internal ones (GPU
+ * events) and external triggers. As the events expire the struct is deleted
+ * from the list. The GPU will submit the command batch as soon as the list
+ * goes empty indicating that all the sync points have been met.
+ */
+
+/**
+ * struct kgsl_cmdbatch_sync_event
+ * @type: Syncpoint type
+ * @node: Local list node for the cmdbatch sync point list
+ * @cmdbatch: Pointer to the cmdbatch that owns the sync event
+ * @context: Pointer to the KGSL context that owns the cmdbatch
+ * @timestamp: Pending timestamp for the event
+ * @handle: Pointer to a sync fence handle
+ * @device: Pointer to the KGSL device
+ * @lock: Spin lock to protect the sync event list
+ */
+struct kgsl_cmdbatch_sync_event {
+	int type;
+	struct list_head node;
+	struct kgsl_cmdbatch *cmdbatch;
+	struct kgsl_context *context;
+	unsigned int timestamp;
+	struct kgsl_sync_fence_waiter *handle;
+	struct kgsl_device *device;
+	spinlock_t lock;
+};
+
+/**
+ * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object
+ * @kref: Pointer to the kref structure for this object
+ *
+ * Actually destroy a command batch object.  Called from kgsl_cmdbatch_put
+ */
+void kgsl_cmdbatch_destroy_object(struct kref *kref)
+{
+	struct kgsl_cmdbatch *cmdbatch = container_of(kref,
+		struct kgsl_cmdbatch, refcount);
+
+	kgsl_context_put(cmdbatch->context);
+	kfree(cmdbatch->ibdesc);
+
+	kfree(cmdbatch);
+}
+EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object);
+
+/*
+ * a generic function to retire a pending sync event and (possibly)
+ * kick the dispatcher
+ */
+static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
+	struct kgsl_cmdbatch_sync_event *event)
+{
+	int sched = 0;
+
+	spin_lock(&event->cmdbatch->lock);
+	list_del(&event->node);
+	sched = list_empty(&event->cmdbatch->synclist) ? 1 : 0;
+	spin_unlock(&event->cmdbatch->lock);
+
+	/*
+	 * if this is the last event in the list then tell
+	 * the GPU device that the cmdbatch can be submitted
+	 */
+
+	if (sched && device->ftbl->drawctxt_sched)
+		device->ftbl->drawctxt_sched(device, event->cmdbatch->context);
+}
+
+/*
+ * Callback invoked by the KGSL event framework when the timestamp for a sync
+ * event is retired
+ */
+static void kgsl_cmdbatch_sync_func(struct kgsl_device *device, void *priv,
+		u32 id, u32 timestamp, u32 type)
+{
+	struct kgsl_cmdbatch_sync_event *event = priv;
+
+	kgsl_cmdbatch_sync_expire(device, event);
+
+	kgsl_context_put(event->context);
+	kgsl_cmdbatch_put(event->cmdbatch);
+
+	kfree(event);
+}
+
+/**
+ * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure
+ * @cmdbatch: Pointer to the command batch object to destroy
+ *
+ * Start the process of destroying a command batch.  Cancel any pending events
+ * and decrement the refcount.
+ */
+void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
+{
+	struct kgsl_cmdbatch_sync_event *event, *tmp;
+
+	spin_lock(&cmdbatch->lock);
+
+	/* Delete any pending sync points for this command batch */
+	list_for_each_entry_safe(event, tmp, &cmdbatch->synclist, node) {
+
+		if (event->type == KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP) {
+			/* Cancel the event if it still exists */
+			kgsl_cancel_event(cmdbatch->device, event->context,
+				event->timestamp, kgsl_cmdbatch_sync_func,
+				event);
+		} else if (event->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE) {
+			if (kgsl_sync_fence_async_cancel(event->handle)) {
+				list_del(&event->node);
+				kfree(event);
+				kgsl_cmdbatch_put(cmdbatch);
+			}
+		}
+	}
+
+	spin_unlock(&cmdbatch->lock);
+	kgsl_cmdbatch_put(cmdbatch);
+}
+EXPORT_SYMBOL(kgsl_cmdbatch_destroy);
+
+/*
+ * A callback that gets registered with kgsl_sync_fence_async_wait and is fired
+ * when the fence signals
+ */
+static void kgsl_cmdbatch_sync_fence_func(void *priv)
+{
+	struct kgsl_cmdbatch_sync_event *event = priv;
+
+	spin_lock(&event->lock);
+	kgsl_cmdbatch_sync_expire(event->device, event);
+	kgsl_cmdbatch_put(event->cmdbatch);
+	spin_unlock(&event->lock);
+	kfree(event);
+}
+
+/**
+ * kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint
+ * @device: KGSL device
+ * @cmdbatch: KGSL cmdbatch to add the sync point to
+ * @priv: Private structure passed by the user
+ *
+ * Add a new fence sync syncpoint to the cmdbatch.
+ */
+static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device,
+		struct kgsl_cmdbatch *cmdbatch, void *priv)
+{
+	struct kgsl_cmd_syncpoint_fence *sync = priv;
+	struct kgsl_cmdbatch_sync_event *event;
+
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
+
+	if (event == NULL)
+		return -ENOMEM;
+
+	kref_get(&cmdbatch->refcount);
+
+	event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
+	event->cmdbatch = cmdbatch;
+	event->device = device;
+	spin_lock_init(&event->lock);
+
+	/*
+	 * Add it to the list first to account for the possibility that the
+	 * callback will happen immediately after the call to
+	 * kgsl_sync_fence_async_wait
+	 */
+
+	spin_lock(&cmdbatch->lock);
+	list_add(&event->node, &cmdbatch->synclist);
+	spin_unlock(&cmdbatch->lock);
+
+	/*
+	 * There is a distinct race condition that can occur if the fence
+	 * callback fires before this function has a chance to return.  The
+	 * event struct would be freed before event->handle could be written,
+	 * resulting in a use-after-free.  Protect against this by guarding
+	 * both the call to kgsl_sync_fence_async_wait and the kfree in the
+	 * callback with the same lock.
+	 */
+
+	spin_lock(&event->lock);
+
+	event->handle = kgsl_sync_fence_async_wait(sync->fd,
+		kgsl_cmdbatch_sync_fence_func, event);
+
+	if (IS_ERR_OR_NULL(event->handle)) {
+		int ret = PTR_ERR(event->handle);
+
+		spin_lock(&cmdbatch->lock);
+		list_del(&event->node);
+		spin_unlock(&cmdbatch->lock);
+
+		kgsl_cmdbatch_put(cmdbatch);
+		spin_unlock(&event->lock);
+		kfree(event);
+
+		return ret;
+	}
+
+	spin_unlock(&event->lock);
+	return 0;
+}
+
+/**
+ * kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch
+ * @device: KGSL device
+ * @cmdbatch: KGSL cmdbatch to add the sync point to
+ * @priv: Private structure passed by the user
+ *
+ * Add a new sync point timestamp event to the cmdbatch.
+ */
+static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
+		struct kgsl_cmdbatch *cmdbatch, void *priv)
+{
+	struct kgsl_cmd_syncpoint_timestamp *sync = priv;
+	struct kgsl_context *context = kgsl_context_get(cmdbatch->device,
+		sync->context_id);
+	struct kgsl_cmdbatch_sync_event *event;
+	int ret = -EINVAL;
+
+	if (context == NULL)
+		return -EINVAL;
+
+	/* Sanity check - you can't create a sync point on your own context */
+	if (context == cmdbatch->context) {
+		KGSL_DRV_ERR(device,
+			"Cannot create a sync point on your own context %d\n",
+			context->id);
+		goto done;
+	}
+
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
+	if (event == NULL) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	kref_get(&cmdbatch->refcount);
+
+	event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
+	event->cmdbatch = cmdbatch;
+	event->context = context;
+	event->timestamp = sync->timestamp;
+
+	spin_lock(&cmdbatch->lock);
+	list_add(&event->node, &cmdbatch->synclist);
+	spin_unlock(&cmdbatch->lock);
+
+	mutex_lock(&device->mutex);
+	kgsl_active_count_get(device);
+	ret = kgsl_add_event(device, context->id, sync->timestamp,
+		kgsl_cmdbatch_sync_func, event, NULL);
+	kgsl_active_count_put(device);
+	mutex_unlock(&device->mutex);
+
+	if (ret) {
+		spin_lock(&cmdbatch->lock);
+		list_del(&event->node);
+		spin_unlock(&cmdbatch->lock);
+
+		kgsl_cmdbatch_put(cmdbatch);
+		kfree(event);
+	}
+
+done:
+	if (ret)
+		kgsl_context_put(context);
+
+	return ret;
+}
+
+/**
+ * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch
+ * @device: Pointer to the KGSL device struct for the GPU
+ * @cmdbatch: Pointer to the cmdbatch
+ * @sync: Pointer to the user-specified struct defining the syncpoint
+ *
+ * Create a new sync point in the cmdbatch based on the user specified
+ * parameters
+ */
+static int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
+	struct kgsl_cmdbatch *cmdbatch,
+	struct kgsl_cmd_syncpoint *sync)
+{
+	void *priv;
+	int ret, psize;
+	int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch,
+			void *priv);
+
+	switch (sync->type) {
+	case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
+		psize = sizeof(struct kgsl_cmd_syncpoint_timestamp);
+		func = kgsl_cmdbatch_add_sync_timestamp;
+		break;
+	case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
+		psize = sizeof(struct kgsl_cmd_syncpoint_fence);
+		func = kgsl_cmdbatch_add_sync_fence;
+		break;
+	default:
+		KGSL_DRV_ERR(device, "Invalid sync type 0x%x\n", sync->type);
+		return -EINVAL;
+	}
+
+	if (sync->size != psize) {
+		KGSL_DRV_ERR(device, "Invalid sync size %d\n", sync->size);
+		return -EINVAL;
+	}
+
+	priv = kzalloc(sync->size, GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(priv, sync->priv, sync->size)) {
+		kfree(priv);
+		return -EFAULT;
+	}
+
+	ret = func(device, cmdbatch, priv);
+	kfree(priv);
+
+	return ret;
+}
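+
+/*
+ * Illustrative userspace usage for attaching a fence sync point (a sketch
+ * only; the authoritative uapi layout lives in the msm_kgsl headers):
+ *
+ *	struct kgsl_cmd_syncpoint_fence fence = { .fd = sync_fd };
+ *	struct kgsl_cmd_syncpoint sync;
+ *
+ *	sync.type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
+ *	sync.priv = &fence;
+ *	sync.size = sizeof(fence);
+ *
+ * The syncpoint array is then passed through the synclist/numsyncs fields of
+ * struct kgsl_submit_commands when invoking IOCTL_KGSL_SUBMIT_COMMANDS.
+ */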
+
 /**
  * kgsl_cmdbatch_create() - Create a new cmdbatch structure
+ * @device: Pointer to a KGSL device struct
  * @context: Pointer to a KGSL context struct
  * @numibs: Number of indirect buffers to make room for in the cmdbatch
  *
  * Allocate an new cmdbatch structure and add enough room to store the list of
  * indirect buffers
  */
-struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_context *context,
-	int numibs)
+static struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
+		struct kgsl_context *context, unsigned int flags,
+		unsigned int numibs)
 {
 	struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);
 	if (cmdbatch == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	cmdbatch->ibdesc = kzalloc(sizeof(*cmdbatch->ibdesc) * numibs,
-		GFP_KERNEL);
-	if (cmdbatch->ibdesc == NULL) {
-		kfree(cmdbatch);
-		return ERR_PTR(-ENOMEM);
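+	/*
+	 * Sync-only submissions (KGSL_CONTEXT_SYNC) carry no IBs, so the
+	 * ibdesc array is only allocated for regular command batches
+	 */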
+	if (!(flags & KGSL_CONTEXT_SYNC)) {
+		cmdbatch->ibdesc = kzalloc(sizeof(*cmdbatch->ibdesc) * numibs,
+			GFP_KERNEL);
+		if (cmdbatch->ibdesc == NULL) {
+			kfree(cmdbatch);
+			return ERR_PTR(-ENOMEM);
+		}
 	}
 
-	cmdbatch->ibcount = numibs;
+	kref_init(&cmdbatch->refcount);
+	INIT_LIST_HEAD(&cmdbatch->synclist);
+	spin_lock_init(&cmdbatch->lock);
+
+	cmdbatch->device = device;
+	cmdbatch->ibcount = (flags & KGSL_CONTEXT_SYNC) ? 0 : numibs;
 	cmdbatch->context = context;
+	cmdbatch->flags = flags & ~KGSL_CONTEXT_SUBMIT_IB_LIST;
 
 	/*
 	 * Increase the reference count on the context so it doesn't disappear
@@ -1450,15 +1793,22 @@
 	for (i = 0; i < cmdbatch->ibcount; i++) {
 		if (cmdbatch->ibdesc[i].sizedwords == 0) {
 			KGSL_DRV_ERR(dev_priv->device,
-				"IB verification failed: Invalid size\n");
+				"invalid size ctx %d ib(%d) %X/%X\n",
+				cmdbatch->context->id, i,
+				cmdbatch->ibdesc[i].gpuaddr,
+				cmdbatch->ibdesc[i].sizedwords);
+
 			return false;
 		}
 
 		if (!kgsl_mmu_gpuaddr_in_range(private->pagetable,
 			cmdbatch->ibdesc[i].gpuaddr)) {
 			KGSL_DRV_ERR(dev_priv->device,
-				"IB verification failed: invalid address 0x%X\n",
-				cmdbatch->ibdesc[i].gpuaddr);
+				"Invalid address ctx %d ib(%d) %X/%X\n",
+				cmdbatch->context->id, i,
+				cmdbatch->ibdesc[i].gpuaddr,
+				cmdbatch->ibdesc[i].sizedwords);
+
 			return false;
 		}
 	}
@@ -1468,16 +1818,19 @@
 
 /**
  * _kgsl_cmdbatch_create_legacy() - Create a cmdbatch from a legacy ioctl struct
+ * @device: Pointer to the KGSL device struct for the GPU
  * @context: Pointer to the KGSL context that issued the command batch
  * @param: Pointer to the kgsl_ringbuffer_issueibcmds struct that the user sent
  *
  * Create a command batch from the legacy issueibcmds format.
  */
 static struct kgsl_cmdbatch *_kgsl_cmdbatch_create_legacy(
+		struct kgsl_device *device,
 		struct kgsl_context *context,
 		struct kgsl_ringbuffer_issueibcmds *param)
 {
-	struct kgsl_cmdbatch *cmdbatch = kgsl_cmdbatch_create(context, 1);
+	struct kgsl_cmdbatch *cmdbatch =
+		kgsl_cmdbatch_create(device, context, param->flags, 1);
 
 	if (IS_ERR(cmdbatch))
 		return cmdbatch;
@@ -1492,31 +1845,64 @@
 
 /**
  * _kgsl_cmdbatch_create() - Create a cmdbatch from a ioctl struct
- * @device: Pointer to the KGSL device for the GPU
+ * @device: Pointer to the KGSL device struct for the GPU
  * @context: Pointer to the KGSL context that issued the command batch
- * @param: Pointer to the kgsl_ringbuffer_issueibcmds struct that the user sent
+ * @flags: Flags passed in from the user command
+ * @cmdlist: Pointer to the list of commands from the user
+ * @numcmds: Number of commands in the list
+ * @synclist: Pointer to the list of syncpoints from the user
+ * @numsyncs: Number of syncpoints in the list
  *
  * Create a command batch from the standard issueibcmds format sent by the user.
  */
-struct kgsl_cmdbatch *_kgsl_cmdbatch_create(struct kgsl_device *device,
+static struct kgsl_cmdbatch *_kgsl_cmdbatch_create(struct kgsl_device *device,
 		struct kgsl_context *context,
-		struct kgsl_ringbuffer_issueibcmds *param)
+		unsigned int flags,
+		unsigned int cmdlist, unsigned int numcmds,
+		unsigned int synclist, unsigned int numsyncs)
 {
 	struct kgsl_cmdbatch *cmdbatch =
-		kgsl_cmdbatch_create(context, param->numibs);
+		kgsl_cmdbatch_create(device, context, flags, numcmds);
+	int ret = 0;
 
 	if (IS_ERR(cmdbatch))
 		return cmdbatch;
 
-	if (copy_from_user(cmdbatch->ibdesc, (void *)param->ibdesc_addr,
-		sizeof(struct kgsl_ibdesc) * param->numibs)) {
-		KGSL_DRV_ERR(device,
-			"Unable to copy the IB userspace commands\n");
-		kgsl_cmdbatch_destroy(cmdbatch);
-		return ERR_PTR(-EFAULT);
+	if (!(flags & KGSL_CONTEXT_SYNC)) {
+		if (copy_from_user(cmdbatch->ibdesc, (void __user *) cmdlist,
+			sizeof(struct kgsl_ibdesc) * numcmds)) {
+			ret = -EFAULT;
+			goto done;
+		}
 	}
 
-	cmdbatch->flags = param->flags & ~KGSL_CONTEXT_SUBMIT_IB_LIST;
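+	/* Copy in each user syncpoint descriptor and attach it to the batch */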
+	if (synclist && numsyncs) {
+		struct kgsl_cmd_syncpoint sync;
+		void  __user *uptr = (void __user *) synclist;
+		int i;
+
+		for (i = 0; i < numsyncs; i++) {
+			memset(&sync, 0, sizeof(sync));
+
+			if (copy_from_user(&sync, uptr, sizeof(sync))) {
+				ret = -EFAULT;
+				break;
+			}
+
+			ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+
+			if (ret)
+				break;
+
+			uptr += sizeof(sync);
+		}
+	}
+
+done:
+	if (ret) {
+		kgsl_cmdbatch_destroy(cmdbatch);
+		return ERR_PTR(ret);
+	}
 
 	return cmdbatch;
 }
@@ -1530,12 +1916,14 @@
 	struct kgsl_cmdbatch *cmdbatch;
 	long result = -EINVAL;
 
+	/* The legacy functions don't support synchronization commands */
+	if (param->flags & KGSL_CONTEXT_SYNC)
+		return -EINVAL;
+
+	/* Get the context */
 	context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
-	if (context == NULL) {
-		KGSL_DRV_ERR(device,
-			"Could not find context %d\n", param->drawctxt_id);
+	if (context == NULL)
 		goto done;
-	}
 
 	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
 		/*
@@ -1543,15 +1931,13 @@
 		 * submission
 		 */
 
-		if (param->numibs == 0 || param->numibs > 100000) {
-			KGSL_DRV_ERR(device,
-				"Invalid number of IBs %d\n", param->numibs);
+		if (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS)
 			goto done;
-		}
 
-		cmdbatch = _kgsl_cmdbatch_create(device, context, param);
+		cmdbatch = _kgsl_cmdbatch_create(device, context, param->flags,
+				param->ibdesc_addr, param->numibs, 0, 0);
 	} else
-		cmdbatch = _kgsl_cmdbatch_create_legacy(context, param);
+		cmdbatch = _kgsl_cmdbatch_create_legacy(device, context, param);
 
 	if (IS_ERR(cmdbatch)) {
 		result = PTR_ERR(cmdbatch);
@@ -1559,11 +1945,57 @@
 	}
 
 	/* Run basic sanity checking on the command */
-	if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch)) {
-		KGSL_DRV_ERR(device, "Unable to verify the IBs\n");
+	if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch))
 		goto free_cmdbatch;
+
+	result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
+		cmdbatch, &param->timestamp);
+
+free_cmdbatch:
+	if (result)
+		kgsl_cmdbatch_destroy(cmdbatch);
+
+done:
+	kgsl_context_put(context);
+	return result;
+}
+
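+/*
+ * Handler for IOCTL_KGSL_SUBMIT_COMMANDS: validate the user parameters, build
+ * a cmdbatch (including any sync points) and hand it to the device specific
+ * issueibcmds() implementation.
+ */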
+static long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
+				      unsigned int cmd, void *data)
+{
+	struct kgsl_submit_commands *param = data;
+	struct kgsl_device *device = dev_priv->device;
+	struct kgsl_context *context;
+	struct kgsl_cmdbatch *cmdbatch;
+
+	long result = -EINVAL;
+
+	/* The number of IBs is completely ignored for sync commands */
+	if (!(param->flags & KGSL_CONTEXT_SYNC)) {
+		if (param->numcmds == 0 || param->numcmds > KGSL_MAX_NUMIBS)
+			return -EINVAL;
+	} else if (param->numcmds != 0) {
+		KGSL_DEV_ERR_ONCE(device,
+			"Commands specified with the SYNC flag.  They will be ignored\n");
 	}
 
+	context = kgsl_context_get_owner(dev_priv, param->context_id);
+	if (context == NULL)
+		return -EINVAL;
+
+	cmdbatch = _kgsl_cmdbatch_create(device, context, param->flags,
+		(unsigned int) param->cmdlist, param->numcmds,
+		(unsigned int) param->synclist, param->numsyncs);
+
+	if (IS_ERR(cmdbatch)) {
+		result = PTR_ERR(cmdbatch);
+		goto done;
+	}
+
+	/* Run basic sanity checking on the command */
+	if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch))
+		goto free_cmdbatch;
+
 	result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
 		cmdbatch, &param->timestamp);
 
@@ -2823,6 +3255,8 @@
 			KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
 			kgsl_ioctl_rb_issueibcmds, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_SUBMIT_COMMANDS,
+			kgsl_ioctl_submit_commands, 0),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
 			kgsl_ioctl_cmdstream_readtimestamp,
 			KGSL_IOCTL_LOCK),
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index de647d5..2e9d52e 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -78,6 +78,8 @@
 
 #define KGSL_MEMFREE_HIST_SIZE	((int)(PAGE_SIZE * 2))
 
+#define KGSL_MAX_NUMIBS 100000
+
 struct kgsl_memfree_hist_elem {
 	unsigned int pid;
 	unsigned int gpuaddr;
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index f5b27d0..cd9c4f7 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -134,6 +134,8 @@
 	int (*postmortem_dump) (struct kgsl_device *device, int manual);
 	int (*next_event)(struct kgsl_device *device,
 		struct kgsl_event *event);
+	void (*drawctxt_sched)(struct kgsl_device *device,
+		struct kgsl_context *context);
 };
 
 /* MH register values */
@@ -159,23 +161,53 @@
 
 /**
  * struct kgsl_cmdbatch - KGSl command descriptor
+ * @device: KGSL GPU device that the command was created for
  * @context: KGSL context that created the command
- * @timestamp: Timestamp assigned to the command (currently unused)
+ * @timestamp: Timestamp assigned to the command
  * @flags: flags
+ * @priv: Internal flags
+ * @fault_policy: Internal policy describing how to handle this command in case
+ * of a fault
+ * @fault_recovery: recovery actions actually tried for this batch
  * @ibcount: Number of IBs in the command list
  * @ibdesc: Pointer to the list of IBs
  * @expires: Point in time when the cmdbatch is considered to be hung
  * @invalid:  non-zero if the dispatcher determines the command and the owning
  * context should be invalidated
+ * @refcount: kref structure to maintain the reference count
+ * @synclist: List of sync point events that must expire before the cmdbatch
+ * is issued to the hardware
+ *
+ * This structure defines an atomic batch of command buffers issued from
+ * userspace.
  */
 struct kgsl_cmdbatch {
+	struct kgsl_device *device;
 	struct kgsl_context *context;
+	spinlock_t lock;
 	uint32_t timestamp;
 	uint32_t flags;
+	unsigned long priv;
+	unsigned long fault_policy;
+	unsigned long fault_recovery;
 	uint32_t ibcount;
 	struct kgsl_ibdesc *ibdesc;
 	unsigned long expires;
 	int invalid;
+	struct kref refcount;
+	struct list_head synclist;
+};
+
+/**
+ * enum kgsl_cmdbatch_priv - Internal cmdbatch flags
+ * @CMDBATCH_FLAG_SKIP - Skip the entire command batch
+ * @CMDBATCH_FLAG_FORCE_PREAMBLE - Force the preamble on for the cmdbatch
+ * @CMDBATCH_FLAG_WFI - Force wait-for-idle for the submission
+ */
+
+enum kgsl_cmdbatch_priv {
+	CMDBATCH_FLAG_SKIP = 0,
+	CMDBATCH_FLAG_FORCE_PREAMBLE,
+	CMDBATCH_FLAG_WFI,
 };
 
 struct kgsl_device {
@@ -258,6 +290,7 @@
 	struct work_struct ts_expired_ws;
 	struct list_head events;
 	struct list_head events_pending_list;
+	unsigned int events_last_timestamp;
 	s64 on_time;
 
 	/* Postmortem Control switches */
@@ -368,6 +401,9 @@
 int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
 	kgsl_event_func func, void *priv, void *owner);
 
+void kgsl_cancel_event(struct kgsl_device *device, struct kgsl_context *context,
+		unsigned int timestamp, kgsl_event_func func, void *priv);
+
 static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
 	unsigned int type, size_t size)
 {
@@ -611,20 +647,39 @@
 	kgsl_signal_event(device, context, timestamp, KGSL_EVENT_CANCELLED);
 }
 
-/**
- * kgsl_cmdbatch_destroy() - Destroy a command batch structure
- * @cmdbatch: Pointer to the command batch to destroy
- *
- * Destroy and free a command batch
- */
-static inline void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
-{
-	if (cmdbatch) {
-		kgsl_context_put(cmdbatch->context);
-		kfree(cmdbatch->ibdesc);
-	}
+void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch);
 
-	kfree(cmdbatch);
+void kgsl_cmdbatch_destroy_object(struct kref *kref);
+
+/**
+ * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object
+ * @cmdbatch: Pointer to the command batch object
+ */
+static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch)
+{
+	if (cmdbatch)
+		kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object);
+}
+
+/**
+ * kgsl_cmdbatch_sync_pending() - Return non-zero if the cmdbatch is waiting
+ * @cmdbatch: Pointer to the command batch object to check
+ *
+ * Return non-zero if the specified command batch is still waiting for sync
+ * point dependencies to be satisfied
+ */
+static inline int kgsl_cmdbatch_sync_pending(struct kgsl_cmdbatch *cmdbatch)
+{
+	int ret;
+
+	if (cmdbatch == NULL)
+		return 0;
+
+	spin_lock(&cmdbatch->lock);
+	ret = list_empty(&cmdbatch->synclist) ? 0 : 1;
+	spin_unlock(&cmdbatch->lock);
+
+	return ret;
 }
 
 #endif  /* __KGSL_DEVICE_H */
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index e4f502a..c7ac0ad 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -48,7 +48,8 @@
 {
 	int id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
 
-	trace_kgsl_fire_event(id, timestamp, type, jiffies - event->created);
+	trace_kgsl_fire_event(id, timestamp, type, jiffies - event->created,
+		event->func);
 
 	if (event->func)
 		event->func(device, event->priv, id, timestamp, type);
@@ -235,7 +236,7 @@
 	 */
 
 	if (timestamp_cmp(cur_ts, ts) >= 0) {
-		trace_kgsl_fire_event(id, cur_ts, ts, 0);
+		trace_kgsl_fire_event(id, cur_ts, ts, 0, func);
 
 		func(device, priv, id, ts, KGSL_EVENT_TIMESTAMP_RETIRED);
 		kgsl_context_put(context);
@@ -266,7 +267,7 @@
 	event->owner = owner;
 	event->created = jiffies;
 
-	trace_kgsl_register_event(id, ts);
+	trace_kgsl_register_event(id, ts, func);
 
 	/* Add the event to either the owning context or the global list */
 
@@ -333,7 +334,11 @@
 		void *priv)
 {
 	struct kgsl_event *event;
-	struct list_head *head = _get_list_head(device, context);
+	struct list_head *head;
+
+	BUG_ON(!mutex_is_locked(&device->mutex));
+
+	head = _get_list_head(device, context);
 
 	event = _find_event(device, head, timestamp, func, priv);
 
@@ -400,10 +405,19 @@
 	struct kgsl_context *context, *tmp;
 	uint32_t timestamp;
 
+	/*
+	 * Bail unless the global timestamp has advanced.  We can safely do this
+	 * outside of the mutex for speed
+	 */
+
+	timestamp = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+	if (timestamp == device->events_last_timestamp)
+		return;
+
 	mutex_lock(&device->mutex);
 
-	/* Process expired global events */
-	timestamp = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+	device->events_last_timestamp = timestamp;
+
 	_retire_events(device, &device->events, timestamp);
 	_mark_next_event(device, &device->events);
 
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 103736d..e296784 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -423,8 +423,12 @@
 	 * the GPU and trigger a snapshot. To stall the transaction return
 	 * EBUSY error.
 	 */
-	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) {
+		/* turn off GPU IRQ so we don't get faults from it too */
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+		adreno_dispatcher_irq_fault(device);
 		ret = -EBUSY;
+	}
 done:
 	return ret;
 }
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index b7d7235..dc3ad21 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/err.h>
 #include <linux/file.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -281,3 +282,65 @@
 {
 	sync_timeline_destroy(context->timeline);
 }
+
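+/*
+ * Fired by the sync framework when the fence signals: run the registered KGSL
+ * callback, drop the fence reference and free the waiter
+ */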
+static void kgsl_sync_callback(struct sync_fence *fence,
+	struct sync_fence_waiter *waiter)
+{
+	struct kgsl_sync_fence_waiter *kwaiter =
+		(struct kgsl_sync_fence_waiter *) waiter;
+	kwaiter->func(kwaiter->priv);
+	sync_fence_put(kwaiter->fence);
+	kfree(kwaiter);
+}
+
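+/*
+ * Register an asynchronous waiter on the sync fence backing @fd.  Returns the
+ * waiter on success, NULL if the fence has already signaled, or an ERR_PTR()
+ * on failure
+ */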
+struct kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
+	void (*func)(void *priv), void *priv)
+{
+	struct kgsl_sync_fence_waiter *kwaiter;
+	struct sync_fence *fence;
+	int status;
+
+	fence = sync_fence_fdget(fd);
+	if (fence == NULL)
+		return ERR_PTR(-EINVAL);
+
+	/* create the waiter */
+	kwaiter = kzalloc(sizeof(*kwaiter), GFP_ATOMIC);
+	if (kwaiter == NULL) {
+		sync_fence_put(fence);
+		return ERR_PTR(-ENOMEM);
+	}
+	kwaiter->fence = fence;
+	kwaiter->priv = priv;
+	kwaiter->func = func;
+	sync_fence_waiter_init((struct sync_fence_waiter *) kwaiter,
+		kgsl_sync_callback);
+
+	/* a non-zero status means an error or an already signaled fence */
+	status = sync_fence_wait_async(fence,
+		(struct sync_fence_waiter *) kwaiter);
+	if (status) {
+		kfree(kwaiter);
+		sync_fence_put(fence);
+		if (status < 0)
+			kwaiter = ERR_PTR(status);
+		else
+			kwaiter = NULL;
+	}
+
+	return kwaiter;
+}
+
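+/*
+ * Cancel a pending fence waiter.  Returns 1 if the waiter was cancelled and
+ * freed here, 0 if the callback already ran (or is running) and will do the
+ * cleanup itself
+ */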
+int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *kwaiter)
+{
+	if (kwaiter == NULL)
+		return 0;
+
+	if (sync_fence_cancel_async(kwaiter->fence,
+		(struct sync_fence_waiter *) kwaiter) == 0) {
+		sync_fence_put(kwaiter->fence);
+		kfree(kwaiter);
+		return 1;
+	}
+	return 0;
+}
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index 63adf06..275eaf0 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,13 @@
 	unsigned int timestamp;
 };
 
+struct kgsl_sync_fence_waiter {
+	struct sync_fence_waiter waiter;
+	struct sync_fence *fence;
+	void (*func)(void *priv);
+	void *priv;
+};
+
 #if defined(CONFIG_SYNC)
 struct sync_pt *kgsl_sync_pt_create(struct sync_timeline *timeline,
 	unsigned int timestamp);
@@ -39,6 +46,9 @@
 void kgsl_sync_timeline_signal(struct sync_timeline *timeline,
 	unsigned int timestamp);
 void kgsl_sync_timeline_destroy(struct kgsl_context *context);
+struct kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
+	void (*func)(void *priv), void *priv);
+int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *waiter);
 #else
 static inline struct sync_pt
 *kgsl_sync_pt_create(struct sync_timeline *timeline, unsigned int timestamp)
@@ -72,6 +82,20 @@
 static inline void kgsl_sync_timeline_destroy(struct kgsl_context *context)
 {
 }
+
+static inline struct
+kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
+	void (*func)(void *priv), void *priv)
+{
+	return NULL;
+}
+
+static inline int
+kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *waiter)
+{
+	return 1;
+}
+
 #endif
 
 #endif /* __KGSL_SYNC_H */
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 179a72b..5f39b8b 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -730,42 +730,46 @@
 );
 
 TRACE_EVENT(kgsl_register_event,
-		TP_PROTO(unsigned int id, unsigned int timestamp),
-		TP_ARGS(id, timestamp),
+		TP_PROTO(unsigned int id, unsigned int timestamp, void *func),
+		TP_ARGS(id, timestamp, func),
 		TP_STRUCT__entry(
 			__field(unsigned int, id)
 			__field(unsigned int, timestamp)
+			__field(void *, func)
 		),
 		TP_fast_assign(
 			__entry->id = id;
 			__entry->timestamp = timestamp;
+			__entry->func = func;
 		),
 		TP_printk(
-			"ctx=%u ts=%u",
-			__entry->id, __entry->timestamp)
+			"ctx=%u ts=%u cb=%pF",
+			__entry->id, __entry->timestamp, __entry->func)
 );
 
 TRACE_EVENT(kgsl_fire_event,
 		TP_PROTO(unsigned int id, unsigned int ts,
-			unsigned int type, unsigned int age),
-		TP_ARGS(id, ts, type, age),
+			unsigned int type, unsigned int age, void *func),
+		TP_ARGS(id, ts, type, age, func),
 		TP_STRUCT__entry(
 			__field(unsigned int, id)
 			__field(unsigned int, ts)
 			__field(unsigned int, type)
 			__field(unsigned int, age)
+			__field(void *, func)
 		),
 		TP_fast_assign(
 			__entry->id = id;
 			__entry->ts = ts;
 			__entry->type = type;
 			__entry->age = age;
+			__entry->func = func;
 		),
 		TP_printk(
-			"ctx=%u ts=%u type=%s age=%u",
+			"ctx=%u ts=%u type=%s age=%u cb=%pF",
 			__entry->id, __entry->ts,
 			__print_symbolic(__entry->type, KGSL_EVENT_TYPES),
-			__entry->age)
+			__entry->age, __entry->func)
 );
 
 TRACE_EVENT(kgsl_active_count,
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
index 606d8dd..8e6afc1 100644
--- a/drivers/hwmon/qpnp-adc-current.c
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -134,27 +134,28 @@
 	u8	revision;
 };
 
-struct qpnp_iadc_drv {
+struct qpnp_iadc_chip {
 	struct qpnp_adc_drv			*adc;
 	int32_t					rsense;
 	bool					external_rsense;
 	struct device				*iadc_hwmon;
-	bool					iadc_initialized;
+	struct list_head			list;
 	int64_t					die_temp;
 	struct delayed_work			iadc_work;
 	struct mutex				iadc_vadc_lock;
 	bool					iadc_mode_sel;
 	struct qpnp_iadc_comp			iadc_comp;
 	struct qpnp_vadc_chip			*vadc_dev;
+	struct work_struct			trigger_completion_work;
 	struct sensor_device_attribute		sens_attr[0];
 	bool					skip_auto_calibrations;
 };
 
-static struct qpnp_iadc_drv	*qpnp_iadc;
+LIST_HEAD(qpnp_iadc_device_list);
 
-static int32_t qpnp_iadc_read_reg(uint32_t reg, u8 *data)
+static int32_t qpnp_iadc_read_reg(struct qpnp_iadc_chip *iadc,
+						uint32_t reg, u8 *data)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int rc;
 
 	rc = spmi_ext_register_readl(iadc->adc->spmi->ctrl, iadc->adc->slave,
@@ -167,9 +168,9 @@
 	return 0;
 }
 
-static int32_t qpnp_iadc_write_reg(uint32_t reg, u8 data)
+static int32_t qpnp_iadc_write_reg(struct qpnp_iadc_chip *iadc,
+						uint32_t reg, u8 data)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int rc;
 	u8 *buf;
 
@@ -184,41 +185,54 @@
 	return 0;
 }
 
-static void trigger_iadc_completion(struct work_struct *work)
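+/* Return 0 if @iadc is on the registered device list, -EINVAL otherwise */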
+static int qpnp_iadc_is_valid(struct qpnp_iadc_chip *iadc)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
+	struct qpnp_iadc_chip *iadc_chip = NULL;
 
-	if (!iadc || !iadc->iadc_initialized)
+	list_for_each_entry(iadc_chip, &qpnp_iadc_device_list, list)
+		if (iadc == iadc_chip)
+			return 0;
+
+	return -EINVAL;
+}
+
+static void qpnp_iadc_trigger_completion(struct work_struct *work)
+{
+	struct qpnp_iadc_chip *iadc = container_of(work,
+			struct qpnp_iadc_chip, trigger_completion_work);
+
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return;
 
 	complete(&iadc->adc->adc_rslt_completion);
 
 	return;
 }
-DECLARE_WORK(trigger_iadc_completion_work, trigger_iadc_completion);
 
 static irqreturn_t qpnp_iadc_isr(int irq, void *dev_id)
 {
-	schedule_work(&trigger_iadc_completion_work);
+	struct qpnp_iadc_chip *iadc = dev_id;
+
+	schedule_work(&iadc->trigger_completion_work);
 
 	return IRQ_HANDLED;
 }
 
-static int32_t qpnp_iadc_enable(bool state)
+static int32_t qpnp_iadc_enable(struct qpnp_iadc_chip *dev, bool state)
 {
 	int rc = 0;
 	u8 data = 0;
 
 	data = QPNP_IADC_ADC_EN;
 	if (state) {
-		rc = qpnp_iadc_write_reg(QPNP_IADC_EN_CTL1,
+		rc = qpnp_iadc_write_reg(dev, QPNP_IADC_EN_CTL1,
 					data);
 		if (rc < 0) {
 			pr_err("IADC enable failed\n");
 			return rc;
 		}
 	} else {
-		rc = qpnp_iadc_write_reg(QPNP_IADC_EN_CTL1,
+		rc = qpnp_iadc_write_reg(dev, QPNP_IADC_EN_CTL1,
 					(~data & QPNP_IADC_ADC_EN));
 		if (rc < 0) {
 			pr_err("IADC disable failed\n");
@@ -229,36 +243,36 @@
 	return 0;
 }
 
-static int32_t qpnp_iadc_status_debug(void)
+static int32_t qpnp_iadc_status_debug(struct qpnp_iadc_chip *dev)
 {
 	int rc = 0;
 	u8 mode = 0, status1 = 0, chan = 0, dig = 0, en = 0;
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_MODE_CTL, &mode);
+	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_MODE_CTL, &mode);
 	if (rc < 0) {
 		pr_err("mode ctl register read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_ADC_DIG_PARAM, &dig);
+	rc = qpnp_iadc_read_reg(dev, QPNP_ADC_DIG_PARAM, &dig);
 	if (rc < 0) {
 		pr_err("digital param read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_ADC_CH_SEL_CTL, &chan);
+	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_ADC_CH_SEL_CTL, &chan);
 	if (rc < 0) {
 		pr_err("channel read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_STATUS1, &status1);
+	rc = qpnp_iadc_read_reg(dev, QPNP_STATUS1, &status1);
 	if (rc < 0) {
 		pr_err("status1 read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_EN_CTL1, &en);
+	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_EN_CTL1, &en);
 	if (rc < 0) {
 		pr_err("en read failed with %d\n", rc);
 		return rc;
@@ -267,7 +281,7 @@
 	pr_debug("EOC not set with status:%x, dig:%x, ch:%x, mode:%x, en:%x\n",
 			status1, dig, chan, mode, en);
 
-	rc = qpnp_iadc_enable(false);
+	rc = qpnp_iadc_enable(dev, false);
 	if (rc < 0) {
 		pr_err("IADC disable failed with %d\n", rc);
 		return rc;
@@ -276,19 +290,20 @@
 	return 0;
 }
 
-static int32_t qpnp_iadc_read_conversion_result(uint16_t *data)
+static int32_t qpnp_iadc_read_conversion_result(struct qpnp_iadc_chip *iadc,
+								int16_t *data)
 {
 	uint8_t rslt_lsb, rslt_msb;
 	uint16_t rslt;
 	int32_t rc;
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_DATA0, &rslt_lsb);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_DATA0, &rslt_lsb);
 	if (rc < 0) {
 		pr_err("qpnp adc result read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_DATA1, &rslt_msb);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_DATA1, &rslt_msb);
 	if (rc < 0) {
 		pr_err("qpnp adc result read failed with %d\n", rc);
 		return rc;
@@ -297,7 +312,7 @@
 	rslt = (rslt_msb << 8) | rslt_lsb;
 	*data = rslt;
 
-	rc = qpnp_iadc_enable(false);
+	rc = qpnp_iadc_enable(iadc, false);
 	if (rc)
 		return rc;
 
@@ -364,32 +379,30 @@
 	return 0;
 }
 
-int32_t qpnp_iadc_comp_result(int64_t *result)
+int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *iadc, int64_t *result)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
-
 	return qpnp_iadc_comp(result, iadc->iadc_comp, iadc->die_temp);
 }
 EXPORT_SYMBOL(qpnp_iadc_comp_result);
 
-static int32_t qpnp_iadc_comp_info(void)
+static int32_t qpnp_iadc_comp_info(struct qpnp_iadc_chip *iadc)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int rc = 0;
 
-	rc = qpnp_iadc_read_reg(QPNP_INT_TEST_VAL, &iadc->iadc_comp.id);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_INT_TEST_VAL, &iadc->iadc_comp.id);
 	if (rc < 0) {
 		pr_err("qpnp adc comp id failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_REVISION2, &iadc->iadc_comp.revision);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION2,
+						&iadc->iadc_comp.revision);
 	if (rc < 0) {
 		pr_err("qpnp adc revision read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_ATE_GAIN_CALIB_OFFSET,
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_ATE_GAIN_CALIB_OFFSET,
 						&iadc->iadc_comp.sys_gain);
 	if (rc < 0) {
 		pr_err("full scale read failed with %d\n", rc);
@@ -407,10 +420,10 @@
 	return rc;
 }
 
-static int32_t qpnp_iadc_configure(enum qpnp_iadc_channels channel,
+static int32_t qpnp_iadc_configure(struct qpnp_iadc_chip *iadc,
+					enum qpnp_iadc_channels channel,
 					uint16_t *raw_code, uint32_t mode_sel)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	u8 qpnp_iadc_mode_reg = 0, qpnp_iadc_ch_sel_reg = 0;
 	u8 qpnp_iadc_conv_req = 0, qpnp_iadc_dig_param_reg = 0;
 	int32_t rc = 0;
@@ -426,34 +439,34 @@
 
 	qpnp_iadc_conv_req = QPNP_IADC_CONV_REQ;
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_MODE_CTL, qpnp_iadc_mode_reg);
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MODE_CTL, qpnp_iadc_mode_reg);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_ADC_CH_SEL_CTL,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_ADC_CH_SEL_CTL,
 						qpnp_iadc_ch_sel_reg);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_ADC_DIG_PARAM,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_ADC_DIG_PARAM,
 						qpnp_iadc_dig_param_reg);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_HW_SETTLE_DELAY,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_HW_SETTLE_DELAY,
 				iadc->adc->amux_prop->hw_settle_time);
 	if (rc < 0) {
 		pr_err("qpnp adc configure error for hw settling time setup\n");
 		return rc;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_FAST_AVG_CTL,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_FAST_AVG_CTL,
 					iadc->adc->amux_prop->fast_avg_setup);
 	if (rc < 0) {
 		pr_err("qpnp adc fast averaging configure error\n");
@@ -462,11 +475,11 @@
 
 	INIT_COMPLETION(iadc->adc->adc_rslt_completion);
 
-	rc = qpnp_iadc_enable(true);
+	rc = qpnp_iadc_enable(iadc, true);
 	if (rc)
 		return rc;
 
-	rc = qpnp_iadc_write_reg(QPNP_CONV_REQ, qpnp_iadc_conv_req);
+	rc = qpnp_iadc_write_reg(iadc, QPNP_CONV_REQ, qpnp_iadc_conv_req);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		return rc;
@@ -476,14 +489,14 @@
 				QPNP_ADC_COMPLETION_TIMEOUT);
 	if (!rc) {
 		u8 status1 = 0;
-		rc = qpnp_iadc_read_reg(QPNP_STATUS1, &status1);
+		rc = qpnp_iadc_read_reg(iadc, QPNP_STATUS1, &status1);
 		if (rc < 0)
 			return rc;
 		status1 &= (QPNP_STATUS1_REQ_STS | QPNP_STATUS1_EOC);
 		if (status1 == QPNP_STATUS1_EOC)
 			pr_debug("End of conversion status set\n");
 		else {
-			rc = qpnp_iadc_status_debug();
+			rc = qpnp_iadc_status_debug(iadc);
 			if (rc < 0) {
 				pr_err("status1 read failed with %d\n", rc);
 				return rc;
@@ -492,7 +505,7 @@
 		}
 	}
 
-	rc = qpnp_iadc_read_conversion_result(raw_code);
+	rc = qpnp_iadc_read_conversion_result(iadc, raw_code);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		return rc;
@@ -504,9 +517,8 @@
 #define IADC_CENTER	0xC000
 #define IADC_READING_RESOLUTION_N	542535
 #define IADC_READING_RESOLUTION_D	100000
-static int32_t qpnp_convert_raw_offset_voltage(void)
+static int32_t qpnp_convert_raw_offset_voltage(struct qpnp_iadc_chip *iadc)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	s64 numerator;
 
 	if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
@@ -531,20 +543,20 @@
 	return 0;
 }
 
-int32_t qpnp_iadc_calibrate_for_trim(bool batfet_closed)
+int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
+							bool batfet_closed)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	uint8_t rslt_lsb, rslt_msb;
 	int32_t rc = 0;
 	uint16_t raw_data;
 	uint32_t mode_sel = 0;
 
-	if (!iadc || !iadc->iadc_initialized)
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
 
 	mutex_lock(&iadc->adc->adc_lock);
 
-	rc = qpnp_iadc_configure(GAIN_CALIBRATION_17P857MV,
+	rc = qpnp_iadc_configure(iadc, GAIN_CALIBRATION_17P857MV,
 						&raw_data, mode_sel);
 	if (rc < 0) {
 		pr_err("qpnp adc result read failed with %d\n", rc);
@@ -562,7 +574,7 @@
 	 */
 	if (!batfet_closed || iadc->external_rsense) {
 		/* external offset calculation */
-		rc = qpnp_iadc_configure(OFFSET_CALIBRATION_CSP_CSN,
+		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP_CSN,
 						&raw_data, mode_sel);
 		if (rc < 0) {
 			pr_err("qpnp adc result read failed with %d\n", rc);
@@ -570,7 +582,7 @@
 		}
 	} else {
 		/* internal offset calculation */
-		rc = qpnp_iadc_configure(OFFSET_CALIBRATION_CSP2_CSN2,
+		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP2_CSN2,
 						&raw_data, mode_sel);
 		if (rc < 0) {
 			pr_err("qpnp adc result read failed with %d\n", rc);
@@ -587,7 +599,7 @@
 	pr_debug("raw gain:0x%x, raw offset:0x%x\n",
 		iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);
 
-	rc = qpnp_convert_raw_offset_voltage();
+	rc = qpnp_convert_raw_offset_voltage(iadc);
 	if (rc < 0) {
 		pr_err("qpnp raw_voltage conversion failed\n");
 		goto fail;
@@ -599,28 +611,28 @@
 
 	pr_debug("trim values:lsb:0x%x and msb:0x%x\n", rslt_lsb, rslt_msb);
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_SEC_ACCESS,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
 					QPNP_IADC_SEC_ACCESS_DATA);
 	if (rc < 0) {
 		pr_err("qpnp iadc configure error for sec access\n");
 		goto fail;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_MSB_OFFSET,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MSB_OFFSET,
 						rslt_msb);
 	if (rc < 0) {
 		pr_err("qpnp iadc configure error for MSB write\n");
 		goto fail;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_SEC_ACCESS,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
 					QPNP_IADC_SEC_ACCESS_DATA);
 	if (rc < 0) {
 		pr_err("qpnp iadc configure error for sec access\n");
 		goto fail;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_LSB_OFFSET,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_LSB_OFFSET,
 						rslt_lsb);
 	if (rc < 0) {
 		pr_err("qpnp iadc configure error for LSB write\n");
@@ -634,11 +646,12 @@
 
 static void qpnp_iadc_work(struct work_struct *work)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
+	struct qpnp_iadc_chip *iadc = container_of(work,
+			struct qpnp_iadc_chip, iadc_work.work);
 	int rc = 0;
 
 	if (!iadc->skip_auto_calibrations) {
-		rc = qpnp_iadc_calibrate_for_trim(true);
+		rc = qpnp_iadc_calibrate_for_trim(iadc, true);
 		if (rc)
 			pr_debug("periodic IADC calibration failed\n");
 	}
@@ -649,12 +662,12 @@
 	return;
 }
 
-static int32_t qpnp_iadc_version_check(void)
+static int32_t qpnp_iadc_version_check(struct qpnp_iadc_chip *iadc)
 {
 	uint8_t revision;
 	int rc;
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_REVISION2, &revision);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION2, &revision);
 	if (rc < 0) {
 		pr_err("qpnp adc result read failed with %d\n", rc);
 		return rc;
@@ -668,24 +681,31 @@
 	return 0;
 }
 
-int32_t qpnp_iadc_is_ready(void)
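+/*
+ * Look up the "qcom,<name>-iadc" phandle in the client's device tree node and
+ * return the matching IADC chip.  Returns -ENODEV if the property is missing
+ * and -EPROBE_DEFER if the referenced device has not probed yet
+ */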
+struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev, const char *name)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
+	struct qpnp_iadc_chip *iadc;
+	struct device_node *node = NULL;
+	char prop_name[QPNP_MAX_PROP_NAME_LEN];
 
-	if (!iadc || !iadc->iadc_initialized)
-		return -EPROBE_DEFER;
-	else
-		return 0;
+	snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-iadc", name);
+
+	node = of_parse_phandle(dev->of_node, prop_name, 0);
+	if (node == NULL)
+		return ERR_PTR(-ENODEV);
+
+	list_for_each_entry(iadc, &qpnp_iadc_device_list, list)
+		if (iadc->adc->spmi->dev.of_node == node)
+			return iadc;
+	return ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL(qpnp_iadc_is_ready);
+EXPORT_SYMBOL(qpnp_get_iadc);
 
-int32_t qpnp_iadc_get_rsense(int32_t *rsense)
+int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc, int32_t *rsense)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	uint8_t	rslt_rsense;
 	int32_t	rc = 0, sign_bit = 0;
 
-	if (!iadc || !iadc->iadc_initialized)
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
 
 	if (iadc->external_rsense) {
@@ -693,7 +713,7 @@
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_NOMINAL_RSENSE, &rslt_rsense);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE, &rslt_rsense);
 	if (rc < 0) {
 		pr_err("qpnp adc rsense read failed with %d\n", rc);
 		return rc;
@@ -717,9 +737,8 @@
 }
 EXPORT_SYMBOL(qpnp_iadc_get_rsense);
 
-static int32_t qpnp_check_pmic_temp(void)
+static int32_t qpnp_check_pmic_temp(struct qpnp_iadc_chip *iadc)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	struct qpnp_vadc_result result_pmic_therm;
 	int64_t die_temp_offset;
 	int rc = 0;
@@ -736,7 +755,7 @@
 	if (die_temp_offset > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
 		iadc->die_temp = result_pmic_therm.physical;
 		if (!iadc->skip_auto_calibrations) {
-			rc = qpnp_iadc_calibrate_for_trim(true);
+			rc = qpnp_iadc_calibrate_for_trim(iadc, true);
 			if (rc)
 				pr_err("IADC calibration failed rc = %d\n", rc);
 		}
@@ -745,20 +764,20 @@
 	return rc;
 }
 
-int32_t qpnp_iadc_read(enum qpnp_iadc_channels channel,
+int32_t qpnp_iadc_read(struct qpnp_iadc_chip *iadc,
+				enum qpnp_iadc_channels channel,
 				struct qpnp_iadc_result *result)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int32_t rc, rsense_n_ohms, sign = 0, num, mode_sel = 0;
 	int32_t rsense_u_ohms = 0;
 	int64_t result_current;
 	uint16_t raw_data;
 
-	if (!iadc || !iadc->iadc_initialized)
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
 
 	if (!iadc->iadc_mode_sel) {
-		rc = qpnp_check_pmic_temp();
+		rc = qpnp_check_pmic_temp(iadc);
 		if (rc) {
 			pr_err("Error checking pmic therm temp\n");
 			return rc;
@@ -767,13 +786,13 @@
 
 	mutex_lock(&iadc->adc->adc_lock);
 
-	rc = qpnp_iadc_configure(channel, &raw_data, mode_sel);
+	rc = qpnp_iadc_configure(iadc, channel, &raw_data, mode_sel);
 	if (rc < 0) {
 		pr_err("qpnp adc result read failed with %d\n", rc);
 		goto fail;
 	}
 
-	rc = qpnp_iadc_get_rsense(&rsense_n_ohms);
+	rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms);
 	pr_debug("current raw:0%x and rsense:%d\n",
 			raw_data, rsense_n_ohms);
 	rsense_u_ohms = rsense_n_ohms/1000;
@@ -794,7 +813,7 @@
 		result->result_uv = -result->result_uv;
 		result_current = -result_current;
 	}
-	rc = qpnp_iadc_comp_result(&result_current);
+	rc = qpnp_iadc_comp_result(iadc, &result_current);
 	if (rc < 0)
 		pr_err("Error during compensating the IADC\n");
 	rc = 0;
@@ -807,15 +826,15 @@
 }
 EXPORT_SYMBOL(qpnp_iadc_read);
 
-int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_calib *result)
+int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *iadc,
+					struct qpnp_iadc_calib *result)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int rc;
 
-	if (!iadc || !iadc->iadc_initialized)
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
 
-	rc = qpnp_check_pmic_temp();
+	rc = qpnp_check_pmic_temp(iadc);
 	if (rc) {
 		pr_err("Error checking pmic therm temp\n");
 		return rc;
@@ -839,43 +858,32 @@
 }
 EXPORT_SYMBOL(qpnp_iadc_get_gain_and_offset);
 
-int qpnp_iadc_skip_calibration(void)
+int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *iadc)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
-
-	if (!iadc || !iadc->iadc_initialized)
-		return -EPROBE_DEFER;
-
 	iadc->skip_auto_calibrations = true;
 	return 0;
 }
 EXPORT_SYMBOL(qpnp_iadc_skip_calibration);
 
-int qpnp_iadc_resume_calibration(void)
+int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *iadc)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
-
-	if (!iadc || !iadc->iadc_initialized)
-		return -EPROBE_DEFER;
-
 	iadc->skip_auto_calibrations = false;
 	return 0;
 }
 EXPORT_SYMBOL(qpnp_iadc_resume_calibration);
 
-int32_t qpnp_iadc_vadc_sync_read(
+int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
 	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
 	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int rc = 0;
 
-	if (!iadc || !iadc->iadc_initialized)
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
 
 	mutex_lock(&iadc->iadc_vadc_lock);
 
-	rc = qpnp_check_pmic_temp();
+	rc = qpnp_check_pmic_temp(iadc);
 	if (rc) {
 		pr_err("PMIC die temp check failed\n");
 		goto fail;
@@ -889,7 +897,7 @@
 		goto fail;
 	}
 
-	rc = qpnp_iadc_read(i_channel, i_result);
+	rc = qpnp_iadc_read(iadc, i_channel, i_result);
 	if (rc)
 		pr_err("Configuring IADC failed\n");
 	/* Intentional fall through to release VADC */
@@ -911,10 +919,11 @@
 			struct device_attribute *devattr, char *buf)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct qpnp_iadc_chip *iadc = dev_get_drvdata(dev);
 	struct qpnp_iadc_result result;
 	int rc = -1;
 
-	rc = qpnp_iadc_read(attr->index, &result);
+	rc = qpnp_iadc_read(iadc, attr->index, &result);
 
 	if (rc)
 		return 0;
@@ -926,9 +935,9 @@
 static struct sensor_device_attribute qpnp_adc_attr =
 	SENSOR_ATTR(NULL, S_IRUGO, qpnp_iadc_show, NULL, 0);
 
-static int32_t qpnp_iadc_init_hwmon(struct spmi_device *spmi)
+static int32_t qpnp_iadc_init_hwmon(struct qpnp_iadc_chip *iadc,
+						struct spmi_device *spmi)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	struct device_node *child;
 	struct device_node *node = spmi->dev.of_node;
 	int rc = 0, i = 0, channel;
@@ -960,19 +969,11 @@
 
 static int __devinit qpnp_iadc_probe(struct spmi_device *spmi)
 {
-	struct qpnp_iadc_drv *iadc;
+	struct qpnp_iadc_chip *iadc;
 	struct qpnp_adc_drv *adc_qpnp;
 	struct device_node *node = spmi->dev.of_node;
 	struct device_node *child;
-	int rc, count_adc_channel_list = 0;
-
-	if (!node)
-		return -EINVAL;
-
-	if (qpnp_iadc) {
-		pr_err("IADC already in use\n");
-		return -EBUSY;
-	}
+	int rc, count_adc_channel_list = 0, i = 0;
 
 	for_each_child_of_node(node, child)
 		count_adc_channel_list++;
@@ -982,7 +983,7 @@
 		return -EINVAL;
 	}
 
-	iadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_iadc_drv) +
+	iadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_iadc_chip) +
 		(sizeof(struct sensor_device_attribute) *
 				count_adc_channel_list), GFP_KERNEL);
 	if (!iadc) {
@@ -1030,50 +1031,56 @@
 	IRQF_TRIGGER_RISING, "qpnp_iadc_interrupt", iadc);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to request adc irq\n");
-		goto fail;
+		return rc;
 	} else
 		enable_irq_wake(iadc->adc->adc_irq_eoc);
 
-	dev_set_drvdata(&spmi->dev, iadc);
-	qpnp_iadc = iadc;
-
-	rc = qpnp_iadc_init_hwmon(spmi);
+	rc = qpnp_iadc_init_hwmon(iadc, spmi);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to initialize qpnp hwmon adc\n");
-		goto fail;
+		return rc;
 	}
 	iadc->iadc_hwmon = hwmon_device_register(&iadc->adc->spmi->dev);
 
-	rc = qpnp_iadc_version_check();
+	rc = qpnp_iadc_version_check(iadc);
 	if (rc) {
 		dev_err(&spmi->dev, "IADC version not supported\n");
 		goto fail;
 	}
 
 	mutex_init(&iadc->iadc_vadc_lock);
+	INIT_WORK(&iadc->trigger_completion_work, qpnp_iadc_trigger_completion);
 	INIT_DELAYED_WORK(&iadc->iadc_work, qpnp_iadc_work);
-	rc = qpnp_iadc_comp_info();
+	rc = qpnp_iadc_comp_info(iadc);
 	if (rc) {
 		dev_err(&spmi->dev, "abstracting IADC comp info failed!\n");
 		goto fail;
 	}
-	iadc->iadc_initialized = true;
 
-	rc = qpnp_iadc_calibrate_for_trim(true);
+	dev_set_drvdata(&spmi->dev, iadc);
+	list_add(&iadc->list, &qpnp_iadc_device_list);
+	rc = qpnp_iadc_calibrate_for_trim(iadc, true);
 	if (rc)
 		dev_err(&spmi->dev, "failed to calibrate for USR trim\n");
+
 	schedule_delayed_work(&iadc->iadc_work,
 			round_jiffies_relative(msecs_to_jiffies
 					(QPNP_IADC_CALIB_SECONDS)));
 	return 0;
 fail:
-	qpnp_iadc = NULL;
+	for_each_child_of_node(node, child) {
+		device_remove_file(&spmi->dev,
+			&iadc->sens_attr[i].dev_attr);
+		i++;
+	}
+	hwmon_device_unregister(iadc->iadc_hwmon);
+
 	return rc;
 }
 
 static int __devexit qpnp_iadc_remove(struct spmi_device *spmi)
 {
-	struct qpnp_iadc_drv *iadc = dev_get_drvdata(&spmi->dev);
+	struct qpnp_iadc_chip *iadc = dev_get_drvdata(&spmi->dev);
 	struct device_node *node = spmi->dev.of_node;
 	struct device_node *child;
 	int i = 0;
@@ -1085,6 +1092,7 @@
 			&iadc->sens_attr[i].dev_attr);
 		i++;
 	}
+	hwmon_device_unregister(iadc->iadc_hwmon);
 	dev_set_drvdata(&spmi->dev, NULL);
 
 	return 0;
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 519b7e4..ded2b6e 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -302,9 +302,6 @@
 
 /**
  *  Set/get enable function is just needed by sensor HAL.
- *  Normally, the open function does all the initialization
- *  and power work. And close undo that open does.
- *  Just keeping the function simple.
  */
 
 static ssize_t mpu3050_attr_set_enable(struct device *dev,
@@ -316,8 +313,25 @@
 
 	if (kstrtoul(buf, 10, &val))
 		return -EINVAL;
-	sensor->enable = (u32)val;
-
+	sensor->enable = (u32)val == 0 ? 0 : 1;
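+	/*
+	 * Enabling resumes the device, drives the enable GPIO and starts
+	 * either polling or the data-ready interrupt; disabling reverses
+	 * the sequence
+	 */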
+	if (sensor->enable) {
+		pm_runtime_get_sync(sensor->dev);
+		gpio_set_value(sensor->enable_gpio, 1);
+		if (sensor->use_poll)
+			schedule_delayed_work(&sensor->input_work,
+				msecs_to_jiffies(sensor->poll_interval));
+		else
+			i2c_smbus_write_byte_data(sensor->client,
+					MPU3050_INT_CFG,
+					MPU3050_ACTIVE_LOW |
+					MPU3050_OPEN_DRAIN |
+					MPU3050_RAW_RDY_EN);
+	} else {
+		if (sensor->use_poll)
+			cancel_delayed_work_sync(&sensor->input_work);
+		gpio_set_value(sensor->enable_gpio, 0);
+		pm_runtime_put(sensor->dev);
+	}
 	return count;
 }
 
@@ -792,14 +806,8 @@
 		goto err_input_cleanup;
 	}
 
-	if (sensor->use_poll)
-		schedule_delayed_work(&sensor->input_work,
-				msecs_to_jiffies(sensor->poll_interval));
-	else
-		i2c_smbus_write_byte_data(sensor->client, MPU3050_INT_CFG,
-				MPU3050_ACTIVE_LOW |
-				MPU3050_OPEN_DRAIN |
-				MPU3050_RAW_RDY_EN);
+	pm_runtime_enable(&client->dev);
+	pm_runtime_set_autosuspend_delay(&client->dev, MPU3050_AUTO_DELAY);
 
 	return 0;
 
diff --git a/drivers/input/touchscreen/ft5x06_ts.c b/drivers/input/touchscreen/ft5x06_ts.c
index 25228a6..7fac726 100644
--- a/drivers/input/touchscreen/ft5x06_ts.c
+++ b/drivers/input/touchscreen/ft5x06_ts.c
@@ -82,6 +82,7 @@
 #define FT_ERASE_PANEL_REG	0x63
 #define FT_FW_START_REG		0xBF
 
+#define FT_STATUS_NUM_TP_MASK	0x0F
 
 #define FT_VTG_MIN_UV		2600000
 #define FT_VTG_MAX_UV		3300000
@@ -329,6 +330,7 @@
 static int ft5x06_handle_touchdata(struct ft5x06_ts_data *data)
 {
 	struct ts_event *event = &data->event;
+	int num_points;
 	int ret, i;
 	u8 buf[POINT_READ_BUF] = { 0 };
 	u8 pointid = FT_MAX_ID;
@@ -342,7 +344,9 @@
 	memset(event, 0, sizeof(struct ts_event));
 
 	event->touch_point = 0;
-	for (i = 0; i < CFG_MAX_TOUCH_POINTS; i++) {
+	num_points = buf[2] & FT_STATUS_NUM_TP_MASK;
+
+	for (i = 0; i < num_points; i++) {
 		pointid = (buf[FT_TOUCH_ID_POS + FT_TOUCH_STEP * i]) >> 4;
 		if (pointid >= FT_MAX_ID)
 			break;
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.c b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
index 908d0d7..ba0be2b 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.c
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
@@ -1056,8 +1056,6 @@
 
 	rmi4_pdata->i2c_pull_up = of_property_read_bool(np,
 			"synaptics,i2c-pull-up");
-	rmi4_pdata->power_down_enable = of_property_read_bool(np,
-			"synaptics,power-down");
 	rmi4_pdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
 	rmi4_pdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
 
@@ -2005,7 +2003,7 @@
 
 error_reg_en_vcc_i2c:
 	if (rmi4_data->board->i2c_pull_up)
-		reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0);
+		reg_set_optimum_mode_check(rmi4_data->vdd, 0);
 error_reg_opt_i2c:
 	regulator_disable(rmi4_data->vdd);
 error_reg_en_vdd:
@@ -2594,51 +2592,27 @@
 						bool on)
 {
 	int retval;
-	int load_ua;
 
 	if (on == false)
 		goto regulator_hpm;
 
-	load_ua = rmi4_data->board->power_down_enable ? 0 : RMI4_LPM_LOAD_UA;
-	retval = reg_set_optimum_mode_check(rmi4_data->vdd, load_ua);
+	retval = reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_LPM_LOAD_UA);
 	if (retval < 0) {
 		dev_err(&rmi4_data->i2c_client->dev,
-			"Regulator vdd_ana set_opt failed rc=%d\n",
+			"Regulator vcc_ana set_opt failed rc=%d\n",
 			retval);
 		goto fail_regulator_lpm;
 	}
 
-	if (rmi4_data->board->power_down_enable) {
-		retval = regulator_disable(rmi4_data->vdd);
-		if (retval) {
-			dev_err(&rmi4_data->i2c_client->dev,
-				"Regulator vdd disable failed rc=%d\n",
-				retval);
-			goto fail_regulator_lpm;
-		}
-	}
-
 	if (rmi4_data->board->i2c_pull_up) {
-		load_ua = rmi4_data->board->power_down_enable ?
-			0 : RMI4_I2C_LPM_LOAD_UA;
 		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
-			load_ua);
+			RMI4_I2C_LPM_LOAD_UA);
 		if (retval < 0) {
 			dev_err(&rmi4_data->i2c_client->dev,
-				"Regulator vcc_i2c set_opt failed " \
-				"rc=%d\n", retval);
+				"Regulator vcc_i2c set_opt failed rc=%d\n",
+				retval);
 			goto fail_regulator_lpm;
 		}
-
-		if (rmi4_data->board->power_down_enable) {
-			retval = regulator_disable(rmi4_data->vcc_i2c);
-			if (retval) {
-				dev_err(&rmi4_data->i2c_client->dev,
-					"Regulator vcc_i2c disable failed " \
-					"rc=%d\n", retval);
-				goto fail_regulator_lpm;
-			}
-		}
 	}
 
 	return 0;
@@ -2654,16 +2628,6 @@
 		goto fail_regulator_hpm;
 	}
 
-	if (rmi4_data->board->power_down_enable) {
-		retval = regulator_enable(rmi4_data->vdd);
-		if (retval) {
-			dev_err(&rmi4_data->i2c_client->dev,
-				"Regulator vdd enable failed rc=%d\n",
-				retval);
-			goto fail_regulator_hpm;
-		}
-	}
-
 	if (rmi4_data->board->i2c_pull_up) {
 		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
 			RMI4_I2C_LOAD_UA);
@@ -2673,26 +2637,6 @@
 				retval);
 			goto fail_regulator_hpm;
 		}
-
-		if (rmi4_data->board->power_down_enable) {
-			retval = regulator_enable(rmi4_data->vcc_i2c);
-			if (retval) {
-				dev_err(&rmi4_data->i2c_client->dev,
-					"Regulator vcc_i2c enable failed " \
-					"rc=%d\n", retval);
-				goto fail_regulator_hpm;
-			}
-		}
-	}
-
-	if (rmi4_data->board->power_down_enable) {
-		retval = synaptics_rmi4_reset_device(rmi4_data);
-		if (retval < 0) {
-			dev_err(&rmi4_data->i2c_client->dev,
-				"%s: Failed to issue reset command, rc = %d\n",
-					__func__, retval);
-			return retval;
-		}
 	}
 
 	return 0;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 3082e99..2f6e6a7 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -18,6 +18,7 @@
 #include <linux/videodev2.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
+#include <linux/iopoll.h>
 #include <media/msmb_isp.h>
 
 #include "msm_ispif.h"
@@ -38,6 +39,9 @@
 #define ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY  0x01
 #define ISPIF_INTF_CMD_DISABLE_IMMEDIATELY    0x02
 
+#define ISPIF_TIMEOUT_SLEEP_US                1000
+#define ISPIF_TIMEOUT_ALL_US                500000
+
 #undef CDBG
 #ifdef CONFIG_MSMB_CAMERA_DEBUG
 #define CDBG(fmt, args...) pr_debug(fmt, ##args)
@@ -63,6 +67,78 @@
 	{"ispif_ahb_clk", -1},
 };
 
+static struct msm_cam_clk_info ispif_8974_reset_clk_info[] = {
+	{"csi0_src_clk", INIT_RATE},
+	{"csi0_clk", NO_SET_RATE},
+	{"csi0_pix_clk", NO_SET_RATE},
+	{"csi0_rdi_clk", NO_SET_RATE},
+	{"vfe0_clk_src", INIT_RATE},
+	{"camss_vfe_vfe0_clk", NO_SET_RATE},
+	{"camss_csi_vfe0_clk", NO_SET_RATE},
+	{"vfe1_clk_src", INIT_RATE},
+	{"camss_vfe_vfe1_clk", NO_SET_RATE},
+	{"camss_csi_vfe1_clk", NO_SET_RATE},
+};
+
+static int msm_ispif_reset_hw(struct ispif_device *ispif)
+{
+	int rc = 0;
+	long timeout = 0;
+	struct clk *reset_clk[ARRAY_SIZE(ispif_8974_reset_clk_info)];
+
+	if (ispif->csid_version < CSID_VERSION_V30) {
+		/* currently reset is done only for 8974 */
+		return 0;
+	}
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 1);
+	if (rc < 0) {
+		pr_err("%s: cannot enable clock, error = %d",
+			__func__, rc);
+	}
+
+	init_completion(&ispif->reset_complete[VFE0]);
+	if (ispif->hw_num_isps > 1)
+		init_completion(&ispif->reset_complete[VFE1]);
+
+	/* initiate reset of ISPIF */
+	msm_camera_io_w(ISPIF_RST_CMD_MASK,
+				ispif->base + ISPIF_RST_CMD_ADDR);
+	if (ispif->hw_num_isps > 1)
+		msm_camera_io_w(ISPIF_RST_CMD_1_MASK,
+					ispif->base + ISPIF_RST_CMD_1_ADDR);
+
+	timeout = wait_for_completion_interruptible_timeout(
+			&ispif->reset_complete[VFE0], msecs_to_jiffies(500));
+	CDBG("%s: VFE0 done\n", __func__);
+	if (timeout <= 0) {
+		pr_err("%s: VFE0 reset wait timeout\n", __func__);
+		return -ETIMEDOUT;
+	}
+
+	if (ispif->hw_num_isps > 1) {
+		timeout = wait_for_completion_interruptible_timeout(
+				&ispif->reset_complete[VFE1],
+				msecs_to_jiffies(500));
+		CDBG("%s: VFE1 done\n", __func__);
+		if (timeout <= 0) {
+			pr_err("%s: VFE1 reset wait timeout\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+	if (rc < 0) {
+		pr_err("%s: cannot disable clock, error = %d",
+			__func__, rc);
+	}
+	return rc;
+}
+
 static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable)
 {
 	int rc = 0;
@@ -573,6 +649,7 @@
 	uint16_t cid_mask = 0;
 	uint32_t intf_addr;
 	enum msm_ispif_vfe_intf vfe_intf;
+	uint32_t stop_flag = 0;
 
 	BUG_ON(!ispif);
 	BUG_ON(!params);
@@ -625,10 +702,12 @@
 			goto end;
 		}
 
-		/* todo_bug_fix? very bad. use readl_poll_timeout */
-		while ((msm_camera_io_r(ispif->base + intf_addr) & 0xF) != 0xF)
-			CDBG("%s: Wait for %d Idle\n", __func__,
-				params->entries[i].intftype);
+		rc = readl_poll_timeout(ispif->base + intf_addr, stop_flag,
+					(stop_flag & 0xF) == 0xF,
+					ISPIF_TIMEOUT_SLEEP_US,
+					ISPIF_TIMEOUT_ALL_US);
+		if (rc < 0)
+			goto end;
 
 		/* disable CIDs in CID_MASK register */
 		msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
@@ -706,6 +785,9 @@
 	ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
 
 	if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
+		if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ)
+			complete(&ispif->reset_complete[VFE0]);
+
 		if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
 			pr_err("%s: VFE0 pix0 overflow.\n", __func__);
 
@@ -721,6 +803,9 @@
 		ispif_process_irq(ispif, out, VFE0);
 	}
 	if (ispif->vfe_info.num_vfe > 1) {
+		if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ)
+			complete(&ispif->reset_complete[VFE1]);
+
 		if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
 			pr_err("%s: VFE1 pix0 overflow.\n", __func__);
 
@@ -811,7 +896,7 @@
 		pr_err("%s: ahb_clk enable failed", __func__);
 		goto error_ahb;
 	}
-
+	msm_ispif_reset_hw(ispif);
 	rc = msm_ispif_reset(ispif);
 	if (rc == 0) {
 		ispif->ispif_state = ISPIF_POWER_UP;
@@ -1002,9 +1087,18 @@
 		goto error_sd_register;
 	}
 
-	if (pdev->dev.of_node)
+
+	if (pdev->dev.of_node) {
 		of_property_read_u32((&pdev->dev)->of_node,
 		"cell-index", &pdev->id);
+		rc = of_property_read_u32((&pdev->dev)->of_node,
+		"qcom,num-isps", &ispif->hw_num_isps);
+		if (rc)
+			/* backward compatibility */
+			ispif->hw_num_isps = 1;
+		/* not an error condition */
+		rc = 0;
+	}
 
 	ispif->mem = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "ispif");
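
Two idioms in the ispif hunks above are worth noting: the open-coded wait on the stop status is replaced by readl_poll_timeout(), and the new reset path pairs init_completion()/wait_for_completion_interruptible_timeout() with complete() calls made from the IRQ handler. A minimal standalone sketch of the polling idiom, assuming only a mapped status register whose low nibble reads 0xF once the interface has stopped:

#include <linux/io.h>
#include <linux/iopoll.h>

#define STOP_POLL_SLEEP_US	1000
#define STOP_POLL_TIMEOUT_US	500000

static int wait_for_intf_stop(void __iomem *status_reg)
{
	u32 val;

	/* returns 0 on success, -ETIMEDOUT if the nibble never reaches 0xF */
	return readl_poll_timeout(status_reg, val, (val & 0xF) == 0xF,
				  STOP_POLL_SLEEP_US, STOP_POLL_TIMEOUT_US);
}
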
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
index faa32aa..45e7354 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -58,5 +58,7 @@
 	enum msm_ispif_state_t ispif_state;
 	struct msm_ispif_vfe_info vfe_info;
 	struct clk *ahb_clk;
+	struct completion reset_complete[VFE_MAX];
+	uint32_t hw_num_isps;
 };
 #endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c
index c2a5cad..6bd7feb 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c
@@ -140,6 +140,7 @@
 {
 	int i;
 	int rc = 0;
+	long clk_rate;
 	if (enable) {
 		for (i = 0; i < num_clk; i++) {
 			CDBG("%s enable %s\n", __func__,
@@ -158,6 +159,24 @@
 						   clk_info[i].clk_name);
 					goto cam_clk_set_err;
 				}
+			} else if (clk_info[i].clk_rate == INIT_RATE) {
+				clk_rate = clk_get_rate(clk_ptr[i]);
+				if (clk_rate == 0) {
+					clk_rate =
+						  clk_round_rate(clk_ptr[i], 0);
+					if (clk_rate < 0) {
+						pr_err("%s round rate failed\n",
+							  clk_info[i].clk_name);
+						goto cam_clk_set_err;
+					}
+					rc = clk_set_rate(clk_ptr[i],
+								clk_rate);
+					if (rc < 0) {
+						pr_err("%s set rate failed\n",
+							  clk_info[i].clk_name);
+						goto cam_clk_set_err;
+					}
+				}
 			}
 			rc = clk_prepare(clk_ptr[i]);
 			if (rc < 0) {
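
The new INIT_RATE branch above only programs clocks that have never been configured: it reads the current rate and, if that is zero, rounds a requested rate of 0 to the nearest supported value and sets it. A minimal standalone sketch of that sequence for one already-acquired clock:

#include <linux/clk.h>

static int clk_init_rate_if_unset(struct clk *clk)
{
	unsigned long cur = clk_get_rate(clk);
	long rounded;

	if (cur != 0)
		return 0;	/* already running at a real rate, leave it */

	rounded = clk_round_rate(clk, 0);
	if (rounded < 0)
		return rounded;

	return clk_set_rate(clk, rounded);
}
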
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h
index 73376e2..2e6f809 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h
@@ -18,6 +18,9 @@
 #include <mach/camera2.h>
 #include <media/msm_cam_sensor.h>
 
+#define NO_SET_RATE -1
+#define INIT_RATE -2
+
 void msm_camera_io_w(u32 data, void __iomem *addr);
 void msm_camera_io_w_mb(u32 data, void __iomem *addr);
 u32 msm_camera_io_r(void __iomem *addr);
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 79a492e..5140a03 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -500,8 +500,13 @@
 	struct smem_client *client = clt;
 	struct iommu_set *iommu_group_set = &client->res->iommu_group_set;
 	int i;
+	int j;
 	bool is_secure = (flags & SMEM_SECURE);
 	struct iommu_info *iommu_map;
+	if (!domain_num || !partition_num) {
+		dprintk(VIDC_DBG, "passed null to get domain partition!");
+		return -EINVAL;
+	}
 
 	*domain_num = -1;
 	*partition_num = -1;
@@ -512,14 +517,14 @@
 
 	for (i = 0; i < iommu_group_set->count; i++) {
 		iommu_map = &iommu_group_set->iommu_maps[i];
-		if ((iommu_map->is_secure == is_secure) &&
-			(iommu_map->buffer_type & buffer_type)) {
-			*domain_num = iommu_map->domain;
-			*partition_num = 0;
-			if ((buffer_type & HAL_BUFFER_INTERNAL_CMD_QUEUE) &&
-				(iommu_map->npartitions == 2))
-				*partition_num = 1;
-			break;
+		if (iommu_map->is_secure == is_secure) {
+			for (j = 0; j < iommu_map->npartitions; j++) {
+				if (iommu_map->buffer_type[j] & buffer_type) {
+					*domain_num = iommu_map->domain;
+					*partition_num = j;
+					break;
+				}
+			}
 		}
 	}
 	dprintk(VIDC_DBG, "domain: %d, partition: %d found!\n",
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index cb08da7..394ecdc5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -386,65 +386,56 @@
 {
 	int rc = 0;
 	struct platform_device *pdev = res->pdev;
-	struct device_node *ctx_node;
+	struct device_node *domains_parent_node = NULL;
+	struct device_node *domains_child_node = NULL;
 	struct iommu_set *iommu_group_set = &res->iommu_group_set;
-	int array_size;
-	int i;
+	int domain_idx = 0;
 	struct iommu_info *iommu_map;
-	u32 *buffer_types = NULL;
+	int array_size = 0;
 
-	if (!of_get_property(pdev->dev.of_node, "qcom,iommu-groups",
-				&array_size)) {
-		dprintk(VIDC_DBG, "iommu_groups property not present\n");
-		iommu_group_set->count = 0;
+	domains_parent_node = of_find_node_by_name(pdev->dev.of_node,
+				"qcom,vidc-iommu-domains");
+	if (!domains_parent_node) {
+		dprintk(VIDC_DBG, "Node qcom,vidc-iommu-domains not found.\n");
 		return 0;
 	}
 
-	iommu_group_set->count = array_size / sizeof(u32);
+	iommu_group_set->count = 0;
+	for_each_child_of_node(domains_parent_node, domains_child_node) {
+		iommu_group_set->count++;
+	}
+
 	if (iommu_group_set->count == 0) {
-		dprintk(VIDC_ERR, "No group present in iommu_groups\n");
+		dprintk(VIDC_ERR, "No group present in iommu_domains\n");
 		rc = -ENOENT;
 		goto err_no_of_node;
 	}
-
 	iommu_group_set->iommu_maps = kzalloc(iommu_group_set->count *
 			sizeof(*(iommu_group_set->iommu_maps)), GFP_KERNEL);
+
 	if (!iommu_group_set->iommu_maps) {
-		dprintk(VIDC_ERR, "%s Failed to alloc iommu_maps\n",
-			__func__);
+		dprintk(VIDC_ERR, "Cannot allocate iommu_maps\n");
 		rc = -ENOMEM;
 		goto err_no_of_node;
 	}
 
-	buffer_types = kzalloc(iommu_group_set->count * sizeof(*buffer_types),
-				GFP_KERNEL);
-	if (!buffer_types) {
-		dprintk(VIDC_ERR,
-			"%s Failed to alloc iommu group buffer types\n",
-			__func__);
-		rc = -ENOMEM;
-		goto err_load_groups;
-	}
+	/* set up each context bank */
+	for_each_child_of_node(domains_parent_node, domains_child_node) {
+		struct device_node *ctx_node = of_parse_phandle(
+						domains_child_node,
+						"qcom,vidc-domain-phandle",
+						0);
+		if (domain_idx >= iommu_group_set->count)
+			break;
 
-	rc = of_property_read_u32_array(pdev->dev.of_node,
-			"qcom,iommu-group-buffer-types", buffer_types,
-			iommu_group_set->count);
-	if (rc) {
-		dprintk(VIDC_ERR,
-		    "%s Failed to read iommu group buffer types\n", __func__);
-		goto err_load_groups;
-	}
-
-	for (i = 0; i < iommu_group_set->count; i++) {
-		iommu_map = &iommu_group_set->iommu_maps[i];
-		ctx_node = of_parse_phandle(pdev->dev.of_node,
-				"qcom,iommu-groups", i);
+		iommu_map = &iommu_group_set->iommu_maps[domain_idx];
 		if (!ctx_node) {
-			dprintk(VIDC_ERR, "Unable to parse phandle : %u\n", i);
+			dprintk(VIDC_ERR, "Unable to parse pHandle\n");
 			rc = -EBADHANDLE;
 			goto err_load_groups;
 		}
 
+		/* domain info from domains.dtsi */
 		rc = of_property_read_string(ctx_node, "label",
 				&(iommu_map->name));
 		if (rc) {
@@ -452,6 +443,11 @@
 			goto err_load_groups;
 		}
 
+		dprintk(VIDC_DBG,
+				"domain %d has name %s\n",
+				domain_idx,
+				iommu_map->name);
+
 		if (!of_get_property(ctx_node, "qcom,virtual-addr-pool",
 				&array_size)) {
 			dprintk(VIDC_ERR,
@@ -463,25 +459,48 @@
 
 		iommu_map->npartitions = array_size / sizeof(u32) / 2;
 
+		dprintk(VIDC_DBG,
+				"%d partitions in domain %d",
+				iommu_map->npartitions,
+				domain_idx);
+
 		rc = of_property_read_u32_array(ctx_node,
 				"qcom,virtual-addr-pool",
 				(u32 *)iommu_map->addr_range,
 				iommu_map->npartitions * 2);
 		if (rc) {
 			dprintk(VIDC_ERR,
-				"Could not read addr pool for group : %s\n",
-				iommu_map->name);
+				"Could not read addr pool for group : %s (%d)\n",
+				iommu_map->name,
+				rc);
 			goto err_load_groups;
 		}
 
-		iommu_map->buffer_type = buffer_types[i];
 		iommu_map->is_secure =
 			of_property_read_bool(ctx_node,	"qcom,secure-domain");
+
+		dprintk(VIDC_DBG,
+				"domain %s : secure = %d",
+				iommu_map->name,
+				iommu_map->is_secure);
+
+		/* setup partitions and buffer type per partition */
+		rc = of_property_read_u32_array(domains_child_node,
+				"qcom,vidc-partition-buffer-types",
+				iommu_map->buffer_type,
+				iommu_map->npartitions);
+
+		if (rc) {
+			dprintk(VIDC_ERR,
+					"cannot load partition buffertype information (%d)",
+					rc);
+			rc = -ENOENT;
+			goto err_load_groups;
+		}
+		domain_idx++;
 	}
-	kfree(buffer_types);
-	return 0;
+	return rc;
 err_load_groups:
-	kfree(buffer_types);
 	msm_vidc_free_iommu_groups(res);
 err_no_of_node:
 	return rc;
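
The parser above moves from a flat phandle array to a container node whose children each carry a domain phandle plus a per-partition buffer-type array, counted in one for_each_child_of_node() pass and filled in by a second. A minimal standalone sketch of that count-then-walk pattern; the node and property names (acme,domains, acme,domain-phandle, acme,buffer-types) are placeholders, not the driver's bindings:

#include <linux/kernel.h>
#include <linux/of.h>

static int acme_parse_domains(struct device_node *parent)
{
	struct device_node *container, *child;
	int count = 0;

	container = of_find_node_by_name(parent, "acme,domains");
	if (!container)
		return 0;		/* the container node is optional */

	for_each_child_of_node(container, child)
		count++;

	for_each_child_of_node(container, child) {
		struct device_node *dom;
		u32 types[4];		/* expects four entries per child */

		dom = of_parse_phandle(child, "acme,domain-phandle", 0);
		if (!dom)
			continue;	/* skip malformed children */

		if (of_property_read_u32_array(child, "acme,buffer-types",
					       types, ARRAY_SIZE(types)))
			pr_warn("acme: bad buffer-types in %s\n", child->name);

		of_node_put(dom);
	}

	of_node_put(container);
	return count ? 0 : -ENOENT;
}
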
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 43af909..8176ea5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -16,6 +16,7 @@
 
 #include <linux/platform_device.h>
 #include <media/msm_vidc.h>
+#define MAX_BUFFER_TYPES 32
 
 struct load_freq_table {
 	u32 load;
@@ -39,11 +40,11 @@
 
 struct iommu_info {
 	const char *name;
-	u32 buffer_type;
+	u32 buffer_type[MAX_BUFFER_TYPES];
 	struct iommu_group *group;
 	int domain;
 	bool is_secure;
-	struct addr_range addr_range[2];
+	struct addr_range addr_range[MAX_BUFFER_TYPES];
 	int npartitions;
 };
 
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 47b88db..5416210 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -2804,6 +2804,9 @@
 				iommu_map->addr_range[0].start;
 			memprot.cp_nonpixel_size =
 				iommu_map->addr_range[0].size;
+		} else if (strcmp(iommu_map->name, "venus_cp") == 0) {
+			memprot.cp_nonpixel_start =
+				iommu_map->addr_range[1].start;
 		}
 	}
 
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index a990f43..b140510 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -416,7 +416,7 @@
 		if (ret)
 			pr_err("%s: %s: failed setting runtime active: ret: %d\n",
 			       mmc_hostname(card->host), __func__, ret);
-		else
+		else if (!mmc_card_sdio(card))
 			pm_runtime_enable(&card->dev);
 	}
 
@@ -424,7 +424,7 @@
 	if (ret)
 		return ret;
 
-	if (mmc_use_core_runtime_pm(card->host)) {
+	if (mmc_use_core_runtime_pm(card->host) && !mmc_card_sdio(card)) {
 		card->rpm_attrib.show = show_rpm_delay;
 		card->rpm_attrib.store = store_rpm_delay;
 		sysfs_attr_init(&card->rpm_attrib.attr);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 3495f4d..96f8f2e 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2472,6 +2472,7 @@
 	msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
 	msm_host->mmc->caps2 |= MMC_CAP2_STOP_REQUEST;
 	msm_host->mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
+	msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
 
 	if (msm_host->pdata->nonremovable)
 		msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c
index 897dc2a..9471188 100644
--- a/drivers/platform/msm/qpnp-revid.c
+++ b/drivers/platform/msm/qpnp-revid.c
@@ -30,7 +30,10 @@
 	"PM8841",
 	"PM8019",
 	"PM8226",
-	"PM8110"
+	"PM8110",
+	"PMA8084",
+	"PMI8962",
+	"PMD9635",
 };
 
 static struct of_device_id qpnp_revid_match_table[] = {
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index c43a777..73c7e62 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -276,6 +276,8 @@
 	struct bms_irq			sw_cc_thr_irq;
 	struct bms_irq			ocv_thr_irq;
 	struct qpnp_vadc_chip		*vadc_dev;
+	struct qpnp_iadc_chip		*iadc_dev;
+	struct qpnp_adc_tm_chip		*adc_tm_dev;
 };
 
 static struct of_device_id qpnp_bms_match_table[] = {
@@ -515,13 +517,13 @@
 	return result_uv;
 }
 
-static s64 cc_reverse_adjust_for_gain(s64 uv)
+static s64 cc_reverse_adjust_for_gain(struct qpnp_bms_chip *chip, s64 uv)
 {
 	struct qpnp_iadc_calib calibration;
 	int gain;
 	s64 result_uv;
 
-	qpnp_iadc_get_gain_and_offset(&calibration);
+	qpnp_iadc_get_gain_and_offset(chip->iadc_dev, &calibration);
 	gain = (int)calibration.gain_raw - (int)calibration.offset_raw;
 
 	pr_debug("reverse adjusting_uv = %lld\n", uv);
@@ -544,7 +546,7 @@
 {
 	struct qpnp_iadc_calib calibration;
 
-	qpnp_iadc_get_gain_and_offset(&calibration);
+	qpnp_iadc_get_gain_and_offset(chip->iadc_dev, &calibration);
 	return cc_adjust_for_gain(cc_reading_to_uv(reading),
 			calibration.gain_raw - calibration.offset_raw);
 }
@@ -587,7 +589,7 @@
 	temp_current = div_s64((vsense_uv * 1000000LL),
 				(int)chip->r_sense_uohm);
 
-	rc = qpnp_iadc_comp_result(&temp_current);
+	rc = qpnp_iadc_comp_result(chip->iadc_dev, &temp_current);
 	if (rc)
 		pr_debug("error compensation failed: %d\n", rc);
 
@@ -814,7 +816,8 @@
 		}
 		*vbat_uv = (int)v_result.physical;
 	} else {
-		rc = qpnp_iadc_vadc_sync_read(iadc_channel, &i_result,
+		rc = qpnp_iadc_vadc_sync_read(chip->iadc_dev,
+					iadc_channel, &i_result,
 					VBAT_SNS, &v_result);
 		if (rc) {
 			pr_err("adc sync read failed with rc: %d\n", rc);
@@ -1055,7 +1058,7 @@
 		return *software_counter;
 	}
 
-	qpnp_iadc_get_gain_and_offset(&calibration);
+	qpnp_iadc_get_gain_and_offset(chip->iadc_dev, &calibration);
 	pr_debug("%scc = %lld, die_temp = %lld\n",
 			cc_type == SHDW_CC ? "shdw_" : "",
 			cc, result.physical);
@@ -1065,7 +1068,7 @@
 					- calibration.offset_raw);
 	cc_pvh = cc_uv_to_pvh(cc_voltage_uv);
 	cc_uah = div_s64(cc_pvh, chip->r_sense_uohm);
-	rc = qpnp_iadc_comp_result(&cc_uah);
+	rc = qpnp_iadc_comp_result(chip->iadc_dev, &cc_uah);
 	if (rc)
 		pr_debug("error compensation failed: %d\n", rc);
 	if (clear_cc == RESET) {
@@ -2132,9 +2135,9 @@
 		target_cc_uah = CC_STEP_INCREMENT_UAH;
 	}
 	iadc_comp_factor = 100000;
-	qpnp_iadc_comp_result(&iadc_comp_factor);
+	qpnp_iadc_comp_result(chip->iadc_dev, &iadc_comp_factor);
 	target_cc_uah = div64_s64(target_cc_uah * 100000, iadc_comp_factor);
-	target_cc_uah = cc_reverse_adjust_for_gain(target_cc_uah);
+	target_cc_uah = cc_reverse_adjust_for_gain(chip, target_cc_uah);
 	cc_raw_64 = convert_cc_uah_to_raw(chip, target_cc_uah);
 	cc_raw = convert_s64_to_s36(cc_raw_64);
 
@@ -2353,13 +2356,14 @@
 	mutex_lock(&chip->vbat_monitor_mutex);
 	if (chip->vbat_monitor_params.state_request !=
 			ADC_TM_HIGH_LOW_THR_DISABLE)
-		qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
+		qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+					&chip->vbat_monitor_params);
 	mutex_unlock(&chip->vbat_monitor_mutex);
 	if (chip->use_voltage_soc) {
 		soc = calculate_soc_from_voltage(chip);
 	} else {
 		if (!chip->batfet_closed)
-			qpnp_iadc_calibrate_for_trim(true);
+			qpnp_iadc_calibrate_for_trim(chip->iadc_dev, true);
 		rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM,
 								&result);
 		if (rc) {
@@ -2455,7 +2459,8 @@
 				chip->vbat_monitor_params.low_thr,
 				chip->vbat_monitor_params.high_thr);
 	}
-	qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
+	qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+					&chip->vbat_monitor_params);
 	mutex_unlock(&chip->vbat_monitor_mutex);
 }
 
@@ -2503,7 +2508,8 @@
 				chip->vbat_monitor_params.low_thr,
 				chip->vbat_monitor_params.high_thr);
 	}
-	qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
+	qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+					&chip->vbat_monitor_params);
 	mutex_unlock(&chip->vbat_monitor_mutex);
 }
 
@@ -2527,7 +2533,7 @@
 			configure_vbat_monitor_low(chip);
 		} else {
 			pr_debug("faulty btm trigger, discarding\n");
-			qpnp_adc_tm_channel_measure(
+			qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
 					&chip->vbat_monitor_params);
 		}
 	} else if (state == ADC_TM_HIGH_STATE) {
@@ -2537,7 +2543,7 @@
 			configure_vbat_monitor_high(chip);
 		} else {
 			pr_debug("faulty btm trigger, discarding\n");
-			qpnp_adc_tm_channel_measure(
+			qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
 					&chip->vbat_monitor_params);
 		}
 	} else {
@@ -2553,7 +2559,8 @@
 
 	chip->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_DISABLE;
 
-	rc = qpnp_adc_tm_disable_chan_meas(&chip->vbat_monitor_params);
+	rc = qpnp_adc_tm_disable_chan_meas(chip->adc_tm_dev,
+						&chip->vbat_monitor_params);
 	if (rc) {
 		pr_err("tm disable failed: %d\n", rc);
 		return rc;
@@ -2573,12 +2580,6 @@
 {
 	int rc;
 
-	rc = qpnp_adc_tm_is_ready();
-	if (rc) {
-		pr_info("adc tm is not ready yet: %d, defer probe\n", rc);
-		return -EPROBE_DEFER;
-	}
-
 	chip->vbat_monitor_params.low_thr = chip->low_voltage_threshold;
 	chip->vbat_monitor_params.high_thr = chip->max_voltage_uv
 							- VBATT_ERROR_MARGIN;
@@ -2596,12 +2597,14 @@
 		chip->vbat_monitor_params.state_request =
 			ADC_TM_HIGH_LOW_THR_DISABLE;
 	} else {
-		rc = qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
+		rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+						&chip->vbat_monitor_params);
 		if (rc) {
 			pr_err("tm setup failed: %d\n", rc);
-			return rc;
+		return rc;
 		}
 	}
+
 	pr_debug("setup complete\n");
 	return 0;
 }
@@ -2961,7 +2964,7 @@
 	 */
 
 	for (i = 0; (!chip->batfet_closed) && i < MAX_CAL_TRIES; i++) {
-		rc = qpnp_iadc_calibrate_for_trim(false);
+		rc = qpnp_iadc_calibrate_for_trim(chip->iadc_dev, false);
 		/*
 		 * Wait 20mS after calibration and before reading battery
 		 * current. The BMS h/w uses calibration values in the
@@ -3086,11 +3089,11 @@
 		if (batfet_closed == false) {
 			/* batfet opened */
 			schedule_work(&chip->batfet_open_work);
-			qpnp_iadc_skip_calibration();
+			qpnp_iadc_skip_calibration(chip->iadc_dev);
 		} else {
 			/* batfet closed */
-			qpnp_iadc_calibrate_for_trim(true);
-			qpnp_iadc_resume_calibration();
+			qpnp_iadc_calibrate_for_trim(chip->iadc_dev, true);
+			qpnp_iadc_resume_calibration(chip->iadc_dev);
 		}
 	}
 }
@@ -3385,6 +3388,38 @@
 	return 0;
 }
 
+static int bms_get_adc(struct qpnp_bms_chip *chip,
+					struct spmi_device *spmi)
+{
+	int rc = 0;
+
+	chip->vadc_dev = qpnp_get_vadc(&spmi->dev, "bms");
+	if (IS_ERR(chip->vadc_dev)) {
+		rc = PTR_ERR(chip->vadc_dev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("vadc property missing, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->iadc_dev = qpnp_get_iadc(&spmi->dev, "bms");
+	if (IS_ERR(chip->iadc_dev)) {
+		rc = PTR_ERR(chip->iadc_dev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("iadc property missing, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->adc_tm_dev = qpnp_get_adc_tm(&spmi->dev, "bms");
+	if (IS_ERR(chip->adc_tm_dev)) {
+		rc = PTR_ERR(chip->adc_tm_dev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("adc-tm not ready, defer probe\n");
+		return rc;
+	}
+
+	return 0;
+}
+
 #define SPMI_PROP_READ(chip_prop, qpnp_spmi_property, retval)		\
 do {									\
 	if (retval)							\
@@ -3683,7 +3718,7 @@
 			chip->software_shdw_cc_uah = 0;
 		}
 
-		rc = qpnp_iadc_get_rsense(&rds_rsense_nohm);
+		rc = qpnp_iadc_get_rsense(chip->iadc_dev, &rds_rsense_nohm);
 		if (rc) {
 			pr_err("Unable to read RDS resistance value from IADC; rc = %d\n",
 								rc);
@@ -3740,7 +3775,8 @@
 						- chip->temperature_margin;
 	chip->die_temp_monitor_params.state_request =
 						ADC_TM_HIGH_LOW_THR_ENABLE;
-	return qpnp_adc_tm_channel_measure(&chip->die_temp_monitor_params);
+	return qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+					&chip->die_temp_monitor_params);
 }
 
 static void btm_notify_die_temp(enum qpnp_tm_state state, void *ctx)
@@ -3763,17 +3799,14 @@
 
 static int setup_die_temp_monitoring(struct qpnp_bms_chip *chip)
 {
-	int rc = qpnp_adc_tm_is_ready();
-	if (rc) {
-		pr_info("adc tm is not ready yet: %d, defer probe\n", rc);
-		return -EPROBE_DEFER;
-	}
+	int rc;
+
 	chip->die_temp_monitor_params.channel = DIE_TEMP;
 	chip->die_temp_monitor_params.btm_ctx = (void *)chip;
 	chip->die_temp_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S;
 	chip->die_temp_monitor_params.threshold_notification =
 						&btm_notify_die_temp;
-	refresh_die_temp_monitor(chip);
+	rc = refresh_die_temp_monitor(chip);
 	if (rc) {
 		pr_err("tm setup failed: %d\n", rc);
 		return rc;
@@ -3795,20 +3828,9 @@
 		return -ENOMEM;
 	}
 
-	chip->vadc_dev = qpnp_get_vadc(&(spmi->dev), "bms");
-	if (IS_ERR(chip->vadc_dev)) {
-		rc = PTR_ERR(chip->vadc_dev);
-		if (rc != -EPROBE_DEFER)
-			pr_err("vadc property missing, rc=%d\n", rc);
+	rc = bms_get_adc(chip, spmi);
+	if (rc < 0)
 		goto error_read;
-	}
-
-	rc = qpnp_iadc_is_ready();
-	if (rc) {
-		pr_info("iadc not ready: %d, deferring probe\n", rc);
-		rc = -EPROBE_DEFER;
-		goto error_read;
-	}
 
 	mutex_init(&chip->bms_output_lock);
 	mutex_init(&chip->last_ocv_uv_mutex);
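
bms_get_adc() above shows the new per-consumer handle lookup: each qpnp_get_*() call either returns a device handle or an ERR_PTR, with -EPROBE_DEFER simply meaning the ADC driver has not probed yet. A minimal probe-side sketch of the same shape; the consumer name "foo", struct foo_chip and the qpnp-adc.h header path are assumptions for illustration:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/spmi.h>
#include <linux/qpnp/qpnp-adc.h>	/* assumed header for qpnp_get_vadc() */

struct foo_chip {
	struct qpnp_vadc_chip *vadc_dev;
};

static int foo_probe(struct spmi_device *spmi)
{
	struct foo_chip *chip;

	chip = devm_kzalloc(&spmi->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->vadc_dev = qpnp_get_vadc(&spmi->dev, "foo");
	if (IS_ERR(chip->vadc_dev)) {
		int rc = PTR_ERR(chip->vadc_dev);

		if (rc != -EPROBE_DEFER)
			pr_err("vadc lookup failed, rc=%d\n", rc);
		return rc;	/* -EPROBE_DEFER re-queues the probe later */
	}

	return 0;
}
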
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index c023a6d..0883312 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -100,6 +100,8 @@
 #define BOOST_ENABLE_CONTROL			0x46
 #define COMP_OVR1				0xEA
 #define BAT_IF_BTC_CTRL				0x49
+#define USB_OCP_THR				0x52
+#define USB_OCP_CLR				0x53
 
 #define REG_OFFSET_PERP_SUBTYPE			0x05
 
@@ -142,6 +144,11 @@
 #define BAT_THM_EN			BIT(1)
 #define BAT_ID_EN			BIT(0)
 #define BOOST_PWR_EN			BIT(7)
+#define OCP_CLR_BIT			BIT(7)
+#define OCP_THR_MASK			0x03
+#define OCP_THR_900_MA			0x02
+#define OCP_THR_500_MA			0x01
+#define OCP_THR_200_MA			0x00
 
 /* Interrupt definitions */
 /* smbb_chg_interrupts */
@@ -267,6 +274,7 @@
 	u16				misc_base;
 	u16				freq_base;
 	struct qpnp_chg_irq		usbin_valid;
+	struct qpnp_chg_irq		usb_ocp;
 	struct qpnp_chg_irq		dcin_valid;
 	struct qpnp_chg_irq		chg_gone;
 	struct qpnp_chg_irq		chg_fastchg;
@@ -328,6 +336,7 @@
 	struct qpnp_chg_regulator	otg_vreg;
 	struct qpnp_chg_regulator	boost_vreg;
 	struct qpnp_vadc_chip		*vadc_dev;
+	struct qpnp_adc_tm_chip		*adc_tm_dev;
 };
 
 
@@ -918,7 +927,7 @@
 	struct qpnp_chg_chip *chip = container_of(work,
 				struct qpnp_chg_chip, adc_measure_work);
 
-	if (qpnp_adc_tm_channel_measure(&chip->adc_param))
+	if (qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->adc_param))
 		pr_err("request ADC error\n");
 }
 
@@ -982,6 +991,32 @@
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t
+qpnp_chg_usb_usb_ocp_irq_handler(int irq, void *_chip)
+{
+	struct qpnp_chg_chip *chip = _chip;
+	int rc;
+
+	pr_debug("usb-ocp triggered\n");
+
+	rc = qpnp_chg_masked_write(chip,
+			chip->usb_chgpth_base + USB_OCP_CLR,
+			OCP_CLR_BIT,
+			OCP_CLR_BIT, 1);
+	if (rc)
+		pr_err("Failed to clear OCP bit rc = %d\n", rc);
+
+	/* force usb ovp fet off */
+	rc = qpnp_chg_masked_write(chip,
+			chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
+			USB_OTG_EN_BIT,
+			USB_OTG_EN_BIT, 1);
+	if (rc)
+		pr_err("Failed to turn off usb ovp rc = %d\n", rc);
+
+	return IRQ_HANDLED;
+}
+
 #define ENUM_T_STOP_BIT		BIT(0)
 static irqreturn_t
 qpnp_chg_usb_usbin_valid_irq_handler(int irq, void *_chip)
@@ -2346,7 +2381,7 @@
 		qpnp_chg_set_appropriate_vbatdet(chip);
 	}
 
-	if (qpnp_adc_tm_channel_measure(&chip->adc_param))
+	if (qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->adc_param))
 		pr_err("request ADC error\n");
 
 	power_supply_changed(&chip->batt_psy);
@@ -2631,6 +2666,27 @@
 				return rc;
 			}
 
+			if ((subtype == SMBBP_USB_CHGPTH_SUBTYPE) ||
+				(subtype == SMBCL_USB_CHGPTH_SUBTYPE)) {
+				chip->usb_ocp.irq = spmi_get_irq_byname(spmi,
+						spmi_resource, "usb-ocp");
+				if (chip->usb_ocp.irq < 0) {
+					pr_err("Unable to get usb-ocp irq\n");
+					return rc;
+				}
+				rc = devm_request_irq(chip->dev,
+					chip->usb_ocp.irq,
+					qpnp_chg_usb_usb_ocp_irq_handler,
+					IRQF_TRIGGER_RISING, "usb-ocp", chip);
+				if (rc < 0) {
+					pr_err("Can't request %d usb-ocp: %d\n",
+							chip->usb_ocp.irq, rc);
+					return rc;
+				}
+
+				enable_irq_wake(chip->usb_ocp.irq);
+			}
+
 			enable_irq_wake(chip->usbin_valid.irq);
 			enable_irq_wake(chip->chg_gone.irq);
 			break;
@@ -2685,8 +2741,11 @@
 			return rc;
 		}
 
-		if (batt_data.max_voltage_uv >= 0)
+		if (batt_data.max_voltage_uv >= 0) {
 			chip->max_voltage_mv = batt_data.max_voltage_uv / 1000;
+			chip->safe_voltage_mv = chip->max_voltage_mv
+				+ MAX_DELTA_VDD_MAX_MV;
+		}
 		if (batt_data.iterm_ua >= 0)
 			chip->term_current = batt_data.iterm_ua / 1000;
 	}
@@ -2882,6 +2941,16 @@
 			0xFF,
 			0x80, 1);
 
+		if ((subtype == SMBBP_USB_CHGPTH_SUBTYPE) ||
+			(subtype == SMBCL_USB_CHGPTH_SUBTYPE)) {
+			rc = qpnp_chg_masked_write(chip,
+				chip->usb_chgpth_base + USB_OCP_THR,
+				OCP_THR_MASK,
+				OCP_THR_900_MA, 1);
+			if (rc)
+				pr_err("Failed to configure OCP rc = %d\n", rc);
+		}
+
 		break;
 	case SMBB_DC_CHGPTH_SUBTYPE:
 		break;
@@ -2999,6 +3068,7 @@
 	if (rc) {
 		/* Select BAT_THM as default BPD scheme */
 		chip->bpd_detection = BPD_TYPE_BAT_THM;
+		rc = 0;
 	} else {
 		chip->bpd_detection = get_bpd(bpd);
 		if (chip->bpd_detection < 0) {
@@ -3009,9 +3079,11 @@
 
 	/* Look up JEITA compliance parameters if cool and warm temp provided */
 	if (chip->cool_bat_decidegc && chip->warm_bat_decidegc) {
-		rc = qpnp_adc_tm_is_ready();
-		if (rc) {
-			pr_err("tm not ready %d\n", rc);
+		chip->adc_tm_dev = qpnp_get_adc_tm(chip->dev, "chg");
+		if (IS_ERR(chip->adc_tm_dev)) {
+			rc = PTR_ERR(chip->adc_tm_dev);
+			if (rc != -EPROBE_DEFER)
+				pr_err("adc-tm not ready, defer probe\n");
 			return rc;
 		}
 
@@ -3358,7 +3430,8 @@
 		chip->adc_param.channel = LR_MUX1_BATT_THERM;
 
 		if (get_prop_batt_present(chip)) {
-			rc = qpnp_adc_tm_channel_measure(&chip->adc_param);
+			rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+							&chip->adc_param);
 			if (rc) {
 				pr_err("request ADC error %d\n", rc);
 				goto fail_chg_enable;
@@ -3418,7 +3491,8 @@
 	struct qpnp_chg_chip *chip = dev_get_drvdata(&spmi->dev);
 	if (chip->cool_bat_decidegc && chip->warm_bat_decidegc
 						&& chip->batt_present) {
-		qpnp_adc_tm_disable_chan_meas(&chip->adc_param);
+		qpnp_adc_tm_disable_chan_meas(chip->adc_tm_dev,
+							&chip->adc_param);
 	}
 	cancel_work_sync(&chip->adc_measure_work);
 	cancel_delayed_work_sync(&chip->eoc_work);
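
The OCP additions above follow the usual SMBB interrupt shape: look the IRQ up by name on the SPMI resource, install a handler that clears the latched status with a masked register write and forces the OVP FET off, then mark the IRQ wake-capable. A minimal handler-side sketch; foo_chip and the two helpers are placeholders standing in for the driver's masked-write calls:

#include <linux/kernel.h>
#include <linux/interrupt.h>

struct foo_chip;	/* placeholder type */

/* placeholder stand-ins for the driver's masked register writes */
static int foo_clear_ocp(struct foo_chip *chip) { return 0; }
static int foo_force_ovp_fet_off(struct foo_chip *chip) { return 0; }

static irqreturn_t foo_ocp_irq_handler(int irq, void *data)
{
	struct foo_chip *chip = data;

	if (foo_clear_ocp(chip))
		pr_err("failed to clear OCP status\n");
	if (foo_force_ovp_fet_off(chip))
		pr_err("failed to force the OVP FET off\n");

	return IRQ_HANDLED;
}
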
diff --git a/drivers/spmi/qpnp-int.c b/drivers/spmi/qpnp-int.c
index eedb1e5..03e9021 100644
--- a/drivers/spmi/qpnp-int.c
+++ b/drivers/spmi/qpnp-int.c
@@ -167,21 +167,20 @@
 		pr_err_ratelimited("%s: decode failed on hwirq %lu\n",
 							__func__, d->hwirq);
 		return rc;
-	} else {
-		if (irq_d->priv_d == QPNPINT_INVALID_DATA) {
-			rc = chip_d->cb->register_priv_data(chip_d->spmi_ctrl,
-						&q_spec, &irq_d->priv_d);
-			if (rc) {
-				pr_err_ratelimited(
-					"%s: decode failed on hwirq %lu\n",
-					__func__, d->hwirq);
-				return rc;
-			}
-
-		}
-		arb_op(chip_d->spmi_ctrl, &q_spec, irq_d->priv_d);
 	}
 
+	if (irq_d->priv_d == QPNPINT_INVALID_DATA) {
+		rc = chip_d->cb->register_priv_data(chip_d->spmi_ctrl,
+					&q_spec, &irq_d->priv_d);
+		if (rc) {
+			pr_err_ratelimited(
+				"%s: decode failed on hwirq %lu rc = %d\n",
+				__func__, d->hwirq, rc);
+			return rc;
+		}
+	}
+	arb_op(chip_d->spmi_ctrl, &q_spec, irq_d->priv_d);
+
 	return 0;
 }
 
@@ -191,6 +190,7 @@
 	struct q_chip_data *chip_d = irq_d->chip_d;
 	struct q_perip_data *per_d = irq_d->per_d;
 	int rc;
+	uint8_t prev_int_en = per_d->int_en;
 
 	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
 
@@ -201,10 +201,16 @@
 		return;
 	}
 
-	qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
-
 	per_d->int_en &= ~irq_d->mask_shift;
 
+	if (prev_int_en && !(per_d->int_en)) {
+		/*
+		 * no interrupt on this peripheral is enabled
+		 * ask the arbiter to ignore this peripheral
+		 */
+		qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
+	}
+
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 					(u8 *)&irq_d->mask_shift, 1);
 	if (rc) {
@@ -221,6 +227,7 @@
 	struct q_chip_data *chip_d = irq_d->chip_d;
 	struct q_perip_data *per_d = irq_d->per_d;
 	int rc;
+	uint8_t prev_int_en = per_d->int_en;
 
 	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
 
@@ -231,10 +238,16 @@
 		return;
 	}
 
-	qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
-
 	per_d->int_en &= ~irq_d->mask_shift;
 
+	if (prev_int_en && !(per_d->int_en)) {
+		/*
+		 * no interrupt on this peripheral is enabled
+		 * ask the arbiter to ignore this peripheral
+		 */
+		qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
+	}
+
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 							&irq_d->mask_shift, 1);
 	if (rc) {
@@ -256,6 +269,7 @@
 	struct q_chip_data *chip_d = irq_d->chip_d;
 	struct q_perip_data *per_d = irq_d->per_d;
 	int rc;
+	uint8_t prev_int_en = per_d->int_en;
 
 	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
 
@@ -266,9 +280,15 @@
 		return;
 	}
 
-	qpnpint_arbiter_op(d, irq_d, chip_d->cb->unmask);
-
 	per_d->int_en |= irq_d->mask_shift;
+	if (!prev_int_en && per_d->int_en) {
+		/*
+		 * no interrupt prior to this call was enabled for the
+		 * peripheral. Ask the arbiter to enable interrupts for
+		 * this peripheral
+		 */
+		qpnpint_arbiter_op(d, irq_d, chip_d->cb->unmask);
+	}
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET,
 					&irq_d->mask_shift, 1);
 	if (rc) {
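
The mask/unmask rework above keeps a per-peripheral enable bitmap and only calls into the SPMI arbiter on the empty-to-non-empty and non-empty-to-empty transitions, so masking one of several active lines no longer masks the whole peripheral. A minimal standalone sketch of that transition test; the arbiter callbacks are stand-ins, not the driver's API:

#include <linux/types.h>

/* stand-ins for the arbiter mask/unmask callbacks */
static void arbiter_mask(void) { }
static void arbiter_unmask(void) { }

static void line_mask(u8 *int_en, u8 line_bit)
{
	u8 prev = *int_en;

	*int_en &= ~line_bit;
	if (prev && !*int_en)
		arbiter_mask();		/* last enabled line just went away */
}

static void line_unmask(u8 *int_en, u8 line_bit)
{
	u8 prev = *int_en;

	*int_en |= line_bit;
	if (!prev && *int_en)
		arbiter_unmask();	/* first enabled line on the peripheral */
}
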
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 05a4806..f85a576 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -101,6 +101,9 @@
 #define PMIC_ARB_APID_MASK		0xFF
 #define PMIC_ARB_PPID_MASK		0xFFF
 
+/* interrupt enable bit */
+#define SPMI_PIC_ACC_ENABLE_BIT		BIT(0)
+
 /**
  * base - base address of the PMIC Arbiter core registers.
  * intr - base address of the SPMI interrupt control registers
@@ -434,8 +437,10 @@
 
 	spin_lock_irqsave(&pmic_arb->lock, flags);
 	status = readl_relaxed(pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
-	if (!status) {
-		writel_relaxed(0x1, pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
+	if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
+		status = status | SPMI_PIC_ACC_ENABLE_BIT;
+		writel_relaxed(status,
+				pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
 		/* Interrupt needs to be enabled before returning to caller */
 		wmb();
 	}
@@ -467,8 +472,11 @@
 
 	spin_lock_irqsave(&pmic_arb->lock, flags);
 	status = readl_relaxed(pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
-	if (status) {
-		writel_relaxed(0x0, pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
+	if (status & SPMI_PIC_ACC_ENABLE_BIT) {
+		/* clear the enable bit and write */
+		status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
+		writel_relaxed(status,
+				pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
 		/* Interrupt needs to be disabled before returning to caller */
 		wmb();
 	}
@@ -480,7 +488,7 @@
 periph_interrupt(struct spmi_pmic_arb_dev *pmic_arb, u8 apid)
 {
 	u16 ppid = get_peripheral_id(pmic_arb, apid);
-	void __iomem *base = pmic_arb->intr;
+	void __iomem *intr = pmic_arb->intr;
 	u8 sid = (ppid >> 8) & 0x0F;
 	u8 pid = ppid & 0xFF;
 	u32 status;
@@ -491,11 +499,20 @@
 		/* return IRQ_NONE; */
 	}
 
+	status = readl_relaxed(intr + SPMI_PIC_ACC_ENABLE(apid));
+	if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
+		/*
+		 * All interrupts from this peripheral are disabled
+		 * don't bother calling the qpnpint handler
+		 */
+		return IRQ_HANDLED;
+	}
+
 	/* Read the peripheral specific interrupt bits */
-	status = readl_relaxed(base + SPMI_PIC_IRQ_STATUS(apid));
+	status = readl_relaxed(intr + SPMI_PIC_IRQ_STATUS(apid));
 
 	/* Clear the peripheral interrupts */
-	writel_relaxed(status, base + SPMI_PIC_IRQ_CLEAR(apid));
+	writel_relaxed(status, intr + SPMI_PIC_IRQ_CLEAR(apid));
 	/* Interrupt needs to be cleared/acknowledged before exiting ISR */
 	mb();
 
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index fc21fbb..348ed3e 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -363,21 +363,27 @@
 static inline int
 spmi_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid)
 {
-	BUG_ON(!ctrl || !ctrl->cmd);
+	if (!ctrl || !ctrl->cmd || ctrl->dev.type != &spmi_ctrl_type)
+		return -EINVAL;
+
 	return ctrl->cmd(ctrl, opcode, sid);
 }
 
 static inline int spmi_read_cmd(struct spmi_controller *ctrl,
 				u8 opcode, u8 sid, u16 addr, u8 bc, u8 *buf)
 {
-	BUG_ON(!ctrl || !ctrl->read_cmd);
+	if (!ctrl || !ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type)
+		return -EINVAL;
+
 	return ctrl->read_cmd(ctrl, opcode, sid, addr, bc, buf);
 }
 
 static inline int spmi_write_cmd(struct spmi_controller *ctrl,
 				u8 opcode, u8 sid, u16 addr, u8 bc, u8 *buf)
 {
-	BUG_ON(!ctrl || !ctrl->write_cmd);
+	if (!ctrl || !ctrl->write_cmd || ctrl->dev.type != &spmi_ctrl_type)
+		return -EINVAL;
+
 	return ctrl->write_cmd(ctrl, opcode, sid, addr, bc, buf);
 }
 
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index eae5cc8..5b78cdd 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -171,6 +171,7 @@
 
 struct qpnp_adc_tm_sensor {
 	struct thermal_zone_device	*tz_dev;
+	struct qpnp_adc_tm_chip		*chip;
 	enum thermal_device_mode	mode;
 	uint32_t			sensor_num;
 	enum qpnp_adc_meas_timer_select	timer_select;
@@ -187,15 +188,18 @@
 	uint32_t			scale_type;
 };
 
-struct qpnp_adc_tm_drv {
+struct qpnp_adc_tm_chip {
 	struct qpnp_adc_drv		*adc;
+	struct list_head		list;
 	bool				adc_tm_initialized;
 	int				max_channels_available;
 	struct qpnp_vadc_chip		*vadc_dev;
+	struct work_struct		trigger_high_thr_work;
+	struct work_struct		trigger_low_thr_work;
 	struct qpnp_adc_tm_sensor	sensor[0];
 };
 
-struct qpnp_adc_tm_drv	*qpnp_adc_tm;
+LIST_HEAD(qpnp_adc_tm_device_list);
 
 struct qpnp_adc_tm_trip_reg_type {
 	uint16_t low_thr_lsb_addr;
@@ -258,67 +262,79 @@
 	[SCALE_RPMIC_THERM] = {qpnp_adc_scale_millidegc_pmic_voltage_thr},
 };
 
-static int32_t qpnp_adc_tm_read_reg(int16_t reg, u8 *data)
+static int32_t qpnp_adc_tm_read_reg(struct qpnp_adc_tm_chip *chip,
+						int16_t reg, u8 *data)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	int rc = 0;
 
-	rc = spmi_ext_register_readl(adc_tm->adc->spmi->ctrl,
-		adc_tm->adc->slave, (adc_tm->adc->offset + reg), data, 1);
+	rc = spmi_ext_register_readl(chip->adc->spmi->ctrl,
+		chip->adc->slave, (chip->adc->offset + reg), data, 1);
 	if (rc < 0)
 		pr_err("adc-tm read reg %d failed with %d\n", reg, rc);
 
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_write_reg(int16_t reg, u8 data)
+static int32_t qpnp_adc_tm_write_reg(struct qpnp_adc_tm_chip *chip,
+							int16_t reg, u8 data)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	int rc = 0;
 	u8 *buf;
 
 	buf = &data;
 
-	rc = spmi_ext_register_writel(adc_tm->adc->spmi->ctrl,
-		adc_tm->adc->slave, (adc_tm->adc->offset + reg), buf, 1);
+	rc = spmi_ext_register_writel(chip->adc->spmi->ctrl,
+		chip->adc->slave, (chip->adc->offset + reg), buf, 1);
 	if (rc < 0)
 		pr_err("adc-tm write reg %d failed with %d\n", reg, rc);
 
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_enable(void)
+static int32_t qpnp_adc_tm_enable(struct qpnp_adc_tm_chip *chip)
 {
 	int rc = 0;
 	u8 data = 0;
 
 	data = QPNP_ADC_TM_EN;
-	rc = qpnp_adc_tm_write_reg(QPNP_EN_CTL1, data);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data);
 	if (rc < 0)
 		pr_err("adc-tm enable failed\n");
 
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_disable(void)
+static int32_t qpnp_adc_tm_disable(struct qpnp_adc_tm_chip *chip)
 {
 	u8 data = 0;
 	int rc = 0;
 
-	rc = qpnp_adc_tm_write_reg(QPNP_EN_CTL1, data);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data);
 	if (rc < 0)
 		pr_err("adc-tm disable failed\n");
 
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_enable_if_channel_meas(void)
+static int qpnp_adc_tm_is_valid(struct qpnp_adc_tm_chip *chip)
+{
+	struct qpnp_adc_tm_chip *adc_tm_chip = NULL;
+
+	list_for_each_entry(adc_tm_chip, &qpnp_adc_tm_device_list, list)
+		if (chip == adc_tm_chip)
+			return 0;
+
+	return -EINVAL;
+}
+
+static int32_t qpnp_adc_tm_enable_if_channel_meas(
+					struct qpnp_adc_tm_chip *chip)
 {
 	u8 adc_tm_meas_en = 0;
 	int rc = 0;
 
 	/* Check if a measurement request is still required */
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_MULTI_MEAS_EN,
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
 							&adc_tm_meas_en);
 	if (rc) {
 		pr_err("adc-tm-tm read status high failed with %d\n", rc);
@@ -327,10 +343,11 @@
 
 	/* Enable only if there are pending measurement requests */
 	if (adc_tm_meas_en) {
-		qpnp_adc_tm_enable();
+		qpnp_adc_tm_enable(chip);
 
 		/* Request conversion */
-		rc = qpnp_adc_tm_write_reg(QPNP_CONV_REQ, QPNP_CONV_REQ_SET);
+		rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ,
+							QPNP_CONV_REQ_SET);
 		if (rc < 0) {
 			pr_err("adc-tm request conversion failed\n");
 			return rc;
@@ -340,13 +357,13 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_req_sts_check(void)
+static int32_t qpnp_adc_tm_req_sts_check(struct qpnp_adc_tm_chip *chip)
 {
 	u8 status1;
 	int rc, count = 0;
 
 	/* The VADC_TM bank needs to be disabled for new conversion request */
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_STATUS1, &status1);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1);
 	if (rc) {
 		pr_err("adc-tm read status1 failed\n");
 		return rc;
@@ -354,7 +371,7 @@
 
 	/* Disable the bank if a conversion is occuring */
 	while ((status1 & QPNP_STATUS1_REQ_STS) && (count < 5)) {
-		rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_STATUS1, &status1);
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1);
 		if (rc < 0)
 			pr_err("adc-tm disable failed\n");
 		/* Wait time is based on the optimum sampling rate
@@ -367,18 +384,19 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_check_revision(uint32_t btm_chan_num)
+static int32_t qpnp_adc_tm_check_revision(struct qpnp_adc_tm_chip *chip,
+							uint32_t btm_chan_num)
 {
 	u8 rev, perph_subtype;
 	int rc = 0;
 
-	rc = qpnp_adc_tm_read_reg(QPNP_REVISION3, &rev);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_REVISION3, &rev);
 	if (rc) {
 		pr_err("adc-tm revision read failed\n");
 		return rc;
 	}
 
-	rc = qpnp_adc_tm_read_reg(QPNP_PERPH_SUBTYPE, &perph_subtype);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_PERPH_SUBTYPE, &perph_subtype);
 	if (rc) {
 		pr_err("adc-tm perph_subtype read failed\n");
 		return rc;
@@ -394,21 +412,23 @@
 
 	return rc;
 }
-static int32_t qpnp_adc_tm_mode_select(u8 mode_ctl)
+static int32_t qpnp_adc_tm_mode_select(struct qpnp_adc_tm_chip *chip,
+								u8 mode_ctl)
 {
 	int rc;
 
 	mode_ctl |= (QPNP_ADC_TRIM_EN | QPNP_AMUX_TRIM_EN);
 
 	/* VADC_BTM current sets mode to recurring measurements */
-	rc = qpnp_adc_tm_write_reg(QPNP_MODE_CTL, mode_ctl);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_MODE_CTL, mode_ctl);
 	if (rc < 0)
 		pr_err("adc-tm write mode selection err\n");
 
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_timer_interval_select(uint32_t btm_chan,
+static int32_t qpnp_adc_tm_timer_interval_select(
+		struct qpnp_adc_tm_chip *chip, uint32_t btm_chan,
 		struct qpnp_vadc_chan_properties *chan_prop)
 {
 	int rc;
@@ -417,7 +437,8 @@
 	/* Configure kernel clients to timer1 */
 	switch (chan_prop->timer_select) {
 	case ADC_MEAS_TIMER_SELECT1:
-		rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_MEAS_INTERVAL_CTL,
+		rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL,
 				chan_prop->meas_interval1);
 		if (rc < 0) {
 			pr_err("timer1 configure failed\n");
@@ -426,7 +447,8 @@
 	break;
 	case ADC_MEAS_TIMER_SELECT2:
 		/* Thermal channels uses timer2, default to 1 second */
-		rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+		rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
 				&meas_interval_timer2);
 		if (rc < 0) {
 			pr_err("timer2 configure read failed\n");
@@ -435,7 +457,8 @@
 		meas_interval_timer2 |=
 			(chan_prop->meas_interval2 <<
 			QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT);
-		rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
 			meas_interval_timer2);
 		if (rc < 0) {
 			pr_err("timer2 configure failed\n");
@@ -443,7 +466,8 @@
 		}
 	break;
 	case ADC_MEAS_TIMER_SELECT3:
-		rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+		rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
 				&meas_interval_timer2);
 		if (rc < 0) {
 			pr_err("timer3 read failed\n");
@@ -451,7 +475,8 @@
 		}
 		chan_prop->meas_interval2 = ADC_MEAS3_INTERVAL_1S;
 		meas_interval_timer2 |= chan_prop->meas_interval2;
-		rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
 			meas_interval_timer2);
 		if (rc < 0) {
 			pr_err("timer3 configure failed\n");
@@ -469,13 +494,13 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_reg_update(uint16_t addr,
-		u8 mask, bool state)
+static int32_t qpnp_adc_tm_reg_update(struct qpnp_adc_tm_chip *chip,
+		uint16_t addr, u8 mask, bool state)
 {
 	u8 reg_value = 0;
 	int rc = 0;
 
-	rc = qpnp_adc_tm_read_reg(addr, &reg_value);
+	rc = qpnp_adc_tm_read_reg(chip, addr, &reg_value);
 	if (rc < 0) {
 		pr_err("read failed for addr:0x%x\n", addr);
 		return rc;
@@ -487,7 +512,7 @@
 
 	pr_debug("state:%d, reg:0x%x with bits:0x%x and mask:0x%x\n",
 					state, addr, reg_value, ~mask);
-	rc = qpnp_adc_tm_write_reg(addr, reg_value);
+	rc = qpnp_adc_tm_write_reg(chip, addr, reg_value);
 	if (rc < 0) {
 		pr_err("write failed for addr:%x\n", addr);
 		return rc;
@@ -496,12 +521,13 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_thr_update(uint32_t btm_chan,
+static int32_t qpnp_adc_tm_thr_update(struct qpnp_adc_tm_chip *chip,
+			uint32_t btm_chan,
 			struct qpnp_vadc_chan_properties *chan_prop)
 {
 	int rc = 0;
 
-	rc = qpnp_adc_tm_write_reg(
+	rc = qpnp_adc_tm_write_reg(chip,
 			adc_tm_data[btm_chan].low_thr_lsb_addr,
 			QPNP_ADC_TM_THR_LSB_MASK(chan_prop->low_thr));
 	if (rc < 0) {
@@ -509,7 +535,7 @@
 		return rc;
 	}
 
-	rc = qpnp_adc_tm_write_reg(
+	rc = qpnp_adc_tm_write_reg(chip,
 		adc_tm_data[btm_chan].low_thr_msb_addr,
 		QPNP_ADC_TM_THR_MSB_MASK(chan_prop->low_thr));
 	if (rc < 0) {
@@ -517,7 +543,7 @@
 		return rc;
 	}
 
-	rc = qpnp_adc_tm_write_reg(
+	rc = qpnp_adc_tm_write_reg(chip,
 		adc_tm_data[btm_chan].high_thr_lsb_addr,
 		QPNP_ADC_TM_THR_LSB_MASK(chan_prop->high_thr));
 	if (rc < 0) {
@@ -525,7 +551,7 @@
 		return rc;
 	}
 
-	rc = qpnp_adc_tm_write_reg(
+	rc = qpnp_adc_tm_write_reg(chip,
 		adc_tm_data[btm_chan].high_thr_msb_addr,
 		QPNP_ADC_TM_THR_MSB_MASK(chan_prop->high_thr));
 	if (rc < 0)
@@ -537,17 +563,17 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_channel_configure(uint32_t btm_chan,
+static int32_t qpnp_adc_tm_channel_configure(struct qpnp_adc_tm_chip *chip,
+			uint32_t btm_chan,
 			struct qpnp_vadc_chan_properties *chan_prop,
 			uint32_t amux_channel)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	int rc = 0, i = 0, chan_idx = 0;
 	bool chan_found = false;
 	u8 sensor_mask = 0;
 
-	while (i < adc_tm->max_channels_available) {
-		if (adc_tm->sensor[i].btm_channel_num == btm_chan) {
+	while (i < chip->max_channels_available) {
+		if (chip->sensor[i].btm_channel_num == btm_chan) {
 			chan_idx = i;
 			chan_found = true;
 			i++;
@@ -561,9 +587,9 @@
 	}
 
 	sensor_mask = 1 << chan_idx;
-	if (!adc_tm->sensor[chan_idx].thermal_node) {
+	if (!chip->sensor[chan_idx].thermal_node) {
 		/* Update low and high notification thresholds */
-		rc = qpnp_adc_tm_thr_update(btm_chan,
+		rc = qpnp_adc_tm_thr_update(chip, btm_chan,
 				chan_prop);
 		if (rc < 0) {
 			pr_err("setting chan:%d threshold failed\n", btm_chan);
@@ -577,7 +603,7 @@
 			pr_debug("low sensor mask:%x with state:%d\n",
 					sensor_mask, chan_prop->state_request);
 			/* Enable low threshold's interrupt */
-			rc = qpnp_adc_tm_reg_update(
+			rc = qpnp_adc_tm_reg_update(chip,
 				QPNP_ADC_TM_LOW_THR_INT_EN, sensor_mask, true);
 			if (rc < 0) {
 				pr_err("low thr enable err:%d\n", btm_chan);
@@ -591,7 +617,7 @@
 					ADC_TM_HIGH_LOW_THR_ENABLE)) {
 			/* Enable high threshold's interrupt */
 			pr_debug("high sensor mask:%x\n", sensor_mask);
-			rc = qpnp_adc_tm_reg_update(
+			rc = qpnp_adc_tm_reg_update(chip,
 				QPNP_ADC_TM_HIGH_THR_INT_EN, sensor_mask, true);
 			if (rc < 0) {
 				pr_err("high thr enable err:%d\n", btm_chan);
@@ -601,7 +627,7 @@
 	}
 
 	/* Enable corresponding BTM channel measurement */
-	rc = qpnp_adc_tm_reg_update(
+	rc = qpnp_adc_tm_reg_update(chip,
 		QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, true);
 	if (rc < 0) {
 		pr_err("multi measurement en failed\n");
@@ -611,7 +637,7 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_configure(
+static int32_t qpnp_adc_tm_configure(struct qpnp_adc_tm_chip *chip,
 			struct qpnp_adc_amux_properties *chan_prop)
 {
 	u8 decimation = 0, op_cntrl = 0;
@@ -619,19 +645,19 @@
 	uint32_t btm_chan = 0;
 
 	/* Disable bank */
-	rc = qpnp_adc_tm_disable();
+	rc = qpnp_adc_tm_disable(chip);
 	if (rc)
 		return rc;
 
 	/* Check if a conversion is in progress */
-	rc = qpnp_adc_tm_req_sts_check();
+	rc = qpnp_adc_tm_req_sts_check(chip);
 	if (rc < 0) {
 		pr_err("adc-tm req_sts check failed\n");
 		return rc;
 	}
 
 	/* Set measurement in recurring mode */
-	rc = qpnp_adc_tm_mode_select(chan_prop->mode_sel);
+	rc = qpnp_adc_tm_mode_select(chip, chan_prop->mode_sel);
 	if (rc < 0) {
 		pr_err("adc-tm mode select failed\n");
 		return rc;
@@ -639,7 +665,7 @@
 
 	/* Configure AMUX channel select for the corresponding BTM channel*/
 	btm_chan = chan_prop->chan_prop->tm_channel_select;
-	rc = qpnp_adc_tm_write_reg(btm_chan, chan_prop->amux_channel);
+	rc = qpnp_adc_tm_write_reg(chip, btm_chan, chan_prop->amux_channel);
 	if (rc < 0) {
 		pr_err("adc-tm channel selection err\n");
 		return rc;
@@ -648,14 +674,14 @@
 	/* Digital paramater setup */
 	decimation |= chan_prop->decimation <<
 				QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT;
-	rc = qpnp_adc_tm_write_reg(QPNP_ADC_DIG_PARAM, decimation);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_DIG_PARAM, decimation);
 	if (rc < 0) {
 		pr_err("adc-tm digital parameter setup err\n");
 		return rc;
 	}
 
 	/* Hardware setting time */
-	rc = qpnp_adc_tm_write_reg(QPNP_HW_SETTLE_DELAY,
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_HW_SETTLE_DELAY,
 					chan_prop->hw_settle_time);
 	if (rc < 0) {
 		pr_err("adc-tm hw settling time setup err\n");
@@ -663,7 +689,7 @@
 	}
 
 	/* Fast averaging setup */
-	rc = qpnp_adc_tm_write_reg(QPNP_FAST_AVG_CTL,
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_CTL,
 					chan_prop->fast_avg_setup);
 	if (rc < 0) {
 		pr_err("adc-tm fast-avg setup err\n");
@@ -671,7 +697,7 @@
 	}
 
 	/* Measurement interval setup */
-	rc = qpnp_adc_tm_timer_interval_select(btm_chan,
+	rc = qpnp_adc_tm_timer_interval_select(chip, btm_chan,
 						chan_prop->chan_prop);
 	if (rc < 0) {
 		pr_err("adc-tm timer select failed\n");
@@ -679,17 +705,18 @@
 	}
 
 	/* Channel configuration setup */
-	rc = qpnp_adc_tm_channel_configure(btm_chan, chan_prop->chan_prop,
-					chan_prop->amux_channel);
+	rc = qpnp_adc_tm_channel_configure(chip, btm_chan,
+			chan_prop->chan_prop, chan_prop->amux_channel);
 	if (rc < 0) {
 		pr_err("adc-tm channel configure failed\n");
 		return rc;
 	}
 
 	/* Recurring interval measurement enable */
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_MEAS_INTERVAL_OP_CTL, &op_cntrl);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
+								&op_cntrl);
 	op_cntrl |= QPNP_ADC_MEAS_INTERVAL_OP;
-	rc = qpnp_adc_tm_reg_update(QPNP_ADC_MEAS_INTERVAL_OP_CTL,
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
 			op_cntrl, true);
 	if (rc < 0) {
 		pr_err("adc-tm meas interval op configure failed\n");
@@ -697,12 +724,12 @@
 	}
 
 	/* Enable bank */
-	rc = qpnp_adc_tm_enable();
+	rc = qpnp_adc_tm_enable(chip);
 	if (rc)
 		return rc;
 
 	/* Request conversion */
-	rc = qpnp_adc_tm_write_reg(QPNP_CONV_REQ, QPNP_CONV_REQ_SET);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ, QPNP_CONV_REQ_SET);
 	if (rc < 0) {
 		pr_err("adc-tm request conversion failed\n");
 		return rc;
@@ -714,13 +741,13 @@
 static int qpnp_adc_tm_get_mode(struct thermal_zone_device *thermal,
 			      enum thermal_device_mode *mode)
 {
-	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
 
-	if (!adc_tm_sensor || qpnp_adc_tm_check_revision(
-			adc_tm_sensor->btm_channel_num) || !mode)
+	if ((IS_ERR(adc_tm)) || qpnp_adc_tm_check_revision(
+			adc_tm->chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
-	*mode = adc_tm_sensor->mode;
+	*mode = adc_tm->mode;
 
 	return 0;
 }
@@ -729,35 +756,38 @@
 			      enum thermal_device_mode mode)
 {
 	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
-	struct qpnp_adc_tm_drv *adc_drv = qpnp_adc_tm;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 	int rc = 0, channel;
 	u8 sensor_mask = 0;
 
-	if (!adc_tm || qpnp_adc_tm_check_revision(adc_tm->btm_channel_num))
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
 	if (mode == THERMAL_DEVICE_ENABLED) {
-		adc_drv->adc->amux_prop->amux_channel =
+		chip->adc->amux_prop->amux_channel =
 					adc_tm->vadc_channel_num;
 		channel = adc_tm->sensor_num;
-		adc_drv->adc->amux_prop->decimation =
-			adc_drv->adc->adc_channels[channel].adc_decimation;
-		adc_drv->adc->amux_prop->hw_settle_time =
-			adc_drv->adc->adc_channels[channel].hw_settle_time;
-		adc_drv->adc->amux_prop->fast_avg_setup =
-			adc_drv->adc->adc_channels[channel].fast_avg_setup;
-		adc_drv->adc->amux_prop->mode_sel =
+		chip->adc->amux_prop->decimation =
+			chip->adc->adc_channels[channel].adc_decimation;
+		chip->adc->amux_prop->hw_settle_time =
+			chip->adc->adc_channels[channel].hw_settle_time;
+		chip->adc->amux_prop->fast_avg_setup =
+			chip->adc->adc_channels[channel].fast_avg_setup;
+		chip->adc->amux_prop->mode_sel =
 			ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
-		adc_drv->adc->amux_prop->chan_prop->timer_select =
+		chip->adc->amux_prop->chan_prop->timer_select =
 					ADC_MEAS_TIMER_SELECT1;
-		adc_drv->adc->amux_prop->chan_prop->meas_interval1 =
+		chip->adc->amux_prop->chan_prop->meas_interval1 =
 						ADC_MEAS1_INTERVAL_1S;
-		adc_drv->adc->amux_prop->chan_prop->low_thr = adc_tm->low_thr;
-		adc_drv->adc->amux_prop->chan_prop->high_thr = adc_tm->high_thr;
-		adc_drv->adc->amux_prop->chan_prop->tm_channel_select =
+		chip->adc->amux_prop->chan_prop->low_thr = adc_tm->low_thr;
+		chip->adc->amux_prop->chan_prop->high_thr = adc_tm->high_thr;
+		chip->adc->amux_prop->chan_prop->tm_channel_select =
 			adc_tm->btm_channel_num;
 
-		rc = qpnp_adc_tm_configure(adc_drv->adc->amux_prop);
+		rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
 		if (rc) {
 			pr_err("adc-tm tm configure failed with %d\n", rc);
 			return -EINVAL;
@@ -765,27 +795,27 @@
 	} else if (mode == THERMAL_DEVICE_DISABLED) {
 		sensor_mask = 1 << adc_tm->sensor_num;
 		/* Disable bank */
-		rc = qpnp_adc_tm_disable();
+		rc = qpnp_adc_tm_disable(chip);
 		if (rc < 0) {
 			pr_err("adc-tm disable failed\n");
 			return rc;
 		}
 
 		/* Check if a conversion is in progress */
-		rc = qpnp_adc_tm_req_sts_check();
+		rc = qpnp_adc_tm_req_sts_check(chip);
 		if (rc < 0) {
 			pr_err("adc-tm req_sts check failed\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_MULTI_MEAS_EN,
-			sensor_mask, false);
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, false);
 		if (rc < 0) {
 			pr_err("multi measurement update failed\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_enable_if_channel_meas();
+		rc = qpnp_adc_tm_enable_if_channel_meas(chip);
 		if (rc < 0) {
 			pr_err("re-enabling measurement failed\n");
 			return rc;
@@ -801,9 +831,12 @@
 				   int trip, enum thermal_trip_type *type)
 {
 	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 
-	if (!adc_tm || qpnp_adc_tm_check_revision(adc_tm->btm_channel_num)
-						|| !type || type < 0)
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
 	switch (trip) {
@@ -824,15 +857,17 @@
 				   int trip, unsigned long *temp)
 {
 	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
+	struct qpnp_adc_tm_chip *chip = adc_tm_sensor->chip;
 	int64_t result = 0;
 	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
 	unsigned int reg, rc = 0, btm_channel_num;
 	uint16_t reg_low_thr_lsb, reg_low_thr_msb;
 	uint16_t reg_high_thr_lsb, reg_high_thr_msb;
 
-	if (!adc_tm || qpnp_adc_tm_check_revision(
-					adc_tm_sensor->btm_channel_num))
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm_sensor->btm_channel_num))
 		return -EINVAL;
 
 	btm_channel_num = adc_tm_sensor->btm_channel_num;
@@ -843,13 +878,15 @@
 
 	switch (trip) {
 	case ADC_TM_TRIP_HIGH_WARM:
-		rc = qpnp_adc_tm_read_reg(reg_low_thr_lsb, &trip_warm_thr0);
+		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_lsb,
+							&trip_warm_thr0);
 		if (rc) {
 			pr_err("adc-tm low_thr_lsb err\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_read_reg(reg_low_thr_msb, &trip_warm_thr1);
+		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_msb,
+							&trip_warm_thr1);
 		if (rc) {
 			pr_err("adc-tm low_thr_msb err\n");
 			return rc;
@@ -857,13 +894,15 @@
 	reg = (trip_warm_thr1 << 8) | trip_warm_thr0;
 	break;
 	case ADC_TM_TRIP_LOW_COOL:
-		rc = qpnp_adc_tm_read_reg(reg_high_thr_lsb, &trip_cool_thr0);
+		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_lsb,
+							&trip_cool_thr0);
 		if (rc) {
 			pr_err("adc-tm_tm high_thr_lsb err\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_read_reg(reg_high_thr_msb, &trip_cool_thr1);
+		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_msb,
+							&trip_cool_thr1);
 		if (rc) {
 			pr_err("adc-tm_tm high_thr_lsb err\n");
 			return rc;
@@ -874,7 +913,7 @@
 		return -EINVAL;
 	}
 
-	rc = qpnp_adc_tm_scale_voltage_therm_pu2(adc_tm->vadc_dev, reg,
+	rc = qpnp_adc_tm_scale_voltage_therm_pu2(chip->vadc_dev, reg,
 								&result);
 	if (rc < 0) {
 		pr_err("Failed to lookup the therm thresholds\n");
@@ -889,19 +928,21 @@
 static int qpnp_adc_tm_set_trip_temp(struct thermal_zone_device *thermal,
 				   int trip, long temp)
 {
-	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 	struct qpnp_adc_tm_config tm_config;
 	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
 	uint16_t reg_low_thr_lsb, reg_low_thr_msb;
 	uint16_t reg_high_thr_lsb, reg_high_thr_msb;
 	int rc = 0, btm_channel_num;
 
-	if (!adc_tm || qpnp_adc_tm_check_revision(
-				adc_tm_sensor->btm_channel_num))
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
-	tm_config.channel = adc_tm_sensor->vadc_channel_num;
+	tm_config.channel = adc_tm->vadc_channel_num;
 	switch (trip) {
 	case ADC_TM_TRIP_HIGH_WARM:
 		tm_config.high_thr_temp = temp;
@@ -915,7 +956,7 @@
 
 	pr_debug("requested a high - %d and low - %d with trip - %d\n",
 			tm_config.high_thr_temp, tm_config.low_thr_temp, trip);
-	rc = qpnp_adc_tm_scale_therm_voltage_pu2(adc_tm->vadc_dev, &tm_config);
+	rc = qpnp_adc_tm_scale_therm_voltage_pu2(chip->vadc_dev, &tm_config);
 	if (rc < 0) {
 		pr_err("Failed to lookup the adc-tm thresholds\n");
 		return rc;
@@ -926,7 +967,7 @@
 	trip_cool_thr0 = ((tm_config.high_thr_voltage << 24) >> 24);
 	trip_cool_thr1 = ((tm_config.high_thr_voltage << 16) >> 24);
 
-	btm_channel_num = adc_tm_sensor->btm_channel_num;
+	btm_channel_num = adc_tm->btm_channel_num;
 	reg_low_thr_lsb = adc_tm_data[btm_channel_num].low_thr_lsb_addr;
 	reg_low_thr_msb = adc_tm_data[btm_channel_num].low_thr_msb_addr;
 	reg_high_thr_lsb = adc_tm_data[btm_channel_num].high_thr_lsb_addr;
@@ -934,32 +975,36 @@
 
 	switch (trip) {
 	case ADC_TM_TRIP_HIGH_WARM:
-		rc = qpnp_adc_tm_write_reg(reg_low_thr_lsb, trip_cool_thr0);
+		rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_lsb,
+							trip_cool_thr0);
 		if (rc) {
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_write_reg(reg_low_thr_msb, trip_cool_thr1);
+		rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_msb,
+							trip_cool_thr1);
 		if (rc) {
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
-	adc_tm_sensor->low_thr = tm_config.high_thr_voltage;
+	adc_tm->low_thr = tm_config.high_thr_voltage;
 	break;
 	case ADC_TM_TRIP_LOW_COOL:
-		rc = qpnp_adc_tm_write_reg(reg_high_thr_lsb, trip_warm_thr0);
+		rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_lsb,
+							trip_warm_thr0);
 		if (rc) {
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_write_reg(reg_high_thr_msb, trip_warm_thr1);
+		rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_msb,
+							trip_warm_thr1);
 		if (rc) {
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
-	adc_tm_sensor->high_thr = tm_config.low_thr_voltage;
+	adc_tm->high_thr = tm_config.low_thr_voltage;
 	break;
 	default:
 		return -EINVAL;
@@ -1034,11 +1079,15 @@
 			int trip, enum thermal_trip_activation_mode mode)
 {
 	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 	int rc = 0, sensor_mask = 0;
 	u8 thr_int_en = 0;
 	bool state = false;
 
-	if (!adc_tm || qpnp_adc_tm_check_revision(adc_tm->btm_channel_num))
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
 	if (mode == THERMAL_TRIP_ACTIVATION_ENABLED)
@@ -1053,7 +1102,7 @@
 		/* low_thr (lower voltage) for higher temp */
 		thr_int_en = adc_tm_data[adc_tm->btm_channel_num].
 							low_thr_int_chan_en;
-		rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_LOW_THR_INT_EN,
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
 				sensor_mask, state);
 		if (rc)
 			pr_err("channel:%x failed\n", adc_tm->btm_channel_num);
@@ -1062,7 +1111,7 @@
 		/* high_thr (higher voltage) for cooler temp */
 		thr_int_en = adc_tm_data[adc_tm->btm_channel_num].
 							high_thr_int_chan_en;
-		rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_HIGH_THR_INT_EN,
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
 				sensor_mask, state);
 		if (rc)
 			pr_err("channel:%x failed\n", adc_tm->btm_channel_num);
@@ -1074,32 +1123,31 @@
 	return rc;
 }
 
-static int qpnp_adc_tm_read_status(void)
+static int qpnp_adc_tm_read_status(struct qpnp_adc_tm_chip *chip)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	u8 status_low = 0, status_high = 0, qpnp_adc_tm_meas_en = 0;
 	u8 adc_tm_low_enable = 0, adc_tm_high_enable = 0;
 	u8 sensor_mask = 0;
 	int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0, btm_chan_num;
 
-	if (!adc_tm || !adc_tm->adc_tm_initialized)
+	if (qpnp_adc_tm_is_valid(chip))
 		return -ENODEV;
 
-	mutex_lock(&adc_tm->adc->adc_lock);
+	mutex_lock(&chip->adc->adc_lock);
 
-	rc = qpnp_adc_tm_req_sts_check();
+	rc = qpnp_adc_tm_req_sts_check(chip);
 	if (rc) {
 		pr_err("adc-tm-tm req sts check failed with %d\n", rc);
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_STATUS_LOW, &status_low);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_LOW, &status_low);
 	if (rc) {
 		pr_err("adc-tm-tm read status low failed with %d\n", rc);
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_STATUS_HIGH, &status_high);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_HIGH, &status_high);
 	if (rc) {
 		pr_err("adc-tm-tm read status high failed with %d\n", rc);
 		goto fail;
@@ -1107,7 +1155,7 @@
 
 	/* Check which interrupt threshold is lower and measure against the
 	 * enabled channel */
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_MULTI_MEAS_EN,
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
 							&qpnp_adc_tm_meas_en);
 	if (rc) {
 		pr_err("adc-tm-tm read status high failed with %d\n", rc);
@@ -1119,37 +1167,37 @@
 
 	if (adc_tm_high_enable) {
 		sensor_notify_num = adc_tm_high_enable;
-		while (i < adc_tm->max_channels_available) {
+		while (i < chip->max_channels_available) {
 			if ((sensor_notify_num & 0x1) == 1)
 				sensor_num = i;
 			sensor_notify_num >>= 1;
 			i++;
 		}
 
-		btm_chan_num = adc_tm->sensor[sensor_num].btm_channel_num;
+		btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
 		pr_debug("high:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
 			sensor_num, adc_tm_high_enable, adc_tm_low_enable,
 			qpnp_adc_tm_meas_en);
-		if (!adc_tm->sensor[sensor_num].thermal_node) {
+		if (!chip->sensor[sensor_num].thermal_node) {
 			/* For non thermal registered clients
 				such as usb_id, vbatt, pmic_therm */
 			sensor_mask = 1 << sensor_num;
 			pr_debug("non thermal node - mask:%x\n", sensor_mask);
-			rc = qpnp_adc_tm_reg_update(
+			rc = qpnp_adc_tm_reg_update(chip,
 				QPNP_ADC_TM_HIGH_THR_INT_EN,
 				sensor_mask, false);
 			if (rc < 0) {
 				pr_err("high threshold int read failed\n");
 				goto fail;
 			}
-			adc_tm->sensor[sensor_num].high_thr_notify = true;
+			chip->sensor[sensor_num].high_thr_notify = true;
 		} else {
 			/* Uses the thermal sysfs registered device to disable
 				the corresponding high voltage threshold which
 				 is triggered by low temp */
 			pr_debug("thermal node with mask:%x\n", sensor_mask);
 			rc = qpnp_adc_tm_activate_trip_type(
-				adc_tm->sensor[sensor_num].tz_dev,
+				chip->sensor[sensor_num].tz_dev,
 				ADC_TM_TRIP_LOW_COOL,
 				THERMAL_TRIP_ACTIVATION_DISABLED);
 			if (rc < 0) {
@@ -1162,37 +1210,37 @@
 	if (adc_tm_low_enable) {
 		sensor_notify_num = adc_tm_low_enable;
 		i = 0;
-		while (i < adc_tm->max_channels_available) {
+		while (i < chip->max_channels_available) {
 			if ((sensor_notify_num & 0x1) == 1)
 				sensor_num = i;
 			sensor_notify_num >>= 1;
 			i++;
 		}
 
-		btm_chan_num = adc_tm->sensor[sensor_num].btm_channel_num;
+		btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
 		pr_debug("low:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
 			sensor_num, adc_tm_high_enable, adc_tm_low_enable,
 			qpnp_adc_tm_meas_en);
-		if (!adc_tm->sensor[sensor_num].thermal_node) {
+		if (!chip->sensor[sensor_num].thermal_node) {
 			/* For non thermal registered clients
 				such as usb_id, vbatt, pmic_therm */
 			pr_debug("non thermal node - mask:%x\n", sensor_mask);
 			sensor_mask = 1 << sensor_num;
-			rc = qpnp_adc_tm_reg_update(
+			rc = qpnp_adc_tm_reg_update(chip,
 				QPNP_ADC_TM_LOW_THR_INT_EN,
 				sensor_mask, false);
 			if (rc < 0) {
 				pr_err("low threshold int read failed\n");
 				goto fail;
 			}
-			adc_tm->sensor[sensor_num].low_thr_notify = true;
+			chip->sensor[sensor_num].low_thr_notify = true;
 		} else {
 			/* Uses the thermal sysfs registered device to disable
 				the corresponding low voltage threshold which
 				 is triggered by high temp */
 			pr_debug("thermal node with mask:%x\n", sensor_mask);
 			rc = qpnp_adc_tm_activate_trip_type(
-				adc_tm->sensor[sensor_num].tz_dev,
+				chip->sensor[sensor_num].tz_dev,
 				ADC_TM_TRIP_HIGH_WARM,
 				THERMAL_TRIP_ACTIVATION_DISABLED);
 			if (rc < 0) {
@@ -1203,14 +1251,14 @@
 	}
 
 	if (adc_tm_high_enable || adc_tm_low_enable) {
-		rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_MULTI_MEAS_EN,
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
 			sensor_mask, false);
 		if (rc < 0) {
 			pr_err("multi meas disable for channel failed\n");
 			goto fail;
 		}
 
-		rc = qpnp_adc_tm_enable_if_channel_meas();
+		rc = qpnp_adc_tm_enable_if_channel_meas(chip);
 		if (rc < 0) {
 			pr_err("re-enabling measurement failed\n");
 			return rc;
@@ -1220,62 +1268,67 @@
 								sensor_mask);
 
 fail:
-	mutex_unlock(&adc_tm->adc->adc_lock);
+	mutex_unlock(&chip->adc->adc_lock);
 
 	if (adc_tm_high_enable || adc_tm_low_enable)
-		schedule_work(&adc_tm->sensor[sensor_num].work);
+		schedule_work(&chip->sensor[sensor_num].work);
 
 	return rc;
 }
 
 static void qpnp_adc_tm_high_thr_work(struct work_struct *work)
 {
+	struct qpnp_adc_tm_chip *chip = container_of(work,
+			struct qpnp_adc_tm_chip, trigger_high_thr_work);
 	int rc;
 
-	rc = qpnp_adc_tm_read_status();
+	rc = qpnp_adc_tm_read_status(chip);
 	if (rc < 0)
 		pr_err("adc-tm high thr work failed\n");
 
 	return;
 }
-DECLARE_WORK(trigger_completion_adc_tm_high_thr_work,
-					qpnp_adc_tm_high_thr_work);
 
 static irqreturn_t qpnp_adc_tm_high_thr_isr(int irq, void *data)
 {
-	qpnp_adc_tm_disable();
+	struct qpnp_adc_tm_chip *chip = data;
 
-	schedule_work(&trigger_completion_adc_tm_high_thr_work);
+	qpnp_adc_tm_disable(chip);
+
+	schedule_work(&chip->trigger_high_thr_work);
 
 	return IRQ_HANDLED;
 }
 
 static void qpnp_adc_tm_low_thr_work(struct work_struct *work)
 {
+	struct qpnp_adc_tm_chip *chip = container_of(work,
+			struct qpnp_adc_tm_chip, trigger_low_thr_work);
 	int rc;
 
-	rc = qpnp_adc_tm_read_status();
+	rc = qpnp_adc_tm_read_status(chip);
 	if (rc < 0)
 		pr_err("adc-tm low thr work failed\n");
 
 	return;
 }
-DECLARE_WORK(trigger_completion_adc_tm_low_thr_work, qpnp_adc_tm_low_thr_work);
 
 static irqreturn_t qpnp_adc_tm_low_thr_isr(int irq, void *data)
 {
-	qpnp_adc_tm_disable();
+	struct qpnp_adc_tm_chip *chip = data;
 
-	schedule_work(&trigger_completion_adc_tm_low_thr_work);
+	qpnp_adc_tm_disable(chip);
+
+	schedule_work(&chip->trigger_low_thr_work);
 
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t qpnp_adc_tm_isr(int irq, void *dev_id)
+static irqreturn_t qpnp_adc_tm_isr(int irq, void *data)
 {
-	struct qpnp_adc_tm_drv *adc_tm = dev_id;
+	struct qpnp_adc_tm_chip *chip = data;
 
-	complete(&adc_tm->adc->adc_rslt_completion);
+	complete(&chip->adc->adc_rslt_completion);
 
 	return IRQ_HANDLED;
 }
@@ -1284,11 +1337,11 @@
 			     unsigned long *temp)
 {
 	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
+	struct qpnp_adc_tm_chip *chip = adc_tm_sensor->chip;
 	struct qpnp_vadc_result result;
 	int rc = 0;
 
-	rc = qpnp_vadc_read(adc_tm->vadc_dev,
+	rc = qpnp_vadc_read(chip->vadc_dev,
 				adc_tm_sensor->vadc_channel_num, &result);
 	if (rc)
 		return rc;
@@ -1308,14 +1361,14 @@
 	.set_trip_temp = qpnp_adc_tm_set_trip_temp,
 };
 
-int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_btm_param *param)
+int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	uint32_t channel, dt_index = 0, scale_type = 0;
 	int rc = 0, i = 0;
 	bool chan_found = false;
 
-	if (!adc_tm || !adc_tm->adc_tm_initialized)
+	if (qpnp_adc_tm_is_valid(chip))
 		return -ENODEV;
 
 	if (param->threshold_notification == NULL) {
@@ -1323,11 +1376,11 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&adc_tm->adc->adc_lock);
+	mutex_lock(&chip->adc->adc_lock);
 
 	channel = param->channel;
-	while (i < adc_tm->max_channels_available) {
-		if (adc_tm->adc->adc_channels[i].channel_num ==
+	while (i < chip->max_channels_available) {
+		if (chip->adc->adc_channels[i].channel_num ==
 							channel) {
 			dt_index = i;
 			chan_found = true;
@@ -1342,12 +1395,12 @@
 		goto fail_unlock;
 	}
 
-	rc = qpnp_adc_tm_check_revision(
-			adc_tm->sensor[dt_index].btm_channel_num);
+	rc = qpnp_adc_tm_check_revision(chip,
+			chip->sensor[dt_index].btm_channel_num);
 	if (rc < 0)
 		goto fail_unlock;
 
-	scale_type = adc_tm->adc->adc_channels[dt_index].adc_scale_fn;
+	scale_type = chip->adc->adc_channels[dt_index].adc_scale_fn;
 	if (scale_type >= SCALE_RSCALE_NONE) {
 		rc = -EBADF;
 		goto fail_unlock;
@@ -1355,155 +1408,158 @@
 
 	pr_debug("channel:%d, scale_type:%d, dt_idx:%d",
 					channel, scale_type, dt_index);
-	adc_tm->adc->amux_prop->amux_channel = channel;
-	adc_tm->adc->amux_prop->decimation =
-			adc_tm->adc->adc_channels[dt_index].adc_decimation;
-	adc_tm->adc->amux_prop->hw_settle_time =
-			adc_tm->adc->adc_channels[dt_index].hw_settle_time;
-	adc_tm->adc->amux_prop->fast_avg_setup =
-			adc_tm->adc->adc_channels[dt_index].fast_avg_setup;
-	adc_tm->adc->amux_prop->mode_sel =
+	chip->adc->amux_prop->amux_channel = channel;
+	chip->adc->amux_prop->decimation =
+			chip->adc->adc_channels[dt_index].adc_decimation;
+	chip->adc->amux_prop->hw_settle_time =
+			chip->adc->adc_channels[dt_index].hw_settle_time;
+	chip->adc->amux_prop->fast_avg_setup =
+			chip->adc->adc_channels[dt_index].fast_avg_setup;
+	chip->adc->amux_prop->mode_sel =
 		ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
-	adc_tm->adc->amux_prop->chan_prop->meas_interval1 =
+	chip->adc->amux_prop->chan_prop->meas_interval1 =
 						ADC_MEAS1_INTERVAL_1S;
-	adc_tm_rscale_fn[scale_type].chan(adc_tm->vadc_dev, param,
-			&adc_tm->adc->amux_prop->chan_prop->low_thr,
-			&adc_tm->adc->amux_prop->chan_prop->high_thr);
-	adc_tm->adc->amux_prop->chan_prop->tm_channel_select =
-				adc_tm->sensor[dt_index].btm_channel_num;
-	adc_tm->adc->amux_prop->chan_prop->timer_select =
+	adc_tm_rscale_fn[scale_type].chan(chip->vadc_dev, param,
+			&chip->adc->amux_prop->chan_prop->low_thr,
+			&chip->adc->amux_prop->chan_prop->high_thr);
+	chip->adc->amux_prop->chan_prop->tm_channel_select =
+				chip->sensor[dt_index].btm_channel_num;
+	chip->adc->amux_prop->chan_prop->timer_select =
 					ADC_MEAS_TIMER_SELECT1;
-	adc_tm->adc->amux_prop->chan_prop->state_request =
+	chip->adc->amux_prop->chan_prop->state_request =
 					param->state_request;
-	rc = qpnp_adc_tm_configure(adc_tm->adc->amux_prop);
+	rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
 	if (rc) {
 		pr_err("adc-tm configure failed with %d\n", rc);
 		goto fail_unlock;
 	}
 
-	adc_tm->sensor[dt_index].btm_param = param;
-	adc_tm->sensor[dt_index].scale_type = scale_type;
+	chip->sensor[dt_index].btm_param = param;
+	chip->sensor[dt_index].scale_type = scale_type;
 
 fail_unlock:
-	mutex_unlock(&adc_tm->adc->adc_lock);
+	mutex_unlock(&chip->adc->adc_lock);
 
 	return rc;
 }
 EXPORT_SYMBOL(qpnp_adc_tm_channel_measure);
 
-int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_btm_param *param)
+int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	uint32_t channel, dt_index = 0, btm_chan_num;
 	u8 sensor_mask = 0;
 	int rc = 0;
 
-	if (!adc_tm || !adc_tm->adc_tm_initialized)
+	if (qpnp_adc_tm_is_valid(chip))
 		return -ENODEV;
 
-	mutex_lock(&adc_tm->adc->adc_lock);
+	mutex_lock(&chip->adc->adc_lock);
 
 	/* Disable bank */
-	rc = qpnp_adc_tm_disable();
+	rc = qpnp_adc_tm_disable(chip);
 	if (rc < 0) {
 		pr_err("adc-tm disable failed\n");
 		goto fail;
 	}
 
 	/* Check if a conversion is in progress */
-	rc = qpnp_adc_tm_req_sts_check();
+	rc = qpnp_adc_tm_req_sts_check(chip);
 	if (rc < 0) {
 		pr_err("adc-tm req_sts check failed\n");
 		goto fail;
 	}
 
 	channel = param->channel;
-	while ((adc_tm->adc->adc_channels[dt_index].channel_num
-		!= channel) && (dt_index < adc_tm->max_channels_available))
+	while ((chip->adc->adc_channels[dt_index].channel_num
+		!= channel) && (dt_index < chip->max_channels_available))
 		dt_index++;
 
-	if (dt_index >= adc_tm->max_channels_available) {
+	if (dt_index >= chip->max_channels_available) {
 		pr_err("not a valid ADC_TMN channel\n");
 		rc = -EINVAL;
 		goto fail;
 	}
 
-	btm_chan_num = adc_tm->sensor[dt_index].btm_channel_num;
-	sensor_mask = 1 << adc_tm->sensor[dt_index].sensor_num;
+	btm_chan_num = chip->sensor[dt_index].btm_channel_num;
+	sensor_mask = 1 << chip->sensor[dt_index].sensor_num;
 
-	rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_LOW_THR_INT_EN,
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
 		sensor_mask, false);
 	if (rc < 0) {
 		pr_err("low threshold int write failed\n");
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_HIGH_THR_INT_EN,
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
 		sensor_mask, false);
 	if (rc < 0) {
 		pr_err("high threshold int enable failed\n");
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_MULTI_MEAS_EN,
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
 		sensor_mask, false);
 	if (rc < 0) {
 		pr_err("multi measurement en failed\n");
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_enable_if_channel_meas();
+	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
 	if (rc < 0)
 		pr_err("re-enabling measurement failed\n");
 
 fail:
-	mutex_unlock(&adc_tm->adc->adc_lock);
+	mutex_unlock(&chip->adc->adc_lock);
 
 	return rc;
 }
 EXPORT_SYMBOL(qpnp_adc_tm_disable_chan_meas);
 
-int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_btm_param *param)
+int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
+				struct qpnp_adc_tm_btm_param *param)
 {
 	param->channel = LR_MUX10_PU2_AMUX_USB_ID_LV;
-	return qpnp_adc_tm_channel_measure(param);
+	return qpnp_adc_tm_channel_measure(chip, param);
 }
 EXPORT_SYMBOL(qpnp_adc_tm_usbid_configure);
 
-int32_t qpnp_adc_tm_usbid_end(void)
+int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip)
 {
 	struct qpnp_adc_tm_btm_param param;
 
-	return qpnp_adc_tm_disable_chan_meas(&param);
+	return qpnp_adc_tm_disable_chan_meas(chip, &param);
 }
 EXPORT_SYMBOL(qpnp_adc_tm_usbid_end);
 
-int32_t qpnp_adc_tm_is_ready(void)
+struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
+	struct qpnp_adc_tm_chip *chip;
+	struct device_node *node = NULL;
+	char prop_name[QPNP_MAX_PROP_NAME_LEN];
 
-	if (!adc_tm || !adc_tm->adc_tm_initialized)
-		return -EPROBE_DEFER;
-	else
-		return 0;
+	snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-adc_tm", name);
+
+	node = of_parse_phandle(dev->of_node, prop_name, 0);
+	if (node == NULL)
+		return ERR_PTR(-ENODEV);
+
+	list_for_each_entry(chip, &qpnp_adc_tm_device_list, list)
+		if (chip->adc->spmi->dev.of_node == node)
+			return chip;
+
+	return ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL(qpnp_adc_tm_is_ready);
+EXPORT_SYMBOL(qpnp_get_adc_tm);
 
 static int __devinit qpnp_adc_tm_probe(struct spmi_device *spmi)
 {
 	struct device_node *node = spmi->dev.of_node, *child;
-	struct qpnp_adc_tm_drv *adc_tm;
+	struct qpnp_adc_tm_chip *chip;
 	struct qpnp_adc_drv *adc_qpnp;
-	int32_t count_adc_channel_list = 0, rc, sen_idx = 0;
+	int32_t count_adc_channel_list = 0, rc, sen_idx = 0, i = 0;
 	u8 thr_init = 0;
-
-	if (!node)
-		return -EINVAL;
-
-	if (qpnp_adc_tm) {
-		pr_err("adc-tm already in use\n");
-		return -EBUSY;
-	}
+	bool thermal_node = false;
 
 	for_each_child_of_node(node, child)
 		count_adc_channel_list++;
@@ -1513,16 +1569,15 @@
 		return -EINVAL;
 	}
 
-	adc_tm = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_tm_drv) +
+	chip = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_tm_chip) +
 			(count_adc_channel_list *
 			sizeof(struct qpnp_adc_tm_sensor)),
 				GFP_KERNEL);
-	if (!adc_tm) {
+	if (!chip) {
 		dev_err(&spmi->dev, "Unable to allocate memory\n");
 		return -ENOMEM;
 	}
 
-	qpnp_adc_tm = adc_tm;
 	adc_qpnp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_drv),
 			GFP_KERNEL);
 	if (!adc_qpnp) {
@@ -1531,75 +1586,74 @@
 		goto fail;
 	}
 
-	adc_tm->adc = adc_qpnp;
+	chip->adc = adc_qpnp;
 
-	rc = qpnp_adc_get_devicetree_data(spmi, adc_tm->adc);
+	rc = qpnp_adc_get_devicetree_data(spmi, chip->adc);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to read device tree\n");
 		goto fail;
 	}
-	mutex_init(&adc_tm->adc->adc_lock);
+	mutex_init(&chip->adc->adc_lock);
 
 	/* Register the ADC peripheral interrupt */
-	adc_tm->adc->adc_high_thr_irq = spmi_get_irq_byname(spmi,
+	chip->adc->adc_high_thr_irq = spmi_get_irq_byname(spmi,
 						NULL, "high-thr-en-set");
-	if (adc_tm->adc->adc_high_thr_irq < 0) {
+	if (chip->adc->adc_high_thr_irq < 0) {
 		pr_err("Invalid irq\n");
 		rc = -ENXIO;
 		goto fail;
 	}
 
-	adc_tm->adc->adc_low_thr_irq = spmi_get_irq_byname(spmi,
+	chip->adc->adc_low_thr_irq = spmi_get_irq_byname(spmi,
 						NULL, "low-thr-en-set");
-	if (adc_tm->adc->adc_low_thr_irq < 0) {
+	if (chip->adc->adc_low_thr_irq < 0) {
 		pr_err("Invalid irq\n");
 		rc = -ENXIO;
 		goto fail;
 	}
 
-	adc_tm->vadc_dev = qpnp_get_vadc(&spmi->dev, "adc_tm");
-	if (IS_ERR(adc_tm->vadc_dev)) {
-		rc = PTR_ERR(adc_tm->vadc_dev);
+	chip->vadc_dev = qpnp_get_vadc(&spmi->dev, "adc_tm");
+	if (IS_ERR(chip->vadc_dev)) {
+		rc = PTR_ERR(chip->vadc_dev);
 		if (rc != -EPROBE_DEFER)
 			pr_err("vadc property missing, rc=%d\n", rc);
 		goto fail;
 	}
 
-	rc = devm_request_irq(&spmi->dev, adc_tm->adc->adc_irq_eoc,
+	rc = devm_request_irq(&spmi->dev, chip->adc->adc_irq_eoc,
 				qpnp_adc_tm_isr, IRQF_TRIGGER_RISING,
-				"qpnp_adc_tm_interrupt", adc_tm);
+				"qpnp_adc_tm_interrupt", chip);
 	if (rc) {
 		dev_err(&spmi->dev,
 			"failed to request adc irq with error %d\n", rc);
 		goto fail;
 	} else {
-		enable_irq_wake(adc_tm->adc->adc_irq_eoc);
+		enable_irq_wake(chip->adc->adc_irq_eoc);
 	}
 
-	rc = devm_request_irq(&spmi->dev, adc_tm->adc->adc_high_thr_irq,
+	rc = devm_request_irq(&spmi->dev, chip->adc->adc_high_thr_irq,
 				qpnp_adc_tm_high_thr_isr,
-		IRQF_TRIGGER_RISING, "qpnp_adc_tm_high_interrupt", adc_tm);
+		IRQF_TRIGGER_RISING, "qpnp_adc_tm_high_interrupt", chip);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to request adc irq\n");
 		goto fail;
 	} else {
-		enable_irq_wake(adc_tm->adc->adc_high_thr_irq);
+		enable_irq_wake(chip->adc->adc_high_thr_irq);
 	}
 
-	rc = devm_request_irq(&spmi->dev, adc_tm->adc->adc_low_thr_irq,
+	rc = devm_request_irq(&spmi->dev, chip->adc->adc_low_thr_irq,
 				qpnp_adc_tm_low_thr_isr,
-		IRQF_TRIGGER_RISING, "qpnp_adc_tm_low_interrupt", adc_tm);
+		IRQF_TRIGGER_RISING, "qpnp_adc_tm_low_interrupt", chip);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to request adc irq\n");
 		goto fail;
 	} else {
-		enable_irq_wake(adc_tm->adc->adc_low_thr_irq);
+		enable_irq_wake(chip->adc->adc_low_thr_irq);
 	}
 
 	for_each_child_of_node(node, child) {
 		char name[25];
 		int btm_channel_num;
-		bool thermal_node = false;
 
 		rc = of_property_read_u32(child,
 				"qcom,btm-channel-number", &btm_channel_num);
@@ -1607,80 +1661,95 @@
 			pr_err("Invalid btm channel number\n");
 			goto fail;
 		}
-		adc_tm->sensor[sen_idx].btm_channel_num = btm_channel_num;
-		adc_tm->sensor[sen_idx].vadc_channel_num =
-				adc_tm->adc->adc_channels[sen_idx].channel_num;
-		adc_tm->sensor[sen_idx].sensor_num = sen_idx;
+		chip->sensor[sen_idx].btm_channel_num = btm_channel_num;
+		chip->sensor[sen_idx].vadc_channel_num =
+				chip->adc->adc_channels[sen_idx].channel_num;
+		chip->sensor[sen_idx].sensor_num = sen_idx;
+		chip->sensor[sen_idx].chip = chip;
 		pr_debug("btm_chan:%x, vadc_chan:%x\n", btm_channel_num,
-			adc_tm->adc->adc_channels[sen_idx].channel_num);
+			chip->adc->adc_channels[sen_idx].channel_num);
 		thermal_node = of_property_read_bool(child,
 					"qcom,thermal-node");
 		if (thermal_node) {
 			/* Register with the thermal zone */
 			pr_debug("thermal node%x\n", btm_channel_num);
-			adc_tm->sensor[sen_idx].mode = THERMAL_DEVICE_DISABLED;
-			adc_tm->sensor[sen_idx].thermal_node = true;
+			chip->sensor[sen_idx].mode = THERMAL_DEVICE_DISABLED;
+			chip->sensor[sen_idx].thermal_node = true;
 			snprintf(name, sizeof(name),
-				adc_tm->adc->adc_channels[sen_idx].name);
-			adc_tm->sensor[sen_idx].meas_interval =
+				chip->adc->adc_channels[sen_idx].name);
+			chip->sensor[sen_idx].meas_interval =
 				QPNP_ADC_TM_MEAS_INTERVAL;
-			adc_tm->sensor[sen_idx].low_thr =
+			chip->sensor[sen_idx].low_thr =
 						QPNP_ADC_TM_M0_LOW_THR;
-			adc_tm->sensor[sen_idx].high_thr =
+			chip->sensor[sen_idx].high_thr =
 						QPNP_ADC_TM_M0_HIGH_THR;
-			adc_tm->sensor[sen_idx].tz_dev =
+			chip->sensor[sen_idx].tz_dev =
 				thermal_zone_device_register(name,
 				ADC_TM_TRIP_NUM,
-				&adc_tm->sensor[sen_idx],
+				&chip->sensor[sen_idx],
 				&qpnp_adc_tm_thermal_ops, 0, 0, 0, 0);
-			if (IS_ERR(adc_tm->sensor[sen_idx].tz_dev))
+			if (IS_ERR(chip->sensor[sen_idx].tz_dev))
 				pr_err("thermal device register failed.\n");
 		}
-		INIT_WORK(&adc_tm->sensor[sen_idx].work, notify_adc_tm_fn);
+		INIT_WORK(&chip->sensor[sen_idx].work, notify_adc_tm_fn);
 		sen_idx++;
 	}
-	adc_tm->max_channels_available = count_adc_channel_list;
-	dev_set_drvdata(&spmi->dev, adc_tm);
-	rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_HIGH_THR_INT_EN, thr_init);
+	chip->max_channels_available = count_adc_channel_list;
+	INIT_WORK(&chip->trigger_high_thr_work, qpnp_adc_tm_high_thr_work);
+	INIT_WORK(&chip->trigger_low_thr_work, qpnp_adc_tm_low_thr_work);
+	dev_set_drvdata(&spmi->dev, chip);
+	list_add(&chip->list, &qpnp_adc_tm_device_list);
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+								thr_init);
 	if (rc < 0) {
 		pr_err("high thr init failed\n");
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_LOW_THR_INT_EN, thr_init);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+								thr_init);
 	if (rc < 0) {
 		pr_err("low thr init failed\n");
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_MULTI_MEAS_EN, thr_init);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+								thr_init);
 	if (rc < 0) {
 		pr_err("multi meas en failed\n");
 		goto fail;
 	}
 
-	adc_tm->adc_tm_initialized = true;
-
 	pr_debug("OK\n");
 	return 0;
 fail:
-	qpnp_adc_tm = NULL;
+	for_each_child_of_node(node, child) {
+		thermal_node = of_property_read_bool(child,
+					"qcom,thermal-node");
+		if (thermal_node)
+			thermal_zone_device_unregister(chip->sensor[i].tz_dev);
+		i++;
+	}
+	dev_set_drvdata(&spmi->dev, NULL);
 	return rc;
 }
 
 static int __devexit qpnp_adc_tm_remove(struct spmi_device *spmi)
 {
-	struct qpnp_adc_tm_drv *adc_tm = dev_get_drvdata(&spmi->dev);
-	struct device_node *node = spmi->dev.of_node;
-	struct device_node *child;
+	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&spmi->dev);
+	struct device_node *node = spmi->dev.of_node, *child;
+	bool thermal_node = false;
 	int i = 0;
 
 	for_each_child_of_node(node, child) {
-		thermal_zone_device_unregister(adc_tm->sensor[i].tz_dev);
+		thermal_node = of_property_read_bool(child,
+					"qcom,thermal-node");
+		if (thermal_node)
+			thermal_zone_device_unregister(chip->sensor[i].tz_dev);
 		i++;
 	}
 
-	adc_tm->adc_tm_initialized = false;
 	dev_set_drvdata(&spmi->dev, NULL);
 
 	return 0;
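
Note: with this refactor, adc_tm clients hold a per-chip handle instead of relying on a single global driver instance. A minimal sketch of a hypothetical kernel client follows; the "foo" consumer name, the threshold values, and the members not shown in this hunk (qpnp_tm_state, ADC_TM_HIGH_LOW_THR_ENABLE) are assumptions modeled on how dwc3-msm uses the API later in this patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/qpnp/qpnp-adc.h>

static struct qpnp_adc_tm_chip *foo_adc_tm;
static struct qpnp_adc_tm_btm_param foo_param;

/* Called by the adc_tm driver when a programmed threshold is crossed */
static void foo_thr_notify(enum qpnp_tm_state state, void *ctx)
{
	pr_debug("adc_tm threshold crossed, state=%d\n", state);
}

static int foo_arm_adc_tm(struct device *dev)
{
	int rc;

	/* Resolves the "qcom,foo-adc_tm" phandle from the client's DT node */
	foo_adc_tm = qpnp_get_adc_tm(dev, "foo");
	if (IS_ERR(foo_adc_tm))
		return PTR_ERR(foo_adc_tm);	/* -EPROBE_DEFER: retry later */

	foo_param.channel = LR_MUX10_PU2_AMUX_USB_ID_LV; /* example channel */
	foo_param.low_thr = 10000;		/* placeholder thresholds */
	foo_param.high_thr = 200000;
	foo_param.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
	foo_param.btm_ctx = dev;
	foo_param.threshold_notification = foo_thr_notify;

	rc = qpnp_adc_tm_channel_measure(foo_adc_tm, &foo_param);
	if (rc)
		dev_err(dev, "adc_tm channel measure failed, rc=%d\n", rc);

	return rc;
}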
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index c89f6d8..d3ea3be 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -204,6 +204,7 @@
 	int			pmic_id_irq;
 	struct work_struct	id_work;
 	struct qpnp_adc_tm_btm_param	adc_param;
+	struct qpnp_adc_tm_chip *adc_tm_dev;
 	struct delayed_work	init_adc_work;
 	bool			id_adc_detect;
 	u8			dcd_retries;
@@ -1709,7 +1710,8 @@
 	}
 
 	dcp = ((mdwc->charger.chg_type == DWC3_DCP_CHARGER) ||
-	      (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER));
+	      (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER) ||
+	      (mdwc->charger.chg_type == DWC3_FLOATED_CHARGER));
 	host_bus_suspend = mdwc->host_mode == 1;
 
 	if (!dcp && !host_bus_suspend)
@@ -1841,7 +1843,8 @@
 	}
 
 	dcp = ((mdwc->charger.chg_type == DWC3_DCP_CHARGER) ||
-	      (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER));
+	      (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER) ||
+	      (mdwc->charger.chg_type == DWC3_FLOATED_CHARGER));
 	host_bus_suspend = mdwc->host_mode == 1;
 
 	if (mdwc->lpm_flags & MDWC3_TCXO_SHUTDOWN) {
@@ -2354,7 +2357,7 @@
 	dwc3_id_work(&mdwc->id_work);
 
 	/* re-arm ADC interrupt */
-	qpnp_adc_tm_usbid_configure(&mdwc->adc_param);
+	qpnp_adc_tm_usbid_configure(mdwc->adc_tm_dev, &mdwc->adc_param);
 }
 
 static void dwc3_init_adc_work(struct work_struct *w)
@@ -2363,10 +2366,14 @@
 							init_adc_work.work);
 	int ret;
 
-	ret = qpnp_adc_tm_is_ready();
-	if (ret == -EPROBE_DEFER) {
-		queue_delayed_work(system_nrt_wq, to_delayed_work(w),
+	mdwc->adc_tm_dev = qpnp_get_adc_tm(mdwc->dev, "dwc_usb3");

+	if (IS_ERR(mdwc->adc_tm_dev)) {
+		if (PTR_ERR(mdwc->adc_tm_dev) == -EPROBE_DEFER)
+			queue_delayed_work(system_nrt_wq, to_delayed_work(w),
 					msecs_to_jiffies(100));
+		else
+			mdwc->adc_tm_dev = NULL;
+
 		return;
 	}
 
@@ -2377,7 +2384,7 @@
 	mdwc->adc_param.btm_ctx = mdwc;
 	mdwc->adc_param.threshold_notification = dwc3_adc_notification;
 
-	ret = qpnp_adc_tm_usbid_configure(&mdwc->adc_param);
+	ret = qpnp_adc_tm_usbid_configure(mdwc->adc_tm_dev, &mdwc->adc_param);
 	if (ret) {
 		dev_err(mdwc->dev, "%s: request ADC error %d\n", __func__, ret);
 		return;
@@ -2412,7 +2419,7 @@
 			dwc3_init_adc_work(&mdwc->init_adc_work.work);
 		return size;
 	} else if (!strnicmp(buf, "disable", 7)) {
-		qpnp_adc_tm_usbid_end();
+		qpnp_adc_tm_usbid_end(mdwc->adc_tm_dev);
 		mdwc->id_adc_detect = false;
 		return size;
 	}
@@ -3042,7 +3049,7 @@
 	}
 
 	if (mdwc->id_adc_detect)
-		qpnp_adc_tm_usbid_end();
+		qpnp_adc_tm_usbid_end(mdwc->adc_tm_dev);
 	if (dwc3_debugfs_root)
 		debugfs_remove_recursive(dwc3_debugfs_root);
 	if (mdwc->otg_xceiv) {
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 0d4d580..d0d9d34 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -727,7 +727,9 @@
 					work = 1;
 					break;
 				case DWC3_FLOATED_CHARGER:
-					dotg->charger_retry_count++;
+					if (dotg->charger_retry_count <
+							max_chgr_retry_count)
+						dotg->charger_retry_count++;
 					/*
 					 * In case of floating charger, if
 					 * retry count equal to max retry count
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index ac87cbd..94fced0 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -274,8 +274,8 @@
 {
 	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
 
-	if (mfd->ref_cnt > 1)
-		mfd->ref_cnt = 1;
+	for (; mfd->ref_cnt > 1; mfd->ref_cnt--)
+		pm_runtime_put(mfd->fbi->dev);
 
 	mdss_fb_release(mfd->fbi, 0);
 }
@@ -1096,7 +1096,8 @@
 					   mfd->op_enable);
 		if (result) {
 			pm_runtime_put(info->dev);
-			pr_err("mdss_fb_open: can't turn on display!\n");
+			pr_err("can't turn on fb%d! rc=%d\n", mfd->index,
+				result);
 			return result;
 		}
 	}
@@ -1122,8 +1123,7 @@
 		ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info,
 				       mfd->op_enable);
 		if (ret) {
-			pr_err("can't turn off display attached to fb%d!\n",
-				mfd->index);
+			pr_err("can't turn off fb%d! rc=%d\n", mfd->index, ret);
 			return ret;
 		}
 	}
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index 7609cdc..ad134a0 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -193,12 +193,18 @@
 		{20480, 247500} } },
 };
 
-static bool is_cea_format(int mode)
+static bool hdmi_tx_is_cea_format(int mode)
 {
-	if ((mode > 0) && (mode < HDMI_EVFRMT_END))
-		return true;
+	bool cea_fmt;
+
+	if ((mode > 0) && (mode <= HDMI_EVFRMT_END))
+		cea_fmt = true;
 	else
-		return false;
+		cea_fmt = false;
+
+	DEV_DBG("%s: %s\n", __func__, cea_fmt ? "Yes" : "No");
+
+	return cea_fmt;
 }
 
 const char *hdmi_tx_pm_name(enum hdmi_tx_power_module_type module)
@@ -2211,7 +2217,7 @@
 	}
 
 	if (!hdmi_tx_is_dvi_mode(hdmi_ctrl) &&
-	    is_cea_format(hdmi_ctrl->video_resolution)) {
+	    hdmi_tx_is_cea_format(hdmi_ctrl->video_resolution)) {
 		rc = hdmi_tx_audio_setup(hdmi_ctrl);
 		if (rc) {
 			DEV_ERR("%s: hdmi_msm_audio_setup failed. rc=%d\n",
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 6f9c98e..0a41ef8 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -133,7 +133,6 @@
 struct mdss_hw *mdss_irq_handlers[MDSS_MAX_HW_BLK];
 
 static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on);
-static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata);
 static int mdss_mdp_parse_dt(struct platform_device *pdev);
 static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev);
 static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev);
@@ -911,18 +910,6 @@
 	}
 }
 
-static void mdss_mdp_shutdown(struct platform_device *pdev)
-{
-	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
-
-	if (!mdata)
-		return;
-
-	pr_debug("display shutdown\n");
-
-	mdss_mdp_suspend_sub(mdata);
-}
-
 static int mdss_mdp_probe(struct platform_device *pdev)
 {
 	struct resource *res;
@@ -1795,7 +1782,7 @@
 	.remove = mdss_mdp_remove,
 	.suspend = mdss_mdp_suspend,
 	.resume = mdss_mdp_resume,
-	.shutdown = mdss_mdp_shutdown,
+	.shutdown = NULL,
 	.driver = {
 		/*
 		 * Driver name must match the device name added in
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 9b81633..6fb8883 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -409,11 +409,9 @@
 	if (ctx->polling_en) {
 		rc = mdss_mdp_video_pollwait(ctl);
 	} else {
-		rc = wait_for_completion_interruptible_timeout(&ctx->vsync_comp,
+		rc = wait_for_completion_timeout(&ctx->vsync_comp,
 				usecs_to_jiffies(VSYNC_TIMEOUT_US));
-		if (rc < 0) {
-			pr_warn("vsync wait interrupted ctl=%d\n", ctl->num);
-		} else if (rc == 0) {
+		if (rc == 0) {
 			pr_warn("vsync wait timeout %d, fallback to poll mode\n",
 					ctl->num);
 			ctx->polling_en++;
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 4032b91..c4dee86 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -595,7 +595,8 @@
 	mutex_lock(&mfd->lock);
 	if (pipe->play_cnt == 0) {
 		pr_debug("failed for pipe %d\n", pipe->num);
-		list_del(&pipe->used_list);
+		if (!list_empty(&pipe->used_list))
+			list_del_init(&pipe->used_list);
 		mdss_mdp_pipe_destroy(pipe);
 	}
 
@@ -827,20 +828,17 @@
 			pipe->params_changed++;
 			buf = &pipe->front_buf;
 		} else if (!pipe->params_changed) {
-			if (pipe->mixer) {
-				if (!mdss_mdp_pipe_is_staged(pipe)) {
-					list_del(&pipe->used_list);
-					list_add(&pipe->cleanup_list,
-						 &mdp5_data->pipes_cleanup);
-				}
+			if (pipe->mixer && !mdss_mdp_pipe_is_staged(pipe) &&
+			    !list_empty(&pipe->used_list)) {
+				list_del_init(&pipe->used_list);
+				list_add(&pipe->cleanup_list,
+					&mdp5_data->pipes_cleanup);
 			}
 			continue;
 		} else if (pipe->front_buf.num_planes) {
 			buf = &pipe->front_buf;
 		} else {
-			pr_warn("pipe queue w/o buffer. unstaging layer\n");
-			pipe->params_changed = 0;
-			mdss_mdp_mixer_pipe_unstage(pipe);
+			pr_warn("pipe queue w/o buffer\n");
 			continue;
 		}
 
@@ -894,9 +892,11 @@
 				continue;
 			}
 			mutex_lock(&mfd->lock);
-			list_del(&pipe->used_list);
-			list_add(&pipe->cleanup_list,
-				&mdp5_data->pipes_cleanup);
+			if (!list_empty(&pipe->used_list)) {
+				list_del_init(&pipe->used_list);
+				list_add(&pipe->cleanup_list,
+					&mdp5_data->pipes_cleanup);
+			}
 			mutex_unlock(&mfd->lock);
 			mdss_mdp_mixer_pipe_unstage(pipe);
 			mdss_mdp_pipe_unmap(pipe);
diff --git a/include/linux/input/synaptics_dsx.h b/include/linux/input/synaptics_dsx.h
index d121695..73016d6 100644
--- a/include/linux/input/synaptics_dsx.h
+++ b/include/linux/input/synaptics_dsx.h
@@ -35,11 +35,8 @@
  * struct synaptics_rmi4_platform_data - rmi4 platform data
  * @x_flip: x flip flag
  * @y_flip: y flip flag
- * @i2c_pull_up: pull up i2c bus with regulator
- * @power_down_enable: enable complete regulator shutdown in suspend
  * @irq_gpio: attention interrupt gpio
  * @irq_flags: flags used by the irq
- * @reset_flags: flags used by reset line
  * @reset_gpio: reset gpio
  * @panel_x: panel maximum values on the x
  * @panel_y: panel maximum values on the y
@@ -50,7 +47,6 @@
 	bool x_flip;
 	bool y_flip;
 	bool i2c_pull_up;
-	bool power_down_enable;
 	unsigned irq_gpio;
 	u32 irq_flags;
 	u32 reset_flags;
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index 87047d2..f74fcbe 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -20,7 +20,10 @@
 #define KGSL_CONTEXT_TRASH_STATE	0x00000020
 #define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
 #define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
+#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
+
 #define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
+#define KGSL_CONTEXT_SYNC               0x00000400
 /* bits [12:15] are reserved for future use */
 #define KGSL_CONTEXT_TYPE_MASK          0x01F00000
 #define KGSL_CONTEXT_TYPE_SHIFT         20
@@ -283,7 +286,7 @@
 #define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
 	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
 
-/* issue indirect commands to the GPU.
+/* DEPRECATED: issue indirect commands to the GPU.
  * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
  * ibaddr and sizedwords must specify a subset of a buffer created
  * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
@@ -291,6 +294,9 @@
  * timestamp is a returned counter value which can be passed to
  * other ioctls to determine when the commands have been executed by
  * the GPU.
+ *
+ * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
+ * instead
  */
 struct kgsl_ringbuffer_issueibcmds {
 	unsigned int drawctxt_id;
@@ -805,6 +811,77 @@
 #define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
 	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
 
+/*
+ * struct kgsl_cmd_syncpoint_timestamp
+ * @context_id: ID of a KGSL context
+ * @timestamp: GPU timestamp
+ *
+ * This structure defines a syncpoint comprising a context/timestamp pair. A
+ * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
+ * dependencies that must be met before the command can be submitted to the
+ * hardware
+ */
+struct kgsl_cmd_syncpoint_timestamp {
+	unsigned int context_id;
+	unsigned int timestamp;
+};
+
+#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
+
+struct kgsl_cmd_syncpoint_fence {
+	int fd;
+};
+
+#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
+
+/**
+ * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
+ * @type: type of sync point defined here
+ * @priv: Pointer to the type specific buffer
+ * @size: Size of the type specific buffer
+ *
+ * This structure contains pointers defining a specific command sync point.
+ * The pointer and size should point to a type appropriate structure.
+ */
+struct kgsl_cmd_syncpoint {
+	int type;
+	void __user *priv;
+	unsigned int size;
+};
+
+/**
+ * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
+ * @context_id: KGSL context ID that owns the commands
+ * @flags:
+ * @cmdlist: User pointer to a list of kgsl_ibdesc structures
+ * @numcmds: Number of commands listed in cmdlist
+ * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
+ * @numsyncs: Number of sync points listed in synclist
+ * @timestamp: On entry a user defined timestamp, on exit the timestamp
+ * assigned to the command batch
+ *
+ * This structure specifies a command to send to the GPU hardware.  This is
+ * similar to kgsl_issueibcmds except that it doesn't support the legacy way to
+ * submit IB lists and it adds sync points to block the IB until the
+ * dependencies are satisfied.  This entry point is the new and preferred way
+ * to submit commands to the GPU.
+ */
+
+struct kgsl_submit_commands {
+	unsigned int context_id;
+	unsigned int flags;
+	struct kgsl_ibdesc __user *cmdlist;
+	unsigned int numcmds;
+	struct kgsl_cmd_syncpoint __user *synclist;
+	unsigned int numsyncs;
+	unsigned int timestamp;
+/* private: reserved for future use */
+	unsigned int __pad[4];
+};
+
+#define IOCTL_KGSL_SUBMIT_COMMANDS \
+	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
+
 #ifdef __KERNEL__
 #ifdef CONFIG_MSM_KGSL_DRM
 int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
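
Note: a minimal user-space sketch of the new IOCTL_KGSL_SUBMIT_COMMANDS path, submitting one IB that waits on a context/timestamp pair. The file descriptor, context IDs and the IB descriptor contents are placeholders; only the structures and defines added above are taken from this patch.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>

static int submit_ib_with_sync(int kgsl_fd, unsigned int ctxt_id,
			       struct kgsl_ibdesc *ib,
			       unsigned int dep_ctxt, unsigned int dep_ts)
{
	struct kgsl_cmd_syncpoint_timestamp ts = {
		.context_id = dep_ctxt,
		.timestamp = dep_ts,
	};
	struct kgsl_cmd_syncpoint sync = {
		.type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP,
		.priv = (void *)&ts,
		.size = sizeof(ts),
	};
	struct kgsl_submit_commands cmds;

	memset(&cmds, 0, sizeof(cmds));
	cmds.context_id = ctxt_id;
	cmds.cmdlist = ib;		/* one pre-built IB descriptor */
	cmds.numcmds = 1;
	cmds.synclist = &sync;
	cmds.numsyncs = 1;

	/* The IB is held back until dep_ctxt retires timestamp dep_ts;
	   on success cmds.timestamp holds the timestamp assigned to the batch */
	return ioctl(kgsl_fd, IOCTL_KGSL_SUBMIT_COMMANDS, &cmds);
}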
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index a9e13d5..4f39eaa 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -143,6 +143,12 @@
 /* Structure device for qpnp vadc */
 struct qpnp_vadc_chip;
 
+/* Structure device for qpnp iadc */
+struct qpnp_iadc_chip;
+
+/* Structure device for qpnp adc tm */
+struct qpnp_adc_tm_chip;
+
 /**
  * enum qpnp_adc_decimation_type - Sampling rate supported.
  * %DECIMATION_TYPE1: 512
@@ -1436,44 +1442,59 @@
 			|| defined(CONFIG_SENSORS_QPNP_ADC_CURRENT_MODULE)
 /**
  * qpnp_iadc_read() - Performs ADC read on the current channel.
+ * @dev:	Structure device for qpnp iadc
  * @channel:	Input channel to perform the ADC read.
  * @result:	Current across rsense in mA.
+ * @return:	0 on success.
  */
-int32_t qpnp_iadc_read(enum qpnp_iadc_channels channel,
+int32_t qpnp_iadc_read(struct qpnp_iadc_chip *dev,
+				enum qpnp_iadc_channels channel,
 				struct qpnp_iadc_result *result);
 /**
  * qpnp_iadc_get_rsense() - Reads the RDS resistance value from the
 			trim registers.
+ * @dev:	Structure device for qpnp iadc
  * @rsense:	RDS resistance in nOhms.
+ * @return:	0 on success.
  */
-int32_t qpnp_iadc_get_rsense(int32_t *rsense);
+int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *dev, int32_t *rsense);
 /**
  * qpnp_iadc_get_gain_and_offset() - Performs gain calibration
  *				over 17.8571mV and offset over selected
  *				channel. Channel can be internal rsense,
  *				external rsense and alternate lead pair.
+ * @dev:	Structure device for qpnp iadc
  * @result:	result structure where the gain and offset is stored of
  *		type qpnp_iadc_calib.
+ * @return:	0 on success.
  */
-int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_calib *result);
+int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *dev,
+					struct qpnp_iadc_calib *result);
 /**
- * qpnp_iadc_is_ready() - Clients can use this API to check if the
- *			  device is ready to use.
- * @result:	0 on success and -EPROBE_DEFER when probe for the device
- *		has not occured.
+ * qpnp_get_iadc() - Clients need to register with the iadc using the
+ *		corresponding device instance from which they want to read
+ *		the channels. Read the bindings document on how to pass the
+ *		phandle for the corresponding iadc driver to register with.
+ * @dev:	Clients device structure
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the iadc
+ * @struct qpnp_iadc_chip * - On success returns the iadc device structure
+ *		pointer used every time the client makes an ADC request.
  */
-int32_t qpnp_iadc_is_ready(void);
+struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev, const char *name);
 /**
  * qpnp_iadc_vadc_sync_read() - Performs synchronous VADC and IADC read.
  *		The api is to be used only by the BMS to perform
  *		simultaneous VADC and IADC measurement for battery voltage
  *		and current.
+ * @dev:	Structure device for qpnp iadc
  * @i_channel:	Input battery current channel to perform the IADC read.
  * @i_result:	Current across the rsense in mA.
  * @v_channel:	Input battery voltage channel to perform VADC read.
  * @v_result:	Voltage on the vbatt channel with units in mV.
+ * @return:	0 on success.
  */
-int32_t qpnp_iadc_vadc_sync_read(
+int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *dev,
 	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
 	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result);
 /**
@@ -1481,31 +1502,60 @@
  *		IADC. The offset and gain values are programmed in the trim
  *		registers. The offset and the gain can be retrieved using
  *		qpnp_iadc_get_gain_and_offset
+ * @dev:	Structure device for qpnp iadc
  * @batfet_closed: batfet is opened or closed. The IADC chooses proper
  *			channel (internal/external) based on batfet status
  *			for calibration.
  * RETURNS:	0 on success.
  */
-int32_t qpnp_iadc_calibrate_for_trim(bool batfet_closed);
-int32_t qpnp_iadc_comp_result(int64_t *result);
+int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *dev,
+						bool batfet_closed);
+/**
+ * qpnp_iadc_comp_result() - Compensates the result of the current based on
+ *		the gain and offset coefficients and rsense parameters.
+ * @dev:	Structure device for qpnp iadc
+ * @result:	Current value to perform the compensation.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *dev, int64_t *result);
+/**
+ * qpnp_iadc_skip_calibration() - Clients can use this API to ask the driver
+ *				to skip iadc calibrations
+ * @dev:	Structure device for qpnp iadc
+ * @result:	0 on success and -EPROBE_DEFER when probe for the device
+ *		has not occurred.
+ */
+int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *dev);
+/**
+ * qpnp_iadc_resume_calibration() - Clients can use this API to ask the driver
+ *				to resume iadc calibrations
+ * @dev:	Structure device for qpnp iadc
+ * @result:	0 on success and -EPROBE_DEFER when probe for the device
+ *		has not occurred.
+ */
+int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *dev);
 #else
-static inline int32_t qpnp_iadc_read(enum qpnp_iadc_channels channel,
-						struct qpnp_iadc_result *result)
+static inline int32_t qpnp_iadc_read(struct qpnp_iadc_chip *iadc,
+	enum qpnp_iadc_channels channel, struct qpnp_iadc_result *result)
 { return -ENXIO; }
-static inline int32_t qpnp_iadc_get_rsense(int32_t *rsense)
+static inline int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc,
+							int32_t *rsense)
 { return -ENXIO; }
-static inline int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_calib
-									*result)
+static inline int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *iadc,
+				struct qpnp_iadc_calib *result)
 { return -ENXIO; }
-static inline int32_t qpnp_iadc_is_ready(void)
-{ return -ENXIO; }
-static inline int32_t qpnp_iadc_vadc_sync_read(
+static inline struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev,
+							const char *name)
+{ return ERR_PTR(-ENXIO); }
+static inline int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
 	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
 	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
 { return -ENXIO; }
-static inline int32_t qpnp_iadc_calibrate_for_trim(bool batfet_closed)
+static inline int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
+							bool batfet_closed)
 { return -ENXIO; }
-static inline int32_t qpnp_iadc_comp_result(int64_t *result, int32_t sign)
+static inline int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *iadc,
+						int64_t *result)
 { return -ENXIO; }
 #endif
 
@@ -1522,14 +1572,15 @@
  *		Clients pass the low/high voltage along with the threshold
  *		notification callback.
  */
-int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_btm_param *param);
+int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
 /**
  * qpnp_adc_tm_usbid_end() - Disables the monitoring of channel 0 thats
  *		assigned for monitoring USB_ID. Disables the low/high
  *		threshold activation for channel 0 as well.
  * @param:	none.
  */
-int32_t qpnp_adc_tm_usbid_end(void);
+int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip);
 /**
  * qpnp_adc_tm_channel_measure() - Configures kernel clients a channel to
  *		monitor the corresponding ADC channel for threshold detection.
@@ -1540,7 +1591,8 @@
  *		Clients pass the low/high temperature along with the threshold
  *		notification callback.
  */
-int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_btm_param *param);
+int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
 /**
  * qpnp_adc_tm_disable_chan_meas() - Disables the monitoring of channel thats
  *		assigned for monitoring kernel clients. Disables the low/high
@@ -1549,45 +1601,36 @@
  *		This is used to identify the channel for which the corresponding
  *		channels high/low threshold notification will be disabled.
  */
-int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_btm_param *param);
+int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
 /**
- * qpnp_adc_tm_is_ready() - Clients can use this API to check if the
- *			  device is ready to use.
- * @result:	0 on success and -EPROBE_DEFER when probe for the device
- *		has not occured.
+ * qpnp_get_adc_tm() - Clients need to register with the adc_tm using the
+ *		corresponding device instance from which they want to read
+ *		the channels. Read the bindings document on how to pass the
+ *		phandle for the corresponding adc_tm driver to register with.
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the adc_tm
+ * @struct qpnp_adc_tm_chip * - On success returns the adc_tm device structure
+ *		pointer that needs to be used during an ADC TM request.
  */
-int32_t	qpnp_adc_tm_is_ready(void);
-/**
- * qpnp_iadc_skip_calibration() - Clients can use this API to ask the driver
- *				to skip iadc calibrations
- * @result:	0 on success and -EPROBE_DEFER when probe for the device
- *		has not occured.
- */
-int qpnp_iadc_skip_calibration(void);
-/**
- * qpnp_iadc_resume_calibration() - Clients can use this API to ask the driver
- *				to resume iadc calibrations
- * @result:	0 on success and -EPROBE_DEFER when probe for the device
- *		has not occured.
- */
-int qpnp_iadc_resume_calibration(void);
+struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name);
 #else
 static inline int32_t qpnp_adc_tm_usbid_configure(
+			struct qpnp_adc_tm_chip *chip,
 			struct qpnp_adc_tm_btm_param *param)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_tm_usbid_end(void)
+static inline int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip)
 { return -ENXIO; }
 static inline int32_t qpnp_adc_tm_channel_measure(
-		struct qpnp_adc_tm_btm_param *param)
+					struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_tm_disable_chan_meas(void)
+static inline int32_t qpnp_adc_tm_disable_chan_meas(
+	struct qpnp_adc_tm_chip *chip, struct qpnp_adc_tm_btm_param *param)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_tm_is_ready(void)
-{ return -ENXIO; }
-static inline int qpnp_iadc_skip_calibration(void)
-{ return -ENXIO; }
-static inline int qpnp_iadc_resume_calibration(void);
-{ return -ENXIO; }
+static inline struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev,
+							const char *name)
+{ return ERR_PTR(-ENXIO); }
 #endif
 
 #endif
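
Note: a minimal sketch of a hypothetical IADC client under the new per-chip interface; the "foo" consumer name and the EXTERNAL_RSENSE example channel are assumptions, the rest mirrors the declarations above.

#include <linux/err.h>
#include <linux/qpnp/qpnp-adc.h>

static int foo_read_rsense_current(struct device *dev,
				   struct qpnp_iadc_result *result)
{
	struct qpnp_iadc_chip *iadc;

	/* Resolves the phandle named per the bindings document */
	iadc = qpnp_get_iadc(dev, "foo");
	if (IS_ERR(iadc))
		return PTR_ERR(iadc);	/* may be -EPROBE_DEFER; retry later */

	/* Result reports the current across rsense in mA */
	return qpnp_iadc_read(iadc, EXTERNAL_RSENSE, result);
}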
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 4ad1ca9..bd4cddf 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -92,7 +92,7 @@
 #define READDONE_IDX_SEQ_ID 10
 
 #define SOFT_PAUSE_PERIOD       30   /* ramp up/down for 30ms    */
-#define SOFT_PAUSE_STEP         2000 /* Step value 2ms or 2000us */
+#define SOFT_PAUSE_STEP         0 /* Step value 0ms or 0us */
 enum {
 	SOFT_PAUSE_CURVE_LINEAR = 0,
 	SOFT_PAUSE_CURVE_EXP,
@@ -100,7 +100,7 @@
 };
 
 #define SOFT_VOLUME_PERIOD       30   /* ramp up/down for 30ms    */
-#define SOFT_VOLUME_STEP         2000 /* Step value 2ms or 2000us */
+#define SOFT_VOLUME_STEP         0 /* Step value 0ms or 0us */
 enum {
 	SOFT_VOLUME_CURVE_LINEAR = 0,
 	SOFT_VOLUME_CURVE_EXP,
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 056e2dc..60dd522 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -379,6 +379,21 @@
 	return false;
 }
 
+static void voc_set_error_state(uint16_t reset_proc)
+{
+	struct voice_data *v = NULL;
+	int i;
+
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		if (reset_proc == APR_DEST_MODEM  && i == VOC_PATH_FULL)
+			continue;
+
+		v = &common.voice[i];
+		if (v != NULL)
+			v->voc_state = VOC_ERROR;
+	}
+}
+
 static bool is_other_session_active(u32 session_id)
 {
 	int i;
@@ -4347,7 +4362,7 @@
 	mutex_lock(&v->lock);
 
 	if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR ||
-	    v->voc_state == VOC_STANDBY) {
+	    v->voc_state == VOC_CHANGE || v->voc_state == VOC_STANDBY) {
 
 		pr_debug("%s: VOC_STATE: %d\n", __func__, v->voc_state);
 
@@ -4357,7 +4372,13 @@
 		voice_destroy_mvm_cvs_session(v);
 
 		v->voc_state = VOC_RELEASE;
+	} else {
+		pr_err("%s: Error: End voice called in state %d\n",
+			__func__, v->voc_state);
+
+		ret = -EINVAL;
 	}
+
 	mutex_unlock(&v->lock);
 	return ret;
 }
@@ -4559,8 +4580,10 @@
 		}
 
 		v->voc_state = VOC_RUN;
-	} else if (v->voc_state == VOC_STANDBY) {
-		pr_err("Error: start voice in Standby\n");
+	} else {
+		pr_err("%s: Error: Start voice called in state %d\n",
+			__func__, v->voc_state);
+
 		ret = -EINVAL;
 		goto fail;
 	}
@@ -4602,7 +4625,6 @@
 	struct common_data *c = NULL;
 	struct voice_data *v = NULL;
 	int i = 0;
-	uint32_t session_id = 0;
 
 	if ((data == NULL) || (priv == NULL)) {
 		pr_err("%s: data or priv is NULL\n", __func__);
@@ -4619,25 +4641,6 @@
 		if (data->reset_proc == APR_DEST_MODEM) {
 			pr_debug("%s: Received MODEM reset event\n", __func__);
 
-			session_id = voc_get_session_id(VOICE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOICE2_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOLTE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(QCHAT_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
 		} else {
 			pr_debug("%s: Reset event received in Voice service\n",
 				__func__);
@@ -4655,7 +4658,7 @@
 		}
 		/* clean up srvcc rec flag */
 		c->srvcc_rec_flag = false;
-
+		voc_set_error_state(data->reset_proc);
 		return 0;
 	}
 
@@ -4753,7 +4756,6 @@
 	struct common_data *c = NULL;
 	struct voice_data *v = NULL;
 	int i = 0;
-	uint32_t session_id = 0;
 
 	if ((data == NULL) || (priv == NULL)) {
 		pr_err("%s: data or priv is NULL\n", __func__);
@@ -4770,25 +4772,6 @@
 		if (data->reset_proc == APR_DEST_MODEM) {
 			pr_debug("%s: Received Modem reset event\n", __func__);
 
-			session_id = voc_get_session_id(VOICE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOICE2_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOLTE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(QCHAT_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
 		} else {
 			pr_debug("%s: Reset event received in Voice service\n",
 				 __func__);
@@ -4800,6 +4783,8 @@
 			for (i = 0; i < MAX_VOC_SESSIONS; i++)
 				c->voice[i].cvs_handle = 0;
 		}
+
+		voc_set_error_state(data->reset_proc);
 		return 0;
 	}
 
@@ -5034,7 +5019,6 @@
 	struct common_data *c = NULL;
 	struct voice_data *v = NULL;
 	int i = 0;
-	uint32_t session_id = 0;
 
 	if ((data == NULL) || (priv == NULL)) {
 		pr_err("%s: data or priv is NULL\n", __func__);
@@ -5047,25 +5031,6 @@
 		if (data->reset_proc == APR_DEST_MODEM) {
 			pr_debug("%s: Received Modem reset event\n", __func__);
 
-			session_id = voc_get_session_id(VOICE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOICE2_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOLTE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(QCHAT_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
 		} else {
 			pr_debug("%s: Reset event received in Voice service\n",
 				 __func__);
@@ -5077,6 +5042,8 @@
 			for (i = 0; i < MAX_VOC_SESSIONS; i++)
 				c->voice[i].cvp_handle = 0;
 		}
+
+		voc_set_error_state(data->reset_proc);
 		return 0;
 	}