Merge "input: sensor: Add power init and enable/disable function for stk3x1x"
diff --git a/Documentation/devicetree/bindings/bif/qpnp-bsi.txt b/Documentation/devicetree/bindings/bif/qpnp-bsi.txt
index 29267dd..0ff10a4 100644
--- a/Documentation/devicetree/bindings/bif/qpnp-bsi.txt
+++ b/Documentation/devicetree/bindings/bif/qpnp-bsi.txt
@@ -49,6 +49,7 @@
 - qcom,vref-microvolts:   Reference voltage used for BCL divider circuit in
                            microvolts.  If no value is specified, then
                            1800000 uV is assumed.
+- qcom,bsi-vadc:	  Corresponding VADC device phandle.
 
 All properties specified within for the BIF framework can also be used. These
 properties can be found in bif.txt.
@@ -86,6 +87,7 @@
 				qcom,channel-num = <0x31>;
 				qcom,pullup-ohms = <100000>;
 				qcom,vref-microvolts = <1800000>;
+				qcom,bsi-vadc = <&pm8941_vadc>;
 			};
 		};
 	};
diff --git a/Documentation/devicetree/bindings/dma/sps/sps.txt b/Documentation/devicetree/bindings/dma/sps/sps.txt
index a979c56..26102c8 100644
--- a/Documentation/devicetree/bindings/dma/sps/sps.txt
+++ b/Documentation/devicetree/bindings/dma/sps/sps.txt
@@ -7,7 +7,11 @@
   - compatible: should be "qcom,msm_sps"
 
 Optional properties:
-  - reg: offset and size of the register set in the memory map
+  - reg: offset and size of each memory mapping, including maps for the
+    BAM DMA BAM, BAM DMA peripheral, pipe memory and reserved memory.
+  - reg-names: names of the resources passed to the driver via the reg
+    property. "reg-names" examples are "bam_mem", "core_mem",
+    "pipe_mem" and "res_mem".
   - interrupts: IRQ line
   - qcom,device-type: specify the device configuration of BAM DMA and
     pipe memory. Can be one of
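
For illustration, a minimal qcom,msm_sps node using the reg and reg-names
entries documented above could look like the sketch below, which mirrors the
mpq8092.dtsi node added later in this patch (addresses and interrupt are
board specific):

	qcom,sps@f9980000 {
		compatible = "qcom,msm_sps";
		reg = <0xf9984000 0x15000>,
		      <0xf9999000 0xb000>;
		reg-names = "bam_mem", "core_mem";
		interrupts = <0 94 0>;
	};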
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
index 418447d..39be71e 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
@@ -24,6 +24,12 @@
 Channel node
 NOTE: Atleast one Channel node is required.
 
+Client required property:
+- qcom,<consumer name>-iadc : The phandle to the corresponding iadc device.
+			The consumer name passed to the driver when calling
+			qpnp_get_iadc() is used to associate the client
+			with the corresponding device.
+
 Required properties:
 - label : Channel name used for sysfs entry.
 - reg : AMUX channel number.
@@ -84,6 +90,8 @@
 			6 : 64
 			7 : 128
 			8 : 256
+- qcom,iadc-vadc : Phandle of the corresponding VADC device, used to read the die_temperature
+		and to issue simultaneous voltage and current conversion requests.
 
 Example:
 	/* Main Node */
@@ -97,6 +105,7 @@
                         qcom,adc-bit-resolution = <16>;
                         qcom,adc-vdd-reference = <1800>;
 			qcom,rsense = <1500>;
+			qcom,iadc-vadc = <&pm8941_vadc>;
 
 			/* Channel Node */
                         chan@0 = {
@@ -110,3 +119,9 @@
                                 qcom,fast-avg-setup = <0>;
                         };
 	};
+
+Client device example:
+/* Add to the clients node that needs the IADC */
+client_node {
+	qcom,client-iadc = <&pm8941_iadc>;
+};
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
index 8de8bdd..7f34a8f 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -20,6 +20,12 @@
 Channel nodes
 NOTE: Atleast one Channel node is required.
 
+Client required property:
+- qcom,<consumer name>-vadc : The phandle to the corresponding vadc device.
+			The consumer name passed to the driver when calling
+			qpnp_get_vadc() is used to associate the client
+			with the corresponding device.
+
 Required properties:
 - label : Channel name used for sysfs entry.
 - reg : AMUX channel number.
@@ -113,6 +119,12 @@
                         };
 	};
 
+Client device example:
+/* Add to the clients node that needs the VADC channel A/D */
+client_node {
+	qcom,client-vadc = <&pm8941_vadc>;
+};
+
 /* Clients have an option of measuring an analog signal through an MPP.
    MPP block is not part of the VADC block but is an individual PMIC
    block that has an option to support clients to configure an MPP as
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
index 3720172..0f35e73 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
@@ -20,7 +20,6 @@
  - synaptics,panel-x		: panel x dimension
  - synaptics,panel-y		: panel y dimension
  - synaptics,fw-image-name	: name of firmware .img file in /etc/firmware
- - synaptics,power-down		: fully power down regulators in suspend
 
 Example:
 	i2c@f9927000 { /* BLSP1 QUP5 */
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp.txt b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
index b60760e..9524360 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
@@ -47,6 +47,7 @@
 - qcom,default-state:  default state of the led, should be "on" or "off"
 - qcom,torch-enable: set flash led to torch mode
 - flash_boost-supply: SMBB regulator for LED flash mode
+- torch_boost-supply: SMBB regulator for LED torch mode
 
 RGB Led is a tri-colored led, Red, Blue & Green.
 
@@ -222,6 +223,7 @@
 			compatible = "qcom,leds-qpnp";
 			status = "okay";
 			flash_boost-supply = <&pm8941_chg_boost>;
+			torch_boost-supply = <&pm8941_boost>;
 			qcom,flash_0 {
 				qcom,max-current = <1000>;
 				qcom,default-state = "off";
diff --git a/Documentation/devicetree/bindings/media/video/msm-ispif.txt b/Documentation/devicetree/bindings/media/video/msm-ispif.txt
index ff33b17..dc1187a 100644
--- a/Documentation/devicetree/bindings/media/video/msm-ispif.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-ispif.txt
@@ -4,6 +4,7 @@
 - cell-index: ispif hardware core index
 - compatible :
     - "qcom,ispif"
+    - "qcom,ispif-v3.0"
 - reg : offset and length of the register set for the device
     for the ispif operating in compatible mode.
 - reg-names : should specify relevant names to each reg property defined.
@@ -11,9 +12,13 @@
 - interrupt-names : should specify relevant names to each interrupts
   property defined.
 
+Optional properties:
+- qcom,num-isps: The number of ISPs the ISPIF module is connected to. If not set,
+  the default value of 1 is used.
+
 Example:
 
-   qcom,ispif@0xfda0a000 {
+   qcom,ispif@fda0a000 {
        cell-index = <0>;
        compatible = "qcom,ispif";
        reg = <0xfda0a000 0x300>;
@@ -21,3 +26,16 @@
        interrupts = <0 55 0>;
        interrupt-names = "ispif";
    };
+
+or
+
+   qcom,ispif@fda0a000 {
+       cell-index = <0>;
+       compatible = "qcom,ispif-v3.0", "qcom,ispif";
+       reg = <0xfda0a000 0x300>;
+       reg-names = "ispif";
+       interrupts = <0 55 0>;
+       interrupt-names = "ispif";
+       qcom,num-isps = <2>;
+   };
+
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index ac60e38..cc2506d 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -33,15 +33,31 @@
   request by different video encoder usecases.
 - qcom,dec-ddr-ab-ib : list of bus vectors(ab, ib pair) for ddr bandwidth
   request by different video decoder usecases.
-- qcom,iommu-groups : list of IOMMU groups to be used.  Groups are defined as
-  phandles in <target>-iommu-domains.dtsi (e.g msm8974-v1-iommu-domains.dtsi)
-- qcom,iommu-group-buffer-types : bitmap of buffer types that can be mapped into
-  the corresponding IOMMU group. Buffer types are defined within the vidc driver
-  by "enum hal_buffer" in msm_smem.h
 - qcom,buffer-type-tz-usage-table : a key-value pair, mapping a buffer type
   (enum hal_buffer) to its corresponding TZ usage. The TZ usages are defined
   as "enum cp_mem_usage" in include/linux/msm_ion.h
 - qcom,has-ocmem: indicate the target has ocmem if this property exists
+- qcom,vidc-iommu-domains: node containing individual domain nodes, each with:
+	- a unique domain name for the domain node (e.g vidc,domain-ns)
+	- qcom,vidc-domain-phandle: phandle for the domain as defined in
+	  <target>-iommu-domains.dtsi (e.g msm8974-v1-iommu-domains.dtsi)
+	- qcom,vidc-partition-buffer-types: bitmap of buffer types that can
+	  be mapped into each IOMMU domain partition.  There must be exactly
+	  one buffer bitmap per partition in the domain, with the order of the
+	  bitmaps matching the order of the respective partitions.
+	- Buffer types are defined as follows:
+	  input = 0x1
+	  output = 0x2
+	  output2 = 0x2
+	  extradata input = 0x4
+	  extradata output = 0x8
+	  extradata output2 = 0x8
+	  internal scratch = 0x10
+	  internal scratch1 = 0x20
+	  internal scratch2 = 0x40
+	  internal persist = 0x80
+	  internal persist1 = 0x100
+	  internal cmd queue = 0x200
 
 Example:
 
@@ -71,9 +87,20 @@
 			<60000 664950>;
 		qcom,dec-ddr-ab-ib = <0 0>,
 			<110000 909000>;
-		qcom,iommu-groups = <&venus_domain_ns &venus_domain_cp>;
-		qcom,iommu-group-buffer-types = <0xfff 0x1ff>;
 		qcom,buffer-type-tz-usage-table = <0x1 0x1>,
 						<0x1fe 0x2>;
 		qcom,max-hw-load = <1224450>; /* 4k @ 30 + 1080p @ 30*/
+		qcom,vidc-iommu-domains {
+			qcom,domain-ns {
+				qcom,vidc-domain-phandle = <&venus_domain_ns>;
+				qcom,vidc-partition-buffer-types = <0x1ff>,
+							<0x200>;
+			};
+
+			qcom,domain-cp {
+				qcom,vidc-domain-phandle = <&venus_domain_cp>;
+				qcom,vidc-partition-buffer-types = <0x2>,
+							<0x1f1>;
+			};
+		};
 	};
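
As a worked decoding of the non-secure domain in the example above (an
illustrative note that simply applies the buffer-type bit values listed in
this binding):

	partition 0: 0x1ff = 0x1|0x2|0x4|0x8|0x10|0x20|0x40|0x80|0x100
	             (every buffer type except the internal cmd queue)
	partition 1: 0x200 = internal cmd queue only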
diff --git a/Documentation/devicetree/bindings/power/qpnp-bms.txt b/Documentation/devicetree/bindings/power/qpnp-bms.txt
index 0672f14..1162298 100644
--- a/Documentation/devicetree/bindings/power/qpnp-bms.txt
+++ b/Documentation/devicetree/bindings/power/qpnp-bms.txt
@@ -86,6 +86,8 @@
 			for storing the FCC(mAh) in the 8-bit BMS register.
 			For example - A value of 10 indicates:
 			FCC value (in mAh) = (8-bit register value) * 10.
+- qcom,bms-vadc: Corresponding VADC device's phandle.
+- qcom,bms-iadc: Corresponding IADC device's phandle.
 
 Parent node optional properties:
 - qcom,ignore-shutdown-soc: A boolean that controls whether BMS will
@@ -101,6 +103,8 @@
 - qcom,use-ocv-thresholds : A boolean that controls whether BMS will take
 			new OCVs only between the defined thresholds.
 - qcom,enable-fcc-learning: A boolean that defines if FCC learning is enabled.
+- qcom,bms-adc_tm: Corresponding ADC_TM device's phandle to set recurring measurements
+		and receive notifications for die_temperature and vbatt.
 
 
 All sub node required properties:
@@ -148,6 +152,9 @@
 	qcom,hold-soc-est = <3>;
 	qcom,tm-temp-margin = <5000>;
 	qcom,battery-data = <&mtp_batterydata>;
+	qcom,bms-vadc = <&pm8941_vadc>;
+	qcom,bms-iadc = <&pm8941_iadc>;
+	qcom,bms-adc_tm = <&pm8941_adc_tm>;
 
 	qcom,bms-iadc@3800 {
 		reg = <0x3800 0x100>;
diff --git a/Documentation/devicetree/bindings/power/qpnp-charger.txt b/Documentation/devicetree/bindings/power/qpnp-charger.txt
index e3c3555..4a5e58c 100644
--- a/Documentation/devicetree/bindings/power/qpnp-charger.txt
+++ b/Documentation/devicetree/bindings/power/qpnp-charger.txt
@@ -80,6 +80,7 @@
 					for the boost regulator.
 - qcom,resume-soc			Capacity in percent at which charging should resume
 					when a fully charged battery drops below this level.
+- qcom,chg-vadc				Corresponding VADC device's phandle.
 
 Sub node required structure:
 - A qcom,chg node must be a child of an SPMI node that has specified
@@ -88,6 +89,8 @@
 	to the collective charging device. For example USB detection
 	and the battery interface are each seperate peripherals and
 	each should be their own subnode.
+- qcom,chg-adc_tm			Corresponding ADC_TM device's phandle to set recurring
+					measurements and receive notifications for batt_therm.
 
 Sub node required properties:
 - compatible:		Must be "qcom,qpnp-charger".
@@ -102,6 +105,7 @@
 
 			qcom,usb-chgpth:
 			 - usbin-valid
+			 - usb-ocp (only for SMBBP and SMBCL)
 
 			qcom,chgr:
 			 - chg-done
@@ -148,6 +152,9 @@
 			 - coarse-det-usb:	Coarse detect interrupt triggers
 						at low voltage on USB_IN.
 			 - chg-gone:		Triggers on VCHG line.
+			 - usb-ocp:		Triggers on over-current conditions when
+						reverse boosting. (Only available on
+						SMBCL and SMBBP devices).
 
 			qcom,dc-chgpth:
 			 - dcin-valid:		Indicates a valid DC charger
@@ -196,6 +203,8 @@
 		qcom,batt-hot-percent = <25>;
 		qcom,batt-cold-percent = <85>;
 		qcom,btc-disabled = <0>;
+		qcom,chg-vadc = <&pm8941_vadc>;
+		qcom,chg-adc_tm = <&pm8941_adc_tm>;
 
 		qcom,chgr@1000 {
 			reg = <0x1000 0x100>;
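
For illustration, a usb-chgpth subnode wired for the usb-ocp interrupt
described above (a sketch following the msm-pm8110.dtsi change later in this
patch; the interrupt only exists on SMBBP and SMBCL devices):

	qcom,usb-chgpth@1300 {
		reg = <0x1300 0x100>;
		interrupts =	<0x0 0x13 0x0>,
				<0x0 0x13 0x1>,
				<0x0 0x13 0x2>,
				<0x0 0x13 0x3>;

		interrupt-names =	"coarse-det-usb",
					"usbin-valid",
					"chg-gone",
					"usb-ocp";
	};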
diff --git a/Documentation/devicetree/bindings/regulator/krait-regulator-pmic.txt b/Documentation/devicetree/bindings/regulator/krait-regulator-pmic.txt
new file mode 100644
index 0000000..a0730f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/krait-regulator-pmic.txt
@@ -0,0 +1,47 @@
+Krait Voltage regulators in the PMIC
+
+In certain MSMs the CPUs are powered by a single supply driven by ganged PMIC regulators
+operating in different phases. The krait-regulator-pmic node represents the gang leader and its
+associated control, power stage and frequency peripherals.
+
+[First Level Nodes]
+Required properties:
+- compatible:			Must be "qcom,krait-regulator-pmic".
+- spmi-dev-container:	        Specifies that all the device nodes specified
+				within this node should have their resources coalesced into a
+				single spmi_device.  This is used to specify all SPMI peripherals
+				that logically make up the gang leader.
+- #address-cells:		The number of cells dedicated to represent an address.
+				This must be set to '1'.
+- #size-cells:			The number of cells dedicated to represent the address
+				space range of a peripheral. This must be set to '1'.
+
+[Second Level Nodes]
+Required properties:
+- reg:             Specifies the SPMI address and size for this peripheral.
+There must be exactly three subnodes, qcom,ctl, qcom,ps and qcom,freq, representing the control,
+power stage and frequency SPMI peripherals of the gang leader, respectively.
+
+Example:
+		krait_regulator_pmic: qcom,krait-regulator-pmic {
+			spmi-dev-container;
+			compatible = "qcom,krait-regulator-pmic";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			status = "disabled";
+
+			qcom,ctl@2000 {
+				status = "disabled";
+				reg = <0x2000 0x100>;
+			};
+
+			qcom,ps@2100 {
+				status = "disabled";
+				reg = <0x2100 0x100>;
+			};
+
+			qcom,freq@2200 {
+				status = "disabled";
+				reg = <0x2200 0x100>;
+			};
+		};
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
index 3854598..e1681ca 100644
--- a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
+++ b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
@@ -27,6 +27,12 @@
 		crosses a set threshold, read temperature and enable/set trip types supported
 		by the thermal framework.
 
+Client required property:
+- qcom,<consumer name>-adc_tm : The phandle to the corresponding adc_tm device.
+			The consumer name passed to the driver when calling
+			qpnp_get_adc_tm() is used to associate the client
+			with the corresponding device.
+
 Channel nodes
 NOTE: Atleast one Channel node is required.
 
@@ -95,6 +101,13 @@
 			8 : 256
 - qcom,btm-channel-number : There are 5 BTM channels. The BTM channel numbers are statically
 			    allocated to the corresponding channel node.
+- qcom,adc_tm-vadc : Phandle to the corresponding VADC device used to read the ADC channels.
+
+Client device example:
+/* Add to the clients node that needs the ADC_TM channel A/D */
+client_node {
+	qcom,client-adc_tm = <&pm8941_adc_tm>;
+};
 
 Example:
 	/* Main Node */
@@ -111,6 +124,7 @@
 					  "low-thr-en-set";
                         qcom,adc-bit-resolution = <15>;
                         qcom,adc-vdd-reference = <1800>;
+			qcom,adc_tm-vadc = <&pm8941_vadc>;
 
 			/* Channel Node to be registered as part of thermal sysfs */
                         chan@b5 {
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt b/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
index 19fbd3a..1c42692 100644
--- a/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
+++ b/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
@@ -37,6 +37,7 @@
 - qcom,default-temp:   Specifies the default temperature in millicelcius to use
 			if no ADC channel is present to read the real time
 			temperature.
+- qcom,temp_alarm-vadc: Corresponding VADC device's phandle.
 
 Note, if a given optional qcom,* binding is not present, then the default
 hardware state for that feature will be maintained.
@@ -61,6 +62,7 @@
 			label = "pm8941_tz";
 			qcom,channel-num = <8>;
 			qcom,threshold-set = <0>;
+			qcom,temp_alarm-vadc = <&pm8941_vadc>;
 		};
 	};
 };
diff --git a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
index f2707f6..b93dc4d 100644
--- a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
@@ -39,7 +39,11 @@
 - hsic,data-pad-offset : Offset of TLMM register for configuring HSIC
   DATA GPIO PAD.
 - qcom,phy-sof-workaround : If present then HSIC PHY has h/w BUGs related to
-  SOFs. Software workarounds are required for the same.
+  SOFs. All the relevant software workarounds are required during suspend,
+  reset and resume.
+- qcom,phy-susp-sof-workaround : If present then HSIC PHY has a h/w BUG related to
+  SOFs while entering SUSPEND. The relevant software workaround is required
+  during SUSPEND only.
 - qcom,pool-64-bit-align: If present then the pool's memory will be aligned
   to 64 bits
 - qcom,enable_hbm: if present host bus manager is enabled.
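
For illustration, the suspend-only variant is enabled by adding the bare
property next to the existing HSIC pad settings in a board file, as in this
sketch based on the apq8074-dragonboard.dtsi hunk later in this patch (the
enclosing node is omitted):

			hsic,ignore-cal-pad-config;
			hsic,strobe-pad-offset = <0x2050>;
			hsic,data-pad-offset = <0x2054>;
			qcom,phy-susp-sof-workaround;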
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index ff7b03d..f2a9940 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -49,6 +49,9 @@
   is not supported and attached host should always be assumed as SDP.
 - USB3_GDSC-supply : phandle to the globally distributed switch controller
   regulator node to the USB controller.
+- qcom,dwc_usb3-adc_tm: Corresponding ADC_TM device's phandle to set recurring
+		measurements on the USB_ID channel when using the ADC and to receive
+		notifications for the set thresholds.
 
 Sub nodes:
 - Sub node for "DWC3- USB3 controller".
@@ -72,6 +75,7 @@
 		qcom,dwc-usb3-msm-dbm-eps = <4>
 		qcom,vdd-voltage-level = <1 5 7>;
 		qcom,dwc-hsphy-init = <0x00D195A4>;
+		qcom,dwc_usb3-adc_tm = <&pm8941_adc_tm>;
 
 		qcom,msm_bus,name = "usb3";
 		qcom,msm_bus,num_cases = <2>;
diff --git a/arch/arm/boot/dts/apq8026-v1-mtp.dts b/arch/arm/boot/dts/apq8026-v1-mtp.dts
index b89c676..7900ddf 100644
--- a/arch/arm/boot/dts/apq8026-v1-mtp.dts
+++ b/arch/arm/boot/dts/apq8026-v1-mtp.dts
@@ -20,3 +20,10 @@
 	compatible = "qcom,apq8026-mtp", "qcom,apq8026", "qcom,mtp";
 	qcom,msm-id = <199 8 0>;
 };
+
+&cci {
+	/* Rotate rear camera to 0 degrees */
+	qcom,camera@6f {
+	qcom,mount-angle = <0>;
+		qcom,mount-angle = <0>;
+};
diff --git a/arch/arm/boot/dts/apq8074-dragonboard.dtsi b/arch/arm/boot/dts/apq8074-dragonboard.dtsi
index 53e9b3b..acd0bb9 100644
--- a/arch/arm/boot/dts/apq8074-dragonboard.dtsi
+++ b/arch/arm/boot/dts/apq8074-dragonboard.dtsi
@@ -90,6 +90,7 @@
 			hsic,ignore-cal-pad-config;
 			hsic,strobe-pad-offset = <0x2050>;
 			hsic,data-pad-offset = <0x2054>;
+			qcom,phy-susp-sof-workaround;
 
 			qcom,msm-bus,name = "hsic";
 			qcom,msm-bus,num-cases = <2>;
diff --git a/arch/arm/boot/dts/apq8074-v2-cdp.dts b/arch/arm/boot/dts/apq8074-v2.0-1-cdp.dts
similarity index 92%
rename from arch/arm/boot/dts/apq8074-v2-cdp.dts
rename to arch/arm/boot/dts/apq8074-v2.0-1-cdp.dts
index 1dc0912..0489b55 100644
--- a/arch/arm/boot/dts/apq8074-v2-cdp.dts
+++ b/arch/arm/boot/dts/apq8074-v2.0-1-cdp.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.0-1.dtsi"
 /include/ "msm8974-cdp.dtsi"
 
 / {
-	model = "Qualcomm APQ 8074v2 CDP";
+	model = "Qualcomm APQ 8074v2.0-1 CDP";
 	compatible = "qcom,apq8074-cdp", "qcom,apq8074", "qcom,cdp";
 	qcom,msm-id = <184 1 0x20000>;
 };
diff --git a/arch/arm/boot/dts/apq8074-v2-dragonboard.dts b/arch/arm/boot/dts/apq8074-v2.0-1-dragonboard.dts
similarity index 89%
rename from arch/arm/boot/dts/apq8074-v2-dragonboard.dts
rename to arch/arm/boot/dts/apq8074-v2.0-1-dragonboard.dts
index 5a6f5f3..128d8bd 100644
--- a/arch/arm/boot/dts/apq8074-v2-dragonboard.dts
+++ b/arch/arm/boot/dts/apq8074-v2.0-1-dragonboard.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.0-1.dtsi"
 /include/ "apq8074-dragonboard.dtsi"
 
 / {
-	model = "Qualcomm APQ 8074v2 DRAGONBOARD";
+	model = "Qualcomm APQ 8074v2.0-1 DRAGONBOARD";
 	compatible = "qcom,apq8074-dragonboard", "qcom,apq8074", "qcom,dragonboard";
 	qcom,msm-id = <184 10 0x20000>;
 };
diff --git a/arch/arm/boot/dts/apq8074-v2-liquid.dts b/arch/arm/boot/dts/apq8074-v2.0-1-liquid.dts
similarity index 89%
rename from arch/arm/boot/dts/apq8074-v2-liquid.dts
rename to arch/arm/boot/dts/apq8074-v2.0-1-liquid.dts
index a0ecb50..63c32f3 100644
--- a/arch/arm/boot/dts/apq8074-v2-liquid.dts
+++ b/arch/arm/boot/dts/apq8074-v2.0-1-liquid.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.0-1.dtsi"
 /include/ "msm8974-liquid.dtsi"
 
 / {
-	model = "Qualcomm APQ 8074v2 LIQUID";
+	model = "Qualcomm APQ 8074v2.0-1 LIQUID";
 	compatible = "qcom,apq8074-liquid", "qcom,apq8074", "qcom,liquid";
 	qcom,msm-id = <184 9 0x20000>;
 };
diff --git a/arch/arm/boot/dts/apq8074-v2.dtsi b/arch/arm/boot/dts/apq8074-v2.0-1.dtsi
similarity index 97%
rename from arch/arm/boot/dts/apq8074-v2.dtsi
rename to arch/arm/boot/dts/apq8074-v2.0-1.dtsi
index c700a5c..8314fab 100644
--- a/arch/arm/boot/dts/apq8074-v2.dtsi
+++ b/arch/arm/boot/dts/apq8074-v2.0-1.dtsi
@@ -16,7 +16,7 @@
  * msm8974.dtsi file.
  */
 
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
 
 &soc {
 	qcom,qseecom@a700000 {
diff --git a/arch/arm/boot/dts/apq8074-v2-cdp.dts b/arch/arm/boot/dts/apq8074-v2.2-cdp.dts
similarity index 90%
copy from arch/arm/boot/dts/apq8074-v2-cdp.dts
copy to arch/arm/boot/dts/apq8074-v2.2-cdp.dts
index 1dc0912..a3dc490 100644
--- a/arch/arm/boot/dts/apq8074-v2-cdp.dts
+++ b/arch/arm/boot/dts/apq8074-v2.2-cdp.dts
@@ -12,13 +12,13 @@
 
 /dts-v1/;
 
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.2.dtsi"
 /include/ "msm8974-cdp.dtsi"
 
 / {
-	model = "Qualcomm APQ 8074v2 CDP";
+	model = "Qualcomm APQ 8074v2.2 CDP";
 	compatible = "qcom,apq8074-cdp", "qcom,apq8074", "qcom,cdp";
-	qcom,msm-id = <184 1 0x20000>;
+	qcom,msm-id = <184 1 0x20002>;
 };
 
 &usb3 {
diff --git a/arch/arm/boot/dts/apq8074-v2-dragonboard.dts b/arch/arm/boot/dts/apq8074-v2.2-dragonboard.dts
similarity index 85%
copy from arch/arm/boot/dts/apq8074-v2-dragonboard.dts
copy to arch/arm/boot/dts/apq8074-v2.2-dragonboard.dts
index 5a6f5f3..6989088 100644
--- a/arch/arm/boot/dts/apq8074-v2-dragonboard.dts
+++ b/arch/arm/boot/dts/apq8074-v2.2-dragonboard.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.2.dtsi"
 /include/ "apq8074-dragonboard.dtsi"
 
 / {
-	model = "Qualcomm APQ 8074v2 DRAGONBOARD";
+	model = "Qualcomm APQ 8074v2.2 DRAGONBOARD";
 	compatible = "qcom,apq8074-dragonboard", "qcom,apq8074", "qcom,dragonboard";
-	qcom,msm-id = <184 10 0x20000>;
+	qcom,msm-id = <184 10 0x20002>;
 };
diff --git a/arch/arm/boot/dts/apq8074-v2-liquid.dts b/arch/arm/boot/dts/apq8074-v2.2-liquid.dts
similarity index 86%
copy from arch/arm/boot/dts/apq8074-v2-liquid.dts
copy to arch/arm/boot/dts/apq8074-v2.2-liquid.dts
index a0ecb50..09d6e66 100644
--- a/arch/arm/boot/dts/apq8074-v2-liquid.dts
+++ b/arch/arm/boot/dts/apq8074-v2.2-liquid.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.2.dtsi"
 /include/ "msm8974-liquid.dtsi"
 
 / {
-	model = "Qualcomm APQ 8074v2 LIQUID";
+	model = "Qualcomm APQ 8074v2.2 LIQUID";
 	compatible = "qcom,apq8074-liquid", "qcom,apq8074", "qcom,liquid";
-	qcom,msm-id = <184 9 0x20000>;
+	qcom,msm-id = <184 9 0x20002>;
 };
diff --git a/arch/arm/boot/dts/apq8074-v2.dtsi b/arch/arm/boot/dts/apq8074-v2.2.dtsi
similarity index 97%
copy from arch/arm/boot/dts/apq8074-v2.dtsi
copy to arch/arm/boot/dts/apq8074-v2.2.dtsi
index c700a5c..ddf7ec8 100644
--- a/arch/arm/boot/dts/apq8074-v2.dtsi
+++ b/arch/arm/boot/dts/apq8074-v2.2.dtsi
@@ -16,7 +16,7 @@
  * msm8974.dtsi file.
  */
 
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.2.dtsi"
 
 &soc {
 	qcom,qseecom@a700000 {
diff --git a/arch/arm/boot/dts/apq8084-coresight.dtsi b/arch/arm/boot/dts/apq8084-coresight.dtsi
index 1905e3b..6cd238a 100644
--- a/arch/arm/boot/dts/apq8084-coresight.dtsi
+++ b/arch/arm/boot/dts/apq8084-coresight.dtsi
@@ -133,6 +133,70 @@
 		coresight-child-ports = <7>;
 	};
 
+	etm0: etm@fc34c000 {
+		compatible = "arm,coresight-etm";
+		reg = <0xfc34c000 0x1000>;
+		reg-names = "etm-base";
+
+		coresight-id = <10>;
+		coresight-name = "coresight-etm0";
+		coresight-nr-inports = <0>;
+		coresight-outports = <0>;
+		coresight-child-list = <&funnel_kpss>;
+		coresight-child-ports = <0>;
+
+		qcom,pc-save;
+		qcom,round-robin;
+	};
+
+	etm1: etm@fc34d000 {
+		compatible = "arm,coresight-etm";
+		reg = <0xfc34d000 0x1000>;
+		reg-names = "etm-base";
+
+		coresight-id = <11>;
+		coresight-name = "coresight-etm1";
+		coresight-nr-inports = <0>;
+		coresight-outports = <0>;
+		coresight-child-list = <&funnel_kpss>;
+		coresight-child-ports = <1>;
+
+		qcom,pc-save;
+		qcom,round-robin;
+	};
+
+	etm2: etm@fc34e000 {
+		compatible = "arm,coresight-etm";
+		reg = <0xfc34e000 0x1000>;
+		reg-names = "etm-base";
+
+		coresight-id = <12>;
+		coresight-name = "coresight-etm2";
+		coresight-nr-inports = <0>;
+		coresight-outports = <0>;
+		coresight-child-list = <&funnel_kpss>;
+		coresight-child-ports = <2>;
+
+		qcom,pc-save;
+		qcom,round-robin;
+	};
+
+	etm3: etm@fc34f000 {
+		compatible = "arm,coresight-etm";
+		reg = <0xfc34f000 0x1000>;
+		reg-names = "etm-base";
+
+		coresight-id = <13>;
+		coresight-name = "coresight-etm3";
+		coresight-nr-inports = <0>;
+		coresight-outports = <0>;
+		coresight-child-list = <&funnel_kpss>;
+		coresight-child-ports = <3>;
+
+		qcom,pc-save;
+		qcom,round-robin;
+	};
+
 	csr: csr@fc301000 {
 		compatible = "qcom,coresight-csr";
 		reg = <0xfc301000 0x1000>;
diff --git a/arch/arm/boot/dts/apq8084-regulator.dtsi b/arch/arm/boot/dts/apq8084-regulator.dtsi
index 998b469..3d5fc7c 100644
--- a/arch/arm/boot/dts/apq8084-regulator.dtsi
+++ b/arch/arm/boot/dts/apq8084-regulator.dtsi
@@ -326,9 +326,9 @@
 };
 
 &rpm_bus {
-	rpm-regulator-smpb1 {
+	rpm-regulator-smpa1 {
 		compatible = "qcom,rpm-regulator-smd-resource";
-		qcom,resource-name = "smpb";
+		qcom,resource-name = "smpa";
 		qcom,resource-id = <1>;
 		qcom,regulator-type = <1>;
 		qcom,hpm-min-load = <100000>;
@@ -342,13 +342,23 @@
 		};
 	};
 
-	rpm-regulator-smpb2 {
+	rpm-regulator-smpa2 {
 		compatible = "qcom,rpm-regulator-smd-resource";
-		qcom,resource-name = "smpb";
+		qcom,resource-name = "smpa";
 		qcom,resource-id = <2>;
 		qcom,regulator-type = <1>;
 		qcom,hpm-min-load = <100000>;
 
+		pma8084_s2_corner: regulator-s2-corner {
+			compatible = "qcom,rpm-regulator-smd";
+			regulator-name = "8084_s2_corner";
+			qcom,set = <3>;
+			regulator-min-microvolt = <1>;
+			regulator-max-microvolt = <7>;
+			qcom,use-voltage-corner;
+			qcom,consumer-supplies = "vdd_dig", "";
+		};
+
 		pma8084_s2_corner_ao: regulator-s2-corner-ao {
 			compatible = "qcom,rpm-regulator-smd";
 			regulator-name = "8084_s2_corner_ao";
diff --git a/arch/arm/boot/dts/apq8084.dtsi b/arch/arm/boot/dts/apq8084.dtsi
index b027f7d..943f2a3 100644
--- a/arch/arm/boot/dts/apq8084.dtsi
+++ b/arch/arm/boot/dts/apq8084.dtsi
@@ -344,6 +344,12 @@
 		qcom,pet-time = <10000>;
 		qcom,ipi-ping;
 	};
+
+	qcom,msm-rng@f9bff000 {
+		compatible = "qcom,msm-rng";
+		reg = <0xf9bff000 0x200>;
+		qcom,msm-rng-iface-clk;
+	};
 };
 
 &gdsc_venus {
diff --git a/arch/arm/boot/dts/mpq8092-iommu.dtsi b/arch/arm/boot/dts/mpq8092-iommu.dtsi
index baec2d5..320440c 100644
--- a/arch/arm/boot/dts/mpq8092-iommu.dtsi
+++ b/arch/arm/boot/dts/mpq8092-iommu.dtsi
@@ -27,23 +27,19 @@
 				0x215c
 				0x220c
 				0x22bc
-				0x2008
-				0x200c
-				0x2010>;
+				0x2008>;
 
-	qcom,iommu-bfb-data =  <0x3FFF
+	qcom,iommu-bfb-data =  <0x0F
 				0x4
 				0x4
 				0x0
 				0x0
-				0x10
-				0x50
+				0x4
+				0x14
 				0x0
-				0x2000
-				0x2804
-				0x9614
-				0x0
-				0x0
+				0x800
+				0x800
+				0x3a04
 				0x0
 				0x0>;
 };
@@ -66,21 +62,19 @@
 				0x2008
 				0x200c
 				0x2010
-				0x2014
-				0x2018>;
+				0x2014>;
 
-	qcom,iommu-bfb-data =  <0x7FFFFF
+	qcom,iommu-bfb-data =  <0x3FFFF
 				0x4
-				0x10
+				0x4
 				0x0
-				0x5000
-				0x5a1d
-				0x1822d
+				0x1000
+				0x0e00
+				0x8207
 				0x0
 				0x0
-				0x28
-				0x68
-				0x0
+				0x8
+				0x24
 				0x0
 				0x0
 				0x0
@@ -114,9 +108,9 @@
 				0x4
 				0x8
 				0x0
-				0x13607
-				0x4201
-				0x14221
+				0x13205
+				0x4000
+				0x14020
 				0x0
 				0x0
 				0x94
@@ -184,10 +178,13 @@
 				0x2620
 				0x2624
 				0x2628
-				0x262c>;
+				0x262c
+				0x2630
+				0x2634
+				0x2638>;
 
 	qcom,iommu-bfb-data =  <0x3
-				0x8
+				0x4
 				0x10
 				0x0
 				0x0
@@ -196,26 +193,79 @@
 				0x0
 				0x0
 				0x1
-				0x101
+				0x81
 				0x0
 				0x0
-				0x7
+				0x1f
 				0x4
 				0x8
 				0x14
 				0x0
 				0x0
 				0xc
-				0x6c
+				0x3c
 				0x0
-				0x8
+				0x4
 				0x10
-				0x0>;
+				0x0
+				0x15
+				0x3020100
+				0x04>;
 };
 
 &vpu_iommu {
 	status = "ok";
 
+	qcom,iommu-bfb-regs =  <0x204c
+				0x2050
+				0x2514
+				0x2540
+				0x256c
+				0x2314
+				0x2394
+				0x2414
+				0x2494
+				0x20ac
+				0x215c
+				0x220c
+				0x22bc
+				0x2008
+				0x200c
+				0x2010
+				0x2014
+				0x2018
+				0x201c
+				0x2020
+				0x2024
+				0x2028
+				0x202c
+				0x2030>;
+
+	qcom,iommu-bfb-data =  <0xffffffff
+				0xfffff
+				0x4
+				0x8
+				0x0
+				0x0
+				0x34
+				0x104
+				0x0
+				0x6800
+				0x6800
+				0x18034
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0>;
+
 	interrupts = <0 300 0>;
 	vpu_cb_0: qcom,iommu-ctx@fdeec000 {
 		interrupts = <0 302 0>;
@@ -229,3 +279,7 @@
 		interrupts = <0 302 0>;
 	};
 };
+
+&vcap_iommu {
+	status = "ok";
+};
diff --git a/arch/arm/boot/dts/mpq8092-rumi.dts b/arch/arm/boot/dts/mpq8092-rumi.dts
index 1abaf55..3ba2fbc 100644
--- a/arch/arm/boot/dts/mpq8092-rumi.dts
+++ b/arch/arm/boot/dts/mpq8092-rumi.dts
@@ -18,7 +18,7 @@
 / {
 	model = "Qualcomm MPQ8092 RUMI";
 	compatible = "qcom,mpq8092-rumi", "qcom,mpq8092", "qcom,rumi";
-	qcom,msm-id = <146 16 0>;
+	qcom,msm-id = <146 15 0>;
 };
 
 &soc {
diff --git a/arch/arm/boot/dts/mpq8092-rumi.dtsi b/arch/arm/boot/dts/mpq8092-rumi.dtsi
index cc345d8..af49eaf 100644
--- a/arch/arm/boot/dts/mpq8092-rumi.dtsi
+++ b/arch/arm/boot/dts/mpq8092-rumi.dtsi
@@ -10,6 +10,12 @@
  * GNU General Public License for more details.
  */
 
+/ {
+	aliases {
+		spi0 = &spi_0;
+	};
+};
+
 &soc {
 	timer {
 		clock-frequency = <5000000>;
@@ -41,17 +47,28 @@
 		status = "disable";
 	};
 
-	spi@f9923000 {
+	spi_0: spi@f9923000 { /* BLSP1 QUP1 */
 		compatible = "qcom,spi-qup-v2";
-		reg = <0xf9923000 0x1000>;
-		interrupts = <0 95 0>;
-		spi-max-frequency = <24000000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		gpios = <&msmgpio 3 0>, /* CLK  */
-			<&msmgpio 1 0>, /* MISO */
-			<&msmgpio 0 0>; /* MOSI */
-		cs-gpios = <&msmgpio 9 0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9923000 0x1000>,
+		      <0xf9904000 0xf000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 95 0>, <0 238 0>;
+		spi-max-frequency = <19200000>;
+
+		qcom,gpio-mosi = <&msmgpio 0  0>;
+		qcom,gpio-miso = <&msmgpio 1  0>;
+		qcom,gpio-clk  = <&msmgpio 3  0>;
+		qcom,gpio-cs2  = <&msmgpio 11 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <86>;
 
 		ethernet-switch@2 {
 			compatible = "simtec,ks8851";
diff --git a/arch/arm/boot/dts/mpq8092-sim.dts b/arch/arm/boot/dts/mpq8092-sim.dts
index 676ef3b..2384a45 100644
--- a/arch/arm/boot/dts/mpq8092-sim.dts
+++ b/arch/arm/boot/dts/mpq8092-sim.dts
@@ -17,7 +17,7 @@
 / {
 	model = "Qualcomm MPQ8092 Simulator";
 	compatible = "qcom,mpq8092-sim", "qcom,mpq8092", "qcom,sim";
-	qcom,msm-id = <126 16 0>;
+	qcom,msm-id = <146 16 0>;
 };
 
 &soc {
diff --git a/arch/arm/boot/dts/mpq8092.dtsi b/arch/arm/boot/dts/mpq8092.dtsi
index 2f67f3e..9a37316 100644
--- a/arch/arm/boot/dts/mpq8092.dtsi
+++ b/arch/arm/boot/dts/mpq8092.dtsi
@@ -229,6 +229,15 @@
 		qcom,current-limit = <800>;
 	};
 
+	qcom,sps@f9980000 {
+		compatible = "qcom,msm_sps";
+		reg = <0xf9984000 0x15000>,
+		      <0xf9999000 0xb000>;
+		reg-names = "bam_mem", "core_mem";
+		interrupts = <0 94 0>;
+		qcom,pipe-attr-ee;
+	};
+
 	sata: sata@fc580000 {
 		compatible = "qcom,msm-ahci";
 		reg = <0xfc580000 0x17c>;
@@ -308,5 +317,9 @@
 	status = "ok";
 };
 
+&gdsc_vcap {
+	status = "ok";
+};
+
 /include/ "msm-pma8084.dtsi"
 /include/ "mpq8092-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm-gdsc.dtsi b/arch/arm/boot/dts/msm-gdsc.dtsi
index 495f65a..2f732a4 100644
--- a/arch/arm/boot/dts/msm-gdsc.dtsi
+++ b/arch/arm/boot/dts/msm-gdsc.dtsi
@@ -109,4 +109,11 @@
 		reg = <0xfc401ec0 0x4>;
 		status = "disabled";
 	};
+
+	gdsc_vcap: qcom,gdsc@fd8c1804 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_vcap";
+		reg = <0xfd8c1804 0x4>;
+		status = "disabled";
+	};
 };
diff --git a/arch/arm/boot/dts/msm-iommu-v1.dtsi b/arch/arm/boot/dts/msm-iommu-v1.dtsi
index ef8677d..cd5adaa 100644
--- a/arch/arm/boot/dts/msm-iommu-v1.dtsi
+++ b/arch/arm/boot/dts/msm-iommu-v1.dtsi
@@ -1056,4 +1056,98 @@
 			label = "lpass_core_cb_2";
 		};
 	};
+
+	vcap_iommu: qcom,iommu@fdfb6000 {
+		compatible = "qcom,msm-smmu-v1";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		reg = <0xfdfb6000 0x10000>;
+		reg-names = "iommu_base";
+		interrupts = <0 315 0>;
+		qcom,needs-alt-core-clk;
+		label = "vcap_iommu";
+		status = "disabled";
+		qcom,msm-bus,name = "vcap_ebi";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<48 512 0 0>,
+			<48 512 0 1000>;
+
+		qcom,iommu-pmu-ngroups = <1>;
+		qcom,iommu-pmu-ncounters = <8>;
+		qcom,iommu-pmu-event-classes = <0x00
+						0x01
+						0x08
+						0x09
+						0x0A
+						0x10
+						0x11
+						0x12
+						0x80
+						0x81
+						0x82
+						0x83
+						0x90
+						0x91
+						0x92
+						0xb0
+						0xb1>;
+
+		qcom,iommu-bfb-regs =  <0x204c
+					0x2514
+					0x2540
+					0x256c
+					0x2314
+					0x2394
+					0x2414
+					0x2494
+					0x20ac
+					0x215c
+					0x220c
+					0x22bc
+					0x2008
+					0x200c>;
+
+		qcom,iommu-bfb-data = <0x0ff
+					0x00000004
+					0x00000008
+					0x0
+					0x0
+					0x00000008
+					0x00000028
+					0x0
+					0x001000
+					0x001000
+					0x003008
+					0x0
+					0x0
+					0x0>;
+
+		qcom,iommu-ctx@fdfbe000 {
+			compatible = "qcom,msm-smmu-v1-ctx";
+			reg = <0xfdfbe000 0x1000>;
+			interrupts = <0 313 0>;
+			qcom,iommu-ctx-sids = <0>;
+			label = "vcap_cb0";
+		};
+
+		qcom,iommu-ctx@fdfbf000 {
+			compatible = "qcom,msm-smmu-v1-ctx";
+			reg = <0xfdfbf000 0x1000>;
+			interrupts = <0 313 0>;
+			qcom,iommu-ctx-sids = <1>;
+			label = "vcap_cb1";
+		};
+
+		qcom,iommu-ctx@fdfc0000 {
+			compatible = "qcom,msm-smmu-v1-ctx";
+			reg = <0xfdfc0000 0x1000>;
+			interrupts = <0 313 0>;
+			qcom,iommu-ctx-sids = <>;
+			label = "vcap_cb2";
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/msm-pm8110.dtsi b/arch/arm/boot/dts/msm-pm8110.dtsi
index 4f3e461..1d7eaa5 100644
--- a/arch/arm/boot/dts/msm-pm8110.dtsi
+++ b/arch/arm/boot/dts/msm-pm8110.dtsi
@@ -59,15 +59,17 @@
 
 			qcom,vddmax-mv = <4200>;
 			qcom,vddsafe-mv = <4230>;
-			qcom,vinmin-mv = <4200>;
+			qcom,vinmin-mv = <4300>;
 			qcom,vbatdet-mv = <4100>;
 			qcom,ibatmax-ma = <1500>;
 			qcom,ibatterm-ma = <100>;
 			qcom,ibatsafe-ma = <1500>;
 			qcom,thermal-mitigation = <1500 700 600 325>;
-			qcom,vbatdet-delta-mv = <350>;
+			qcom,vbatdet-delta-mv = <100>;
 			qcom,resume-soc = <99>;
 			qcom,tchg-mins = <150>;
+			qcom,chg-vadc = <&pm8110_vadc>;
+			qcom,chg-adc_tm = <&pm8110_adc_tm>;
 
 			qcom,chgr@1000 {
 				status = "disabled";
@@ -132,11 +134,13 @@
 				reg = <0x1300 0x100>;
 				interrupts =	<0 0x13 0x0>,
 						<0 0x13 0x1>,
-						<0x0 0x13 0x2>;
+						<0x0 0x13 0x2>,
+						<0x0 0x13 0x3>;
 
 				interrupt-names =	"coarse-det-usb",
 							"usbin-valid",
-							"chg-gone";
+							"chg-gone",
+							"usb-ocp";
 			};
 
 			qcom,chg-misc@1600 {
@@ -249,7 +253,7 @@
 			};
 		};
 
-		iadc@3600 {
+		pm8110_iadc: iadc@3600 {
 			compatible = "qcom,qpnp-iadc";
 			reg = <0x3600 0x100>;
 			#address-cells = <1>;
@@ -258,6 +262,7 @@
 			interrupt-names = "eoc-int-en-set";
 			qcom,adc-bit-resolution = <16>;
 			qcom,adc-vdd-reference = <1800>;
+			qcom,iadc-vadc = <&pm8110_vadc>;
 
 			chan@0 {
 				label = "internal_rsense";
@@ -284,6 +289,7 @@
 						"low-thr-en-set";
 			qcom,adc-bit-resolution = <15>;
 			qcom,adc-vdd-reference = <1800>;
+			qcom,adc_tm-vadc = <&pm8110_vadc>;
 		};
 
 		qcom,temp-alarm@2400 {
@@ -293,6 +299,7 @@
 			label = "pm8110_tz";
 			qcom,channel-num = <8>;
 			qcom,threshold-set = <0>;
+			qcom,temp_alarm-vadc = <&pm8110_vadc>;
 		};
 
 		pm8110_bms: qcom,bms {
@@ -320,6 +327,9 @@
 			qcom,low-ocv-correction-limit-uv = <100>;
 			qcom,high-ocv-correction-limit-uv = <50>;
 			qcom,hold-soc-est = <3>;
+			qcom,bms-vadc = <&pm8110_vadc>;
+			qcom,bms-iadc = <&pm8110_iadc>;
+			qcom,bms-adc_tm = <&pm8110_adc_tm>;
 
 			qcom,bms-iadc@3800 {
 				reg = <0x3800 0x100>;
diff --git a/arch/arm/boot/dts/msm-pm8226.dtsi b/arch/arm/boot/dts/msm-pm8226.dtsi
index d7c2155..307e4c7 100644
--- a/arch/arm/boot/dts/msm-pm8226.dtsi
+++ b/arch/arm/boot/dts/msm-pm8226.dtsi
@@ -75,14 +75,16 @@
 
 			qcom,vddmax-mv = <4200>;
 			qcom,vddsafe-mv = <4230>;
-			qcom,vinmin-mv = <4200>;
-			qcom,vbatdet-delta-mv = <150>;
+			qcom,vinmin-mv = <4300>;
+			qcom,vbatdet-delta-mv = <100>;
 			qcom,ibatmax-ma = <1500>;
 			qcom,ibatterm-ma = <100>;
 			qcom,ibatsafe-ma = <1500>;
 			qcom,thermal-mitigation = <1500 700 600 325>;
 			qcom,resume-soc = <99>;
 			qcom,tchg-mins = <150>;
+			qcom,chg-vadc = <&pm8226_vadc>;
+			qcom,chg-adc_tm = <&pm8226_adc_tm>;
 
 			qcom,chgr@1000 {
 				status = "disabled";
@@ -140,19 +142,6 @@
 							"bat-fet-on",
 							"vcp-on",
 							"psi";
-
-			};
-
-			pm8226_chg_otg: qcom,usb-chgpth@1300 {
-				status = "disabled";
-				reg = <0x1300 0x100>;
-				interrupts =	<0 0x13 0x0>,
-						<0 0x13 0x1>,
-						<0x0 0x13 0x2>;
-
-				interrupt-names =	"coarse-det-usb",
-							"usbin-valid",
-							"chg-gone";
 			};
 
 			pm8226_chg_boost: qcom,boost@1500 {
@@ -165,6 +154,21 @@
 							"limit-error";
 			};
 
+
+			pm8226_chg_otg: qcom,usb-chgpth@1300 {
+				status = "disabled";
+				reg = <0x1300 0x100>;
+				interrupts =	<0 0x13 0x0>,
+						<0 0x13 0x1>,
+						<0x0 0x13 0x2>,
+						<0x0 0x13 0x3>;
+
+				interrupt-names =	"coarse-det-usb",
+							"usbin-valid",
+							"chg-gone",
+							"usb-ocp";
+			};
+
 			qcom,chg-misc@1600 {
 				status = "disabled";
 				reg = <0x1600 0x100>;
@@ -196,6 +200,9 @@
 			qcom,high-ocv-correction-limit-uv = <50>;
 			qcom,hold-soc-est = <3>;
 			qcom,low-voltage-threshold = <3420000>;
+			qcom,bms-vadc = <&pm8226_vadc>;
+			qcom,bms-iadc = <&pm8226_iadc>;
+			qcom,bms-adc_tm = <&pm8226_adc_tm>;
 
 			qcom,bms-iadc@3800 {
 				reg = <0x3800 0x100>;
@@ -396,7 +403,7 @@
 			};
 		};
 
-		iadc@3600 {
+		pm8226_iadc: iadc@3600 {
 			compatible = "qcom,qpnp-iadc";
 			reg = <0x3600 0x100>;
 			#address-cells = <1>;
@@ -405,6 +412,7 @@
 			interrupt-names = "eoc-int-en-set";
 			qcom,adc-bit-resolution = <16>;
 			qcom,adc-vdd-reference = <1800>;
+			qcom,iadc-vadc = <&pm8226_vadc>;
 
 			chan@0 {
 				label = "internal_rsense";
@@ -431,6 +439,7 @@
 						"low-thr-en-set";
 			qcom,adc-bit-resolution = <15>;
 			qcom,adc-vdd-reference = <1800>;
+			qcom,adc_tm-vadc = <&pm8226_vadc>;
 		};
 
 		qcom,temp-alarm@2400 {
@@ -440,6 +449,7 @@
 			label = "pm8226_tz";
 			qcom,channel-num = <8>;
 			qcom,threshold-set = <0>;
+			qcom,temp_alarm-vadc = <&pm8226_vadc>;
 		};
 
 		qcom,pm8226_rtc {
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index 2460377..0c2b02b 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -39,6 +39,7 @@
 		label = "pm8941_tz";
 		qcom,channel-num = <8>;
 		qcom,threshold-set = <0>;
+		qcom,temp_alarm-vadc = <&pm8941_vadc>;
 	};
 
 	qcom,power-on@800 {
@@ -100,6 +101,7 @@
 		qcom,min-clock-period = <1000>;
 		qcom,max-clock-period = <160000>;
 		qcom,sample-rate = <4>;
+		qcom,bsi-vadc = <&pm8941_vadc>;
 	};
 
 	pm8941_coincell: qcom,coincell@2800 {
@@ -132,6 +134,9 @@
 		qcom,low-ocv-correction-limit-uv = <100>;
 		qcom,high-ocv-correction-limit-uv = <50>;
 		qcom,hold-soc-est = <3>;
+		qcom,bms-vadc = <&pm8941_vadc>;
+		qcom,bms-iadc = <&pm8941_iadc>;
+		qcom,bms-adc_tm = <&pm8941_adc_tm>;
 
 		qcom,bms-iadc@3800 {
 			reg = <0x3800 0x100>;
@@ -200,6 +205,8 @@
 		qcom,vbatdet-delta-mv = <100>;
 		qcom,resume-soc = <99>;
 		qcom,tchg-mins = <150>;
+		qcom,chg-vadc = <&pm8941_vadc>;
+		qcom,chg-adc_tm = <&pm8941_adc_tm>;
 
 		qcom,chgr@1000 {
 			status = "disabled";
@@ -806,6 +813,7 @@
 		interrupt-names = "eoc-int-en-set";
 		qcom,adc-bit-resolution = <16>;
 		qcom,adc-vdd-reference = <1800>;
+		qcom,iadc-vadc = <&pm8941_vadc>;
 
 		chan@0 {
 			label = "internal_rsense";
@@ -832,6 +840,7 @@
 					"low-thr-en-set";
 		qcom,adc-bit-resolution = <15>;
 		qcom,adc-vdd-reference = <1800>;
+		qcom,adc_tm-vadc = <&pm8941_vadc>;
 
 		/* Channel Node */
 		chan@b9 {
@@ -1243,6 +1252,7 @@
 		reg = <0xd300 0x100>;
 		label = "flash";
 		flash_boost-supply = <&pm8941_chg_boost>;
+		torch_boost-supply = <&pm8941_boost>;
 	};
 
 	qcom,leds@d400 {
diff --git a/arch/arm/boot/dts/msm-pma8084.dtsi b/arch/arm/boot/dts/msm-pma8084.dtsi
index ecbfc53..808f0f6 100644
--- a/arch/arm/boot/dts/msm-pma8084.dtsi
+++ b/arch/arm/boot/dts/msm-pma8084.dtsi
@@ -77,6 +77,7 @@
 			interrupts = <0x0 0x24 0x0>;
 			label = "pma8084_tz";
 			qcom,threshold-set = <0>;
+			qcom,temp_alarm-vadc = <&pma8084_vadc>;
 		};
 
 		qcom,coincell@2800 {
@@ -322,6 +323,7 @@
 						"low-thr-en-set";
 			qcom,adc-bit-resolution = <15>;
 			qcom,adc-vdd-reference = <1800>;
+			qcom,adc_tm-vadc = <&pma8084_vadc>;
 		};
 
 		qcom,rtc {
diff --git a/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi
index 53860ac..56e8a09 100644
--- a/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi
@@ -38,7 +38,7 @@
 		qcom,csid-sd-index = <0>;
 		qcom,actuator-src = <&actuator0>;
 		qcom,led-flash-src = <&led_flash0>;
-		qcom,mount-angle = <0>;
+		qcom,mount-angle = <90>;
 		qcom,sensor-name = "ov8825";
 		cam_vdig-supply = <&pm8226_l5>;
 		cam_vana-supply = <&pm8226_l19>;
diff --git a/arch/arm/boot/dts/msm8226-cdp.dtsi b/arch/arm/boot/dts/msm8226-cdp.dtsi
index d94b41d..104cb4c 100644
--- a/arch/arm/boot/dts/msm8226-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8226-cdp.dtsi
@@ -319,7 +319,7 @@
 				qcom,ctrl-delay-us = <0>;
 				qcom,boost-curr-lim = <3>;
 				qcom,cp-sel = <0>;
-				qcom,switch-freq = <2>;
+				qcom,switch-freq = <11>;
 				qcom,ovp-val = <0>;
 				qcom,num-strings = <1>;
 				qcom,id = <0>;
@@ -416,10 +416,6 @@
 	};
 };
 
-&usb_otg_sw {
-	status = "okay";
-};
-
 &pm8226_vadc {
 	chan@14 {
 		label = "pa_therm0";
diff --git a/arch/arm/boot/dts/msm8226-iommu-domains.dtsi b/arch/arm/boot/dts/msm8226-iommu-domains.dtsi
index 25fca2a..769e4df 100644
--- a/arch/arm/boot/dts/msm8226-iommu-domains.dtsi
+++ b/arch/arm/boot/dts/msm8226-iommu-domains.dtsi
@@ -24,7 +24,8 @@
 		venus_domain_cp: qcom,iommu-domain2 {
 			label = "venus_cp";
 			qcom,iommu-contexts = <&venus_cp>;
-			qcom,virtual-addr-pool = <0x1000000 0x3f000000>;
+			qcom,virtual-addr-pool = <0x1000000 0x1f800000
+						  0x20800000 0x1f800000>;
 			qcom,secure-domain;
 		};
 	};
diff --git a/arch/arm/boot/dts/msm8226-mtp.dtsi b/arch/arm/boot/dts/msm8226-mtp.dtsi
index 825e853..977c772 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8226-mtp.dtsi
@@ -127,7 +127,7 @@
 	interrupt-names = "core_irq", "async_irq", "pmic_id_irq";
 
 	qcom,hsusb-otg-mode = <3>;
-	vbus_otg-supply = <&usb_otg_sw>;
+	vbus_otg-supply = <&pm8226_chg_otg>;
 };
 
 &sdcc1 {
@@ -327,7 +327,7 @@
 				qcom,ctrl-delay-us = <0>;
 				qcom,boost-curr-lim = <3>;
 				qcom,cp-sel = <0>;
-				qcom,switch-freq = <2>;
+				qcom,switch-freq = <11>;
 				qcom,ovp-val = <0>;
 				qcom,num-strings = <1>;
 				qcom,id = <0>;
@@ -452,10 +452,6 @@
 	qcom,charging-disabled;
 };
 
-&usb_otg_sw {
-	status = "okay";
-};
-
 &slim_msm {
 	tapan_codec {
 		qcom,cdc-micbias1-ext-cap;
diff --git a/arch/arm/boot/dts/msm8226-qrd.dtsi b/arch/arm/boot/dts/msm8226-qrd.dtsi
index 55d8691..a10b499 100644
--- a/arch/arm/boot/dts/msm8226-qrd.dtsi
+++ b/arch/arm/boot/dts/msm8226-qrd.dtsi
@@ -328,7 +328,7 @@
 				qcom,ctrl-delay-us = <0>;
 				qcom,boost-curr-lim = <3>;
 				qcom,cp-sel = <0>;
-				qcom,switch-freq = <2>;
+				qcom,switch-freq = <11>;
 				qcom,ovp-val = <0>;
 				qcom,num-strings = <1>;
 				qcom,id = <0>;
@@ -442,3 +442,7 @@
 		qcom,fast-avg-setup = <0>;
 	};
 };
+
+&android_usb {
+	qcom,android-usb-cdrom;
+};
diff --git a/arch/arm/boot/dts/msm8226-regulator.dtsi b/arch/arm/boot/dts/msm8226-regulator.dtsi
index d587b77..5b3da9b 100644
--- a/arch/arm/boot/dts/msm8226-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8226-regulator.dtsi
@@ -482,14 +482,10 @@
 	regulator-name = "8226_smbbp_boost";
 };
 
-&soc {
-	usb_otg_sw: regulator-ncp380 {
-		compatible = "regulator-fixed";
-		regulator-name = "usb_otg_sw";
-		gpio = <&msmgpio 67 0>;
-		parent-supply = <&pm8226_chg_boost>;
-		startup-delay-us = <4000>;
-		enable-active-high;
-		status = "disabled";
-	};
+&pm8226_chg {
+	otg-parent-supply = <&pm8226_chg_boost>;
+};
+
+&pm8226_chg_otg {
+	regulator-name = "8226_smbbp_otg";
 };
diff --git a/arch/arm/boot/dts/msm8226-sim.dts b/arch/arm/boot/dts/msm8226-sim.dts
index 2405646..1da94b3 100644
--- a/arch/arm/boot/dts/msm8226-sim.dts
+++ b/arch/arm/boot/dts/msm8226-sim.dts
@@ -12,7 +12,6 @@
 
 /dts-v1/;
 /include/ "msm8226.dtsi"
-/include/ "msm8226-camera.dtsi"
 
 / {
 	model = "Qualcomm MSM 8226 Simulator";
diff --git a/arch/arm/boot/dts/msm8226-v1-mtp.dts b/arch/arm/boot/dts/msm8226-v1-mtp.dts
index b1d46b1..6f03dca 100644
--- a/arch/arm/boot/dts/msm8226-v1-mtp.dts
+++ b/arch/arm/boot/dts/msm8226-v1-mtp.dts
@@ -23,3 +23,10 @@
 		      <198 8 0>,
 		      <205 8 0>;
 };
+
+&cci {
+	/* Rotate rear camera to 0 degrees */
+	qcom,camera@6f {
+		qcom,mount-angle = <0>;
+	};
+};
diff --git a/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts b/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts
index 4cdbada..0a3148b 100644
--- a/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts
+++ b/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts
@@ -13,7 +13,6 @@
 /dts-v1/;
 /include/ "msm8226-v2.dtsi"
 /include/ "msm8226-qrd.dtsi"
-/include/ "msm8226-camera-sensor-cdp.dtsi"
 /include/ "dsi-panel-hx8394a-720p-video.dtsi"
 
 / {
diff --git a/arch/arm/boot/dts/msm8226-v2-qrd-evt.dts b/arch/arm/boot/dts/msm8226-v2-qrd-evt.dts
index a2ad682..cb6c1af 100644
--- a/arch/arm/boot/dts/msm8226-v2-qrd-evt.dts
+++ b/arch/arm/boot/dts/msm8226-v2-qrd-evt.dts
@@ -13,7 +13,6 @@
 /dts-v1/;
 /include/ "msm8226-v2.dtsi"
 /include/ "msm8226-qrd.dtsi"
-/include/ "msm8226-camera-sensor-cdp.dtsi"
 /include/ "dsi-panel-nt35590-720p-video.dtsi"
 
 / {
diff --git a/arch/arm/boot/dts/msm8226-v2.dtsi b/arch/arm/boot/dts/msm8226-v2.dtsi
index 1dab78a..2b3b7c2 100644
--- a/arch/arm/boot/dts/msm8226-v2.dtsi
+++ b/arch/arm/boot/dts/msm8226-v2.dtsi
@@ -17,7 +17,6 @@
  */
 
 /include/ "msm8226.dtsi"
-/include/ "msm8226-camera.dtsi"
 
 &pm8226_l3 {
 	regulator-min-microvolt = <750000>;
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index b836100..4b78238 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -182,11 +182,23 @@
 			<103000 134000>,
 			<268000 348000>,
 			<505000 657000>;
-		qcom,iommu-groups = <&venus_domain_ns &venus_domain_cp>;
-		qcom,iommu-group-buffer-types = <0xfff 0x1ff>;
 		qcom,buffer-type-tz-usage-table = <0x1 0x1>,
-			<0x1fe 0x2>;
+			<0x2 0x2>,
+			<0x1f0 0x3>;
 		qcom,max-hw-load = <352800>; /* 720p @ 30 + 1080p @ 30 */
+		qcom,vidc-iommu-domains {
+			qcom,domain-ns {
+				qcom,vidc-domain-phandle = <&venus_domain_ns>;
+				qcom,vidc-partition-buffer-types = <0x1ff>,
+							<0x200>;
+			};
+
+			qcom,domain-cp {
+				qcom,vidc-domain-phandle = <&venus_domain_cp>;
+				qcom,vidc-partition-buffer-types = <0x2>,
+							<0x1f1>;
+			};
+		};
 	};
 
 	qcom,wfd {
@@ -275,10 +287,9 @@
 				<87 512 6000  6000>;
 	};
 
-	android_usb@fe8050c8 {
+	android_usb: android_usb@fe8050c8 {
 		compatible = "qcom,android-usb";
 		reg = <0xfe8050c8 0xc8>;
-		qcom,android-usb-cdrom;
 		qcom,android-usb-swfi-latency = <1>;
 	};
 
@@ -1072,16 +1083,17 @@
 		interrupts = <0 95 0>, <0 238 0>;
 		spi-max-frequency = <19200000>;
 
-		gpios = <&msmgpio 3 0>, /* CLK  */
-			<&msmgpio 1 0>, /* MISO */
-			<&msmgpio 0 0>; /* MOSI */
-		cs-gpios = <&msmgpio 22 0>;
+		qcom,gpio-mosi = <&msmgpio 0 0>;
+		qcom,gpio-miso = <&msmgpio 1 0>;
+		qcom,gpio-clk  = <&msmgpio 3 0>;
+		qcom,gpio-cs0  = <&msmgpio 22 0>;
 
 		qcom,infinite-mode = <0>;
 		qcom,use-bam;
 		qcom,ver-reg-exists;
 		qcom,bam-consumer-pipe-index = <12>;
 		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <86>;
 	};
 
 	qcom,bam_dmux@fc834000 {
diff --git a/arch/arm/boot/dts/msm8610-cdp.dts b/arch/arm/boot/dts/msm8610-cdp.dts
index b0c0191..f3470c2 100644
--- a/arch/arm/boot/dts/msm8610-cdp.dts
+++ b/arch/arm/boot/dts/msm8610-cdp.dts
@@ -247,6 +247,8 @@
 
 &sdhc_1 {
 	vdd-supply = <&pm8110_l17>;
+	qcom,vdd-always-on;
+	qcom,vdd-lpm-sup;
 	qcom,vdd-voltage-level = <2900000 2900000>;
 	qcom,vdd-current-level = <200 400000>;
 
diff --git a/arch/arm/boot/dts/msm8610-ion.dtsi b/arch/arm/boot/dts/msm8610-ion.dtsi
index d625b95..77cd582 100644
--- a/arch/arm/boot/dts/msm8610-ion.dtsi
+++ b/arch/arm/boot/dts/msm8610-ion.dtsi
@@ -38,14 +38,14 @@
 			compatible = "qcom,msm-ion-reserve";
 			reg = <23>;
 			qcom,heap-align = <0x1000>;
-			qcom,memory-fixed = <0x0bf00000 0x1A00000>;
+			qcom,memory-fixed = <0x0c500000 0x1300000>;
 		};
 
 		qcom,ion-heap@26 { /* MODEM HEAP */
 			compatible = "qcom,msm-ion-reserve";
 			reg = <26>;
 			qcom,heap-align = <0x1000>;
-			qcom,memory-fixed = <0x08000000 0x3F00000>;
+			qcom,memory-fixed = <0x08800000 0x3d00000>;
 		};
 
 	};
diff --git a/arch/arm/boot/dts/msm8610-mtp.dts b/arch/arm/boot/dts/msm8610-mtp.dts
index 221ace4..0d4c174 100644
--- a/arch/arm/boot/dts/msm8610-mtp.dts
+++ b/arch/arm/boot/dts/msm8610-mtp.dts
@@ -285,6 +285,8 @@
 
 &sdhc_1 {
 	vdd-supply = <&pm8110_l17>;
+	qcom,vdd-always-on;
+	qcom,vdd-lpm-sup;
 	qcom,vdd-voltage-level = <2900000 2900000>;
 	qcom,vdd-current-level = <200 400000>;
 
diff --git a/arch/arm/boot/dts/msm8610-qrd-skuab.dts b/arch/arm/boot/dts/msm8610-qrd-skuab.dts
index c435038..947a312 100644
--- a/arch/arm/boot/dts/msm8610-qrd-skuab.dts
+++ b/arch/arm/boot/dts/msm8610-qrd-skuab.dts
@@ -76,7 +76,7 @@
 			vdd-supply = <&pm8110_l19>;
 			vio-supply = <&pm8110_l14>;
 			fsl,irq-gpio = <&msmgpio 81 0x00>;
-			fsl,sensors-position = <5>;
+			fsl,sensors-position = <1>;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/msm8610-qrd.dtsi b/arch/arm/boot/dts/msm8610-qrd.dtsi
index ea47d45..bd1705f 100644
--- a/arch/arm/boot/dts/msm8610-qrd.dtsi
+++ b/arch/arm/boot/dts/msm8610-qrd.dtsi
@@ -237,6 +237,8 @@
 
 &sdhc_1 {
 	vdd-supply = <&pm8110_l17>;
+	qcom,vdd-always-on;
+	qcom,vdd-lpm-sup;
 	qcom,vdd-voltage-level = <2900000 2900000>;
 	qcom,vdd-current-level = <200 400000>;
 
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index 79543b1..00d0d15 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -173,13 +173,17 @@
 	qcom,vidc@fdc00000 {
 		compatible = "qcom,msm-vidc";
 		qcom,vidc-ns-map = <0x40000000 0x40000000>;
-		qcom,iommu-groups = <&q6_domain_ns>;
-		qcom,iommu-group-buffer-types = <0xfff>;
 		qcom,buffer-type-tz-usage-map = <0x1 0x1>,
 						<0x1fe 0x2>;
 		qcom,hfi = "q6";
 		qcom,max-hw-load = <108000>; /* 720p @ 30 * 1 */
-	};
+		qcom,vidc-iommu-domains {
+			qcom,domain-ns {
+				qcom,vidc-domain-phandle = <&q6_domain_ns>;
+				qcom,vidc-partition-buffer-types = <0xfff>;
+			};
+		};
+	};
 
 	qcom,usbbam@f9a44000 {
 		compatible = "qcom,usb-bam-msm";
@@ -254,6 +258,8 @@
 		interrupt-names = "core_irq", "bam_irq";
 
 		vdd-supply = <&pm8110_l17>;
+		qcom,vdd-always-on;
+		qcom,vdd-lpm-sup;
 		qcom,vdd-voltage-level = <2900000 2900000>;
 		qcom,vdd-current-level = <9000 400000>;
 
@@ -420,7 +426,7 @@
 
 	qcom,msm-mem-hole {
 		compatible = "qcom,msm-mem-hole";
-		qcom,memblock-remove = <0x07b00000 0x6400000>; /* Address and Size of Hole */
+		qcom,memblock-remove = <0x08800000 0x5600000>; /* Address and Size of Hole */
 	};
 
 	qcom,wdt@f9017000 {
@@ -518,16 +524,17 @@
 		interrupts = <0 98 0>, <0 238 0>;
 		spi-max-frequency = <50000000>;
 
-		gpios = <&msmgpio 89 0>, /* CLK  */
-			<&msmgpio 87 0>, /* MISO */
-			<&msmgpio 86 0>; /* MOSI */
-		cs-gpios = <&msmgpio 88 0>;
+		qcom,gpio-mosi = <&msmgpio 86 0>;
+		qcom,gpio-miso = <&msmgpio 87 0>;
+		qcom,gpio-clk  = <&msmgpio 89 0>;
+		qcom,gpio-cs0  = <&msmgpio 88 0>;
 
 		qcom,infinite-mode = <0>;
 		qcom,use-bam;
 		qcom,ver-reg-exists;
 		qcom,bam-consumer-pipe-index = <18>;
 		qcom,bam-producer-pipe-index = <19>;
+		qcom,master-id = <86>;
 	};
 
 	qcom,pronto@fb21b000 {
@@ -803,9 +810,9 @@
 		interrupts = <0 29 1>;
 	};
 
-	qcom,qseecom@7B00000 {
+	qcom,qseecom@da00000 {
 		compatible = "qcom,qseecom";
-		reg = <0x7B00000 0x500000>;
+		reg = <0xda00000 0x100000>;
 		reg-names = "secapp-region";
 		qcom,disk-encrypt-pipe-pair = <2>;
 		qcom,hlos-ce-hw-instance = <0>;
diff --git a/arch/arm/boot/dts/msm8926-qrd.dts b/arch/arm/boot/dts/msm8926-qrd.dts
index 8497ba2..e056b7e 100644
--- a/arch/arm/boot/dts/msm8926-qrd.dts
+++ b/arch/arm/boot/dts/msm8926-qrd.dts
@@ -13,7 +13,6 @@
 /dts-v1/;
 /include/ "msm8926.dtsi"
 /include/ "msm8226-qrd.dtsi"
-/include/ "msm8226-camera-sensor-cdp.dtsi"
 
 / {
 	model = "Qualcomm MSM 8926 QRD";
diff --git a/arch/arm/boot/dts/msm8926.dtsi b/arch/arm/boot/dts/msm8926.dtsi
index f46b714..013da90a 100644
--- a/arch/arm/boot/dts/msm8926.dtsi
+++ b/arch/arm/boot/dts/msm8926.dtsi
@@ -17,7 +17,6 @@
  */
 
 /include/ "msm8226.dtsi"
-/include/ "msm8226-camera.dtsi"
 
 / {
 	model = "Qualcomm MSM 8926";
diff --git a/arch/arm/boot/dts/msm8974-camera.dtsi b/arch/arm/boot/dts/msm8974-camera.dtsi
index 4be2b38..1413e0c 100644
--- a/arch/arm/boot/dts/msm8974-camera.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera.dtsi
@@ -94,12 +94,13 @@
 
 	qcom,ispif@fda0A000 {
 		cell-index = <0>;
-		compatible = "qcom,ispif";
+		compatible = "qcom,ispif-v3.0", "qcom,ispif";
 		reg = <0xfda0A000 0x500>,
                       <0xfda00020 0x10>;
 		reg-names = "ispif", "csi_clk_mux";
 		interrupts = <0 55 0>;
 		interrupt-names = "ispif";
+		qcom,num-isps = <0x2>;
 	};
 
 	qcom,vfe@fda10000 {
diff --git a/arch/arm/boot/dts/msm8974-cdp.dtsi b/arch/arm/boot/dts/msm8974-cdp.dtsi
index e66ea25..9badf6d 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-cdp.dtsi
@@ -234,6 +234,7 @@
 		hsic,ignore-cal-pad-config;
 		hsic,strobe-pad-offset = <0x2050>;
 		hsic,data-pad-offset = <0x2054>;
+		qcom,phy-susp-sof-workaround;
 
 		qcom,msm-bus,name = "hsic";
 		qcom,msm-bus,num-cases = <2>;
@@ -270,11 +271,11 @@
 			qcom,default-state = "on";
 			qcom,max-current = <25>;
 			qcom,ctrl-delay-us = <0>;
-			qcom,boost-curr-lim = <3>;
+			qcom,boost-curr-lim = <5>;
 			qcom,cp-sel = <0>;
-			qcom,switch-freq = <2>;
+			qcom,switch-freq = <11>;
 			qcom,ovp-val = <2>;
-			qcom,num-strings = <1>;
+			qcom,num-strings = <3>;
 			qcom,id = <0>;
 		};
 	};
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index 7f714e8..9c2509a 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -269,11 +269,11 @@
 			qcom,default-state = "on";
 			qcom,max-current = <25>;
 			qcom,ctrl-delay-us = <0>;
-			qcom,boost-curr-lim = <3>;
+			qcom,boost-curr-lim = <5>;
 			qcom,cp-sel = <0>;
-			qcom,switch-freq = <2>;
+			qcom,switch-freq = <11>;
 			qcom,ovp-val = <2>;
-			qcom,num-strings = <1>;
+			qcom,num-strings = <3>;
 			qcom,id = <0>;
 		};
 	};
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index 51cb226..f90599a 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -403,6 +403,7 @@
 			hsic,ignore-cal-pad-config;
 			hsic,strobe-pad-offset = <0x2050>;
 			hsic,data-pad-offset = <0x2054>;
+			qcom,phy-susp-sof-workaround;
 
 			qcom,msm-bus,name = "hsic";
 			qcom,msm-bus,num-cases = <2>;
diff --git a/arch/arm/boot/dts/msm8974-mtp.dtsi b/arch/arm/boot/dts/msm8974-mtp.dtsi
index 4ee56ad..6955597 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-mtp.dtsi
@@ -210,11 +210,11 @@
 			qcom,default-state = "on";
 			qcom,max-current = <25>;
 			qcom,ctrl-delay-us = <0>;
-			qcom,boost-curr-lim = <3>;
+			qcom,boost-curr-lim = <5>;
 			qcom,cp-sel = <0>;
-			qcom,switch-freq = <2>;
+			qcom,switch-freq = <11>;
 			qcom,ovp-val = <2>;
-			qcom,num-strings = <1>;
+			qcom,num-strings = <3>;
 			qcom,id = <0>;
 		};
 	};
diff --git a/arch/arm/boot/dts/msm8974-rumi.dtsi b/arch/arm/boot/dts/msm8974-rumi.dtsi
index c01a4e5..152ac4d 100644
--- a/arch/arm/boot/dts/msm8974-rumi.dtsi
+++ b/arch/arm/boot/dts/msm8974-rumi.dtsi
@@ -13,6 +13,12 @@
 /include/ "msm8974-leds.dtsi"
 /include/ "msm8974-camera-sensor-cdp.dtsi"
 
+/ {
+	aliases {
+		spi0 = &spi_0;
+	};
+};
+
 &soc {
 	timer {
 		clock-frequency = <5000000>;
@@ -42,17 +48,28 @@
 		status = "disable";
 	};
 
-	spi@f9923000 {
+	spi_0: spi@f9923000 { /* BLSP1 QUP1 */
 		compatible = "qcom,spi-qup-v2";
-		reg = <0xf9923000 0x1000>;
-		interrupts = <0 95 0>;
-		spi-max-frequency = <24000000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		gpios = <&msmgpio 3 0>, /* CLK  */
-			<&msmgpio 1 0>, /* MISO */
-			<&msmgpio 0 0>; /* MOSI */
-		cs-gpios = <&msmgpio 9 0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9923000 0x1000>,
+		      <0xf9904000 0xf000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 95 0>, <0 238 0>;
+		spi-max-frequency = <19200000>;
+
+		qcom,gpio-mosi = <&msmgpio 0 0>;
+		qcom,gpio-miso = <&msmgpio 1 0>;
+		qcom,gpio-clk  = <&msmgpio 3 0>;
+		qcom,gpio-cs0  = <&msmgpio 9 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <86>;
 
 		ethernet-switch@2 {
 			compatible = "simtec,ks8851";
diff --git a/arch/arm/boot/dts/msm8974-v1.dtsi b/arch/arm/boot/dts/msm8974-v1.dtsi
index 7b801da..86a61cd 100644
--- a/arch/arm/boot/dts/msm8974-v1.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1.dtsi
@@ -148,3 +148,20 @@
 &usb_otg {
 	qcom,hsusb-otg-pnoc-errata-fix;
 };
+
+&gdsc_venus {
+	qcom,skip-logic-collapse;
+	qcom,retain-periph;
+	qcom,retain-mem;
+};
+
+&gdsc_mdss {
+	qcom,skip-logic-collapse;
+	qcom,retain-periph;
+	qcom,retain-mem;
+};
+
+&gdsc_oxili_gx {
+	qcom,retain-mem;
+	qcom,retain-periph;
+};
diff --git a/arch/arm/boot/dts/msm8974-v2-cdp.dts b/arch/arm/boot/dts/msm8974-v2.0-1-cdp.dts
similarity index 90%
rename from arch/arm/boot/dts/msm8974-v2-cdp.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-cdp.dts
index f4014aa..875b3fc 100644
--- a/arch/arm/boot/dts/msm8974-v2-cdp.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-cdp.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
 /include/ "msm8974-cdp.dtsi"
 
 / {
-	model = "Qualcomm MSM 8974v2 CDP";
+	model = "Qualcomm MSM 8974v2.0/1 CDP";
 	compatible = "qcom,msm8974-cdp", "qcom,msm8974", "qcom,cdp";
 	qcom,msm-id = <126 1 0x20000>,
 		      <185 1 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2-fluid.dts b/arch/arm/boot/dts/msm8974-v2.0-1-fluid.dts
similarity index 90%
rename from arch/arm/boot/dts/msm8974-v2-fluid.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-fluid.dts
index 9c9e3c0..236593d 100644
--- a/arch/arm/boot/dts/msm8974-v2-fluid.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-fluid.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
 /include/ "msm8974-fluid.dtsi"
 
 / {
-	model = "Qualcomm MSM 8974v2 FLUID";
+	model = "Qualcomm MSM 8974v2.0/1 FLUID";
 	compatible = "qcom,msm8974-fluid", "qcom,msm8974", "qcom,fluid";
 	qcom,msm-id = <126 3 0x20000>,
 		      <185 3 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2-liquid.dts b/arch/arm/boot/dts/msm8974-v2.0-1-liquid.dts
similarity index 90%
rename from arch/arm/boot/dts/msm8974-v2-liquid.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-liquid.dts
index ddae6fe..23292f6 100644
--- a/arch/arm/boot/dts/msm8974-v2-liquid.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-liquid.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
 /include/ "msm8974-liquid.dtsi"
 
 / {
-	model = "Qualcomm MSM 8974v2 LIQUID";
+	model = "Qualcomm MSM 8974v2.0/1 LIQUID";
 	compatible = "qcom,msm8974-liquid", "qcom,msm8974", "qcom,liquid";
 	qcom,msm-id = <126 9 0x20000>,
 		      <185 9 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2-mtp.dts b/arch/arm/boot/dts/msm8974-v2.0-1-mtp.dts
similarity index 91%
rename from arch/arm/boot/dts/msm8974-v2-mtp.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-mtp.dts
index 021b626..de9e6a3 100644
--- a/arch/arm/boot/dts/msm8974-v2-mtp.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-mtp.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
 /include/ "msm8974-mtp.dtsi"
 
 / {
-	model = "Qualcomm MSM 8974v2 MTP";
+	model = "Qualcomm MSM 8974v2.0/1 MTP";
 	compatible = "qcom,msm8974-mtp", "qcom,msm8974", "qcom,mtp";
 	qcom,msm-id = <126 8 0x20000>,
 		      <185 8 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2.0-1.dtsi b/arch/arm/boot/dts/msm8974-v2.0-1.dtsi
new file mode 100644
index 0000000..1fad868
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-v2.0-1.dtsi
@@ -0,0 +1,36 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file; common device definitions belong in the shared
+ * msm8974.dtsi file.
+ */
+
+/include/ "msm8974-v2.dtsi"
+
+&gdsc_venus {
+	qcom,skip-logic-collapse;
+	qcom,retain-periph;
+	qcom,retain-mem;
+};
+
+&gdsc_mdss {
+	qcom,skip-logic-collapse;
+	qcom,retain-periph;
+	qcom,retain-mem;
+};
+
+&gdsc_oxili_gx {
+	qcom,retain-mem;
+	qcom,retain-periph;
+};
diff --git a/arch/arm/boot/dts/msm8974-v2.2.dtsi b/arch/arm/boot/dts/msm8974-v2.2.dtsi
index 09455b1..0ca021b 100644
--- a/arch/arm/boot/dts/msm8974-v2.2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.2.dtsi
@@ -103,3 +103,8 @@
 		};
 	};
 };
+
+&gdsc_mdss {
+	qcom,retain-periph;
+	qcom,retain-mem;
+};
diff --git a/arch/arm/boot/dts/msm8974-v2.dtsi b/arch/arm/boot/dts/msm8974-v2.dtsi
index 0b3b60f..9d5e50b 100644
--- a/arch/arm/boot/dts/msm8974-v2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.dtsi
@@ -136,12 +136,29 @@
 		<3240000 1600000>,
 		<4048000 1600000>,
 		<4264000 1600000>;
-	qcom,iommu-groups = <&venus_domain_ns &venus_domain_sec_bitstream
-			&venus_domain_sec_pixel &venus_domain_sec_non_pixel>;
-	qcom,iommu-group-buffer-types = <0xfff 0x91 0x42 0x120>;
 	qcom,buffer-type-tz-usage-table = <0x91 0x1>,
 					<0x42 0x2>,
 					<0x120 0x3>;
+	qcom,vidc-iommu-domains {
+		qcom,domain-ns {
+			qcom,vidc-domain-phandle = <&venus_domain_ns>;
+			qcom,vidc-partition-buffer-types = <0x1ff>,
+							<0x200>;
+		};
+		qcom,domain-sec-bs {
+			qcom,vidc-domain-phandle = <&venus_domain_sec_bitstream>;
+			qcom,vidc-partition-buffer-types = <0x91>;
+		};
+		qcom,domain-sec-px {
+			qcom,vidc-domain-phandle = <&venus_domain_sec_pixel>;
+			qcom,vidc-partition-buffer-types = <0x42>;
+		};
+		qcom,domain-sec-np {
+			qcom,vidc-domain-phandle = <&venus_domain_sec_non_pixel>;
+			qcom,vidc-partition-buffer-types = <0x120>;
+		};
+	};
+
 };
 
 &krait_pdn {
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 59e8dac..c8e3d96 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -580,18 +580,28 @@
 		qcom,bam-dma-res-pipes = <6>;
 	};
 
-	spi_7: spi_epm: spi@f9966000 {
+	spi_7: spi_epm: spi@f9966000 { /* BLSP2 QUP4 */
 		compatible = "qcom,spi-qup-v2";
-		reg = <0xf9966000 0x1000>;
-		interrupts = <0 104 0>;
-		spi-max-frequency = <19200000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		qcom,master-id = <84>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9966000 0x1000>,
+		      <0xf9944000 0x15000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 104 0>, <0 239 0>;
+		spi-max-frequency = <19200000>;
+
 		qcom,gpio-mosi = <&msmgpio 53 0>;
 		qcom,gpio-miso = <&msmgpio 54 0>;
 		qcom,gpio-clk  = <&msmgpio 56 0>;
 		qcom,gpio-cs0  = <&msmgpio 55 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <18>;
+		qcom,bam-producer-pipe-index = <19>;
+		qcom,master-id = <84>;
 	};
 
 	tspp: msm_tspp@f99d8000 {
@@ -816,18 +826,28 @@
 		qcom,master-id = <86>;
 	};
 
-	spi_0: spi@f9923000 {
+	spi_0: spi@f9923000 { /* BLSP1 QUP1 */
 		compatible = "qcom,spi-qup-v2";
-		reg = <0xf9923000 0x1000>;
-		interrupts = <0 95 0>;
-		spi-max-frequency = <19200000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		qcom,master-id = <86>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9923000 0x1000>,
+		      <0xf9904000 0xf000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 95 0>, <0 238 0>;
+		spi-max-frequency = <19200000>;
+
 		qcom,gpio-mosi = <&msmgpio 0 0>;
 		qcom,gpio-miso = <&msmgpio 1 0>;
 		qcom,gpio-clk  = <&msmgpio 3 0>;
-		qcom,gpio-cs2  = <&msmgpio 9 0>;
+		qcom,gpio-cs0  = <&msmgpio 9 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <86>;
 	};
 
 	qcom,acpuclk@f9000000 {
@@ -877,6 +897,7 @@
 		qcom,vdd-voltage-level = <1 5 7>;
 		qcom,dwc-hsphy-init = <0x00D191A4>;
 		qcom,misc-ref = <&pm8941_misc>;
+		dwc_usb3-adc_tm = <&pm8941_adc_tm>;
 
 		qcom,msm-bus,name = "usb3";
 		qcom,msm-bus,num-cases = <2>;
@@ -1651,17 +1672,11 @@
 
 &gdsc_venus {
 	qcom,clock-names = "core_clk";
-	qcom,skip-logic-collapse;
-	qcom,retain-periph;
-	qcom,retain-mem;
 	status = "ok";
 };
 
 &gdsc_mdss {
 	qcom,clock-names = "core_clk", "lut_clk";
-	qcom,skip-logic-collapse;
-	qcom,retain-periph;
-	qcom,retain-mem;
 	status = "ok";
 };
 
@@ -1678,8 +1693,6 @@
 
 &gdsc_oxili_gx {
 	qcom,clock-names = "core_clk";
-	qcom,retain-mem;
-	qcom,retain-periph;
 	status = "ok";
 };
 
diff --git a/arch/arm/boot/dts/msm8974pro-ac-mtp.dts b/arch/arm/boot/dts/msm8974pro-ac-mtp.dts
index b8df576..9ac9aaf 100644
--- a/arch/arm/boot/dts/msm8974pro-ac-mtp.dts
+++ b/arch/arm/boot/dts/msm8974pro-ac-mtp.dts
@@ -27,3 +27,103 @@
 &sdhc_1 {
 	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000>;
 };
+
+&pma8084_vadc {
+	chan@73 {
+		label = "msm_therm";
+		reg = <0x73>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@75 {
+		label = "pa_therm0";
+		reg = <0x75>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@77 {
+		label = "pa_therm1";
+		reg = <0x77>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@78 {
+		label = "quiet_therm";
+		reg = <0x78>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
+&pma8084_adc_tm {
+	chan@73 {
+		label = "msm_therm";
+		reg = <0x73>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,btm-channel-number = <0x48>;
+		qcom,thermal-node;
+	};
+
+	chan@75 {
+		label = "pa_therm0";
+		reg = <0x75>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@77 {
+		label = "pa_therm1";
+		reg = <0x77>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@78 {
+		label = "quiet_therm";
+		reg = <0x78>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ac-pm8941.dtsi b/arch/arm/boot/dts/msm8974pro-ac-pm8941.dtsi
index 4d10ded..0989c34 100644
--- a/arch/arm/boot/dts/msm8974pro-ac-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-ac-pm8941.dtsi
@@ -21,14 +21,6 @@
 };
 /include/ "msm-pm8941.dtsi"
 
-&pma8084_vadc {
-	status = "disabled";
-};
-
-&pma8084_adc_tm {
-	status = "disabled";
-};
-
 &pm8941_lsid0 {
 	qcom,power-on@800 {
 		status = "disabled";
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index 59d7ba0..0e7baf1 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -328,18 +328,26 @@
 		interrupt-names = "bam_irq";
 	};
 
-	spi_0: spi@f9924000 {
+	spi_0: spi@f9924000 { /* BLSP1 QUP2 */
 		compatible = "qcom,spi-qup-v2";
-		reg = <0xf9924000 0x1000>;
-		interrupts = <0 96 0>;
-		spi-max-frequency = <25000000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		gpios = <&msmgpio 7 0>, /* CLK  */
-			<&msmgpio 5 0>, /* MISO */
-			<&msmgpio 4 0>; /* MOSI */
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9924000 0x1000>,
+		      <0xf9904000 0x11000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 96 0>, <0 238 0>;
+		spi-max-frequency = <19200000>;
+		qcom,gpio-mosi = <&msmgpio 4 0>;
+		qcom,gpio-miso = <&msmgpio 5 0>;
+		qcom,gpio-clk  = <&msmgpio 7 0>;
+		qcom,gpio-cs0  = <&msmgpio 6 0>;
 
-		cs-gpios = <&msmgpio 6 0>;
+		qcom,infinite-mode = <0>;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <14>;
+		qcom,bam-producer-pipe-index = <15>;
+		qcom,master-id = <86>;
 
 		ethernet-switch@0 {
 			compatible = "simtec,ks8851";
@@ -736,7 +744,23 @@
 			compatible = "qcom,msm-dai-q6-dev";
 			qcom,msm-dai-q6-dev-id = <240>;
 		};
+
+		qcom,msm-dai-q6-incall-record-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32771>;
+		};
+
+		qcom,msm-dai-q6-incall-record-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32772>;
+		};
+
+		qcom,msm-dai-q6-incall-music-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32773>;
+		};
 	};
+
 	qcom,msm-pcm-dtmf {
 		compatible = "qcom,msm-pcm-dtmf";
 	};
diff --git a/arch/arm/boot/dts/msmkrypton.dtsi b/arch/arm/boot/dts/msmkrypton.dtsi
index cdaf964..f3e9dbb 100644
--- a/arch/arm/boot/dts/msmkrypton.dtsi
+++ b/arch/arm/boot/dts/msmkrypton.dtsi
@@ -17,6 +17,10 @@
 	compatible = "qcom,msmkrypton";
 	interrupt-parent = <&intc>;
 
+	aliases {
+		spi6 = &spi_6;
+	};
+
 	soc: soc { };
 };
 
@@ -140,18 +144,27 @@
 	};
 
 	spi_6: spi@f9928000 { /* BLSP1 QUP6 */
-		cell-index = <0>;
 		compatible = "qcom,spi-qup-v2";
 		#address-cells = <1>;
 		#size-cells = <0>;
-		reg = <0xf9928000 0x1000>;
-		interrupts = <0 100 0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9928000 0x1000>,
+		      <0xf9904000 0x19000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 100 0>, <0 238 0>;
 		spi-max-frequency = <19200000>;
 
-		gpios = <&msmgpio 23 0>, /* CLK  */
-		<&msmgpio 21 0>, /* MISO */
-		<&msmgpio 20 0>; /* MOSI */
-		cs-gpios = <&msmgpio 22 0>;
+		qcom,gpio-mosi = <&msmgpio 20 0>;
+		qcom,gpio-miso = <&msmgpio 21 0>;
+		qcom,gpio-clk  = <&msmgpio 23 0>;
+		qcom,gpio-cs0  = <&msmgpio 22 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <22>;
+		qcom,bam-producer-pipe-index = <23>;
+		qcom,master-id = <86>;
 	};
 
 	qcom,ipc-spinlock@fd484000 {
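As with the other QUP nodes in this change, the SPI controller above now describes both its register block and the BLSP BAM by name, along with the BAM pipe pair it owns. A sketch of how a driver picks those up (the probe fragment and the PIO fallback are illustrative; only the resource names are taken from the DT):

	#include <linux/platform_device.h>
	#include <linux/ioport.h>
	#include <linux/errno.h>

	static int example_get_bam_resources(struct platform_device *pdev)
	{
		struct resource *bam_mem;
		int bam_irq;

		bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						       "spi_bam_physical");
		bam_irq = platform_get_irq_byname(pdev, "spi_bam_irq");
		if (!bam_mem || bam_irq < 0)
			return -ENODEV;	/* run in FIFO/PIO mode instead */
		return 0;
	}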
diff --git a/arch/arm/boot/dts/msmsamarium.dtsi b/arch/arm/boot/dts/msmsamarium.dtsi
index 251bef2..d76c2c1 100644
--- a/arch/arm/boot/dts/msmsamarium.dtsi
+++ b/arch/arm/boot/dts/msmsamarium.dtsi
@@ -42,7 +42,7 @@
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		reg = <0xfd510000 0x4000>;
-		ngpio = <145>;
+		ngpio = <146>;
 		interrupts = <0 208 0>;
 		qcom,direct-connect-irqs = <8>;
 	};
@@ -89,6 +89,14 @@
 		status = "disabled";
 	};
 
+	qcom,sps@f9980000 {
+		compatible = "qcom,msm_sps";
+		reg = <0xf9984000 0x15000>,
+		      <0xf9999000 0xb000>;
+		reg-names = "bam_mem", "core_mem";
+		interrupts = <0 94 0>;
+	};
+
 	qcom,wdt@f9017000 {
 		compatible = "qcom,msm-watchdog";
 		reg = <0xf9017000 0x1000>;
@@ -234,4 +242,18 @@
 		cell-index = <0>;
 		qcom,not-wakeup;     /* Needed until MPM is fully configured. */
 	};
+
+	tsens: tsens@fc4a8000 {
+		compatible = "qcom,msm-tsens";
+		reg = <0xfc4a8000 0x2000>,
+		      <0xfc4bc000 0x1000>;
+		reg-names = "tsens_physical", "tsens_eeprom_physical";
+		interrupts = <0 184 0>;
+		qcom,sensors = <11>;
+		qcom,slope = <3200 3200 3200 3200 3200 3200 3200 3200 3200
+				3200 3200>;
+		qcom,calib-mode = "fuse_map1";
+	};
 };
+
+/include/ "msm-pma8084.dtsi"
diff --git a/arch/arm/configs/apq8084_defconfig b/arch/arm/configs/apq8084_defconfig
index fef6543..4315d3f 100644
--- a/arch/arm/configs/apq8084_defconfig
+++ b/arch/arm/configs/apq8084_defconfig
@@ -406,4 +406,5 @@
 CONFIG_CORESIGHT_FUNNEL=y
 CONFIG_CORESIGHT_REPLICATOR=y
 CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_ETM=y
 CONFIG_USB_BAM=y
diff --git a/arch/arm/configs/msm8226-perf_defconfig b/arch/arm/configs/msm8226-perf_defconfig
index f49b5f7..31d133a 100644
--- a/arch/arm/configs/msm8226-perf_defconfig
+++ b/arch/arm/configs/msm8226-perf_defconfig
@@ -291,6 +291,7 @@
 CONFIG_OV8825=y
 CONFIG_OV12830=y
 CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_EEPROM=y
 CONFIG_MSM_CPP=y
 CONFIG_MSM_CCI=y
 CONFIG_MSM_CSI30_HEADER=y
diff --git a/arch/arm/configs/msm8226_defconfig b/arch/arm/configs/msm8226_defconfig
index aced8f2..24ac0d8 100644
--- a/arch/arm/configs/msm8226_defconfig
+++ b/arch/arm/configs/msm8226_defconfig
@@ -295,6 +295,7 @@
 CONFIG_OV8825=y
 CONFIG_OV12830=y
 CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_EEPROM=y
 CONFIG_MSM_CPP=y
 CONFIG_MSM_CCI=y
 CONFIG_MSM_CSI30_HEADER=y
diff --git a/arch/arm/configs/msm8610-perf_defconfig b/arch/arm/configs/msm8610-perf_defconfig
index 0db08db..b9d37b8 100644
--- a/arch/arm/configs/msm8610-perf_defconfig
+++ b/arch/arm/configs/msm8610-perf_defconfig
@@ -391,3 +391,4 @@
 CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
 CONFIG_SENSORS_STK3X1X=y
 CONFIG_SENSORS_MMA8X5X=y
+CONFIG_LOGCAT_SIZE=64
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index cfd346e..a5f0704 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -71,7 +71,6 @@
 CONFIG_MSM_RTB_SEPARATE_CPUS=y
 CONFIG_MSM_ENABLE_WDOG_DEBUG_CONTROL=y
 CONFIG_MSM_BOOT_STATS=y
-CONFIG_MSM_XPU_ERR_FATAL=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_SMP=y
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index ad9dfb2..bb97140 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -68,7 +68,9 @@
 		smp_rmb();
 	} while (epoch_cyc != cd.epoch_cyc_copy);
 
-	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
+	cyc = read_sched_clock();
+	cyc = (cyc - epoch_cyc) & sched_clock_mask;
+	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
 }
 
 /*
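The change above re-samples the hardware counter once the epoch values are known to be consistent, so the delta is computed against a fresh reading rather than a possibly stale one. The conversion it feeds is the usual fixed-point mult/shift scaling; for reference, a self-contained version of that arithmetic (the name is illustrative):

	#include <linux/types.h>

	/* ns = cyc * (mult / 2^shift), done in integer math. */
	static inline u64 example_cyc_to_ns(u64 cyc, u32 mult, u32 shift)
	{
		return (cyc * mult) >> shift;
	}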
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index b80949c..5c0d2cf 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -284,6 +284,7 @@
 	select MSM_ULTRASOUND_B
 	select MSM_RPM_LOG
 	select ARCH_WANT_KMAP_ATOMIC_FLUSH
+	select KRAIT_REGULATOR
 
 config ARCH_APQ8084
 	bool "APQ8084"
@@ -521,6 +522,7 @@
 	select ARM_GIC
 	select CPU_V7
 	select MSM_SCM
+	select MSM_PIL
 	select MSM_GPIOMUX
 	select MULTI_IRQ_HANDLER
 	select MSM_SPM_V2
@@ -3060,4 +3062,11 @@
 	 Support the wallclk directory in sysfs filesystem to enable the
 	 wall clock simulation and read the current SFN.
 
+config KRAIT_REGULATOR
+	bool "Support Kraits powered via ganged regulators in the PMIC"
+	help
+	 Certain MSMs have the Krait CPUs powered via a single supply
+	 line from the PMIC. This supply line is powered by multiple
+	 regulators running in ganged mode inside the PMIC. Enable
+	 this option to support such configurations.
 endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 8c42b8d..e4a647f 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -24,7 +24,7 @@
 
 obj-y += acpuclock.o
 obj-$(CONFIG_HW_PERF_EVENTS) += perf_trace_counters.o
-obj-$(CONFIG_ARCH_MSM_KRAIT) += acpuclock-krait.o
+obj-$(CONFIG_ARCH_MSM_KRAIT) += acpuclock-krait.o clock-krait.o
 ifdef CONFIG_ARCH_MSM_KRAIT
 obj-$(CONFIG_DEBUG_FS) += acpuclock-krait-debug.o
 endif
@@ -296,7 +296,7 @@
 obj-$(CONFIG_ARCH_MSM8610) += gdsc.o
 obj-$(CONFIG_ARCH_MPQ8092) += gdsc.o
 obj-$(CONFIG_ARCH_APQ8084) += gdsc.o
-obj-$(CONFIG_ARCH_MSM8974) += krait-regulator.o
+obj-$(CONFIG_KRAIT_REGULATOR) += krait-regulator.o
 obj-$(CONFIG_ARCH_MSMKRYPTON) += board-krypton.o board-krypton-gpiomux.o
 obj-$(CONFIG_ARCH_MSMSAMARIUM) += board-samarium.o board-samarium-gpiomux.o
 obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 3505afe..9296515 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -53,13 +53,25 @@
         dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v1-mtp.dtb
         dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v1-rumi.dtb
         dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v1-sim.dtb
-        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2-cdp.dtb
-        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2-fluid.dtb
-        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2-liquid.dtb
-        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2-mtp.dtb
-        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2-cdp.dtb
-        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2-liquid.dtb
-        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2-dragonboard.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.0-1-cdp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.0-1-fluid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.0-1-liquid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.0-1-mtp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.0-1-cdp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.0-1-liquid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.0-1-dragonboard.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.2-cdp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.2-liquid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= apq8074-v2.2-dragonboard.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.2-cdp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.2-fluid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.2-liquid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974-v2.2-mtp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974pro-ab-cdp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974pro-ab-fluid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974pro-ab-liquid.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974pro-ab-mtp.dtb
+        dtb-$(CONFIG_ARCH_MSM8974)	+= msm8974pro-ac-mtp.dtb
 
 # APQ8084
    zreladdr-$(CONFIG_ARCH_APQ8084)	:= 0x00008000
@@ -85,6 +97,7 @@
 # MSM8226
    zreladdr-$(CONFIG_ARCH_MSM8226)	:= 0x00008000
         dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-sim.dtb
+        dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-fluid.dtb
         dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-v1-cdp.dtb
         dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-v1-mtp.dtb
         dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-v1-qrd-evt.dtb
@@ -120,6 +133,8 @@
 
 # MSM8610
    zreladdr-$(CONFIG_ARCH_MSM8610)	:= 0x00008000
+        dtb-$(CONFIG_ARCH_MSM8610)	+= msm8610-cdp.dtb
+        dtb-$(CONFIG_ARCH_MSM8610)	+= msm8610-mtp.dtb
         dtb-$(CONFIG_ARCH_MSM8610)	+= msm8610-rumi.dtb
         dtb-$(CONFIG_ARCH_MSM8610)	+= msm8610-sim.dtb
         dtb-$(CONFIG_ARCH_MSM8610)	+= msm8610-qrd-skuaa.dtb
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index 21a940d..1460f94 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -233,6 +233,7 @@
 #define UL_TIMEOUT_DELAY 1000	/* in ms */
 #define ENABLE_DISCONNECT_ACK	0x1
 #define SHUTDOWN_TIMEOUT_MS	500
+#define UL_WAKEUP_TIMEOUT_MS	2000
 static void toggle_apps_ack(void);
 static void reconnect_to_bam(void);
 static void disconnect_to_bam(void);
@@ -1663,7 +1664,8 @@
 	if (wait_for_ack) {
 		BAM_DMUX_LOG("%s waiting for previous ack\n", __func__);
 		ret = wait_for_completion_timeout(
-					&ul_wakeup_ack_completion, HZ);
+					&ul_wakeup_ack_completion,
+					msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
 		wait_for_ack = 0;
 		if (unlikely(ret == 0) && ssrestart_check()) {
 			mutex_unlock(&wakeup_lock);
@@ -1674,14 +1676,16 @@
 	INIT_COMPLETION(ul_wakeup_ack_completion);
 	power_vote(1);
 	BAM_DMUX_LOG("%s waiting for wakeup ack\n", __func__);
-	ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
+	ret = wait_for_completion_timeout(&ul_wakeup_ack_completion,
+					msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
 	if (unlikely(ret == 0) && ssrestart_check()) {
 		mutex_unlock(&wakeup_lock);
 		BAM_DMUX_LOG("%s timeout wakeup ack\n", __func__);
 		return;
 	}
 	BAM_DMUX_LOG("%s waiting completion\n", __func__);
-	ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
+	ret = wait_for_completion_timeout(&bam_connection_completion,
+					msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
 	if (unlikely(ret == 0) && ssrestart_check()) {
 		mutex_unlock(&wakeup_lock);
 		BAM_DMUX_LOG("%s timeout power on\n", __func__);
diff --git a/arch/arm/mach-msm/board-8084.c b/arch/arm/mach-msm/board-8084.c
index 8f35a29..de6b50c 100644
--- a/arch/arm/mach-msm/board-8084.c
+++ b/arch/arm/mach-msm/board-8084.c
@@ -30,6 +30,7 @@
 #include <mach/clk-provider.h>
 #include <mach/msm_smem.h>
 #include <mach/rpm-smd.h>
+#include <mach/rpm-regulator-smd.h>
 #include "spm.h"
 #include "board-dt.h"
 #include "clock.h"
@@ -91,6 +92,7 @@
 	msm_init_modem_notifier_list();
 	msm_smd_init();
 	msm_rpm_driver_init();
+	rpm_regulator_smd_driver_init();
 	msm_spm_device_init();
 	msm_clock_init(&msm8084_clock_init_data);
 	tsens_tm_init_driver();
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index a93cfe5..a29bb88 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -630,16 +630,27 @@
 	static struct rpm_regulator_init_data *rpm_data;
 	int i;
 
-	if (machine_is_msm8960_cdp()) {
+	if (machine_is_msm8960_cdp() || cpu_is_msm8960ab()) {
 		/* Only modify LVS6 consumers for CDP targets. */
 		for (i = 0; i < ARRAY_SIZE(msm_rpm_regulator_init_data); i++) {
 			rpm_data = &msm_rpm_regulator_init_data[i];
-			if (rpm_data->id == RPM_VREG_ID_PM8921_LVS6) {
+			if (machine_is_msm8960_cdp() &&
+				rpm_data->id == RPM_VREG_ID_PM8921_LVS6) {
 				rpm_data->init_data.consumer_supplies
 					= vreg_consumers_CDP_LVS6;
 				rpm_data->init_data.num_consumer_supplies
 					= ARRAY_SIZE(vreg_consumers_CDP_LVS6);
 			}
+			if (cpu_is_msm8960ab() &&
+				rpm_data->id == RPM_VREG_ID_PM8921_S7) {
+				rpm_data->init_data.constraints.min_uV =
+								1275000;
+				rpm_data->init_data.constraints.max_uV =
+								1275000;
+				rpm_data->init_data.constraints.input_uV =
+								1275000;
+				rpm_data->default_uV = 1275000;
+			}
 		}
 	}
 }
diff --git a/arch/arm/mach-msm/board-fsm9900-gpiomux.c b/arch/arm/mach-msm/board-fsm9900-gpiomux.c
index dede706..990aefc 100644
--- a/arch/arm/mach-msm/board-fsm9900-gpiomux.c
+++ b/arch/arm/mach-msm/board-fsm9900-gpiomux.c
@@ -17,6 +17,370 @@
 #include <mach/board.h>
 #include <mach/gpiomux.h>
 
+static struct gpiomux_setting blsp_uart_no_pull_config = {
+	.func = GPIOMUX_FUNC_2,
+	.drv = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting blsp_uart_pull_up_config = {
+	.func = GPIOMUX_FUNC_2,
+	.drv = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting blsp_i2c_config = {
+	.func = GPIOMUX_FUNC_3,
+	.drv = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_blsp_configs[] __initdata = {
+	{
+		.gpio      = 0,	       /* BLSP UART1 TX */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &blsp_uart_no_pull_config,
+		},
+	},
+	{
+		.gpio      = 1,	       /* BLSP UART1 RX */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &blsp_uart_pull_up_config,
+		},
+	},
+	{
+		.gpio      = 2,	       /* BLSP I2C SDA */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+		},
+	},
+	{
+		.gpio      = 3,	       /* BLSP I2C SCL */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+		},
+	},
+	{
+		.gpio      = 6,	       /* BLSP I2C SDA */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+		},
+	},
+	{
+		.gpio      = 7,	       /* BLSP I2C SCL */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+		},
+	},
+	{
+		.gpio      = 36,       /* BLSP UART10 TX */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &blsp_uart_no_pull_config,
+		},
+	},
+	{
+		.gpio      = 37,       /* BLSP UART10 RX */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &blsp_uart_pull_up_config,
+		},
+	},
+	{
+		.gpio      = 38,       /* BLSP I2C10 SDA */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+		},
+	},
+	{
+		.gpio      = 39,       /* BLSP I2C10 SCL */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+		},
+	},
+
+};
+
+static struct gpiomux_setting geni_func4_config = {
+	.func = GPIOMUX_FUNC_4,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting geni_func5_config = {
+	.func = GPIOMUX_FUNC_5,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct msm_gpiomux_config fsm_geni_configs[] __initdata = {
+	{
+		.gpio      = 8,	       /* GENI7 DATA */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &geni_func4_config,
+		},
+	},
+	{
+		.gpio      = 9,	       /* GENI1 DATA */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &geni_func4_config,
+		},
+	},
+	{
+		.gpio      = 10,       /* GENI2 DATA */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &geni_func4_config,
+		},
+	},
+	{
+		.gpio      = 11,       /* GENI7 CLK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &geni_func4_config,
+		},
+	},
+	{
+		.gpio      = 20,       /* GENI3 DATA */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &geni_func5_config,
+		},
+	},
+	{
+		.gpio      = 21,       /* GENI4 DATA */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &geni_func5_config,
+		},
+	},
+	{
+		.gpio      = 22,       /* GENI6 DATA */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &geni_func5_config,
+		},
+	},
+	{
+		.gpio      = 23,       /* GENI6 CLK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &geni_func5_config,
+		},
+	},
+	{
+		.gpio      = 30,       /* GENI5 DATA */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &geni_func4_config,
+		},
+	},
+	{
+		.gpio      = 31,       /* GENI5 CLK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &geni_func4_config,
+		},
+	},
+
+};
+
+static struct gpiomux_setting dan_spi_func4_config = {
+	.func = GPIOMUX_FUNC_4,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting dan_spi_func1_config = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_dan_spi_configs[] __initdata = {
+	{
+		.gpio      = 12,       /* BLSP DAN0 SPI_MOSI */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+		},
+	},
+	{
+		.gpio      = 13,       /* BLSP DAN0 SPI_MISO */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+		},
+	},
+	{
+		.gpio      = 14,       /* BLSP DAN0 SPI_CS */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+		},
+	},
+	{
+		.gpio      = 15,       /* BLSP DAN0 SPI_CLK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+		},
+	},
+	{
+		.gpio      = 16,       /* BLSP DAN1 SPI_MOSI */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+		},
+	},
+	{
+		.gpio      = 17,       /* BLSP DAN1 SPI_MISO */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+		},
+	},
+	{
+		.gpio      = 18,       /* BLSP DAN1 SPI_CS */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+		},
+	},
+	{
+		.gpio      = 19,       /* BLSP DAN1 SPI_CLK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+		},
+	},
+	{
+		.gpio      = 81,       /* BLSP DAN1 SPI_CS0 */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &dan_spi_func1_config,
+		},
+	},
+	{
+		.gpio      = 82,       /* BLSP DAN1 SPI_CS1 */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &dan_spi_func1_config,
+		},
+	},
+};
+
+static struct gpiomux_setting uim_config = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_4MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_uim_configs[] __initdata = {
+	{
+		.gpio      = 24,       /* UIM_DATA */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &uim_config,
+		},
+	},
+	{
+		.gpio      = 25,       /* UIM_CLK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &uim_config,
+		},
+	},
+	{
+		.gpio      = 26,       /* UIM_RESET */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &uim_config,
+		},
+	},
+	{
+		.gpio      = 27,       /* UIM_PRESENT */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &uim_config,
+		},
+	},
+};
+
+static struct gpiomux_setting pcie_config = {
+	.func = GPIOMUX_FUNC_4,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_pcie_configs[] __initdata = {
+	{
+		.gpio      = 28,       /* BLSP PCIE1_CLK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &pcie_config,
+		},
+	},
+	{
+		.gpio      = 32,       /* BLSP PCIE0_CLK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &pcie_config,
+		},
+	},
+};
+
+static struct gpiomux_setting pps_out_config = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_4MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting pps_in_config = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting gps_clk_in_config = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting gps_nav_tlmm_blank_config = {
+	.func = GPIOMUX_FUNC_2,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+static struct msm_gpiomux_config fsm_gps_configs[] __initdata = {
+	{
+		.gpio      = 40,       /* GPS_PPS_OUT */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &pps_out_config,
+		},
+	},
+	{
+		.gpio      = 41,       /* GPS_PPS_IN */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &pps_in_config,
+		},
+	},
+	{
+		.gpio      = 43,       /* GPS_CLK_IN */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gps_clk_in_config,
+		},
+	},
+	{
+		.gpio      = 120,      /* GPS_NAV_TLMM_BLANK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gps_nav_tlmm_blank_config,
+		},
+	},
+};
+
+static struct gpiomux_setting sd_detect_config = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting sd_wp_config = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_sd_configs[] __initdata = {
+	{
+		.gpio      = 42,       /* SD_CARD_DET */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &sd_detect_config,
+		},
+	},
+	{
+		.gpio      = 122,      /* BLSP SD WRITE PROTECT */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &sd_wp_config,
+		},
+	},
+};
+
 void __init fsm9900_init_gpiomux(void)
 {
 	int rc;
@@ -26,4 +390,13 @@
 		pr_err("%s failed %d\n", __func__, rc);
 		return;
 	}
+
+	msm_gpiomux_install(fsm_blsp_configs, ARRAY_SIZE(fsm_blsp_configs));
+	msm_gpiomux_install(fsm_geni_configs, ARRAY_SIZE(fsm_geni_configs));
+	msm_gpiomux_install(fsm_dan_spi_configs,
+			    ARRAY_SIZE(fsm_dan_spi_configs));
+	msm_gpiomux_install(fsm_uim_configs, ARRAY_SIZE(fsm_uim_configs));
+	msm_gpiomux_install(fsm_pcie_configs, ARRAY_SIZE(fsm_pcie_configs));
+	msm_gpiomux_install(fsm_gps_configs, ARRAY_SIZE(fsm_gps_configs));
+	msm_gpiomux_install(fsm_sd_configs, ARRAY_SIZE(fsm_sd_configs));
 }
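The tables above only populate the GPIOMUX_SUSPENDED slot. Pads that need a different configuration while a line is in use can also supply a GPIOMUX_ACTIVE setting; a hypothetical entry (the GPIO number and values are made up for illustration) would look like:

	#include <linux/init.h>
	#include <mach/gpiomux.h>

	static struct gpiomux_setting example_active_config = {
		.func = GPIOMUX_FUNC_1,
		.drv = GPIOMUX_DRV_8MA,
		.pull = GPIOMUX_PULL_NONE,
	};

	static struct gpiomux_setting example_suspended_config = {
		.func = GPIOMUX_FUNC_GPIO,
		.drv = GPIOMUX_DRV_2MA,
		.pull = GPIOMUX_PULL_DOWN,
	};

	static struct msm_gpiomux_config example_configs[] __initdata = {
		{
			.gpio = 50,	/* illustrative pad */
			.settings = {
				[GPIOMUX_ACTIVE]    = &example_active_config,
				[GPIOMUX_SUSPENDED] = &example_suspended_config,
			},
		},
	};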
diff --git a/arch/arm/mach-msm/board-samarium.c b/arch/arm/mach-msm/board-samarium.c
index 6133983..9f865a9 100644
--- a/arch/arm/mach-msm/board-samarium.c
+++ b/arch/arm/mach-msm/board-samarium.c
@@ -16,6 +16,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/memory.h>
+#include <linux/msm_tsens.h>
 #include <asm/hardware/gic.h>
 #include <asm/mach/map.h>
 #include <asm/mach/arch.h>
@@ -109,6 +110,7 @@
 	msm_init_modem_notifier_list();
 	msm_smd_init();
 	msm_clock_init(&msm_dummy_clock_init_data);
+	tsens_tm_init_driver();
 }
 
 static void __init msmsamarium_map_io(void)
diff --git a/arch/arm/mach-msm/clock-8084.c b/arch/arm/mach-msm/clock-8084.c
index 4e47064..fd1714c 100644
--- a/arch/arm/mach-msm/clock-8084.c
+++ b/arch/arm/mach-msm/clock-8084.c
@@ -397,6 +397,10 @@
 	CLK_DUMMY("core_clk", qdss_clk.c, "fc355000.funnel", OFF),
 	CLK_DUMMY("core_clk", qdss_clk.c, "fc36c000.funnel", OFF),
 	CLK_DUMMY("core_clk", qdss_clk.c, "fc302000.stm", OFF),
+	CLK_DUMMY("core_clk", qdss_clk.c, "fc34c000.etm", OFF),
+	CLK_DUMMY("core_clk", qdss_clk.c, "fc34d000.etm", OFF),
+	CLK_DUMMY("core_clk", qdss_clk.c, "fc34e000.etm", OFF),
+	CLK_DUMMY("core_clk", qdss_clk.c, "fc34f000.etm", OFF),
 	CLK_DUMMY("core_clk", qdss_clk.c, "fc310000.cti", OFF),
 	CLK_DUMMY("core_clk", qdss_clk.c, "fc311000.cti", OFF),
 	CLK_DUMMY("core_clk", qdss_clk.c, "fc312000.cti", OFF),
@@ -422,6 +426,10 @@
 	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc355000.funnel", OFF),
 	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc36c000.funnel", OFF),
 	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc302000.stm", OFF),
+	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc34c000.etm", OFF),
+	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc34d000.etm", OFF),
+	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc34e000.etm", OFF),
+	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc34f000.etm", OFF),
 	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc310000.cti", OFF),
 	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc311000.cti", OFF),
 	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc312000.cti", OFF),
@@ -436,6 +444,9 @@
 	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc342000.cti", OFF),
 	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc343000.cti", OFF),
 	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc344000.cti", OFF),
+
+	CLK_DUMMY("iface_clk", gcc_prng_ahb_clk.c,
+					"f9bff000.qcom,msm-rng", OFF),
 };
 
 struct clock_init_data msm8084_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/clock-8092.c b/arch/arm/mach-msm/clock-8092.c
index ec7a4b0..802cd1b 100644
--- a/arch/arm/mach-msm/clock-8092.c
+++ b/arch/arm/mach-msm/clock-8092.c
@@ -304,6 +304,9 @@
 	CLK_DUMMY("core_clk", NULL, "fdc84000.qcom,iommu", OFF),
 	CLK_DUMMY("iface_clk", NULL, "fdee4000.qcom,iommu", OFF),
 	CLK_DUMMY("core_clk", NULL, "fdee4000.qcom,iommu", OFF),
+	CLK_DUMMY("iface_clk", NULL, "fdfb6000.qcom,iommu", OFF),
+	CLK_DUMMY("core_clk", NULL, "fdfb6000.qcom,iommu", OFF),
+	CLK_DUMMY("alt_core_clk", NULL, "fdfb6000.qcom,iommu", OFF),
 	/* BCSS broadcast */
 	CLK_DUMMY("",	bcc_dem_core_b_clk_src.c,	"", OFF),
 	CLK_DUMMY("",	adc_01_clk_src.c,	"", OFF),
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index 0cd5d55..383af54 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -3251,6 +3251,7 @@
 	CLK_LOOKUP("xo",                 cxo_otg_clk.c, "f9a55000.usb"),
 	CLK_LOOKUP("iface_clk",   gcc_usb_hs_ahb_clk.c, "f9a55000.usb"),
 	CLK_LOOKUP("core_clk", gcc_usb_hs_system_clk.c, "f9a55000.usb"),
+	CLK_LOOKUP("sleep_clk", gcc_usb2a_phy_sleep_clk.c, "f9a55000.usb"),
 
 	/* SPS CLOCKS */
 	CLK_LOOKUP("dfab_clk",            pnoc_sps_clk.c, "f9984000.qcom,sps"),
@@ -3309,10 +3310,6 @@
 	CLK_LOOKUP("iface_clk", gcc_sdcc3_ahb_clk.c, "msm_sdcc.3"),
 	CLK_LOOKUP("core_clk", gcc_sdcc3_apps_clk.c, "msm_sdcc.3"),
 
-	CLK_LOOKUP("sleep_a_clk", gcc_usb2a_phy_sleep_clk.c, "msm_dwc3"),
-	CLK_LOOKUP("ref_clk", diff_clk.c, "msm_dwc3"),
-
-
 	CLK_LOOKUP("bus_clk", pnoc_clk.c, ""),
 	CLK_LOOKUP("bus_clk", pnoc_a_clk.c, ""),
 	CLK_LOOKUP("bus_clk", snoc_clk.c, ""),
@@ -3616,6 +3613,13 @@
 	 */
 	clk_prepare_enable(&xo_a_clk.c);
 
+	/*
+	 * Handoff will override the prepare enable count as well as the rate
+	 * Handoff will override the prepare enable count as well as the rate.
+	 */
+	clk_set_rate(&mmssnoc_ahb_a_clk.c, 40000000);
+	clk_prepare_enable(&mmssnoc_ahb_a_clk.c);
+
 	/* Set an initial rate (fmax at nominal) on the MMSSNOC AXI clock */
 	clk_set_rate(&axi_clk_src.c, 200000000);
 
@@ -3693,6 +3697,9 @@
 	if (IS_ERR(vdd_sr2_pll.regulator[1]))
 		panic("clock-8226: Unable to get the vdd_sr2_dig regulator!");
 
+
+	enable_rpm_scaling();
+
 	/*
 	 * Hold an active set vote at a rate of 40MHz for the MMSS NOC AHB
 	 * source. Sleep set vote is 0.
@@ -3700,8 +3707,7 @@
 	 * access mmss clock controller registers.
 	 */
 	clk_set_rate(&mmssnoc_ahb_a_clk.c, 40000000);
-
-	enable_rpm_scaling();
+	clk_prepare_enable(&mmssnoc_ahb_a_clk.c);
 
 	reg_init();
 
diff --git a/arch/arm/mach-msm/clock-8610.c b/arch/arm/mach-msm/clock-8610.c
index b1b9397..4fb3f43 100644
--- a/arch/arm/mach-msm/clock-8610.c
+++ b/arch/arm/mach-msm/clock-8610.c
@@ -2749,7 +2749,7 @@
 };
 
 static struct clk_lookup msm_clocks_8610[] = {
-	CLK_LOOKUP("xo",	cxo_otg_clk.c, "msm_otg"),
+	CLK_LOOKUP("xo",	cxo_otg_clk.c, "f9a55000.usb"),
 	CLK_LOOKUP("xo",	cxo_lpass_pil_clk.c, "fe200000.qcom,lpass"),
 	CLK_LOOKUP("xo",        cxo_lpm_clk.c, "fc4281d0.qcom,mpm"),
 
@@ -2924,7 +2924,7 @@
 	CLK_LOOKUP("core_clk",           gcc_sdcc1_apps_clk.c, "msm_sdcc.1"),
 	CLK_LOOKUP("iface_clk",            gcc_sdcc2_ahb_clk.c, "msm_sdcc.2"),
 	CLK_LOOKUP("core_clk",           gcc_sdcc2_apps_clk.c, "msm_sdcc.2"),
-	CLK_LOOKUP("core_clk",      gcc_usb2a_phy_sleep_clk.c, ""),
+	CLK_LOOKUP("sleep_clk",      gcc_usb2a_phy_sleep_clk.c, "f9a55000.usb"),
 	CLK_LOOKUP("iface_clk",           gcc_usb_hs_ahb_clk.c, "f9a55000.usb"),
 	CLK_LOOKUP("core_clk",        gcc_usb_hs_system_clk.c, "f9a55000.usb"),
 
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index a8cb46e..cd6d582 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -718,17 +718,38 @@
 DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a1_pin, cxo_a1_a_pin, A1_ID);
 DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a2_pin, cxo_a2_a_pin, A2_ID);
 
+static unsigned int soft_vote_gpll0;
+
+static struct pll_vote_clk gpll0_ao_clk_src = {
+	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
+	.en_mask = BIT(0),
+	.status_reg = (void __iomem *)GPLL0_STATUS_REG,
+	.status_mask = BIT(17),
+	.soft_vote = &soft_vote_gpll0,
+	.soft_vote_mask = PLL_SOFT_VOTE_ACPU,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.parent = &cxo_a_clk_src.c,
+		.rate = 600000000,
+		.dbg_name = "gpll0_ao_clk_src",
+		.ops = &clk_ops_pll_acpu_vote,
+		CLK_INIT(gpll0_ao_clk_src.c),
+	},
+};
+
 static struct pll_vote_clk gpll0_clk_src = {
 	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
 	.en_mask = BIT(0),
 	.status_reg = (void __iomem *)GPLL0_STATUS_REG,
 	.status_mask = BIT(17),
+	.soft_vote = &soft_vote_gpll0,
+	.soft_vote_mask = PLL_SOFT_VOTE_PRIMARY,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
 		.parent = &cxo_clk_src.c,
 		.rate = 600000000,
 		.dbg_name = "gpll0_clk_src",
-		.ops = &clk_ops_pll_vote,
+		.ops = &clk_ops_pll_acpu_vote,
 		CLK_INIT(gpll0_clk_src.c),
 	},
 };
@@ -4895,6 +4916,9 @@
 
 	CLK_LOOKUP("measure",	measure_clk.c,	"debug"),
 
+	CLK_LOOKUP("gpll0", gpll0_clk_src.c, ""),
+	CLK_LOOKUP("gpll0_ao", gpll0_ao_clk_src.c, ""),
+
 	CLK_LOOKUP("dma_bam_pclk", gcc_bam_dma_ahb_clk.c, "msm_sps"),
 	CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f991f000.serial"),
 	CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f9924000.i2c"),
@@ -5173,6 +5197,25 @@
 	CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
 		"fda0a000.qcom,ispif"),
 
+	CLK_LOOKUP("vfe0_clk_src", vfe0_clk_src.c, "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("camss_vfe_vfe0_clk", camss_vfe_vfe0_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("camss_csi_vfe0_clk", camss_csi_vfe0_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("vfe1_clk_src", vfe1_clk_src.c, "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("camss_vfe_vfe1_clk", camss_vfe_vfe1_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("camss_csi_vfe1_clk", camss_csi_vfe1_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_src_clk", csi0_clk_src.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_clk", camss_csi0_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_pix_clk", camss_csi0pix_clk.c,
+			   "fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_rdi_clk", camss_csi0rdi_clk.c,
+			   "fda0a000.qcom,ispif"),
+
 	/*VFE clocks*/
 	CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
 					"fda10000.qcom,vfe"),
diff --git a/arch/arm/mach-msm/clock-debug.c b/arch/arm/mach-msm/clock-debug.c
index fc32a59..35917c3 100644
--- a/arch/arm/mach-msm/clock-debug.c
+++ b/arch/arm/mach-msm/clock-debug.c
@@ -232,6 +232,83 @@
 	.release	= seq_release,
 };
 
+#define clock_debug_output(m, c, fmt, ...)		\
+do {							\
+	if (m)						\
+		seq_printf(m, fmt, ##__VA_ARGS__);	\
+	else if (c)					\
+		pr_cont(fmt, ##__VA_ARGS__);		\
+	else						\
+		pr_info(fmt, ##__VA_ARGS__);		\
+} while (0)
+
+static int clock_debug_print_clock(struct clk *c, struct seq_file *m)
+{
+	char *start = "";
+
+	if (!c || !c->prepare_count)
+		return 0;
+
+	clock_debug_output(m, 0, "\t");
+	do {
+		if (c->vdd_class)
+			clock_debug_output(m, 1, "%s%s:%u:%u [%ld, %lu]", start,
+				c->dbg_name, c->prepare_count, c->count,
+				c->rate, c->vdd_class->cur_level);
+		else
+			clock_debug_output(m, 1, "%s%s:%u:%u [%ld]", start,
+				c->dbg_name, c->prepare_count, c->count,
+				c->rate);
+		start = " -> ";
+	} while ((c = clk_get_parent(c)));
+
+	clock_debug_output(m, 1, "\n");
+
+	return 1;
+}
+
+/**
+ * clock_debug_print_enabled_clocks() - Print names of enabled clocks
+ * @m: seq_file to write the list to, or NULL to print via the kernel log
+ */
+static void clock_debug_print_enabled_clocks(struct seq_file *m)
+{
+	struct clk_table *table;
+	unsigned long flags;
+	int i, cnt = 0;
+
+	clock_debug_output(m, 0, "Enabled clocks:\n");
+	spin_lock_irqsave(&clk_list_lock, flags);
+	list_for_each_entry(table, &clk_list, node) {
+		for (i = 0; i < table->num_clocks; i++)
+			cnt += clock_debug_print_clock(table->clocks[i].clk, m);
+	}
+	spin_unlock_irqrestore(&clk_list_lock, flags);
+
+	if (cnt)
+		clock_debug_output(m, 0, "Enabled clock count: %d\n", cnt);
+	else
+		clock_debug_output(m, 0, "No clocks enabled.\n");
+}
+
+static int enabled_clocks_show(struct seq_file *m, void *unused)
+{
+	clock_debug_print_enabled_clocks(m);
+	return 0;
+}
+
+static int enabled_clocks_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, enabled_clocks_show, inode->i_private);
+}
+
+static const struct file_operations enabled_clocks_fops = {
+	.open		= enabled_clocks_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
 static int list_rates_show(struct seq_file *m, void *unused)
 {
 	struct clk *clock = m->private;
@@ -415,6 +492,10 @@
 		return -ENOMEM;
 	}
 
+	if (!debugfs_create_file("enabled_clocks", S_IRUGO, debugfs_base, NULL,
+				&enabled_clocks_fops))
+		return -ENOMEM;
+
 	measure = clk_get_sys("debug", "measure");
 	if (IS_ERR(measure))
 		measure = NULL;
@@ -461,55 +542,13 @@
 	return ret;
 }
 
-static int clock_debug_print_clock(struct clk *c)
-{
-	char *start = "";
-
-	if (!c || !c->prepare_count)
-		return 0;
-
-	pr_info("\t");
-	do {
-		if (c->vdd_class)
-			pr_cont("%s%s:%u:%u [%ld, %lu]", start, c->dbg_name,
-				c->prepare_count, c->count, c->rate,
-				c->vdd_class->cur_level);
-		else
-			pr_cont("%s%s:%u:%u [%ld]", start, c->dbg_name,
-				c->prepare_count, c->count, c->rate);
-		start = " -> ";
-	} while ((c = clk_get_parent(c)));
-
-	pr_cont("\n");
-
-	return 1;
-}
-
-/**
- * clock_debug_print_enabled() - Print names of enabled clocks for suspend debug
- *
+/*
  * Print the names of enabled clocks and their parents if debug_suspend is set
  */
 void clock_debug_print_enabled(void)
 {
-	struct clk_table *table;
-	unsigned long flags;
-	int i, cnt = 0;
-
 	if (likely(!debug_suspend))
 		return;
 
-	pr_info("Enabled clocks:\n");
-	spin_lock_irqsave(&clk_list_lock, flags);
-	list_for_each_entry(table, &clk_list, node) {
-		for (i = 0; i < table->num_clocks; i++)
-			cnt += clock_debug_print_clock(table->clocks[i].clk);
-	}
-	spin_unlock_irqrestore(&clk_list_lock, flags);
-
-	if (cnt)
-		pr_info("Enabled clock count: %d\n", cnt);
-	else
-		pr_info("No clocks enabled.\n");
-
+	clock_debug_print_enabled_clocks(NULL);
 }
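With this refactor one routine serves both sinks: the new enabled_clocks debugfs file passes its seq_file through, while the suspend-debug path passes NULL and falls back to pr_info/pr_cont. Condensed, the two call paths added above look like this (scaffolding omitted; the names are the ones introduced in this file):

	#include <linux/seq_file.h>

	static int example_show(struct seq_file *m, void *unused)
	{
		clock_debug_print_enabled_clocks(m);	/* debugfs: seq_printf */
		return 0;
	}

	static void example_suspend_debug(void)
	{
		clock_debug_print_enabled_clocks(NULL);	/* kernel log: pr_info */
	}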
diff --git a/arch/arm/mach-msm/clock-krait.c b/arch/arm/mach-msm/clock-krait.c
new file mode 100644
index 0000000..b96ba62
--- /dev/null
+++ b/arch/arm/mach-msm/clock-krait.c
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+
+#include <linux/clk.h>
+#include <mach/clk-provider.h>
+#include <mach/clk.h>
+#include <mach/clock-generic.h>
+#include <mach/msm-krait-l2-accessors.h>
+#include "clock-krait.h"
+#include "avs.h"
+
+static DEFINE_SPINLOCK(kpss_clock_reg_lock);
+
+static void __kpss_mux_set_sel(struct mux_clk *mux, int sel)
+{
+	unsigned long flags;
+	u32 regval;
+
+	spin_lock_irqsave(&kpss_clock_reg_lock, flags);
+	regval = get_l2_indirect_reg(mux->offset);
+	regval &= ~(mux->mask << mux->shift);
+	regval |= (sel & mux->mask) << mux->shift;
+	set_l2_indirect_reg(mux->offset, regval);
+	spin_unlock_irqrestore(&kpss_clock_reg_lock, flags);
+
+	/* Wait for switch to complete. */
+	mb();
+	udelay(1);
+}
+static int kpss_mux_set_sel(struct mux_clk *mux, int sel)
+{
+	mux->en_mask = sel;
+	if (mux->c.count)
+		__kpss_mux_set_sel(mux, sel);
+	return 0;
+}
+
+static int kpss_mux_get_sel(struct mux_clk *mux)
+{
+	u32 sel;
+
+	sel = get_l2_indirect_reg(mux->offset);
+	sel >>= mux->shift;
+	sel &= mux->mask;
+	mux->en_mask = sel;
+
+	return sel;
+}
+
+static int kpss_mux_enable(struct mux_clk *mux)
+{
+	__kpss_mux_set_sel(mux, mux->en_mask);
+	return 0;
+}
+
+static void kpss_mux_disable(struct mux_clk *mux)
+{
+	__kpss_mux_set_sel(mux, mux->safe_sel);
+}
+
+struct clk_mux_ops clk_mux_ops_kpss = {
+	.enable = kpss_mux_enable,
+	.disable = kpss_mux_disable,
+	.set_mux_sel = kpss_mux_set_sel,
+	.get_mux_sel = kpss_mux_get_sel,
+};
+
+/*
+ * The divider can divide by 2, 4, 6 and 8. But we only really need div-2. So
+ * force it to div-2 during handoff and treat it like a fixed div-2 clock.
+ */
+static int kpss_div2_get_div(struct div_clk *div)
+{
+	unsigned long flags;
+	u32 regval;
+	int val;
+
+	spin_lock_irqsave(&kpss_clock_reg_lock, flags);
+	regval = get_l2_indirect_reg(div->offset);
+	val = (regval >> div->shift) & div->mask;
+	regval &= ~(div->mask << div->shift);
+	set_l2_indirect_reg(div->offset, regval);
+	spin_unlock_irqrestore(&kpss_clock_reg_lock, flags);
+
+	val = (val + 1) * 2;
+	WARN(val != 2, "Divider %s was configured to div-%d instead of 2!\n",
+		div->c.dbg_name, val);
+
+	return 2;
+}
+
+struct clk_div_ops clk_div_ops_kpss_div2 = {
+	.get_div = kpss_div2_get_div,
+};
+
+#define LOCK_BIT	BIT(16)
+
+/* Initialize a HFPLL at a given rate and enable it. */
+static void __hfpll_clk_init_once(struct clk *c)
+{
+	struct hfpll_clk *h = to_hfpll_clk(c);
+	struct hfpll_data const *hd = h->d;
+
+	if (likely(h->init_done))
+		return;
+
+	/* Configure PLL parameters for integer mode. */
+	writel_relaxed(hd->config_val, h->base + hd->config_offset);
+	writel_relaxed(0, h->base + hd->m_offset);
+	writel_relaxed(1, h->base + hd->n_offset);
+
+	if (hd->user_offset) {
+		u32 regval = hd->user_val;
+		unsigned long rate;
+
+		rate = readl_relaxed(h->base + hd->l_offset) * h->src_rate;
+
+		/* Pick the right VCO. */
+		if (rate > hd->low_vco_max_rate)
+			regval |= hd->user_vco_mask;
+		writel_relaxed(regval, h->base + hd->user_offset);
+	}
+
+	if (hd->droop_offset)
+		writel_relaxed(hd->droop_val, h->base + hd->droop_offset);
+
+	h->init_done = true;
+}
+
+/* Enable an already-configured HFPLL. */
+static int hfpll_clk_enable(struct clk *c)
+{
+	struct hfpll_clk *h = to_hfpll_clk(c);
+	struct hfpll_data const *hd = h->d;
+
+	if (!h->base)
+		return -ENODEV;
+
+	__hfpll_clk_init_once(c);
+
+	/* Disable PLL bypass mode. */
+	writel_relaxed(0x2, h->base + hd->mode_offset);
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	writel_relaxed(0x6, h->base + hd->mode_offset);
+
+	/* Wait for PLL to lock. */
+	if (hd->status_offset) {
+		while (!(readl_relaxed(h->base + hd->status_offset) & LOCK_BIT))
+			;
+	} else {
+		mb();
+		udelay(60);
+	}
+
+	/* Enable PLL output. */
+	writel_relaxed(0x7, h->base + hd->mode_offset);
+
+	/* Make sure the enable is done before returning. */
+	mb();
+
+	return 0;
+}
+
+static void hfpll_clk_disable(struct clk *c)
+{
+	struct hfpll_clk *h = to_hfpll_clk(c);
+	struct hfpll_data const *hd = h->d;
+
+	/*
+	 * Disable the PLL output, disable test mode, enable the bypass mode,
+	 * and assert the reset.
+	 */
+	writel_relaxed(0, h->base + hd->mode_offset);
+}
+
+static long hfpll_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct hfpll_clk *h = to_hfpll_clk(c);
+	struct hfpll_data const *hd = h->d;
+	unsigned long rrate;
+
+	if (!h->src_rate)
+		return 0;
+
+	rate = max(rate, hd->min_rate);
+	rate = min(rate, hd->max_rate);
+
+	rrate = DIV_ROUND_UP(rate, h->src_rate) * h->src_rate;
+	if (rrate > hd->max_rate)
+		rrate -= h->src_rate;
+
+	return rrate;
+}
+
+/*
+ * For optimization reasons, assumes no downstream clocks are actively using
+ * it.
+ */
+static int hfpll_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct hfpll_clk *h = to_hfpll_clk(c);
+	struct hfpll_data const *hd = h->d;
+	unsigned long flags;
+	u32 l_val;
+
+	if (!h->base)
+		return -ENODEV;
+
+	if (rate != hfpll_clk_round_rate(c, rate))
+		return -EINVAL;
+
+	l_val = rate / h->src_rate;
+
+	spin_lock_irqsave(&c->lock, flags);
+
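+	/* If the PLL is in use, turn it off while the VCO and L value change. */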
+	if (c->count)
+		hfpll_clk_disable(c);
+
+	/* Pick the right VCO. */
+	if (hd->user_offset) {
+		u32 regval;
+		regval = readl_relaxed(h->base + hd->user_offset);
+		if (rate <= hd->low_vco_max_rate)
+			regval &= ~hd->user_vco_mask;
+		else
+			regval |= hd->user_vco_mask;
+		writel_relaxed(regval, h->base + hd->user_offset);
+	}
+
+	writel_relaxed(l_val, h->base + hd->l_offset);
+
+	if (c->count)
+		hfpll_clk_enable(c);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+
+	return 0;
+}
+
+static enum handoff hfpll_clk_handoff(struct clk *c)
+{
+	struct hfpll_clk *h = to_hfpll_clk(c);
+	struct hfpll_data const *hd = h->d;
+	u32 l_val, mode;
+
+	if (!hd)
+		return HANDOFF_DISABLED_CLK;
+
+	if (!h->base)
+		return HANDOFF_DISABLED_CLK;
+
+	/* Assume parent rate doesn't change and cache it. */
+	h->src_rate = clk_get_rate(c->parent);
+	l_val = readl_relaxed(h->base + hd->l_offset);
+	c->rate = l_val * h->src_rate;
+
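+	/* Mode 0x7: bypass off, reset de-asserted, output enabled (fully on). */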
+	mode = readl_relaxed(h->base + hd->mode_offset) & 0x7;
+	if (mode != 0x7) {
+		__hfpll_clk_init_once(c);
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if (hd->status_offset &&
+		!(readl_relaxed(h->base + hd->status_offset) & LOCK_BIT)) {
+		WARN(1, "HFPLL %s is ON, but not locked!\n", c->dbg_name);
+		hfpll_clk_disable(c);
+		__hfpll_clk_init_once(c);
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	WARN(c->rate < hd->min_rate || c->rate > hd->max_rate,
+		"HFPLL %s rate %lu outside spec!\n", c->dbg_name, c->rate);
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+struct clk_ops clk_ops_hfpll = {
+	.enable = hfpll_clk_enable,
+	.disable = hfpll_clk_disable,
+	.round_rate = hfpll_clk_round_rate,
+	.set_rate = hfpll_clk_set_rate,
+	.handoff = hfpll_clk_handoff,
+};
+
+struct cpu_hwcg_action {
+	bool read;
+	bool enable;
+};
+
+static void cpu_hwcg_rw(void *info)
+{
+	struct cpu_hwcg_action *action = info;
+
+	u32 val;
+	asm volatile ("mrc p15, 7, %[cpmr0], c15, c0, 5\n\t"
+			: [cpmr0]"=r" (val));
+
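+	/* CPMR0 bit 0 set disables HW clock gating for this CPU. */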
+	if (action->read) {
+		action->enable = !(val & BIT(0));
+		return;
+	}
+
+	if (action->enable)
+		val &= ~BIT(0);
+	else
+		val |= BIT(0);
+
+	asm volatile ("mcr p15, 7, %[cpmr0], c15, c0, 5\n\t"
+			: : [cpmr0]"r" (val));
+}
+
+static void kpss_cpu_enable_hwcg(struct clk *c)
+{
+	struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+	struct cpu_hwcg_action action = { .enable = true };
+
+	smp_call_function_single(cpu->id, cpu_hwcg_rw, &action, 1);
+}
+
+static void kpss_cpu_disable_hwcg(struct clk *c)
+{
+	struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+	struct cpu_hwcg_action action = { .enable = false };
+
+	smp_call_function_single(cpu->id, cpu_hwcg_rw, &action, 1);
+}
+
+static int kpss_cpu_in_hwcg_mode(struct clk *c)
+{
+	struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+	struct cpu_hwcg_action action = { .read = true };
+
+	smp_call_function_single(cpu->id, cpu_hwcg_rw, &action, 1);
+	return action.enable;
+}
+
+static enum handoff kpss_cpu_handoff(struct clk *c)
+{
+	struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+
+	c->rate = clk_get_rate(c->parent);
+
+	/*
+	 * Don't unnecessarily turn on the parents for an offline CPU and
+	 * then have them turned off at late init.
+	 */
+	return (cpu_online(cpu->id) ?
+		HANDOFF_ENABLED_CLK : HANDOFF_DISABLED_CLK);
+}
+
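+/* Look up the AVS DSCR value for a rate; returns 0 if no entry matches. */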
+u32 find_dscr(struct avs_data *t, unsigned long rate)
+{
+	int i;
+
+	if (!t)
+		return 0;
+
+	for (i = 0; i < t->num; i++) {
+		if (t->rate[i] == rate)
+			return t->dscr[i];
+	}
+
+	return 0;
+}
+
+static int kpss_cpu_pre_set_rate(struct clk *c, unsigned long new_rate)
+{
+	struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+	u32 dscr = find_dscr(cpu->avs_tbl, c->rate);
+
+	if (!c->prepare_count)
+		return -ENODEV;
+
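+	/* AVS is re-enabled in kpss_cpu_post_set_rate() after the switch. */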
+	if (dscr)
+		AVS_DISABLE(cpu->id);
+	return 0;
+}
+
+static long kpss_core_round_rate(struct clk *c, unsigned long rate)
+{
+	if (c->fmax && c->num_fmax)
+		rate = min(rate, c->fmax[c->num_fmax-1]);
+
+	return clk_round_rate(c->parent, rate);
+}
+
+static int kpss_core_set_rate(struct clk *c, unsigned long rate)
+{
+	if (!c->prepare_count)
+		return -ENODEV;
+
+	return clk_set_rate(c->parent, rate);
+}
+
+static void kpss_cpu_post_set_rate(struct clk *c, unsigned long old_rate)
+{
+	struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+	u32 dscr = find_dscr(cpu->avs_tbl, c->rate);
+
+	/*
+	 * FIXME: If AVS enable/disable needs to be done in the
+	 * enable/disable op to correctly handle power collapse, then might
+	 * need to grab the spinlock here.
+	 */
+	if (dscr)
+		AVS_ENABLE(cpu->id, dscr);
+}
+
+static unsigned long kpss_core_get_rate(struct clk *c)
+{
+	return clk_get_rate(c->parent);
+}
+
+static long kpss_core_list_rate(struct clk *c, unsigned n)
+{
+	if (!c->fmax || c->num_fmax <= n)
+		return -ENXIO;
+
+	return c->fmax[n];
+}
+
+struct clk_ops clk_ops_kpss_cpu = {
+	.enable_hwcg = kpss_cpu_enable_hwcg,
+	.disable_hwcg = kpss_cpu_disable_hwcg,
+	.in_hwcg_mode = kpss_cpu_in_hwcg_mode,
+	.pre_set_rate = kpss_cpu_pre_set_rate,
+	.round_rate = kpss_core_round_rate,
+	.set_rate = kpss_core_set_rate,
+	.post_set_rate = kpss_cpu_post_set_rate,
+	.get_rate = kpss_core_get_rate,
+	.list_rate = kpss_core_list_rate,
+	.handoff = kpss_cpu_handoff,
+};
+
+#define SLPDLY_SHIFT		10
+#define SLPDLY_MASK		0x3
+static void kpss_l2_enable_hwcg(struct clk *c)
+{
+	struct kpss_core_clk *l2 = to_kpss_core_clk(c);
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kpss_clock_reg_lock, flags);
+	regval = get_l2_indirect_reg(l2->cp15_iaddr);
+	regval &= ~(SLPDLY_MASK << SLPDLY_SHIFT);
+	regval |= l2->l2_slp_delay;
+	set_l2_indirect_reg(l2->cp15_iaddr, regval);
+	spin_unlock_irqrestore(&kpss_clock_reg_lock, flags);
+}
+
+static void kpss_l2_disable_hwcg(struct clk *c)
+{
+	struct kpss_core_clk *l2 = to_kpss_core_clk(c);
+	u32 regval;
+	unsigned long flags;
+
+	/*
+	 * NOTE: Should not be called when HW clock gating is already
+	 * disabled.
+	 */
+	spin_lock_irqsave(&kpss_clock_reg_lock, flags);
+	regval = get_l2_indirect_reg(l2->cp15_iaddr);
+	l2->l2_slp_delay = regval & (SLPDLY_MASK << SLPDLY_SHIFT);
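+	/* All ones in the sleep-delay field turns L2 HW clock gating off. */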
+	regval |= (SLPDLY_MASK << SLPDLY_SHIFT);
+	set_l2_indirect_reg(l2->cp15_iaddr, regval);
+	spin_unlock_irqrestore(&kpss_clock_reg_lock, flags);
+}
+
+static int kpss_l2_in_hwcg_mode(struct clk *c)
+{
+	struct kpss_core_clk *l2 = to_kpss_core_clk(c);
+	u32 regval;
+
+	regval = get_l2_indirect_reg(l2->cp15_iaddr);
+	regval >>= SLPDLY_SHIFT;
+	regval &= SLPDLY_MASK;
+	return (regval != SLPDLY_MASK);
+}
+
+static enum handoff kpss_l2_handoff(struct clk *c)
+{
+	c->rate = clk_get_rate(c->parent);
+	return HANDOFF_ENABLED_CLK;
+}
+
+struct clk_ops clk_ops_kpss_l2 = {
+	.enable_hwcg = kpss_l2_enable_hwcg,
+	.disable_hwcg = kpss_l2_disable_hwcg,
+	.in_hwcg_mode = kpss_l2_in_hwcg_mode,
+	.round_rate = kpss_core_round_rate,
+	.set_rate = kpss_core_set_rate,
+	.get_rate = kpss_core_get_rate,
+	.list_rate = kpss_core_list_rate,
+	.handoff = kpss_l2_handoff,
+};
diff --git a/arch/arm/mach-msm/clock-krait.h b/arch/arm/mach-msm/clock-krait.h
new file mode 100644
index 0000000..2691a8c
--- /dev/null
+++ b/arch/arm/mach-msm/clock-krait.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_KRAIT_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_KRAIT_H
+
+#include <mach/clk-provider.h>
+#include <mach/clock-generic.h>
+
+extern struct clk_mux_ops clk_mux_ops_kpss;
+extern struct clk_div_ops clk_div_ops_kpss_div2;
+
+#define DEFINE_KPSS_DIV2_CLK(clk_name, _parent, _offset) \
+static struct div_clk clk_name = {		\
+	.div = 2,				\
+	.min_div = 2,				\
+	.max_div = 2,				\
+	.ops = &clk_div_ops_kpss_div2,		\
+	.offset = _offset,			\
+	.mask = 0x3,				\
+	.shift = 6,				\
+	.c = {					\
+		.parent = _parent,		\
+		.dbg_name = #clk_name,		\
+		.ops = &clk_ops_div,		\
+		.flags = CLKFLAG_NO_RATE_CACHE,	\
+		CLK_INIT(clk_name.c),		\
+	}					\
+}
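+/*
+ * Example usage (hypothetical clock name and L2 indirect register offset,
+ * shown for illustration only):
+ *
+ *	DEFINE_KPSS_DIV2_CLK(hfpll_l2_div_clk, &hfpll_l2.c, 0x500);
+ */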
+
+struct hfpll_data {
+	const u32 mode_offset;
+	const u32 l_offset;
+	const u32 m_offset;
+	const u32 n_offset;
+	const u32 user_offset;
+	const u32 droop_offset;
+	const u32 config_offset;
+	const u32 status_offset;
+
+	const u32 droop_val;
+	const u32 config_val;
+	const u32 user_val;
+	const u32 user_vco_mask;
+	unsigned long low_vco_max_rate;
+
+	unsigned long min_rate;
+	unsigned long max_rate;
+};
+
+struct hfpll_clk {
+	void __iomem *base;
+	struct hfpll_data const *d;
+	unsigned long	src_rate;
+	int		init_done;
+
+	struct clk	c;
+};
+
+static inline struct hfpll_clk *to_hfpll_clk(struct clk *c)
+{
+	return container_of(c, struct hfpll_clk, c);
+}
+
+extern struct clk_ops clk_ops_hfpll;
+
+struct avs_data {
+	unsigned long	*rate;
+	u32		*dscr;
+	int		num;
+};
+
+struct kpss_core_clk {
+	int		id;
+	u32		cp15_iaddr;
+	u32		l2_slp_delay;
+	struct avs_data	*avs_tbl;
+	struct clk	c;
+};
+
+static inline struct kpss_core_clk *to_kpss_core_clk(struct clk *c)
+{
+	return container_of(c, struct kpss_core_clk, c);
+}
+
+extern struct clk_ops clk_ops_kpss_cpu;
+extern struct clk_ops clk_ops_kpss_l2;
+
+#endif
diff --git a/arch/arm/mach-msm/clock-mdss-8974.c b/arch/arm/mach-msm/clock-mdss-8974.c
index aeb4e48..47332a4 100644
--- a/arch/arm/mach-msm/clock-mdss-8974.c
+++ b/arch/arm/mach-msm/clock-mdss-8974.c
@@ -1135,7 +1135,7 @@
 	return rc;
 }
 
-static int vco_enable(struct clk *c)
+static int dsi_pll_enable(struct clk *c)
 {
 	int i, rc = 0;
 	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
@@ -1163,7 +1163,7 @@
 	return rc;
 }
 
-static void vco_disable(struct clk *c)
+static void dsi_pll_disable(struct clk *c)
 {
 	int rc = 0;
 
@@ -1384,19 +1384,32 @@
 
 static int vco_prepare(struct clk *c)
 {
-	return vco_set_rate(c, vco_cached_rate);
+	int rc = 0;
+
+	if (vco_cached_rate != 0) {
+		rc = vco_set_rate(c, vco_cached_rate);
+		if (rc) {
+			pr_err("%s: vco_set_rate failed. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+	}
+
+	rc = dsi_pll_enable(c);
+
+error:
+	return rc;
 }
 
 static void vco_unprepare(struct clk *c)
 {
 	vco_cached_rate = c->rate;
+	dsi_pll_disable(c);
 }
 
 /* Op structures */
 
 static struct clk_ops clk_ops_dsi_vco = {
-	.enable = vco_enable,
-	.disable = vco_disable,
 	.set_rate = vco_set_rate,
 	.round_rate = vco_round_rate,
 	.handoff = vco_handoff,
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index 527d73d..e3c11c5 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -90,16 +90,17 @@
 			continue;
 
 		rc = regulator_set_optimum_mode(r[i], ua[lvl_base + i]);
-		if (rc < 0)
+		rc = rc > 0 ? 0 : rc;
+		if (rc)
 			goto set_mode_fail;
 	}
 	if (vdd_class->set_vdd && !vdd_class->num_regulators)
 		rc = vdd_class->set_vdd(vdd_class, level);
 
-	if (rc < 0)
+	if (!rc)
 		vdd_class->cur_level = level;
 
-	return 0;
+	return rc;
 
 set_mode_fail:
 	regulator_set_voltage(r[i], uv[vdd_class->cur_level * n_reg + i],
diff --git a/arch/arm/mach-msm/include/mach/msm_smem.h b/arch/arm/mach-msm/include/mach/msm_smem.h
index 64ab6bf..a121791 100644
--- a/arch/arm/mach-msm/include/mach/msm_smem.h
+++ b/arch/arm/mach-msm/include/mach/msm_smem.h
@@ -136,6 +136,7 @@
 	SMEM_BAM_PIPE_MEMORY,     /* 468 */
 	SMEM_IMAGE_VERSION_TABLE, /* 469 */
 	SMEM_LC_DEBUGGER, /* 470 */
+	SMEM_FLASH_NAND_DEV_INFO, /* 471 */
 	SMEM_NUM_ITEMS,
 };
 
diff --git a/arch/arm/mach-msm/include/mach/qseecomi.h b/arch/arm/mach-msm/include/mach/qseecomi.h
index a4021d4..688dea0 100644
--- a/arch/arm/mach-msm/include/mach/qseecomi.h
+++ b/arch/arm/mach-msm/include/mach/qseecomi.h
@@ -46,8 +46,12 @@
 	QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
 	QSEOS_APP_REGION_NOTIFICATION,
 	QSEOS_REGISTER_LOG_BUF_COMMAND,
-	QSEE_RPMB_PROVISION_KEY_COMMAND,
-	QSEE_RPMB_ERASE_COMMAND,
+	QSEOS_RPMB_PROVISION_KEY_COMMAND,
+	QSEOS_RPMB_ERASE_COMMAND,
+	QSEOS_GENERATE_KEY  = 0x11,
+	QSEOS_DELETE_KEY,
+	QSEOS_MAX_KEY_COUNT,
+	QSEOS_SET_KEY,
 	QSEOS_CMD_MAX     = 0xEFFFFFFF
 };
 
@@ -57,15 +61,6 @@
 	QSEOS_RESULT_FAILURE  = 0xFFFFFFFF
 };
 
-/* Key Management requests */
-enum qseecom_qceos_key_gen_cmd_id {
-	QSEOS_GENERATE_KEY  = 0x11,
-	QSEOS_DELETE_KEY,
-	QSEOS_MAX_KEY_COUNT,
-	QSEOS_SET_KEY,
-	QSEOS_KEY_CMD_MAX   = 0xEFFFFFFF
-};
-
 enum qseecom_pipe_type {
 	QSEOS_PIPE_ENC = 0x1,
 	QSEOS_PIPE_ENC_XTS = 0x2,
diff --git a/arch/arm/mach-msm/ipc_router.c b/arch/arm/mach-msm/ipc_router.c
index c0cca1d..64986d0 100644
--- a/arch/arm/mach-msm/ipc_router.c
+++ b/arch/arm/mach-msm/ipc_router.c
@@ -1013,6 +1013,7 @@
 		return -EINVAL;
 	}
 
+	memset(&ctl, 0, sizeof(ctl));
 	ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
 
 	for (i = 0; i < SRV_HASH_SIZE; i++) {
@@ -1241,6 +1242,7 @@
 	mode = mode_info->mode;
 	xprt_info = mode_info->xprt_info;
 
+	memset(&msg, 0, sizeof(msg));
 	msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
 	msg.cli.node_id = node_id;
 	msg.cli.port_id = port_id;
@@ -1295,6 +1297,7 @@
 	D("Remove server %08x:%08x - %08x:%08x",
 	   server->name.service, server->name.instance,
 	   rport_ptr->node_id, rport_ptr->port_id);
+	memset(&ctl, 0, sizeof(ctl));
 	ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
 	ctl.srv.service = server->name.service;
 	ctl.srv.instance = server->name.instance;
@@ -1313,6 +1316,7 @@
 	union rr_control_msg ctl;
 	int j;
 
+	memset(&ctl, 0, sizeof(ctl));
 	for (j = 0; j < RP_HASH_SIZE; j++) {
 		list_for_each_entry_safe(rport_ptr, tmp_rport_ptr,
 				&rt_entry->remote_port_list[j], list) {
@@ -1847,7 +1851,7 @@
 process_done:
 		if (resume_tx) {
 			union rr_control_msg msg;
-
+			memset(&msg, 0, sizeof(msg));
 			msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
 			msg.cli.node_id = resume_tx_node_id;
 			msg.cli.port_id = resume_tx_port_id;
@@ -1900,6 +1904,7 @@
 		return -EINVAL;
 	}
 
+	memset(&ctl, 0, sizeof(ctl));
 	ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
 	ctl.srv.service = server->name.service;
 	ctl.srv.instance = server->name.instance;
@@ -1948,6 +1953,7 @@
 		return -ENODEV;
 	}
 
+	memset(&ctl, 0, sizeof(ctl));
 	ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
 	ctl.srv.service = server->name.service;
 	ctl.srv.instance = server->name.instance;
@@ -2433,6 +2439,7 @@
 		up_write(&local_ports_lock_lha2);
 
 		if (port_ptr->type == SERVER_PORT) {
+			memset(&msg, 0, sizeof(msg));
 			msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
 			msg.srv.service = port_ptr->port_name.service;
 			msg.srv.instance = port_ptr->port_name.instance;
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index 4d7c3d4..180d277 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -71,6 +71,10 @@
 static ssize_t msm_lpm_levels_attr_store(struct kobject *kobj,
 	struct kobj_attribute *attr, const char *buf, size_t count);
 
+#define ADJUST_LATENCY(x)	\
+	((x == MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE) ?\
+		(num_online_cpus()) / 2 : 0)
+
 static int msm_lpm_lvl_dbg_msk;
 
 module_param_named(
@@ -343,6 +347,9 @@
 	int i;
 	bool modify_event_timer;
 	uint32_t next_wakeup_us = time_param->sleep_us;
+	uint32_t lvl_latency_us = 0;
+	uint32_t lvl_overhead_us = 0;
+	uint32_t lvl_overhead_energy = 0;
 
 	if (!msm_lpm_levels)
 		return NULL;
@@ -358,24 +365,36 @@
 		if (sleep_mode != level->sleep_mode)
 			continue;
 
-		if (time_param->latency_us < level->latency_us)
+		lvl_latency_us =
+			level->latency_us + (level->latency_us *
+						ADJUST_LATENCY(sleep_mode));
+
+		lvl_overhead_us =
+			level->time_overhead_us + (level->time_overhead_us *
+						ADJUST_LATENCY(sleep_mode));
+
+		lvl_overhead_energy =
+			level->energy_overhead + level->energy_overhead *
+						ADJUST_LATENCY(sleep_mode);
+
+		if (time_param->latency_us < lvl_latency_us)
 			continue;
 
 		if (time_param->next_event_us &&
-			time_param->next_event_us < level->latency_us)
+			time_param->next_event_us < lvl_latency_us)
 			continue;
 
 		if (time_param->next_event_us) {
 			if ((time_param->next_event_us < time_param->sleep_us)
-			|| ((time_param->next_event_us - level->latency_us) <
+			|| ((time_param->next_event_us - lvl_latency_us) <
 				time_param->sleep_us)) {
 				modify_event_timer = true;
 				next_wakeup_us = time_param->next_event_us -
-						level->latency_us;
+						lvl_latency_us;
 			}
 		}
 
-		if (next_wakeup_us <= level->time_overhead_us)
+		if (next_wakeup_us <= lvl_overhead_us)
 			continue;
 
 		if ((MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE == sleep_mode)
@@ -384,22 +403,21 @@
 					break;
 
 		if (next_wakeup_us <= 1) {
-			pwr = level->energy_overhead;
-		} else if (next_wakeup_us <= level->time_overhead_us) {
-			pwr = level->energy_overhead / next_wakeup_us;
+			pwr = lvl_overhead_energy;
+		} else if (next_wakeup_us <= lvl_overhead_us) {
+			pwr = lvl_overhead_energy / next_wakeup_us;
 		} else if ((next_wakeup_us >> 10)
-				> level->time_overhead_us) {
+				> lvl_overhead_us) {
 			pwr = level->steady_state_power;
 		} else {
 			pwr = level->steady_state_power;
-			pwr -= (level->time_overhead_us *
+			pwr -= (lvl_overhead_us *
 				level->steady_state_power) /
 						next_wakeup_us;
-			pwr += level->energy_overhead / next_wakeup_us;
+			pwr += lvl_overhead_energy / next_wakeup_us;
 		}
 
 		if (!best_level || (best_level_pwr >= pwr)) {
-
 			best_level = level;
 			best_level_pwr = pwr;
 			if (power)
@@ -409,7 +427,7 @@
 					MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
 				time_param->modified_time_us =
 					time_param->next_event_us -
-						best_level->latency_us;
+						lvl_latency_us;
 			else
 				time_param->modified_time_us = 0;
 		}
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c
index 3a996eb..4538c4f 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -524,6 +524,7 @@
 		.mode = NOC_QOS_MODE_FIXED,
 		.qport = qports_ipa,
 		.mas_hw_id = MAS_IPA,
+		.hw_sel = MSM_BUS_NOC,
 	},
 	{
 		.id = MSM_BUS_MASTER_QDSS_ETR,
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index 4adbdc0..94281b1 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -285,6 +285,12 @@
 		return ERR_PTR(-EPERM);
 	}
 
+	if (phdr->p_filesz > phdr->p_memsz) {
+		pil_err(desc, "Segment %d: file size (%u) is greater than mem size (%u).\n",
+			num, phdr->p_filesz, phdr->p_memsz);
+		return ERR_PTR(-EINVAL);
+	}
+
 	seg = kmalloc(sizeof(*seg), GFP_KERNEL);
 	if (!seg)
 		return ERR_PTR(-ENOMEM);
@@ -513,51 +519,29 @@
 	int ret = 0, count;
 	phys_addr_t paddr;
 	char fw_name[30];
-	const struct firmware *fw = NULL;
-	const u8 *data;
 	int num = seg->num;
 
 	if (seg->filesz) {
 		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
 				desc->name, num);
-		ret = request_firmware(&fw, fw_name, desc->dev);
-		if (ret) {
-			pil_err(desc, "Failed to locate blob %s\n", fw_name);
+		ret = request_firmware_direct(fw_name, desc->dev, seg->paddr,
+					      seg->filesz);
+		if (ret < 0) {
+			pil_err(desc, "Failed to locate blob %s or blob is too big.\n",
+				fw_name);
 			return ret;
 		}
 
-		if (fw->size != seg->filesz) {
+		if (ret != seg->filesz) {
 			pil_err(desc, "Blob size %u doesn't match %lu\n",
-					fw->size, seg->filesz);
-			ret = -EPERM;
-			goto release_fw;
+					ret, seg->filesz);
+			return -EPERM;
 		}
-	}
-
-	/* Load the segment into memory */
-	count = seg->filesz;
-	paddr = seg->paddr;
-	data = fw ? fw->data : NULL;
-	while (count > 0) {
-		int size;
-		u8 __iomem *buf;
-
-		size = min_t(size_t, IOMAP_SIZE, count);
-		buf = ioremap(paddr, size);
-		if (!buf) {
-			pil_err(desc, "Failed to map memory\n");
-			ret = -ENOMEM;
-			goto release_fw;
-		}
-		memcpy(buf, data, size);
-		iounmap(buf);
-
-		count -= size;
-		paddr += size;
-		data += size;
+		ret = 0;
 	}
 
 	/* Zero out trailing memory */
+	paddr = seg->paddr + seg->filesz;
 	count = seg->sz - seg->filesz;
 	while (count > 0) {
 		int size;
@@ -567,8 +551,7 @@
 		buf = ioremap(paddr, size);
 		if (!buf) {
 			pil_err(desc, "Failed to map memory\n");
-			ret = -ENOMEM;
-			goto release_fw;
+			return -ENOMEM;
 		}
 		memset(buf, 0, size);
 		iounmap(buf);
@@ -583,8 +566,6 @@
 			pil_err(desc, "Blob%u failed verification\n", num);
 	}
 
-release_fw:
-	release_firmware(fw);
 	return ret;
 }
 
@@ -592,7 +573,8 @@
 {
 	int clk_ready = 0;
 
-	if (of_find_property(desc->dev->of_node,
+	if (desc->ops->proxy_unvote &&
+		of_find_property(desc->dev->of_node,
 				"qcom,gpio-proxy-unvote",
 				NULL)) {
 		clk_ready = of_get_named_gpio(desc->dev->of_node,
@@ -799,7 +781,7 @@
 		 "Invalid proxy unvote callback or a proxy timeout of 0"
 		 " was specified or no proxy unvote IRQ was specified.\n");
 
-	if (desc->proxy_unvote_irq > 0 && desc->ops->proxy_unvote) {
+	if (desc->proxy_unvote_irq) {
 		ret = request_threaded_irq(desc->proxy_unvote_irq,
 				  NULL,
 				  proxy_unvote_intr_handler,
diff --git a/arch/arm/mach-msm/pm-stats.c b/arch/arm/mach-msm/pm-stats.c
index 1bd9b46..ac4ed25 100644
--- a/arch/arm/mach-msm/pm-stats.c
+++ b/arch/arm/mach-msm/pm-stats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 
 #include "pm.h"
 
@@ -83,46 +84,19 @@
 }
 
 /*
- * Helper function of snprintf where buf is auto-incremented, size is auto-
- * decremented, and there is no return value.
- *
- * NOTE: buf and size must be l-values (e.g. variables)
- */
-#define SNPRINTF(buf, size, format, ...) \
-	do { \
-		if (size > 0) { \
-			int ret; \
-			ret = snprintf(buf, size, format, ## __VA_ARGS__); \
-			if (ret > size) { \
-				buf += size; \
-				size = 0; \
-			} else { \
-				buf += ret; \
-				size -= ret; \
-			} \
-		} \
-	} while (0)
-
-/*
  * Write out the power management statistics.
  */
-static int msm_pm_read_proc
-	(char *page, char **start, off_t off, int count, int *eof, void *data)
+
+static int msm_pm_stats_show(struct seq_file *m, void *v)
 {
-	unsigned int cpu = off / MSM_PM_STAT_COUNT;
-	int id = off % MSM_PM_STAT_COUNT;
-	char *p = page;
+	int cpu;
+	int bucket_count = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+	int bucket_shift = CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
 
-	if (count < 1024) {
-		*start = (char *) 0;
-		*eof = 0;
-		return 0;
-	}
-
-	if (cpu < num_possible_cpus()) {
+	for_each_possible_cpu(cpu) {
 		unsigned long flags;
 		struct msm_pm_time_stats *stats;
-		int i;
+		int i, id;
 		int64_t bucket_time;
 		int64_t s;
 		uint32_t ns;
@@ -130,59 +104,52 @@
 		spin_lock_irqsave(&msm_pm_stats_lock, flags);
 		stats = per_cpu(msm_pm_stats, cpu).stats;
 
-		/* Skip the disabled ones */
-		if (!stats[id].enabled) {
-			*p = '\0';
-			p++;
-			goto again;
-		}
+		for (id = 0; id < MSM_PM_STAT_COUNT; id++) {
+			/* Skip the disabled ones */
+			if (!stats[id].enabled)
+				continue;
 
-		s = stats[id].total_time;
-		ns = do_div(s, NSEC_PER_SEC);
-		SNPRINTF(p, count,
-			"[cpu %u] %s:\n"
-			"  count: %7d\n"
-			"  total_time: %lld.%09u\n",
-			cpu, stats[id].name,
-			stats[id].count,
-			s, ns);
-
-		bucket_time = stats[id].first_bucket_time;
-		for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
-			s = bucket_time;
+			s = stats[id].total_time;
 			ns = do_div(s, NSEC_PER_SEC);
-			SNPRINTF(p, count,
-				"   <%6lld.%09u: %7d (%lld-%lld)\n",
+			seq_printf(m,
+				"[cpu %u] %s:\n"
+				"  count: %7d\n"
+				"  total_time: %lld.%09u\n",
+				cpu, stats[id].name,
+				stats[id].count,
+				s, ns);
+
+			bucket_time = stats[id].first_bucket_time;
+			for (i = 0; i < bucket_count; i++) {
+				s = bucket_time;
+				ns = do_div(s, NSEC_PER_SEC);
+				seq_printf(m,
+					"   <%6lld.%09u: %7d (%lld-%lld)\n",
+					s, ns, stats[id].bucket[i],
+					stats[id].min_time[i],
+					stats[id].max_time[i]);
+
+				bucket_time <<= bucket_shift;
+			}
+
+			seq_printf(m, "  >=%6lld.%09u: %7d (%lld-%lld)\n",
 				s, ns, stats[id].bucket[i],
 				stats[id].min_time[i],
 				stats[id].max_time[i]);
-
-			bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
 		}
 
-		SNPRINTF(p, count, "  >=%6lld.%09u: %7d (%lld-%lld)\n",
-			s, ns, stats[id].bucket[i],
-			stats[id].min_time[i],
-			stats[id].max_time[i]);
-
-again:
-		*start = (char *) 1;
-		*eof = (off + 1 >= MSM_PM_STAT_COUNT * num_possible_cpus());
-
 		spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
 	}
 
-	return p - page;
+	return 0;
 }
-#undef SNPRINTF
 
 #define MSM_PM_STATS_RESET "reset"
-
 /*
  * Reset the power management statistics values.
  */
-static int msm_pm_write_proc(struct file *file, const char __user *buffer,
-	unsigned long count, void *data)
+static ssize_t msm_pm_write_proc(struct file *file, const char __user *buffer,
+	size_t count, loff_t *off)
 {
 	char buf[sizeof(MSM_PM_STATS_RESET)];
 	int ret;
@@ -231,6 +198,19 @@
 }
 #undef MSM_PM_STATS_RESET
 
+static int msm_pm_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_pm_stats_show, NULL);
+}
+
+static const struct file_operations msm_pm_stats_fops = {
+	.open		= msm_pm_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= msm_pm_write_proc,
+};
+
 void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size)
 {
 	unsigned int cpu;
@@ -296,11 +276,6 @@
 
 	}
 
-	d_entry = create_proc_entry("msm_pm_stats",
-			S_IRUGO | S_IWUSR | S_IWGRP, NULL);
-	if (d_entry) {
-		d_entry->read_proc = msm_pm_read_proc;
-		d_entry->write_proc = msm_pm_write_proc;
-		d_entry->data = NULL;
-	}
+	d_entry = proc_create_data("msm_pm_stats", S_IRUGO | S_IWUSR | S_IWGRP,
+			NULL, &msm_pm_stats_fops, NULL);
 }
diff --git a/block/row-iosched.c b/block/row-iosched.c
index 8e19c94..dfb46b4 100644
--- a/block/row-iosched.c
+++ b/block/row-iosched.c
@@ -396,7 +396,6 @@
 				"added urgent request (total on queue=%d)",
 				rqueue->nr_req);
 			rq->cmd_flags |= REQ_URGENT;
-			WARN_ON(rqueue->nr_req > 1);
 			rd->pending_urgent_rq = rq;
 		}
 	} else
diff --git a/drivers/bif/qpnp-bsi.c b/drivers/bif/qpnp-bsi.c
index 5068a21..9d0abd2 100644
--- a/drivers/bif/qpnp-bsi.c
+++ b/drivers/bif/qpnp-bsi.c
@@ -56,6 +56,7 @@
 	atomic_t		irq_flag[QPNP_BSI_IRQ_COUNT];
 	int			batt_present_irq;
 	enum qpnp_vadc_channels	batt_id_adc_channel;
+	struct qpnp_vadc_chip	*vadc_dev;
 };
 
 #define QPNP_BSI_DRIVER_NAME	"qcom,qpnp-bsi"
@@ -1343,7 +1344,8 @@
 		return -ENXIO;
 	}
 
-	rc = qpnp_vadc_read(chip->batt_id_adc_channel, &adc_result);
+	rc = qpnp_vadc_read(chip->vadc_dev, chip->batt_id_adc_channel,
+								&adc_result);
 	if (!rc) {
 		vid_uV = adc_result.physical;
 
@@ -1672,8 +1674,11 @@
 
 	/* Ensure that ADC channel is available if it was specified. */
 	if (chip->batt_id_adc_channel < ADC_MAX_NUM) {
-		rc = qpnp_vadc_is_ready();
-		if (rc) {
+		chip->vadc_dev = qpnp_get_vadc(dev, "bsi");
+		if (IS_ERR(chip->vadc_dev)) {
+			rc = PTR_ERR(chip->vadc_dev);
+			if (rc != -EPROBE_DEFER)
+				pr_err("missing vadc property, rc=%d\n", rc);
 			/* Probe retry, do not print an error message */
 			goto cleanup_irqs;
 		}
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 0c67ed8..a779b24 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -54,7 +54,7 @@
 struct diag_dci_data_info *dci_data_smd;
 struct mutex dci_stat_mutex;
 
-void diag_dci_smd_record_info(int read_bytes)
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type)
 {
 	static int curr_dci_data_smd;
 	static unsigned long iteration;
@@ -67,13 +67,14 @@
 	temp_data += curr_dci_data_smd;
 	temp_data->iteration = iteration + 1;
 	temp_data->data_size = read_bytes;
+	temp_data->ch_type = ch_type;
 	diag_get_timestamp(temp_data->time_stamp);
 	curr_dci_data_smd++;
 	iteration++;
 	mutex_unlock(&dci_stat_mutex);
 }
 #else
-void diag_dci_smd_record_info(int read_bytes) { }
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type) { }
 #endif
 
 /* Process the data read from the smd dci channel */
@@ -83,7 +84,7 @@
 	int read_bytes, dci_pkt_len, i;
 	uint8_t recv_pkt_cmd_code;
 
-	diag_dci_smd_record_info(recd_bytes);
+	diag_dci_smd_record_info(recd_bytes, (uint8_t)smd_info->type);
 	/* Each SMD read can have multiple DCI packets */
 	read_bytes = 0;
 	while (read_bytes < recd_bytes) {
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 520995b..e2c4158 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -101,6 +101,7 @@
 	unsigned long iteration;
 	int data_size;
 	char time_stamp[DIAG_TS_SIZE];
+	uint8_t ch_type;
 };
 
 extern struct diag_dci_data_info *dci_data_smd;
@@ -135,7 +136,7 @@
 void create_dci_event_mask_tbl(unsigned char *tbl_buf);
 int diag_dci_clear_event_mask(void);
 int diag_dci_query_event_mask(uint16_t event_id);
-void diag_dci_smd_record_info(int read_bytes);
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type);
 uint8_t diag_dci_get_cumulative_real_time(void);
 int diag_dci_set_real_time(int client_id, uint8_t real_time);
 /* Functions related to DCI wakeup sources */
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index 3d1a6cd..a24fc54 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -81,6 +81,11 @@
 		"LPASS STM requested state: %d\n"
 		"RIVA STM requested state: %d\n"
 		"APPS STM requested state: %d\n"
+		"supports apps hdlc encoding: %d\n"
+		"Modem hdlc encoding: %d\n"
+		"Lpass hdlc encoding: %d\n"
+		"RIVA hdlc encoding: %d\n"
+		"Modem CMD hdlc encoding: %d\n"
 		"logging_mode: %d\n"
 		"real_time_mode: %d\n",
 		(unsigned int)driver->smd_data[MODEM_DATA].ch,
@@ -123,6 +128,11 @@
 		driver->stm_state_requested[LPASS_DATA],
 		driver->stm_state_requested[WCNSS_DATA],
 		driver->stm_state_requested[APPS_DATA],
+		driver->supports_apps_hdlc_encoding,
+		driver->smd_data[MODEM_DATA].encode_hdlc,
+		driver->smd_data[LPASS_DATA].encode_hdlc,
+		driver->smd_data[WCNSS_DATA].encode_hdlc,
+		driver->smd_cmd[MODEM_DATA].encode_hdlc,
 		driver->logging_mode,
 		driver->real_time_mode);
 
@@ -202,11 +212,13 @@
 		if (temp_data->iteration != 0) {
 			bytes_written = scnprintf(
 				buf + bytes_in_buf, bytes_remaining,
-				"i %-20ld\t"
-				"s %-20d\t"
-				"t %-20s\n",
+				"i %-10ld\t"
+				"s %-10d\t"
+				"c %-10d\t"
+				"t %-15s\n",
 				temp_data->iteration,
 				temp_data->data_size,
+				temp_data->ch_type,
 				temp_data->time_stamp);
 			bytes_in_buf += bytes_written;
 			bytes_remaining -= bytes_written;
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index aa1d847..c91095e 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -491,6 +491,8 @@
 	feature_bytes[0] |= F_DIAG_LOG_ON_DEMAND_RSP_ON_MASTER;
 	feature_bytes[0] |= driver->supports_separate_cmdrsp ?
 				F_DIAG_REQ_RSP_CHANNEL : 0;
+	feature_bytes[0] |= driver->supports_apps_hdlc_encoding ?
+				F_DIAG_HDLC_ENCODE_IN_APPS_MASK : 0;
 	feature_bytes[1] |= F_DIAG_OVER_STM;
 	memcpy(buf+header_size, &feature_bytes, FEATURE_MASK_LEN_BYTES);
 	total_len = header_size + FEATURE_MASK_LEN_BYTES;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 7154942..7ef1d80 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -217,6 +217,7 @@
 	int peripheral;	/* The peripheral this smd channel communicates with */
 	int type;	/* The type of smd channel (data, control, dci) */
 	uint16_t peripheral_mask;
+	int encode_hdlc; /* Whether data is raw and needs to be hdlc encoded */
 
 	smd_channel_t *ch;
 	smd_channel_t *ch_save;
@@ -229,11 +230,16 @@
 	unsigned char *buf_in_1;
 	unsigned char *buf_in_2;
 
+	unsigned char *buf_in_1_raw;
+	unsigned char *buf_in_2_raw;
+
 	struct diag_request *write_ptr_1;
 	struct diag_request *write_ptr_2;
 
 	struct diag_nrt_wake_lock nrt_lock;
 
+	struct workqueue_struct *wq;
+
 	struct work_struct diag_read_smd_work;
 	struct work_struct diag_notify_update_smd_work;
 	int notify_context;
@@ -270,6 +276,7 @@
 	unsigned int buf_tbl_size;
 	int use_device_tree;
 	int supports_separate_cmdrsp;
+	int supports_apps_hdlc_encoding;
 	/* The state requested in the STM command */
 	int stm_state_requested[NUM_STM_PROCESSORS];
 	/* The current STM state */
@@ -301,7 +308,7 @@
 	mempool_t *diag_hdlc_pool;
 	mempool_t *diag_user_pool;
 	mempool_t *diag_write_struct_pool;
-	struct mutex diagmem_mutex;
+	spinlock_t diag_mem_lock;
 	int count;
 	int count_hdlc_pool;
 	int count_user_pool;
@@ -384,7 +391,6 @@
 	struct diag_request *write_ptr_mdm;
 #endif
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
-	spinlock_t hsic_ready_spinlock;
 	/* common for all bridges */
 	struct work_struct diag_connect_work;
 	struct work_struct diag_disconnect_work;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 6e70062..24d7fac 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -131,12 +131,8 @@
 	mutex_lock(&driver->diagchar_mutex);
 	if (buf_hdlc) {
 		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
-		if (err) {
-			/*Free the buffer right away if write failed */
+		if (err)
 			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
-			diagmem_free(driver, (unsigned char *)driver->
-				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
-		}
 		buf_hdlc = NULL;
 #ifdef DIAG_DEBUG
 		pr_debug("diag: Number of bytes written "
@@ -543,58 +539,10 @@
 	return exit_stat;
 }
 
-static void diag_update_data_ready(int index)
-{
-	int clear_bit = 1;
-	unsigned long hsic_lock_flags;
-	unsigned long ready_lock_flags;
-	int i;
-
-	/*
-	 * Determine whether the data_ready USER_SPACE_DATA_TYPE bit
-	 * should be updated/cleared or not. There is a race condition that
-	 * can occur when in MEMORY_DEVICE_MODE with the hsic data.
-	 * When new hsic data arrives we prepare the data so it can
-	 * later be copied to userspace.  We set the USER_SPACE_DATA_TYPE
-	 * bit in data ready at that time. We later copy the hsic data
-	 * to userspace and clear the USER_SPACE_DATA_TYPE bit in
-	 * data ready. The race condition occurs if new data arrives (bit set)
-	 * while we are processing the current data and sending
-	 * it to userspace (bit clear).  The clearing of the bit can
-	 * overwrite the setting of the bit.
-	 */
-
-	spin_lock_irqsave(&driver->hsic_ready_spinlock, ready_lock_flags);
-	for (i = 0; i < MAX_HSIC_CH; i++) {
-		if (diag_hsic[i].hsic_inited) {
-			spin_lock_irqsave(&diag_hsic[i].hsic_spinlock,
-							hsic_lock_flags);
-			if ((diag_hsic[i].num_hsic_buf_tbl_entries > 0) &&
-				diag_hsic[i].hsic_device_enabled &&
-				diag_hsic[i].hsic_ch) {
-				/* New data do not clear the bit */
-				clear_bit = 0;
-			}
-			spin_unlock_irqrestore(&diag_hsic[i].hsic_spinlock,
-							hsic_lock_flags);
-			if (!clear_bit)
-				break;
-		}
-	}
-
-	if (clear_bit)
-		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
-
-	spin_unlock_irqrestore(&driver->hsic_ready_spinlock, ready_lock_flags);
-}
 #else
 inline uint16_t diag_get_remote_device_mask(void) { return 0; }
 inline int diag_copy_remote(char __user *buf, size_t count, int *pret,
 			    int *pnum_data) { return 0; }
-static void diag_update_data_ready(int index)
-{
-	driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
-}
 #endif
 
 int diag_command_reg(unsigned long ioarg)
@@ -1216,8 +1164,8 @@
 		return -EINVAL;
 	}
 
-	wait_event_interruptible(driver->wait_q,
-				  driver->data_ready[index]);
+	wait_event_interruptible(driver->wait_q, driver->data_ready[index]);
+
 	mutex_lock(&driver->diagchar_mutex);
 
 	clear_read_wakelock = 0;
@@ -1227,6 +1175,7 @@
 		pr_debug("diag: process woken up\n");
 		/*Copy the type of data being passed*/
 		data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
+		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
 		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
 		/* place holder for number of data field */
 		ret += 4;
@@ -1362,10 +1311,9 @@
 		/* copy number of data fields */
 		COPY_USER_SPACE_OR_EXIT(buf+4, num_data, 4);
 		ret -= 4;
-		diag_update_data_ready(index);
 		for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) {
 			if (driver->smd_data[i].ch)
-				queue_work(driver->diag_wq,
+				queue_work(driver->smd_data[i].wq,
 				&(driver->smd_data[i].diag_read_smd_work));
 		}
 #ifdef CONFIG_DIAG_SDIO_PIPE
@@ -1866,10 +1814,6 @@
 	if (HDLC_OUT_BUF_SIZE - driver->used <= (2*payload_size) + 3) {
 		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
 		if (err) {
-			/*Free the buffer right away if write failed */
-			if (driver->logging_mode == USB_MODE)
-				diagmem_free(driver, (unsigned char *)driver->
-					write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
 			ret = -EIO;
 			goto fail_free_hdlc;
 		}
@@ -1894,10 +1838,6 @@
 		 (unsigned int)(buf_hdlc + HDLC_OUT_BUF_SIZE)) {
 		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
 		if (err) {
-			/*Free the buffer right away if write failed */
-			if (driver->logging_mode == USB_MODE)
-				diagmem_free(driver, (unsigned char *)driver->
-					write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
 			ret = -EIO;
 			goto fail_free_hdlc;
 		}
@@ -1919,10 +1859,6 @@
 	if (pkt_type == DATA_TYPE_RESPONSE) {
 		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
 		if (err) {
-			/*Free the buffer right away if write failed */
-			if (driver->logging_mode == USB_MODE)
-				diagmem_free(driver, (unsigned char *)driver->
-					write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
 			ret = -EIO;
 			goto fail_free_hdlc;
 		}
@@ -2176,7 +2112,6 @@
 		diag_masks_init();
 		diagfwd_init();
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
-		spin_lock_init(&driver->hsic_ready_spinlock);
 		diagfwd_bridge_init(HSIC);
 		diagfwd_bridge_init(HSIC_2);
 		/* register HSIC device */
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 379dc4d..a1f6b2c 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -61,13 +61,13 @@
 /* Number of entries in table of buffers */
 static unsigned int buf_tbl_size = 10;
 struct diag_master_table entry;
-struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
-struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
 int wrap_enabled;
 uint16_t wrap_count;
 
 void encode_rsp_and_send(int buf_length)
 {
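+	/* Encode state lives on the stack; each call gets its own copy. */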
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
 	struct diag_smd_info *data = &(driver->smd_data[MODEM_DATA]);
 
 	if (buf_length > APPS_BUF_SIZE) {
@@ -244,6 +244,124 @@
 		}
 	}
 }
+int diag_add_hdlc_encoding(struct diag_smd_info *smd_info, void *buf,
+			   int total_recd, uint8_t *encode_buf,
+			   int *encoded_length)
+{
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	struct data_header {
+		uint8_t control_char;
+		uint8_t version;
+		uint16_t length;
+	};
+	struct data_header *header;
+	int header_size = sizeof(struct data_header);
+	uint8_t *end_control_char;
+	uint8_t *payload;
+	uint8_t *temp_buf;
+	uint8_t *temp_encode_buf;
+	int src_pkt_len;
+	int encoded_pkt_length;
+	int max_size;
+	int total_processed = 0;
+	int bytes_remaining;
+	int success = 1;
+
+	temp_buf = buf;
+	temp_encode_buf = encode_buf;
+	bytes_remaining = *encoded_length;
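+	/* Process one raw packet (header, payload, control char) per iteration. */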
+	while (total_processed < total_recd) {
+		header = (struct data_header *)temp_buf;
+		/* Perform initial error checking */
+		if (header->control_char != CONTROL_CHAR ||
+			header->version != 1) {
+			success = 0;
+			break;
+		}
+		payload = temp_buf + header_size;
+		end_control_char = payload + header->length;
+		if (*end_control_char != CONTROL_CHAR) {
+			success = 0;
+			break;
+		}
+
+		max_size = 2 * header->length + 3;
+		if (bytes_remaining < max_size) {
+			pr_err("diag: In %s, Not enough room to encode remaining data for peripheral: %d, bytes available: %d, max_size: %d\n",
+				__func__, smd_info->peripheral,
+				bytes_remaining, max_size);
+			success = 0;
+			break;
+		}
+
+		/* Prepare for encoding the data */
+		send.state = DIAG_STATE_START;
+		send.pkt = payload;
+		send.last = (void *)(payload + header->length - 1);
+		send.terminate = 1;
+
+		enc.dest = temp_encode_buf;
+		enc.dest_last = (void *)(temp_encode_buf + max_size);
+		enc.crc = 0;
+		diag_hdlc_encode(&send, &enc);
+
+		/* Prepare for next packet */
+		src_pkt_len = (header_size + header->length + 1);
+		total_processed += src_pkt_len;
+		temp_buf += src_pkt_len;
+
+		encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf;
+		bytes_remaining -= encoded_pkt_length;
+		temp_encode_buf = enc.dest;
+	}
+
+	*encoded_length = (int)(temp_encode_buf - encode_buf);
+
+	return success;
+}
+
+static int check_bufsize_for_encoding(struct diag_smd_info *smd_info, void *buf,
+					int total_recd)
+{
+	int buf_size = IN_BUF_SIZE;
+	int max_size = 2 * total_recd + 3;
+	unsigned char *temp_buf;
+
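+	/* max_size covers worst-case HDLC expansion: escapes, CRC and terminator. */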
+	if (max_size > IN_BUF_SIZE) {
+		if (max_size < MAX_IN_BUF_SIZE) {
+			pr_err("diag: In %s, SMD sending packet of %d bytes that may expand to %d bytes, peripheral: %d\n",
+				__func__, total_recd, max_size,
+				smd_info->peripheral);
+			if (buf == smd_info->buf_in_1_raw) {
+				temp_buf = krealloc(smd_info->buf_in_1,
+							max_size, GFP_KERNEL);
+				if (temp_buf) {
+					smd_info->buf_in_1 = temp_buf;
+					buf_size = max_size;
+				} else {
+					buf_size = 0;
+				}
+			} else {
+				temp_buf = krealloc(smd_info->buf_in_2,
+							max_size, GFP_KERNEL);
+				if (temp_buf) {
+					smd_info->buf_in_2 = temp_buf;
+					buf_size = max_size;
+				} else {
+					buf_size = 0;
+				}
+			}
+		} else {
+			pr_err("diag: In %s, SMD sending packet of size %d. HDLC encoding can expand to more than %d bytes, peripheral: %d. Discarding.\n",
+				__func__, max_size, MAX_IN_BUF_SIZE,
+				smd_info->peripheral);
+			buf_size = 0;
+		}
+	}
+
+	return buf_size;
+}
 
 void process_lock_enabling(struct diag_nrt_wake_lock *lock, int real_time)
 {
@@ -334,7 +452,7 @@
 
 /* Process the data read from the smd data channel */
 int diag_process_smd_read_data(struct diag_smd_info *smd_info, void *buf,
-								int total_recd)
+			       int total_recd)
 {
 	struct diag_request *write_ptr_modem = NULL;
 	int *in_busy_ptr = 0;
@@ -352,26 +470,74 @@
 		return 0;
 	}
 
-	if (smd_info->buf_in_1 == buf) {
-		write_ptr_modem = smd_info->write_ptr_1;
-		in_busy_ptr = &smd_info->in_busy_1;
-	} else if (smd_info->buf_in_2 == buf) {
-		write_ptr_modem = smd_info->write_ptr_2;
-		in_busy_ptr = &smd_info->in_busy_2;
-	} else {
-		pr_err("diag: In %s, no match for in_busy_1\n", __func__);
-	}
+	/* If the data is already hdlc encoded */
+	if (!smd_info->encode_hdlc) {
+		if (smd_info->buf_in_1 == buf) {
+			write_ptr_modem = smd_info->write_ptr_1;
+			in_busy_ptr = &smd_info->in_busy_1;
+		} else if (smd_info->buf_in_2 == buf) {
+			write_ptr_modem = smd_info->write_ptr_2;
+			in_busy_ptr = &smd_info->in_busy_2;
+		} else {
+			pr_err("diag: In %s, no match for in_busy_1, peripheral: %d\n",
+				__func__, smd_info->peripheral);
+		}
 
-	if (write_ptr_modem) {
-		write_ptr_modem->length = total_recd;
-		*in_busy_ptr = 1;
-		err = diag_device_write(buf, smd_info->peripheral,
-					write_ptr_modem);
-		if (err) {
-			/* Free up the buffer for future use */
-			*in_busy_ptr = 0;
-			pr_err_ratelimited("diag: In %s, diag_device_write error: %d\n",
-				__func__, err);
+		if (write_ptr_modem) {
+			write_ptr_modem->length = total_recd;
+			*in_busy_ptr = 1;
+			err = diag_device_write(buf, smd_info->peripheral,
+						write_ptr_modem);
+			if (err) {
+				/* Free up the buffer for future use */
+				*in_busy_ptr = 0;
+				pr_err_ratelimited("diag: In %s, diag_device_write error: %d\n",
+					__func__, err);
+			}
+		}
+	} else {
+		/* The data is raw and needs to be hdlc encoded */
+		if (smd_info->buf_in_1_raw == buf) {
+			write_ptr_modem = smd_info->write_ptr_1;
+			in_busy_ptr = &smd_info->in_busy_1;
+		} else if (smd_info->buf_in_2_raw == buf) {
+			write_ptr_modem = smd_info->write_ptr_2;
+			in_busy_ptr = &smd_info->in_busy_2;
+		} else {
+			pr_err("diag: In %s, no match for in_busy_1, peripheral: %d\n",
+				__func__, smd_info->peripheral);
+		}
+
+		if (write_ptr_modem) {
+			int success = 0;
+			int write_length = 0;
+			unsigned char *write_buf = NULL;
+
+			write_length = check_bufsize_for_encoding(smd_info, buf,
+								total_recd);
+			if (write_length) {
+				write_buf = (buf == smd_info->buf_in_1_raw) ?
+					smd_info->buf_in_1 : smd_info->buf_in_2;
+				success = diag_add_hdlc_encoding(smd_info, buf,
+							total_recd, write_buf,
+							&write_length);
+				if (success) {
+					write_ptr_modem->length = write_length;
+					*in_busy_ptr = 1;
+					err = diag_device_write(write_buf,
+							smd_info->peripheral,
+							write_ptr_modem);
+					if (err) {
+						/*
+						 * Free up the buffer for
+						 * future use
+						 */
+						*in_busy_ptr = 0;
+						pr_err_ratelimited("diag: In %s, diag_device_write error: %d\n",
+								__func__, err);
+					}
+				}
+			}
 		}
 	}
 
@@ -391,11 +557,32 @@
 		return;
 	}
 
-	if (!smd_info->in_busy_1)
+	/* Determine the buffer to read the data into. */
+	if (smd_info->type == SMD_DATA_TYPE) {
+		/* If the data is raw and not hdlc encoded */
+		if (smd_info->encode_hdlc) {
+			if (!smd_info->in_busy_1)
+				buf = smd_info->buf_in_1_raw;
+			else if (!smd_info->in_busy_2)
+				buf = smd_info->buf_in_2_raw;
+		} else {
+			if (!smd_info->in_busy_1)
+				buf = smd_info->buf_in_1;
+			else if (!smd_info->in_busy_2)
+				buf = smd_info->buf_in_2;
+		}
+	} else if (smd_info->type == SMD_CMD_TYPE) {
+		/* If the data is raw and not hdlc encoded */
+		if (smd_info->encode_hdlc) {
+			if (!smd_info->in_busy_1)
+				buf = smd_info->buf_in_1_raw;
+		} else {
+			if (!smd_info->in_busy_1)
+				buf = smd_info->buf_in_1;
+		}
+	} else if (!smd_info->in_busy_1) {
 		buf = smd_info->buf_in_1;
-	else if (!smd_info->in_busy_2 &&
-			(smd_info->type == SMD_DATA_TYPE))
-		buf = smd_info->buf_in_2;
+	}
 
 	if (!buf && (smd_info->type == SMD_DCI_TYPE ||
 					smd_info->type == SMD_DCI_CMD_TYPE))
@@ -494,32 +681,12 @@
 	diag_smd_send_req(smd_info);
 }
 
-#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
-static void diag_mem_dev_mode_ready_update(int index, int hsic_updated)
-{
-	if (hsic_updated) {
-		unsigned long flags;
-		spin_lock_irqsave(&driver->hsic_ready_spinlock, flags);
-		driver->data_ready[index] |= USER_SPACE_DATA_TYPE;
-		spin_unlock_irqrestore(&driver->hsic_ready_spinlock, flags);
-	} else {
-		driver->data_ready[index] |= USER_SPACE_DATA_TYPE;
-	}
-}
-#else
-static void diag_mem_dev_mode_ready_update(int index, int hsic_updated)
-{
-	(void) hsic_updated;
-	driver->data_ready[index] |= USER_SPACE_DATA_TYPE;
-}
-#endif
 int diag_device_write(void *buf, int data_type, struct diag_request *write_ptr)
 {
 	int i, err = 0, index;
 	index = 0;
 
 	if (driver->logging_mode == MEMORY_DEVICE_MODE) {
-		int hsic_updated = 0;
 		if (data_type == APPS_DATA) {
 			for (i = 0; i < driver->buf_tbl_size; i++)
 				if (driver->buf_tbl[i].length == 0) {
@@ -540,7 +707,6 @@
 		else if (data_type == HSIC_DATA || data_type == HSIC_2_DATA) {
 			unsigned long flags;
 			int foundIndex = -1;
-			hsic_updated = 1;
 			index = data_type - HSIC_DATA;
 			spin_lock_irqsave(&diag_hsic[index].hsic_spinlock,
 									flags);
@@ -573,8 +739,8 @@
 						 driver->logging_process_id)
 				break;
 		if (i < driver->num_clients) {
-			diag_mem_dev_mode_ready_update(i, hsic_updated);
 			pr_debug("diag: wake up logging process\n");
+			driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
 			wake_up_interruptible(&driver->wait_q);
 		} else
 			return -EINVAL;
@@ -582,7 +748,7 @@
 		if ((data_type >= MODEM_DATA) && (data_type <= WCNSS_DATA)) {
 			driver->smd_data[data_type].in_busy_1 = 0;
 			driver->smd_data[data_type].in_busy_2 = 0;
-			queue_work(driver->diag_wq,
+			queue_work(driver->smd_data[data_type].wq,
 				&(driver->smd_data[data_type].
 							diag_read_smd_work));
 			if (data_type == MODEM_DATA &&
@@ -623,8 +789,16 @@
 				driver->write_ptr_svc->buf = buf;
 				err = usb_diag_write(driver->legacy_ch,
 						driver->write_ptr_svc);
-			} else
-				err = -1;
+				/* Free the buffer if write failed */
+				if (err) {
+					diagmem_free(driver,
+						     (unsigned char *)driver->
+						     write_ptr_svc,
+						     POOL_TYPE_WRITE_STRUCT);
+				}
+			} else {
+				err = -ENOMEM;
+			}
 		} else if ((data_type >= MODEM_DATA) &&
 				(data_type <= WCNSS_DATA)) {
 			write_ptr->buf = buf;
@@ -1439,7 +1613,7 @@
 		driver->smd_data[i].in_busy_2 = 0;
 		if (queue)
 			/* Poll SMD data channels to check for data */
-			queue_work(driver->diag_wq,
+			queue_work(driver->smd_data[i].wq,
 				&(driver->smd_data[i].diag_read_smd_work));
 	}
 
@@ -1549,19 +1723,24 @@
 	for (i = 0; i < num_channels; i++) {
 		if (buf == (void *)data[i].buf_in_1) {
 			data[i].in_busy_1 = 0;
-			queue_work(driver->diag_wq,
-				&(data[i].diag_read_smd_work));
 			found_it = 1;
 			break;
 		} else if (buf == (void *)data[i].buf_in_2) {
 			data[i].in_busy_2 = 0;
-			queue_work(driver->diag_wq,
-				&(data[i].diag_read_smd_work));
 			found_it = 1;
 			break;
 		}
 	}
 
+	if (found_it) {
+		if (data[i].type == SMD_DATA_TYPE)
+			queue_work(data[i].wq,
+					&(data[i].diag_read_smd_work));
+		else
+			queue_work(driver->diag_wq,
+					&(data[i].diag_read_smd_work));
+	}
+
 	return found_it;
 }
 
@@ -1591,6 +1770,9 @@
 	}
 #endif
 	if (!found_it) {
+		if (driver->logging_mode != USB_MODE)
+			pr_debug("diag: freeing buffer when not in usb mode\n");
+
 		diagmem_free(driver, (unsigned char *)buf,
 						POOL_TYPE_HDLC);
 		diagmem_free(driver, (unsigned char *)diag_write_ptr,
@@ -1734,8 +1916,12 @@
 			diag_dci_try_activate_wakeup_source(smd_info->ch);
 		queue_work(driver->diag_dci_wq,
 				&(smd_info->diag_read_smd_work));
-	} else
+	} else if (smd_info->type == SMD_DATA_TYPE) {
+		queue_work(smd_info->wq,
+				&(smd_info->diag_read_smd_work));
+	} else {
 		queue_work(driver->diag_wq, &(smd_info->diag_read_smd_work));
+	}
 }
 
 static int diag_smd_probe(struct platform_device *pdev)
@@ -1863,8 +2049,10 @@
 
 void diag_smd_destructor(struct diag_smd_info *smd_info)
 {
-	if (smd_info->type == SMD_DATA_TYPE)
+	if (smd_info->type == SMD_DATA_TYPE) {
 		wake_lock_destroy(&smd_info->nrt_lock.read_lock);
+		destroy_workqueue(smd_info->wq);
+	}
 
 	if (smd_info->ch)
 		smd_close(smd_info->ch);
@@ -1875,6 +2063,8 @@
 	kfree(smd_info->buf_in_2);
 	kfree(smd_info->write_ptr_1);
 	kfree(smd_info->write_ptr_2);
+	kfree(smd_info->buf_in_1_raw);
+	kfree(smd_info->buf_in_2_raw);
 }
 
 int diag_smd_constructor(struct diag_smd_info *smd_info, int peripheral,
@@ -1882,6 +2072,7 @@
 {
 	smd_info->peripheral = peripheral;
 	smd_info->type = type;
+	smd_info->encode_hdlc = 0;
 	mutex_init(&smd_info->smd_ch_mutex);
 
 	switch (peripheral) {
@@ -1934,6 +2125,58 @@
 				goto err;
 			kmemleak_not_leak(smd_info->write_ptr_2);
 		}
+		if (driver->supports_apps_hdlc_encoding) {
+			/* In support of hdlc encoding */
+			if (smd_info->buf_in_1_raw == NULL) {
+				smd_info->buf_in_1_raw = kzalloc(IN_BUF_SIZE,
+								GFP_KERNEL);
+				if (smd_info->buf_in_1_raw == NULL)
+					goto err;
+				kmemleak_not_leak(smd_info->buf_in_1_raw);
+			}
+			if (smd_info->buf_in_2_raw == NULL) {
+				smd_info->buf_in_2_raw = kzalloc(IN_BUF_SIZE,
+								GFP_KERNEL);
+				if (smd_info->buf_in_2_raw == NULL)
+					goto err;
+				kmemleak_not_leak(smd_info->buf_in_2_raw);
+			}
+		}
+	}
+
+	if (smd_info->type == SMD_CMD_TYPE &&
+		driver->supports_apps_hdlc_encoding) {
+		/* In support of hdlc encoding */
+		if (smd_info->buf_in_1_raw == NULL) {
+			smd_info->buf_in_1_raw = kzalloc(IN_BUF_SIZE,
+								GFP_KERNEL);
+			if (smd_info->buf_in_1_raw == NULL)
+				goto err;
+			kmemleak_not_leak(smd_info->buf_in_1_raw);
+		}
+	}
+
+	/* The smd data type needs separate work queues for reads */
+	if (type == SMD_DATA_TYPE) {
+		switch (peripheral) {
+		case MODEM_DATA:
+			smd_info->wq = create_singlethread_workqueue(
+						"diag_modem_data_read_wq");
+			break;
+		case LPASS_DATA:
+			smd_info->wq = create_singlethread_workqueue(
+						"diag_lpass_data_read_wq");
+			break;
+		case WCNSS_DATA:
+			smd_info->wq = create_singlethread_workqueue(
+						"diag_wcnss_data_read_wq");
+			break;
+		default:
+			smd_info->wq = NULL;
+			break;
+		}
+	} else {
+		smd_info->wq = NULL;
 	}
 
 	INIT_WORK(&(smd_info->diag_read_smd_work), diag_read_smd_work_fn);
@@ -2026,6 +2269,8 @@
 	kfree(smd_info->buf_in_2);
 	kfree(smd_info->write_ptr_1);
 	kfree(smd_info->write_ptr_2);
+	kfree(smd_info->buf_in_1_raw);
+	kfree(smd_info->buf_in_2_raw);
 
 	return 0;
 }
@@ -2048,6 +2293,7 @@
 	driver->buf_tbl_size = (buf_tbl_size < driver->poolsize_hdlc) ?
 				driver->poolsize_hdlc : buf_tbl_size;
 	driver->supports_separate_cmdrsp = device_supports_separate_cmdrsp();
+	driver->supports_apps_hdlc_encoding = 0;
 	mutex_init(&driver->diag_hdlc_mutex);
 	mutex_init(&driver->diag_cntl_mutex);
 
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index a832cb3..e0deef3 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -89,6 +89,31 @@
 	}
 }
 
+static void process_hdlc_encoding_feature(struct diag_smd_info *smd_info,
+					uint8_t feature_mask)
+{
+	/*
+	 * Check if apps supports hdlc encoding and the
+	 * peripheral supports apps hdlc encoding
+	 */
+	if (driver->supports_apps_hdlc_encoding &&
+		(feature_mask & F_DIAG_HDLC_ENCODE_IN_APPS_MASK)) {
+		driver->smd_data[smd_info->peripheral].encode_hdlc =
+						ENABLE_APPS_HDLC_ENCODING;
+		if (driver->separate_cmdrsp[smd_info->peripheral] &&
+			smd_info->peripheral < NUM_SMD_CMD_CHANNELS)
+			driver->smd_cmd[smd_info->peripheral].encode_hdlc =
+						ENABLE_APPS_HDLC_ENCODING;
+	} else {
+		driver->smd_data[smd_info->peripheral].encode_hdlc =
+						DISABLE_APPS_HDLC_ENCODING;
+		if (driver->separate_cmdrsp[smd_info->peripheral] &&
+			smd_info->peripheral < NUM_SMD_CMD_CHANNELS)
+			driver->smd_cmd[smd_info->peripheral].encode_hdlc =
+						DISABLE_APPS_HDLC_ENCODING;
+	}
+}
+
 /* Process the data read from the smd control channel */
 int diag_process_smd_cntl_read_data(struct diag_smd_info *smd_info, void *buf,
 								int total_recd)
@@ -187,6 +212,12 @@
 				else
 					driver->separate_cmdrsp[periph] =
 							DISABLE_SEPARATE_CMDRSP;
+				/*
+				 * Check if apps supports hdlc encoding and the
+				 * peripheral supports apps hdlc encoding
+				 */
+				process_hdlc_encoding_feature(smd_info,
+								feature_mask);
 				if (feature_mask_len > 1) {
 					feature_mask = *(uint8_t *)(buf+13);
 					process_stm_feature(smd_info,
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index c90c132..d79195c 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -48,6 +48,9 @@
 /* Denotes we support diag over stm */
 #define F_DIAG_OVER_STM			0x02
 
+/* Perform hdlc encoding of data coming from smd channel */
+#define F_DIAG_HDLC_ENCODE_IN_APPS_MASK	0x40
+
 #define ENABLE_SEPARATE_CMDRSP	1
 #define DISABLE_SEPARATE_CMDRSP	0
 
@@ -57,6 +60,9 @@
 #define UPDATE_PERIPHERAL_STM_STATE	1
 #define CLEAR_PERIPHERAL_STM_STATE	2
 
+#define ENABLE_APPS_HDLC_ENCODING	1
+#define DISABLE_APPS_HDLC_ENCODING	0
+
 struct cmd_code_range {
 	uint16_t cmd_code_lo;
 	uint16_t cmd_code_hi;
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
index a6ef3ca..4ceca4f 100644
--- a/drivers/char/diag/diagmem.c
+++ b/drivers/char/diag/diagmem.c
@@ -25,22 +25,24 @@
 void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
 {
 	void *buf = NULL;
+	unsigned long flags;
 	int index;
 
+	spin_lock_irqsave(&driver->diag_mem_lock, flags);
 	index = 0;
 	if (pool_type == POOL_TYPE_COPY) {
 		if (driver->diagpool) {
-			mutex_lock(&driver->diagmem_mutex);
-			if (driver->count < driver->poolsize) {
+			if ((driver->count < driver->poolsize) &&
+				(size <= driver->itemsize)) {
 				atomic_add(1, (atomic_t *)&driver->count);
 				buf = mempool_alloc(driver->diagpool,
 								 GFP_ATOMIC);
 			}
-			mutex_unlock(&driver->diagmem_mutex);
 		}
 	} else if (pool_type == POOL_TYPE_HDLC) {
 		if (driver->diag_hdlc_pool) {
-			if (driver->count_hdlc_pool < driver->poolsize_hdlc) {
+			if ((driver->count_hdlc_pool < driver->poolsize_hdlc) &&
+				(size <= driver->itemsize_hdlc)) {
 				atomic_add(1,
 					 (atomic_t *)&driver->count_hdlc_pool);
 				buf = mempool_alloc(driver->diag_hdlc_pool,
@@ -49,7 +51,8 @@
 		}
 	} else if (pool_type == POOL_TYPE_USER) {
 		if (driver->diag_user_pool) {
-			if (driver->count_user_pool < driver->poolsize_user) {
+			if ((driver->count_user_pool < driver->poolsize_user) &&
+				(size <= driver->itemsize_user)) {
 				atomic_add(1,
 					(atomic_t *)&driver->count_user_pool);
 				buf = mempool_alloc(driver->diag_user_pool,
@@ -58,8 +61,9 @@
 		}
 	} else if (pool_type == POOL_TYPE_WRITE_STRUCT) {
 		if (driver->diag_write_struct_pool) {
-			if (driver->count_write_struct_pool <
-					 driver->poolsize_write_struct) {
+			if ((driver->count_write_struct_pool <
+			     driver->poolsize_write_struct) &&
+			     (size <= driver->itemsize_write_struct)) {
 				atomic_add(1,
 				 (atomic_t *)&driver->count_write_struct_pool);
 				buf = mempool_alloc(
@@ -71,8 +75,9 @@
 				pool_type == POOL_TYPE_HSIC_2) {
 		index = pool_type - POOL_TYPE_HSIC;
 		if (diag_hsic[index].diag_hsic_pool) {
-			if (diag_hsic[index].count_hsic_pool <
-					diag_hsic[index].poolsize_hsic) {
+			if ((diag_hsic[index].count_hsic_pool <
+			     diag_hsic[index].poolsize_hsic) &&
+			     (size <= diag_hsic[index].itemsize_hsic)) {
 				atomic_add(1, (atomic_t *)
 					&diag_hsic[index].count_hsic_pool);
 				buf = mempool_alloc(
@@ -85,7 +90,8 @@
 		index = pool_type - POOL_TYPE_HSIC_WRITE;
 		if (diag_hsic[index].diag_hsic_write_pool) {
 			if (diag_hsic[index].count_hsic_write_pool <
-				diag_hsic[index].poolsize_hsic_write) {
+			    diag_hsic[index].poolsize_hsic_write &&
+			    (size <= diag_hsic[index].itemsize_hsic_write)) {
 				atomic_add(1, (atomic_t *)
 					&diag_hsic[index].
 					count_hsic_write_pool);
@@ -96,14 +102,17 @@
 		}
 #endif
 	}
+	spin_unlock_irqrestore(&driver->diag_mem_lock, flags);
 	return buf;
 }
 
 void diagmem_exit(struct diagchar_dev *driver, int pool_type)
 {
 	int index;
+	unsigned long flags;
 	index = 0;
 
+	spin_lock_irqsave(&driver->diag_mem_lock, flags);
 	if (driver->diagpool) {
 		if (driver->count == 0 && driver->ref_count == 0) {
 			mempool_destroy(driver->diagpool);
@@ -176,12 +185,18 @@
 		}
 	}
 #endif
+	spin_unlock_irqrestore(&driver->diag_mem_lock, flags);
 }
 
 void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type)
 {
 	int index;
+	unsigned long flags;
 
+	if (!buf)
+		return;
+
+	spin_lock_irqsave(&driver->diag_mem_lock, flags);
 	index = 0;
 	if (pool_type == POOL_TYPE_COPY) {
 		if (driver->diagpool != NULL && driver->count > 0) {
@@ -246,13 +261,13 @@
 			__func__, pool_type);
 
 	}
-
+	spin_unlock_irqrestore(&driver->diag_mem_lock, flags);
 	diagmem_exit(driver, pool_type);
 }
 
 void diagmem_init(struct diagchar_dev *driver)
 {
-	mutex_init(&driver->diagmem_mutex);
+	spin_lock_init(&driver->diag_mem_lock);
 
 	if (driver->count == 0) {
 		driver->diagpool = mempool_create_kmalloc_pool(
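
Because the pool bookkeeping is now guarded by a spinlock taken with interrupts saved, diagmem_alloc() can plausibly be called from atomic context. A minimal, hypothetical caller (the function name is made up) would look like:

	/* Sketch: grab a copy-pool buffer; NULL means the pool is exhausted
	 * or the requested size exceeds the pool's itemsize.
	 */
	static void *example_get_copy_buf(struct diagchar_dev *drv, int len)
	{
		return diagmem_alloc(drv, len, POOL_TYPE_COPY);
	}
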
diff --git a/drivers/coresight/coresight-cti.c b/drivers/coresight/coresight-cti.c
index d0900d1..2a06f0a 100644
--- a/drivers/coresight/coresight-cti.c
+++ b/drivers/coresight/coresight-cti.c
@@ -107,22 +107,37 @@
 	return 0;
 }
 
-static void __cti_map_trigin(struct cti_drvdata *drvdata, int trig, int ch)
+static int __cti_map_trigin(struct cti_drvdata *drvdata, int trig, int ch)
 {
 	uint32_t ctien;
+	int ret;
+
+	if (drvdata->refcnt == 0) {
+		ret = cti_enable(drvdata);
+		if (ret)
+			return ret;
+	}
 
 	CTI_UNLOCK(drvdata);
 
 	ctien = cti_readl(drvdata, CTIINEN(trig));
+	if (ctien & (0x1 << ch))
+		goto out;
 	cti_writel(drvdata, (ctien | 0x1 << ch), CTIINEN(trig));
 
 	CTI_LOCK(drvdata);
+
+	drvdata->refcnt++;
+	return 0;
+out:
+	CTI_LOCK(drvdata);
+	return 0;
 }
 
 int coresight_cti_map_trigin(struct coresight_cti *cti, int trig, int ch)
 {
 	struct cti_drvdata *drvdata;
-	int ret = 0;
+	int ret;
 
 	if (IS_ERR_OR_NULL(cti))
 		return -EINVAL;
@@ -134,36 +149,43 @@
 	drvdata = to_cti_drvdata(cti);
 
 	mutex_lock(&drvdata->mutex);
-	if (drvdata->refcnt == 0) {
-		ret = cti_enable(drvdata);
-		if (ret)
-			goto err;
-	}
-	drvdata->refcnt++;
-
-	__cti_map_trigin(drvdata, trig, ch);
-err:
+	ret = __cti_map_trigin(drvdata, trig, ch);
 	mutex_unlock(&drvdata->mutex);
 	return ret;
 }
 EXPORT_SYMBOL(coresight_cti_map_trigin);
 
-static void __cti_map_trigout(struct cti_drvdata *drvdata, int trig, int ch)
+static int __cti_map_trigout(struct cti_drvdata *drvdata, int trig, int ch)
 {
 	uint32_t ctien;
+	int ret;
+
+	if (drvdata->refcnt == 0) {
+		ret = cti_enable(drvdata);
+		if (ret)
+			return ret;
+	}
 
 	CTI_UNLOCK(drvdata);
 
 	ctien = cti_readl(drvdata, CTIOUTEN(trig));
+	if (ctien & (0x1 << ch))
+		goto out;
 	cti_writel(drvdata, (ctien | 0x1 << ch), CTIOUTEN(trig));
 
 	CTI_LOCK(drvdata);
+
+	drvdata->refcnt++;
+	return 0;
+out:
+	CTI_LOCK(drvdata);
+	return 0;
 }
 
 int coresight_cti_map_trigout(struct coresight_cti *cti, int trig, int ch)
 {
 	struct cti_drvdata *drvdata;
-	int ret = 0;
+	int ret;
 
 	if (IS_ERR_OR_NULL(cti))
 		return -EINVAL;
@@ -175,15 +197,7 @@
 	drvdata = to_cti_drvdata(cti);
 
 	mutex_lock(&drvdata->mutex);
-	if (drvdata->refcnt == 0) {
-		ret = cti_enable(drvdata);
-		if (ret)
-			goto err;
-	}
-	drvdata->refcnt++;
-
-	__cti_map_trigout(drvdata, trig, ch);
-err:
+	ret = __cti_map_trigout(drvdata, trig, ch);
 	mutex_unlock(&drvdata->mutex);
 	return ret;
 }
@@ -193,9 +207,11 @@
 {
 	CTI_UNLOCK(drvdata);
 
-	cti_writel(drvdata, 0x1, CTICONTROL);
+	cti_writel(drvdata, 0x0, CTICONTROL);
 
 	CTI_LOCK(drvdata);
+
+	clk_disable_unprepare(drvdata->clk);
 }
 
 static void __cti_unmap_trigin(struct cti_drvdata *drvdata, int trig, int ch)
@@ -205,9 +221,19 @@
 	CTI_UNLOCK(drvdata);
 
 	ctien = cti_readl(drvdata, CTIINEN(trig));
+	if (!(ctien & (0x1 << ch)))
+		goto out;
 	cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIINEN(trig));
 
 	CTI_LOCK(drvdata);
+
+	if (drvdata->refcnt == 1)
+		cti_disable(drvdata);
+	drvdata->refcnt--;
+	return;
+out:
+	CTI_LOCK(drvdata);
+	return;
 }
 
 void coresight_cti_unmap_trigin(struct coresight_cti *cti, int trig, int ch)
@@ -224,13 +250,8 @@
 
 	mutex_lock(&drvdata->mutex);
 	__cti_unmap_trigin(drvdata, trig, ch);
-
-	if (drvdata->refcnt == 1)
-		cti_disable(drvdata);
-	drvdata->refcnt--;
 	mutex_unlock(&drvdata->mutex);
 
-	clk_disable_unprepare(drvdata->clk);
 }
 EXPORT_SYMBOL(coresight_cti_unmap_trigin);
 
@@ -241,9 +262,19 @@
 	CTI_UNLOCK(drvdata);
 
 	ctien = cti_readl(drvdata, CTIOUTEN(trig));
+	if (!(ctien & (0x1 << ch)))
+		goto out;
 	cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIOUTEN(trig));
 
 	CTI_LOCK(drvdata);
+
+	if (drvdata->refcnt == 1)
+		cti_disable(drvdata);
+	drvdata->refcnt--;
+	return;
+out:
+	CTI_LOCK(drvdata);
+	return;
 }
 
 void coresight_cti_unmap_trigout(struct coresight_cti *cti, int trig, int ch)
@@ -260,13 +291,7 @@
 
 	mutex_lock(&drvdata->mutex);
 	__cti_unmap_trigout(drvdata, trig, ch);
-
-	if (drvdata->refcnt == 1)
-		cti_disable(drvdata);
-	drvdata->refcnt--;
 	mutex_unlock(&drvdata->mutex);
-
-	clk_disable_unprepare(drvdata->clk);
 }
 EXPORT_SYMBOL(coresight_cti_unmap_trigout);
 
@@ -362,11 +387,77 @@
 }
 static DEVICE_ATTR(unmap_trigout, S_IWUSR, NULL, cti_store_unmap_trigout);
 
+static ssize_t cti_show_trigin(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long trig, ch;
+	uint32_t ctien;
+	ssize_t size = 0;
+
+	for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+		ctien = cti_readl(drvdata, CTIINEN(trig));
+		for (ch = 0; ch < CTI_MAX_CHANNELS; ch++) {
+			if (ctien & (1 << ch)) {
+				/* Ensure we do not write more than PAGE_SIZE
+				 * bytes of data including \n character and null
+				 * terminator
+				 */
+				size += scnprintf(&buf[size], PAGE_SIZE - size -
+						  1, " %#lx %#lx,", trig, ch);
+				if (size >= PAGE_SIZE - 2) {
+					dev_err(dev, "show buffer full\n");
+					goto err;
+				}
+
+			}
+		}
+	}
+err:
+	size += scnprintf(&buf[size], 2, "\n");
+	return size;
+}
+static DEVICE_ATTR(show_trigin, S_IRUGO, cti_show_trigin, NULL);
+
+static ssize_t cti_show_trigout(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long trig, ch;
+	uint32_t ctien;
+	ssize_t size = 0;
+
+	for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+		ctien = cti_readl(drvdata, CTIOUTEN(trig));
+		for (ch = 0; ch < CTI_MAX_CHANNELS; ch++) {
+			if (ctien & (1 << ch)) {
+				/* Ensure we do not write more than PAGE_SIZE
+				 * bytes of data including \n character and null
+				 * terminator
+				 */
+				size += scnprintf(&buf[size], PAGE_SIZE - size -
+						  1, " %#lx %#lx,", trig, ch);
+				if (size >= PAGE_SIZE - 2) {
+					dev_err(dev, "show buffer full\n");
+					goto err;
+				}
+
+			}
+		}
+	}
+err:
+	size += scnprintf(&buf[size], 2, "\n");
+	return size;
+}
+static DEVICE_ATTR(show_trigout, S_IRUGO, cti_show_trigout, NULL);
+
 static struct attribute *cti_attrs[] = {
 	&dev_attr_map_trigin.attr,
 	&dev_attr_map_trigout.attr,
 	&dev_attr_unmap_trigin.attr,
 	&dev_attr_unmap_trigout.attr,
+	&dev_attr_show_trigin.attr,
+	&dev_attr_show_trigout.attr,
 	NULL,
 };
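
With cti_enable()/cti_disable() now folded into the map/unmap helpers, a client only has to balance the map and unmap calls and check the new return values. A rough usage sketch (the trigger and channel numbers are placeholders):

	/* Sketch: map an input and an output trigger onto channel 2 */
	static int example_cti_setup(struct coresight_cti *cti)
	{
		int ret;

		ret = coresight_cti_map_trigin(cti, 0, 2);
		if (ret)
			return ret;	/* cti_enable() failed */

		ret = coresight_cti_map_trigout(cti, 1, 2);
		if (ret)
			coresight_cti_unmap_trigin(cti, 0, 2);

		return ret;
	}
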
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 96e759b..418c488 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -1198,7 +1198,28 @@
 }
 
 static const struct input_device_id dbs_ids[] = {
-	{ .driver_info = 1 },
+	/* multi-touch touchscreen */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+			BIT_MASK(ABS_MT_POSITION_X) |
+			BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	/* touchpad */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+		.absbit = { [BIT_WORD(ABS_X)] =
+			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+	},
+	/* Keypad */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_KEY) },
+	},
 	{ },
 };
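
Each entry above matches only if the device advertises every bit the entry names (the input core uses subset tests on evbit/keybit/absbit). As an illustration, the "touchpad" entry is roughly equivalent to the hypothetical check below:

	#include <linux/input.h>

	/* Sketch: what the "touchpad" dbs_ids entry requires of a device */
	static bool example_matches_touchpad(struct input_dev *dev)
	{
		return test_bit(BTN_TOUCH, dev->keybit) &&
		       test_bit(ABS_X, dev->absbit) &&
		       test_bit(ABS_Y, dev->absbit);
	}
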
 
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index a4d2c1b..1ea3cd2 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -52,7 +52,7 @@
 #define MAX_VMAP_RETRIES 10
 #define BAD_ORDER	-1
 
-static const unsigned int orders[] = {8, 4, 0};
+static const unsigned int orders[] = {9, 8, 4, 0};
 static const int num_orders = ARRAY_SIZE(orders);
 static unsigned int low_gfp_flags = __GFP_HIGHMEM | GFP_KERNEL | __GFP_ZERO;
 static unsigned int high_gfp_flags = (__GFP_HIGHMEM | __GFP_NORETRY
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index fc66328..aac183b 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -17,15 +17,16 @@
 msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
 msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o
 msm_kgsl_core-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += kgsl_pwrscale_idlestats.o
-msm_kgsl_core-$(CONFIG_MSM_DCVS) += kgsl_pwrscale_msm.o
 msm_kgsl_core-$(CONFIG_SYNC) += kgsl_sync.o
 
 msm_adreno-y += \
 	adreno_ringbuffer.o \
 	adreno_drawctxt.o \
+	adreno_dispatch.o \
 	adreno_postmortem.o \
 	adreno_snapshot.o \
 	adreno_coresight.o \
+	adreno_trace.o \
 	adreno_a2xx.o \
 	adreno_a2xx_trace.o \
 	adreno_a2xx_snapshot.o \
@@ -34,7 +35,7 @@
 	adreno_a3xx_snapshot.o \
 	adreno.o
 
-msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o
+msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o adreno_profile.o
 
 msm_z180-y += \
 	z180.o \
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index baf335f..a28bf33 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -23,8 +23,6 @@
 #include <mach/socinfo.h>
 #include <mach/msm_bus_board.h>
 #include <mach/msm_bus.h>
-#include <mach/msm_dcvs.h>
-#include <mach/msm_dcvs_scm.h>
 
 #include "kgsl.h"
 #include "kgsl_pwrscale.h"
@@ -34,6 +32,7 @@
 
 #include "adreno.h"
 #include "adreno_pm4types.h"
+#include "adreno_trace.h"
 
 #include "a2xx_reg.h"
 #include "a3xx_reg.h"
@@ -115,18 +114,6 @@
 	.long_ib_detect = 1,
 };
 
-/* This set of registers are used for Hang detection
- * If the values of these registers are same after
- * KGSL_TIMEOUT_PART time, GPU hang is reported in
- * kernel log.
- * *****ALERT******ALERT********ALERT*************
- * Order of registers below is important, registers
- * from LONG_IB_DETECT_REG_INDEX_START to
- * LONG_IB_DETECT_REG_INDEX_END are used in long ib detection.
- */
-#define LONG_IB_DETECT_REG_INDEX_START 1
-#define LONG_IB_DETECT_REG_INDEX_END 5
-
 unsigned int ft_detect_regs[FT_DETECT_REGS_COUNT];
 
 /*
@@ -215,7 +202,7 @@
 		512, 0, 2, SZ_128K, 0x3FF037, 0x3FF016 },
 };
 
-static unsigned int adreno_isidle(struct kgsl_device *device);
+static bool adreno_isidle(struct kgsl_device *device);
 
 /**
  * adreno_perfcounter_init: Reserve kernel performance counters
@@ -276,7 +263,7 @@
 }
 
 /**
- * adreno_perfcounter_read_group: Determine which countables are in counters
+ * adreno_perfcounter_read_group() - Determine which countables are in counters
  * @adreno_dev: Adreno device to configure
  * @reads: List of kgsl_perfcounter_read_groups
  * @count: Length of list
@@ -353,6 +340,61 @@
 }
 
 /**
+ * adreno_perfcounter_get_groupid() - Get the performance counter ID
+ * @adreno_dev: Adreno device
+ * @name: Performance counter group name string
+ *
+ * Get the groupid based on the name and return this ID
+ */
+
+int adreno_perfcounter_get_groupid(struct adreno_device *adreno_dev,
+					const char *name)
+{
+
+	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+	struct adreno_perfcount_group *group;
+	int i;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	/* perfcounter get/put/query not allowed on a2xx */
+	if (adreno_is_a2xx(adreno_dev))
+		return -EINVAL;
+
+	for (i = 0; i < counters->group_count; ++i) {
+		group = &(counters->groups[i]);
+		if (!strcmp(group->name, name))
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * adreno_perfcounter_get_name() - Get the group name
+ * @adreno_dev: Adreno device
+ * @groupid: Desired performance counter groupid
+ *
+ * Get the name based on the groupid and return it
+ */
+
+const char *adreno_perfcounter_get_name(struct adreno_device *adreno_dev,
+		unsigned int groupid)
+{
+	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+
+	/* perfcounter get/put/query not allowed on a2xx */
+	if (adreno_is_a2xx(adreno_dev))
+		return NULL;
+
+	if (groupid >= counters->group_count)
+		return NULL;
+
+	return counters->groups[groupid].name;
+}
+
+/**
  * adreno_perfcounter_query_group: Determine which countables are in counters
  * @adreno_dev: Adreno device to configure
  * @groupid: Desired performance counter group
@@ -445,8 +487,11 @@
 	for (i = 0; i < group->reg_count; i++) {
 		if (group->regs[i].countable == countable) {
 			/* Countable already associated with counter */
-			group->regs[i].refcount++;
-			group->regs[i].flags |= flags;
+			if (flags & PERFCOUNTER_FLAG_KERNEL)
+				group->regs[i].kernelcount++;
+			else
+				group->regs[i].usercount++;
+
 			if (offset)
 				*offset = group->regs[i].offset;
 			return 0;
@@ -463,14 +508,20 @@
 
 	/* initialize the new counter */
 	group->regs[empty].countable = countable;
-	group->regs[empty].refcount = 1;
+
+	/* set initial kernel and user count */
+	if (flags & PERFCOUNTER_FLAG_KERNEL) {
+		group->regs[empty].kernelcount = 1;
+		group->regs[empty].usercount = 0;
+	} else {
+		group->regs[empty].kernelcount = 0;
+		group->regs[empty].usercount = 1;
+	}
 
 	/* enable the new counter */
 	adreno_dev->gpudev->perfcounter_enable(adreno_dev, groupid, empty,
 		countable);
 
-	group->regs[empty].flags = flags;
-
 	if (offset)
 		*offset = group->regs[empty].offset;
 
@@ -483,12 +534,13 @@
  * @adreno_dev: Adreno device to configure
  * @groupid: Desired performance counter group
  * @countable: Countable desired to be freed from a  counter
+ * @flags: Flag to determine if kernel or user space request
  *
  * Put a performance counter/countable pair that was previously received.  If
  * noone else is using the countable, free up the counter for others.
  */
 int adreno_perfcounter_put(struct adreno_device *adreno_dev,
-	unsigned int groupid, unsigned int countable)
+	unsigned int groupid, unsigned int countable, unsigned int flags)
 {
 	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
 	struct adreno_perfcount_group *group;
@@ -504,24 +556,27 @@
 
 	group = &(counters->groups[groupid]);
 
+	/*
+	 * Find if the counter/countable pair is used currently.
+	 * Start cycling through registers in the bank.
+	 */
 	for (i = 0; i < group->reg_count; i++) {
+		/* check if countable assigned is what we are looking for */
 		if (group->regs[i].countable == countable) {
-			if (group->regs[i].refcount > 0) {
-				group->regs[i].refcount--;
+			/* Found the pair; update the count for the requester */
+			if (flags & PERFCOUNTER_FLAG_KERNEL &&
+					group->regs[i].kernelcount > 0)
+				group->regs[i].kernelcount--;
+			else if (group->regs[i].usercount > 0)
+				group->regs[i].usercount--;
+			else
+				break;
 
-				/*
-				 * book keeping to ensure we never free a
-				 * perf counter used by kernel
-				 */
-				if (group->regs[i].flags &&
-					group->regs[i].refcount == 0)
-					group->regs[i].refcount++;
-
-				/* make available if not used */
-				if (group->regs[i].refcount == 0)
-					group->regs[i].countable =
-						KGSL_PERFCOUNTER_NOT_USED;
-			}
+			/* mark available if not used anymore */
+			if (group->regs[i].kernelcount == 0 &&
+					group->regs[i].usercount == 0)
+				group->regs[i].countable =
+					KGSL_PERFCOUNTER_NOT_USED;
 
 			return 0;
 		}
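
The put path above can be summarized as: decrement whichever side (kernel or user) made the request, and only release the countable once both counts reach zero. A condensed, illustrative helper, assuming the element type of group->regs[] is struct adreno_perfcount_register:

	/* Sketch: release one reference and free the slot when unused */
	static void example_perfcounter_release(
			struct adreno_perfcount_register *reg,
			unsigned int flags)
	{
		if ((flags & PERFCOUNTER_FLAG_KERNEL) && reg->kernelcount > 0)
			reg->kernelcount--;
		else if (reg->usercount > 0)
			reg->usercount--;

		if (reg->kernelcount == 0 && reg->usercount == 0)
			reg->countable = KGSL_PERFCOUNTER_NOT_USED;
	}
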
@@ -532,23 +587,9 @@
 
 static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
 {
-	irqreturn_t result;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
-	result = adreno_dev->gpudev->irq_handler(adreno_dev);
-
-	device->pwrctrl.irq_last = 1;
-	if (device->requested_state == KGSL_STATE_NONE) {
-		kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
-		queue_work(device->work_queue, &device->idle_check_ws);
-	}
-
-	/* Reset the time-out in our idle timer */
-	mod_timer_pending(&device->idle_timer,
-		jiffies + device->pwrctrl.interval_timeout);
-	mod_timer_pending(&device->hang_timer,
-		(jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
-	return result;
+	return adreno_dev->gpudev->irq_handler(adreno_dev);
 }
 
 static void adreno_cleanup_pt(struct kgsl_device *device,
@@ -563,6 +604,8 @@
 
 	kgsl_mmu_unmap(pagetable, &device->memstore);
 
+	kgsl_mmu_unmap(pagetable, &adreno_dev->profile.shared_buffer);
+
 	kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
 }
 
@@ -585,6 +628,11 @@
 	if (result)
 		goto unmap_memptrs_desc;
 
+	result = kgsl_mmu_map_global(pagetable,
+					&adreno_dev->profile.shared_buffer);
+	if (result)
+		goto unmap_profile_shared;
+
 	result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory);
 	if (result)
 		goto unmap_memstore_desc;
@@ -598,6 +646,9 @@
 				device->mmu.setstate_memory.size;
 	return result;
 
+unmap_profile_shared:
+	kgsl_mmu_unmap(pagetable, &adreno_dev->profile.shared_buffer);
+
 unmap_memstore_desc:
 	kgsl_mmu_unmap(pagetable, &device->memstore);
 
@@ -845,7 +896,7 @@
 		adreno_dev->dev.cff_dump_enable);
 }
 
-static void adreno_iommu_setstate(struct kgsl_device *device,
+static int adreno_iommu_setstate(struct kgsl_device *device,
 					unsigned int context_id,
 					uint32_t flags)
 {
@@ -858,22 +909,24 @@
 	struct kgsl_context *context;
 	struct adreno_context *adreno_ctx = NULL;
 	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	unsigned int result;
 
 	if (adreno_use_default_setstate(adreno_dev)) {
 		kgsl_mmu_device_setstate(&device->mmu, flags);
-		return;
+		return 0;
 	}
 	num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
 
 	context = kgsl_context_get(device, context_id);
 	if (context == NULL)
-		return;
+		return -EINVAL;
 
 	adreno_ctx = ADRENO_CONTEXT(context);
 
-	if (kgsl_mmu_enable_clk(&device->mmu,
-				KGSL_IOMMU_CONTEXT_USER))
-		return;
+	result = kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
+
+	if (result)
+		goto done;
 
 	pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
 				device->mmu.hwpagetable);
@@ -907,14 +960,24 @@
 	 * This returns the per context timestamp but we need to
 	 * use the global timestamp for iommu clock disablement
 	 */
-	adreno_ringbuffer_issuecmds(device, adreno_ctx, KGSL_CMD_FLAGS_PMODE,
-			&link[0], sizedwords);
+	result = adreno_ringbuffer_issuecmds(device, adreno_ctx,
+			KGSL_CMD_FLAGS_PMODE, &link[0], sizedwords);
 
-	kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts, true);
+	/*
+	 * On error disable the IOMMU clock right away otherwise turn it off
+	 * after the command has been retired
+	 */
+	if (result)
+		kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
+	else
+		kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts, true);
+
+done:
 	kgsl_context_put(context);
+	return result;
 }
 
-static void adreno_gpummu_setstate(struct kgsl_device *device,
+static int adreno_gpummu_setstate(struct kgsl_device *device,
 					unsigned int context_id,
 					uint32_t flags)
 {
@@ -925,6 +988,7 @@
 	unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
 	struct kgsl_context *context;
 	struct adreno_context *adreno_ctx = NULL;
+	int ret = 0;
 
 	/*
 	 * Fix target freeze issue by adding TLB flush for each submit
@@ -941,7 +1005,8 @@
 	if (!adreno_use_default_setstate(adreno_dev)) {
 		context = kgsl_context_get(device, context_id);
 		if (context == NULL)
-			return;
+			return -EINVAL;
+
 		adreno_ctx = ADRENO_CONTEXT(context);
 
 		if (flags & KGSL_MMUFLAGS_PTUPDATE) {
@@ -1016,7 +1081,7 @@
 			sizedwords += 2;
 		}
 
-		adreno_ringbuffer_issuecmds(device, adreno_ctx,
+		ret = adreno_ringbuffer_issuecmds(device, adreno_ctx,
 					KGSL_CMD_FLAGS_PMODE,
 					&link[0], sizedwords);
 
@@ -1024,9 +1089,11 @@
 	} else {
 		kgsl_mmu_device_setstate(&device->mmu, flags);
 	}
+
+	return ret;
 }
 
-static void adreno_setstate(struct kgsl_device *device,
+static int adreno_setstate(struct kgsl_device *device,
 			unsigned int context_id,
 			uint32_t flags)
 {
@@ -1035,6 +1102,8 @@
 		return adreno_gpummu_setstate(device, context_id, flags);
 	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
 		return adreno_iommu_setstate(device, context_id, flags);
+
+	return 0;
 }
 
 static unsigned int
@@ -1268,172 +1337,6 @@
 
 }
 
-static struct msm_dcvs_core_info *adreno_of_get_dcvs(struct device_node *parent)
-{
-	struct device_node *node, *child;
-	struct msm_dcvs_core_info *info = NULL;
-	int count = 0;
-	int ret = -EINVAL;
-
-	node = adreno_of_find_subnode(parent, "qcom,dcvs-core-info");
-	if (node == NULL)
-		return ERR_PTR(-EINVAL);
-
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-
-	if (info == NULL) {
-		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*info));
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	for_each_child_of_node(node, child)
-		count++;
-
-	info->power_param.num_freq = count;
-
-	info->freq_tbl = kzalloc(info->power_param.num_freq *
-			sizeof(struct msm_dcvs_freq_entry),
-			GFP_KERNEL);
-
-	if (info->freq_tbl == NULL) {
-		KGSL_CORE_ERR("kzalloc(%d) failed\n",
-			info->power_param.num_freq *
-			sizeof(struct msm_dcvs_freq_entry));
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	for_each_child_of_node(node, child) {
-		unsigned int index;
-
-		if (adreno_of_read_property(child, "reg", &index))
-			goto err;
-
-		if (index >= info->power_param.num_freq) {
-			KGSL_CORE_ERR("DCVS freq entry %d is out of range\n",
-				index);
-			continue;
-		}
-
-		if (adreno_of_read_property(child, "qcom,freq",
-			&info->freq_tbl[index].freq))
-			goto err;
-
-		if (adreno_of_read_property(child, "qcom,voltage",
-			&info->freq_tbl[index].voltage))
-			info->freq_tbl[index].voltage = 0;
-
-		if (adreno_of_read_property(child, "qcom,is_trans_level",
-			&info->freq_tbl[index].is_trans_level))
-			info->freq_tbl[index].is_trans_level = 0;
-
-		if (adreno_of_read_property(child, "qcom,active-energy-offset",
-			&info->freq_tbl[index].active_energy_offset))
-			info->freq_tbl[index].active_energy_offset = 0;
-
-		if (adreno_of_read_property(child, "qcom,leakage-energy-offset",
-			&info->freq_tbl[index].leakage_energy_offset))
-			info->freq_tbl[index].leakage_energy_offset = 0;
-	}
-
-	if (adreno_of_read_property(node, "qcom,num-cores", &info->num_cores))
-		goto err;
-
-	info->sensors = kzalloc(info->num_cores *
-			sizeof(int),
-			GFP_KERNEL);
-
-	for (count = 0; count < info->num_cores; count++) {
-		if (adreno_of_read_property(node, "qcom,sensors",
-			&(info->sensors[count])))
-			goto err;
-	}
-
-	if (adreno_of_read_property(node, "qcom,core-core-type",
-		&info->core_param.core_type))
-		goto err;
-
-	if (adreno_of_read_property(node, "qcom,algo-disable-pc-threshold",
-		&info->algo_param.disable_pc_threshold))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-em-win-size-min-us",
-		&info->algo_param.em_win_size_min_us))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-em-win-size-max-us",
-		&info->algo_param.em_win_size_max_us))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-em-max-util-pct",
-		&info->algo_param.em_max_util_pct))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-group-id",
-		&info->algo_param.group_id))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-max-freq-chg-time-us",
-		&info->algo_param.max_freq_chg_time_us))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-slack-mode-dynamic",
-		&info->algo_param.slack_mode_dynamic))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-slack-weight-thresh-pct",
-		&info->algo_param.slack_weight_thresh_pct))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-slack-time-min-us",
-		&info->algo_param.slack_time_min_us))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-slack-time-max-us",
-		&info->algo_param.slack_time_max_us))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-ss-win-size-min-us",
-		&info->algo_param.ss_win_size_min_us))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-ss-win-size-max-us",
-		&info->algo_param.ss_win_size_max_us))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-ss-util-pct",
-		&info->algo_param.ss_util_pct))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,algo-ss-no-corr-below-freq",
-		&info->algo_param.ss_no_corr_below_freq))
-		goto err;
-
-	if (adreno_of_read_property(node, "qcom,energy-active-coeff-a",
-		&info->energy_coeffs.active_coeff_a))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,energy-active-coeff-b",
-		&info->energy_coeffs.active_coeff_b))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,energy-active-coeff-c",
-		&info->energy_coeffs.active_coeff_c))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-a",
-		&info->energy_coeffs.leakage_coeff_a))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-b",
-		&info->energy_coeffs.leakage_coeff_b))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-c",
-		&info->energy_coeffs.leakage_coeff_c))
-		goto err;
-	if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-d",
-		&info->energy_coeffs.leakage_coeff_d))
-		goto err;
-
-	if (adreno_of_read_property(node, "qcom,power-current-temp",
-		&info->power_param.current_temp))
-		goto err;
-
-	return info;
-
-err:
-	if (info)
-		kfree(info->freq_tbl);
-
-	kfree(info);
-
-	return ERR_PTR(ret);
-}
-
 static int adreno_of_get_iommu(struct device_node *parent,
 	struct kgsl_device_platform_data *pdata)
 {
@@ -1575,12 +1478,6 @@
 		goto err;
 	}
 
-	pdata->core_info = adreno_of_get_dcvs(pdev->dev.of_node);
-	if (IS_ERR_OR_NULL(pdata->core_info)) {
-		ret = PTR_ERR(pdata->core_info);
-		goto err;
-	}
-
 	ret = adreno_of_get_iommu(pdev->dev.of_node, pdata);
 	if (ret)
 		goto err;
@@ -1593,10 +1490,6 @@
 
 err:
 	if (pdata) {
-		if (pdata->core_info)
-			kfree(pdata->core_info->freq_tbl);
-		kfree(pdata->core_info);
-
 		if (pdata->iommu_data)
 			kfree(pdata->iommu_data->iommu_ctxs);
 
@@ -1686,7 +1579,12 @@
 	if (status)
 		goto error_close_rb;
 
+	status = adreno_dispatcher_init(adreno_dev);
+	if (status)
+		goto error_close_device;
+
 	adreno_debugfs_init(device);
+	adreno_profile_init(device);
 
 	adreno_ft_init_sysfs(device);
 
@@ -1700,6 +1598,8 @@
 
 	return 0;
 
+error_close_device:
+	kgsl_device_platform_remove(device);
 error_close_rb:
 	adreno_ringbuffer_close(&adreno_dev->ringbuffer);
 error:
@@ -1717,10 +1617,12 @@
 	adreno_dev = ADRENO_DEVICE(device);
 
 	adreno_coresight_remove(pdev);
+	adreno_profile_close(device);
 
 	kgsl_pwrscale_detach_policy(device);
 	kgsl_pwrscale_close(device);
 
+	adreno_dispatcher_close(adreno_dev);
 	adreno_ringbuffer_close(&adreno_dev->ringbuffer);
 	kgsl_device_platform_remove(device);
 
@@ -1732,8 +1634,7 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	int i;
 
-	if (KGSL_STATE_DUMP_AND_FT != device->state)
-		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+	kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
 
 	/* Power up the device */
 	kgsl_pwrctrl_enable(device);
@@ -1803,13 +1704,15 @@
 
 	kgsl_cffdump_open(device);
 
-	if (KGSL_STATE_DUMP_AND_FT != device->state)
-		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+	kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
 
 	regulator_left_on = (regulator_is_enabled(device->pwrctrl.gpu_reg) ||
 				(device->pwrctrl.gpu_cx &&
 				regulator_is_enabled(device->pwrctrl.gpu_cx)));
 
+	/* Clear any GPU faults that might have been left over */
+	adreno_set_gpu_fault(adreno_dev, 0);
+
 	/* Power up the device */
 	kgsl_pwrctrl_enable(device);
 
@@ -1855,11 +1758,11 @@
 	if (status)
 		goto error_irq_off;
 
-	mod_timer(&device->hang_timer,
-		(jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
-
 	adreno_perfcounter_start(adreno_dev);
 
+	/* Start the dispatcher */
+	adreno_dispatcher_start(adreno_dev);
+
 	device->reset_counter++;
 
 	return 0;
@@ -1889,6 +1792,7 @@
 
 	adreno_dev->drawctxt_active = NULL;
 
+	adreno_dispatcher_stop(adreno_dev);
 	adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
 
 	kgsl_mmu_stop(&device->mmu);
@@ -1896,7 +1800,6 @@
 	device->ftbl->irqctrl(device, 0);
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
 	del_timer_sync(&device->idle_timer);
-	del_timer_sync(&device->hang_timer);
 
 	adreno_ocmem_gmem_free(adreno_dev);
 
@@ -1908,918 +1811,48 @@
 	return 0;
 }
 
-/*
- * Set the reset status of all contexts to
- * INNOCENT_CONTEXT_RESET_EXT except for the bad context
- * since thats the guilty party, if fault tolerance failed then
- * mark all as guilty
- */
-
-static int _mark_context_status(int id, void *ptr, void *data)
-{
-	unsigned int ft_status = *((unsigned int *) data);
-	struct kgsl_context *context = ptr;
-	struct adreno_context *adreno_context = ADRENO_CONTEXT(context);
-
-	if (ft_status) {
-		context->reset_status =
-				KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
-		adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
-	} else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
-		context->reset_status) {
-		if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG |
-			CTXT_FLAGS_GPU_HANG_FT))
-			context->reset_status =
-			KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
-		else
-			context->reset_status =
-			KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
-	}
-
-	return 0;
-}
-
-static void adreno_mark_context_status(struct kgsl_device *device,
-					int ft_status)
-{
-	/* Mark the status for all the contexts in the device */
-
-	read_lock(&device->context_lock);
-	idr_for_each(&device->context_idr, _mark_context_status, &ft_status);
-	read_unlock(&device->context_lock);
-}
-
-/*
- * For hung contexts set the current memstore value to the most recent issued
- * timestamp - this resets the status and lets the system continue on
- */
-
-static int _set_max_ts(int id, void *ptr, void *data)
-{
-	struct kgsl_device *device = data;
-	struct kgsl_context *context = ptr;
-	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
-
-	if (drawctxt && drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
-		kgsl_sharedmem_writel(device, &device->memstore,
-			KGSL_MEMSTORE_OFFSET(context->id,
-			soptimestamp), drawctxt->timestamp);
-		kgsl_sharedmem_writel(device, &device->memstore,
-			KGSL_MEMSTORE_OFFSET(context->id,
-			eoptimestamp), drawctxt->timestamp);
-	}
-
-	return 0;
-}
-
-static void adreno_set_max_ts_for_bad_ctxs(struct kgsl_device *device)
-{
-	read_lock(&device->context_lock);
-	idr_for_each(&device->context_idr, _set_max_ts, device);
-	read_unlock(&device->context_lock);
-}
-
-static void adreno_destroy_ft_data(struct adreno_ft_data *ft_data)
-{
-	vfree(ft_data->rb_buffer);
-	vfree(ft_data->bad_rb_buffer);
-	vfree(ft_data->good_rb_buffer);
-}
-
-static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
-					unsigned int *ptr,
-					bool inc)
-{
-	int status = -EINVAL;
-	unsigned int val1;
-	unsigned int size = rb->buffer_desc.size;
-	unsigned int start_ptr = *ptr;
-
-	while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
-		if (inc)
-			start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
-									size);
-		else
-			start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
-									size);
-		kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
-		/* Ensure above read is finished before next read */
-		rmb();
-		if (KGSL_CMD_IDENTIFIER == val1) {
-			if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
-				start_ptr = adreno_ringbuffer_dec_wrapped(
-							start_ptr, size);
-				*ptr = start_ptr;
-				status = 0;
-				break;
-		}
-	}
-	return status;
-}
-
-static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
-					unsigned int *rb_rptr,
-					unsigned int global_eop,
-					bool inc)
-{
-	int status = -EINVAL;
-	unsigned int temp_rb_rptr = *rb_rptr;
-	unsigned int size = rb->buffer_desc.size;
-	unsigned int val[3];
-	int i = 0;
-	bool check = false;
-
-	if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
-		return status;
-
-	do {
-		/*
-		 * when decrementing we need to decrement first and
-		 * then read make sure we cover all the data
-		 */
-		if (!inc)
-			temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
-					temp_rb_rptr, size);
-		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
-					temp_rb_rptr);
-		/* Ensure above read is finished before next read */
-		rmb();
-
-		if (check && ((inc && val[i] == global_eop) ||
-			(!inc && (val[i] ==
-			cp_type3_packet(CP_MEM_WRITE, 2) ||
-			val[i] == CACHE_FLUSH_TS)))) {
-			/* decrement i, i.e i = (i - 1 + 3) % 3 if
-			 * we are going forward, else increment i */
-			i = (i + 2) % 3;
-			if (val[i] == rb->device->memstore.gpuaddr +
-				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-						eoptimestamp)) {
-				int j = ((i + 2) % 3);
-				if ((inc && (val[j] == CACHE_FLUSH_TS ||
-						val[j] == cp_type3_packet(
-							CP_MEM_WRITE, 2))) ||
-					(!inc && val[j] == global_eop)) {
-						/* Found the global eop */
-						status = 0;
-						break;
-				}
-			}
-			/* if no match found then increment i again
-			 * since we decremented before matching */
-			i = (i + 1) % 3;
-		}
-		if (inc)
-			temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
-						temp_rb_rptr, size);
-
-		i = (i + 1) % 3;
-		if (2 == i)
-			check = true;
-	} while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
-	/* temp_rb_rptr points to the command stream after global eop,
-	 * move backward till the start of command sequence */
-	if (!status) {
-		status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
-		if (!status) {
-			*rb_rptr = temp_rb_rptr;
-			KGSL_FT_INFO(rb->device,
-			"Offset of cmd sequence after eop timestamp: 0x%x\n",
-			temp_rb_rptr / sizeof(unsigned int));
-		}
-	}
-	if (status)
-		KGSL_FT_ERR(rb->device,
-		"Failed to find the command sequence after eop timestamp %x\n",
-		global_eop);
-	return status;
-}
-
-static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
-				unsigned int *rb_rptr,
-				unsigned int ib1)
-{
-	int status = -EINVAL;
-	unsigned int temp_rb_rptr = *rb_rptr;
-	unsigned int size = rb->buffer_desc.size;
-	unsigned int val[2];
-	int i = 0;
-	bool check = false;
-	bool ctx_switch = false;
-
-	while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
-		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
-		/* Ensure above read is finished before next read */
-		rmb();
-
-		if (check && val[i] == ib1) {
-			/* decrement i, i.e i = (i - 1 + 2) % 2 */
-			i = (i + 1) % 2;
-			if (adreno_cmd_is_ib(val[i])) {
-				/* go till start of command sequence */
-				status = _find_start_of_cmd_seq(rb,
-						&temp_rb_rptr, false);
-
-				KGSL_FT_INFO(rb->device,
-				"Found the hanging IB at offset 0x%x\n",
-				temp_rb_rptr / sizeof(unsigned int));
-				break;
-			}
-			/* if no match the increment i since we decremented
-			 * before checking */
-			i = (i + 1) % 2;
-		}
-		/* Make sure you do not encounter a context switch twice, we can
-		 * encounter it once for the bad context as the start of search
-		 * can point to the context switch */
-		if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
-			if (ctx_switch) {
-				KGSL_FT_ERR(rb->device,
-				"Context switch encountered before bad "
-				"IB found\n");
-				break;
-			}
-			ctx_switch = true;
-		}
-		i = (i + 1) % 2;
-		if (1 == i)
-			check = true;
-		temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
-								size);
-	}
-	if  (!status)
-		*rb_rptr = temp_rb_rptr;
-	return status;
-}
-
-static void adreno_setup_ft_data(struct kgsl_device *device,
-					struct adreno_ft_data *ft_data)
-{
-	int ret = 0;
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-	struct kgsl_context *context;
-	struct adreno_context *adreno_context;
-	unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
-
-	memset(ft_data, 0, sizeof(*ft_data));
-	ft_data->start_of_replay_cmds = 0xFFFFFFFF;
-	ft_data->replay_for_snapshot = 0xFFFFFFFF;
-
-	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ft_data->ib1);
-
-	kgsl_sharedmem_readl(&device->memstore, &ft_data->context_id,
-			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-			current_context));
-
-	kgsl_sharedmem_readl(&device->memstore,
-			&ft_data->global_eop,
-			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-			eoptimestamp));
-
-	/* Ensure context id and global eop ts read complete */
-	rmb();
-
-	ft_data->rb_buffer = vmalloc(rb->buffer_desc.size);
-	if (!ft_data->rb_buffer) {
-		KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
-				rb->buffer_desc.size);
-		return;
-	}
-
-	ft_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
-	if (!ft_data->bad_rb_buffer) {
-		KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
-				rb->buffer_desc.size);
-		return;
-	}
-
-	ft_data->good_rb_buffer = vmalloc(rb->buffer_desc.size);
-	if (!ft_data->good_rb_buffer) {
-		KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
-				rb->buffer_desc.size);
-		return;
-	}
-	ft_data->status = 0;
-
-	/* find the start of bad command sequence in rb */
-	context = kgsl_context_get(device, ft_data->context_id);
-
-	ft_data->ft_policy = adreno_dev->ft_policy;
-
-	if (!ft_data->ft_policy)
-		ft_data->ft_policy = KGSL_FT_DEFAULT_POLICY;
-
-	/* Look for the command stream that is right after the global eop */
-	ret = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
-					ft_data->global_eop + 1, false);
-	if (ret) {
-		ft_data->ft_policy |= KGSL_FT_TEMP_DISABLE;
-		goto done;
-	} else {
-		ft_data->start_of_replay_cmds = rb_rptr;
-		ft_data->ft_policy &= ~KGSL_FT_TEMP_DISABLE;
-	}
-
-	if (context) {
-		adreno_context = ADRENO_CONTEXT(context);
-		if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
-			if (ft_data->ib1) {
-				ret = _find_hanging_ib_sequence(rb,
-						&rb_rptr, ft_data->ib1);
-				if (ret) {
-					KGSL_FT_ERR(device,
-					"Start not found for replay IB seq\n");
-					goto done;
-				}
-				ft_data->start_of_replay_cmds = rb_rptr;
-				ft_data->replay_for_snapshot = rb_rptr;
-			}
-		}
-	}
-
-done:
-	kgsl_context_put(context);
-}
-
-static int
-_adreno_check_long_ib(struct kgsl_device *device)
-{
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	unsigned int curr_global_ts = 0;
-
-	/* check if the global ts is still the same */
-	kgsl_sharedmem_readl(&device->memstore,
-			&curr_global_ts,
-			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-			eoptimestamp));
-	/* Ensure above read is finished before long ib check */
-	rmb();
-
-	/* Mark long ib as handled */
-	adreno_dev->long_ib = 0;
-
-	if (curr_global_ts == adreno_dev->long_ib_ts) {
-		KGSL_FT_ERR(device,
-			"IB ran too long, invalidate ctxt\n");
-		return 1;
-	} else {
-		/* Do nothing GPU has gone ahead */
-		KGSL_FT_INFO(device, "false long ib detection return\n");
-		return 0;
-	}
-}
-
 /**
- * adreno_soft_reset() -  Do a soft reset of the GPU hardware
- * @device: KGSL device to soft reset
+ * adreno_reset() - Helper function to reset the GPU
+ * @device: Pointer to the KGSL device structure for the GPU
  *
- * "soft reset" the GPU hardware - this is a fast path GPU reset
- * The GPU hardware is reset but we never pull power so we can skip
- * a lot of the standard adreno_stop/adreno_start sequence
+ * Try to reset the GPU to recover from a fault.  First, try to do a low latency
+ * soft reset.  If the soft reset fails for some reason, then bring out the big
+ * guns and toggle the footswitch.
  */
-int adreno_soft_reset(struct kgsl_device *device)
-{
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	int ret;
-
-	/* If the jump table index is 0 soft reset is not supported */
-	if ((!adreno_dev->pm4_jt_idx) || (!adreno_dev->gpudev->soft_reset)) {
-		dev_WARN_ONCE(device->dev, 1, "Soft reset not supported");
-		return -EINVAL;
-	}
-
-	if (adreno_dev->drawctxt_active)
-		kgsl_context_put(&adreno_dev->drawctxt_active->base);
-
-	adreno_dev->drawctxt_active = NULL;
-
-	/* Stop the ringbuffer */
-	adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
-
-	/* Delete the idle timer */
-	del_timer_sync(&device->idle_timer);
-
-	/* Make sure we are totally awake */
-	kgsl_pwrctrl_enable(device);
-
-	/* Reset the GPU */
-	adreno_dev->gpudev->soft_reset(adreno_dev);
-
-	/* Reinitialize the GPU */
-	adreno_dev->gpudev->start(adreno_dev);
-
-	/* Enable IRQ */
-	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
-	device->ftbl->irqctrl(device, 1);
-
-	/*
-	 * Restart the ringbuffer - we can go down the warm start path because
-	 * power was never yanked
-	 */
-	ret = adreno_ringbuffer_warm_start(&adreno_dev->ringbuffer);
-	if (ret)
-		return ret;
-
-	device->reset_counter++;
-
-	return 0;
-}
-
-static int
-_adreno_ft_restart_device(struct kgsl_device *device,
-			   struct kgsl_context *context)
-{
-	/* If device soft reset fails try hard reset */
-	if (adreno_soft_reset(device))
-		KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
-	else
-		/* Soft reset is successful */
-		goto reset_done;
-
-	/* restart device */
-	if (adreno_stop(device)) {
-		KGSL_FT_ERR(device, "Device stop failed\n");
-		return 1;
-	}
-
-	if (adreno_init(device)) {
-		KGSL_FT_ERR(device, "Device init failed\n");
-		return 1;
-	}
-
-	if (adreno_start(device)) {
-		KGSL_FT_ERR(device, "Device start failed\n");
-		return 1;
-	}
-
-reset_done:
-	if (context)
-		kgsl_mmu_setstate(&device->mmu, context->pagetable,
-				KGSL_MEMSTORE_GLOBAL);
-
-	/* If iommu is used then we need to make sure that the iommu clocks
-	 * are on since there could be commands in pipeline that touch iommu */
-	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
-		if (kgsl_mmu_enable_clk(&device->mmu,
-				KGSL_IOMMU_CONTEXT_USER))
-			return 1;
-	}
-
-	return 0;
-}
-
-static inline void
-_adreno_debug_ft_info(struct kgsl_device *device,
-			struct adreno_ft_data *ft_data)
-{
-
-	/*
-	 * Dumping rb is a very useful tool to debug FT.
-	 * It will tell us if we are extracting the rb correctly
-	 * NOP'ing the right IB, skipping the EOF correctly etc.
-	 */
-	if (device->ft_log >= 7)  {
-
-		/* Print fault tolerance data here */
-		KGSL_FT_INFO(device, "Temp RB buffer size 0x%X\n",
-			ft_data->rb_size);
-		adreno_dump_rb(device, ft_data->rb_buffer,
-			ft_data->rb_size<<2, 0, ft_data->rb_size);
-
-		KGSL_FT_INFO(device, "Bad RB buffer size 0x%X\n",
-			ft_data->bad_rb_size);
-		adreno_dump_rb(device, ft_data->bad_rb_buffer,
-			ft_data->bad_rb_size<<2, 0, ft_data->bad_rb_size);
-
-		KGSL_FT_INFO(device, "Good RB buffer size 0x%X\n",
-			ft_data->good_rb_size);
-		adreno_dump_rb(device, ft_data->good_rb_buffer,
-			ft_data->good_rb_size<<2, 0, ft_data->good_rb_size);
-
-	}
-}
-
-static int
-_adreno_ft_resubmit_rb(struct kgsl_device *device,
-			struct adreno_ringbuffer *rb,
-			struct kgsl_context *context,
-			struct adreno_ft_data *ft_data,
-			unsigned int *buff, unsigned int size)
-{
-	unsigned int ret = 0;
-	unsigned int retry_num = 0;
-
-	_adreno_debug_ft_info(device, ft_data);
-
-	do {
-		ret = _adreno_ft_restart_device(device, context);
-		if (ret == 0)
-			break;
-		/*
-		 * If device restart fails sleep for 20ms before
-		 * attempting restart. This allows GPU HW to settle
-		 * and improve the chances of next restart to be
-		 * successful.
-		 */
-		msleep(20);
-		KGSL_FT_ERR(device, "Retry device restart %d\n", retry_num);
-		retry_num++;
-	} while (retry_num < 4);
-
-	if (ret) {
-		KGSL_FT_ERR(device, "Device restart failed\n");
-		BUG_ON(1);
-		goto done;
-	}
-
-	if (size) {
-
-		/* submit commands and wait for them to pass */
-		adreno_ringbuffer_restore(rb, buff, size);
-
-		ret = adreno_idle(device);
-	}
-
-done:
-	return ret;
-}
-
-
-static int
-_adreno_ft(struct kgsl_device *device,
-			struct adreno_ft_data *ft_data)
-{
-	int ret = 0, i;
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-	struct kgsl_context *context;
-	struct adreno_context *adreno_context = NULL;
-	struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
-	unsigned int long_ib = 0;
-	static int no_context_ft;
-	struct kgsl_mmu *mmu = &device->mmu;
-
-	context = kgsl_context_get(device, ft_data->context_id);
-
-	if (context == NULL) {
-		KGSL_FT_ERR(device, "Last context unknown id:%d\n",
-			ft_data->context_id);
-		if (no_context_ft) {
-			/*
-			 * If 2 consecutive no context ft occurred then
-			 * just reset GPU
-			 */
-			no_context_ft = 0;
-			goto play_good_cmds;
-		}
-	} else {
-		no_context_ft = 0;
-		adreno_context = ADRENO_CONTEXT(context);
-		adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
-		/*
-		 * set the invalid ts flag to 0 for this context since we have
-		 * detected a hang for it
-		 */
-		context->wait_on_invalid_ts = false;
-
-		if (!(adreno_context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
-			ft_data->status = 1;
-			KGSL_FT_ERR(device, "Fault tolerance not supported\n");
-			goto play_good_cmds;
-		}
-
-		/*
-		 *  This flag will be set by userspace for contexts
-		 *  that do not want to be fault tolerant (ex: OPENCL)
-		 */
-		if (adreno_context->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE) {
-			ft_data->status = 1;
-			KGSL_FT_ERR(device,
-			"No FT set for this context play good cmds\n");
-			goto play_good_cmds;
-		}
-
-	}
-
-	/* Check if we detected a long running IB, if false return */
-	if ((adreno_context) && (adreno_dev->long_ib)) {
-		long_ib = _adreno_check_long_ib(device);
-		if (!long_ib) {
-			adreno_context->flags &= ~CTXT_FLAGS_GPU_HANG;
-			return 0;
-		}
-	}
-
-	/*
-	 * Extract valid contents from rb which can still be executed after
-	 * hang
-	 */
-	adreno_ringbuffer_extract(rb, ft_data);
-
-	/* If long IB detected do not attempt replay of bad cmds */
-	if (long_ib) {
-		ft_data->status = 1;
-		_adreno_debug_ft_info(device, ft_data);
-		goto play_good_cmds;
-	}
-
-	if ((ft_data->ft_policy & KGSL_FT_DISABLE) ||
-		(ft_data->ft_policy & KGSL_FT_TEMP_DISABLE)) {
-		KGSL_FT_ERR(device, "NO FT policy play only good cmds\n");
-		ft_data->status = 1;
-		goto play_good_cmds;
-	}
-
-	/* Do not try to replay if hang is due to a pagefault */
-	if (context && test_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv)) {
-		/* Resume MMU */
-		mmu->mmu_ops->mmu_pagefault_resume(mmu);
-		if ((ft_data->context_id == context->id) &&
-			(ft_data->global_eop == context->pagefault_ts)) {
-			ft_data->ft_policy &= ~KGSL_FT_REPLAY;
-			KGSL_FT_ERR(device, "MMU fault skipping replay\n");
-		}
-		clear_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv);
-	}
-
-	if (ft_data->ft_policy & KGSL_FT_REPLAY) {
-		ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
-				ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
-		if (ret) {
-			KGSL_FT_ERR(device, "Replay status: 1\n");
-			ft_data->status = 1;
-		} else
-			goto play_good_cmds;
-	}
-
-	if (ft_data->ft_policy & KGSL_FT_SKIPIB) {
-		for (i = 0; i < ft_data->bad_rb_size; i++) {
-			if ((ft_data->bad_rb_buffer[i] ==
-					CP_HDR_INDIRECT_BUFFER_PFD) &&
-				(ft_data->bad_rb_buffer[i+1] == ft_data->ib1)) {
-
-				ft_data->bad_rb_buffer[i] = cp_nop_packet(2);
-				ft_data->bad_rb_buffer[i+1] =
-							KGSL_NOP_IB_IDENTIFIER;
-				ft_data->bad_rb_buffer[i+2] =
-							KGSL_NOP_IB_IDENTIFIER;
-				break;
-			}
-		}
-
-		if ((i == (ft_data->bad_rb_size)) || (!ft_data->ib1)) {
-			KGSL_FT_ERR(device, "Bad IB to NOP not found\n");
-			ft_data->status = 1;
-			goto play_good_cmds;
-		}
-
-		ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
-				ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
-		if (ret) {
-			KGSL_FT_ERR(device, "NOP faulty IB status: 1\n");
-			ft_data->status = 1;
-		} else {
-			ft_data->status = 0;
-			goto play_good_cmds;
-		}
-	}
-
-	if (ft_data->ft_policy & KGSL_FT_SKIPFRAME) {
-		for (i = 0; i < ft_data->bad_rb_size; i++) {
-			if (ft_data->bad_rb_buffer[i] ==
-					KGSL_END_OF_FRAME_IDENTIFIER) {
-				ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
-				break;
-			}
-		}
-
-		/* EOF not found in RB, discard till EOF in
-		   next IB submission */
-		if (adreno_context && (i == ft_data->bad_rb_size)) {
-			adreno_context->flags |= CTXT_FLAGS_SKIP_EOF;
-			KGSL_FT_INFO(device,
-			"EOF not found in RB, skip next issueib till EOF\n");
-			ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
-		}
-
-		ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
-				ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
-		if (ret) {
-			KGSL_FT_ERR(device, "Skip EOF status: 1\n");
-			ft_data->status = 1;
-		} else {
-			ft_data->status = 0;
-			goto play_good_cmds;
-		}
-	}
-
-play_good_cmds:
-
-	if (ft_data->status)
-		KGSL_FT_ERR(device, "Bad context commands failed\n");
-	else {
-		KGSL_FT_INFO(device, "Bad context commands success\n");
-
-		if (adreno_context) {
-			adreno_context->flags = (adreno_context->flags &
-				~CTXT_FLAGS_GPU_HANG) | CTXT_FLAGS_GPU_HANG_FT;
-		}
-
-		if (last_active_ctx)
-			_kgsl_context_get(&last_active_ctx->base);
-
-		adreno_dev->drawctxt_active = last_active_ctx;
-	}
-
-	ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
-			ft_data->good_rb_buffer, ft_data->good_rb_size);
-
-	if (ret) {
-		/*
-		 * If we fail here we can try to invalidate another
-		 * context and try fault tolerance again, although
-		 * we will only try ft with no context once to avoid
-		 * going into continuous loop of trying ft with no context
-		 */
-		if (!context)
-			no_context_ft = 1;
-		ret = -EAGAIN;
-		KGSL_FT_ERR(device, "Playing good commands unsuccessful\n");
-		goto done;
-	} else
-		KGSL_FT_INFO(device, "Playing good commands successful\n");
-
-	/* ringbuffer now has data from the last valid context id,
-	 * so restore the active_ctx to the last valid context */
-	if (ft_data->last_valid_ctx_id) {
-		struct kgsl_context *last_ctx = kgsl_context_get(device,
-			ft_data->last_valid_ctx_id);
-
-		adreno_dev->drawctxt_active = ADRENO_CONTEXT(last_ctx);
-	}
-
-done:
-	/* Turn off iommu clocks */
-	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
-		kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
-
-	kgsl_context_put(context);
-	return ret;
-}
-
-static int
-adreno_ft(struct kgsl_device *device,
-			struct adreno_ft_data *ft_data)
+int adreno_reset(struct kgsl_device *device)
 {
 	int ret = 0;
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	/* Try soft reset first */
+	if (adreno_soft_reset(device) != 0) {
+		KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
+
+		/* If it failed, then pull the power */
+		ret = adreno_stop(device);
+		if (ret)
+			return ret;
+
+		ret = adreno_start(device);
+
+		if (ret)
+			return ret;
+	}
 
 	/*
-	 * If GPU FT is turned off do not run FT.
-	 * If GPU stall detection is suspected to be false,
-	 * we can use this option to confirm stall detection.
+	 * If active_cnt is non-zero then the system was active before
+	 * going into a reset - put it back in that state
 	 */
-	if (ft_data->ft_policy & KGSL_FT_OFF) {
-		KGSL_FT_ERR(device, "GPU FT turned off\n");
-		return 0;
-	}
 
-	KGSL_FT_INFO(device,
-	"Start Parameters: IB1: 0x%X, "
-	"Bad context_id: %u, global_eop: 0x%x\n",
-	ft_data->ib1, ft_data->context_id, ft_data->global_eop);
+	if (atomic_read(&device->active_cnt))
+		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
 
-	KGSL_FT_INFO(device, "Last issued global timestamp: %x\n",
-			rb->global_ts);
+	/* Set the page table back to the default page table */
+	kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+			KGSL_MEMSTORE_GLOBAL);
 
-	/* We may need to replay commands multiple times based on whether
-	 * multiple contexts hang the GPU */
-	while (true) {
-
-		ret = _adreno_ft(device, ft_data);
-
-		if (-EAGAIN == ret) {
-			/* setup new fault tolerance parameters and retry, this
-			 * means more than 1 contexts are causing hang */
-			adreno_destroy_ft_data(ft_data);
-			adreno_setup_ft_data(device, ft_data);
-			KGSL_FT_INFO(device,
-			"Retry. Parameters: "
-			"IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
-			ft_data->ib1, ft_data->context_id,
-			ft_data->global_eop);
-		} else {
-			break;
-		}
-	}
-
-	if (ret)
-		goto done;
-
-	/* Restore correct states after fault tolerance */
-	if (adreno_dev->drawctxt_active)
-		device->mmu.hwpagetable =
-			adreno_dev->drawctxt_active->base.pagetable;
-	else
-		device->mmu.hwpagetable = device->mmu.defaultpagetable;
-	kgsl_sharedmem_writel(device, &device->memstore,
-			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-			eoptimestamp), rb->global_ts);
-
-	/* switch to NULL ctxt */
-	if (adreno_dev->drawctxt_active != NULL)
-		adreno_drawctxt_switch(adreno_dev, NULL, 0);
-
-done:
-	adreno_set_max_ts_for_bad_ctxs(device);
-	adreno_mark_context_status(device, ret);
-	KGSL_FT_ERR(device, "policy 0x%X status 0x%x\n",
-			ft_data->ft_policy, ret);
 	return ret;
 }
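
A plausible call site for the new helper; the surrounding fault-handling path is outside this hunk, so the function below is purely a sketch and assumes the caller already holds device->mutex:

	/* Sketch: attempt recovery and log a failure */
	static int example_recover(struct kgsl_device *device)
	{
		int ret = adreno_reset(device);

		if (ret)
			KGSL_DRV_ERR(device, "device reset failed: %d\n", ret);

		return ret;
	}
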
 
-int
-adreno_dump_and_exec_ft(struct kgsl_device *device)
-{
-	int result = -ETIMEDOUT;
-	struct adreno_ft_data ft_data;
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-	unsigned int curr_pwrlevel;
-
-	if (device->state == KGSL_STATE_HUNG)
-		goto done;
-	if (device->state == KGSL_STATE_DUMP_AND_FT) {
-		mutex_unlock(&device->mutex);
-		wait_for_completion(&device->ft_gate);
-		mutex_lock(&device->mutex);
-		if (device->state != KGSL_STATE_HUNG)
-			result = 0;
-	} else {
-		/*
-		 * While fault tolerance is happening we do not want the
-		 * idle_timer to fire and attempt to change any device state
-		 */
-		del_timer_sync(&device->idle_timer);
-
-		kgsl_pwrctrl_set_state(device, KGSL_STATE_DUMP_AND_FT);
-		INIT_COMPLETION(device->ft_gate);
-		/* Detected a hang */
-
-		kgsl_cffdump_hang(device);
-		/* Run fault tolerance at max power level */
-		curr_pwrlevel = pwr->active_pwrlevel;
-		kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
-
-		/* Get the fault tolerance data as soon as hang is detected */
-		adreno_setup_ft_data(device, &ft_data);
-
-		/*
-		 * If long ib is detected, do not attempt postmortem or
-		 * snapshot, if GPU is still executing commands
-		 * we will get errors
-		 */
-		if (!adreno_dev->long_ib) {
-			/*
-			 * Trigger an automatic dump of the state to
-			 * the console
-			 */
-			kgsl_postmortem_dump(device, 0);
-
-			/*
-			* Make a GPU snapshot.  For now, do it after the
-			* PM dump so we can at least be sure the PM dump
-			* will work as it always has
-			*/
-			kgsl_device_snapshot(device, 1);
-		}
-
-		result = adreno_ft(device, &ft_data);
-		adreno_destroy_ft_data(&ft_data);
-
-		/* restore power level */
-		kgsl_pwrctrl_pwrlevel_change(device, curr_pwrlevel);
-
-		if (result) {
-			kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
-		} else {
-			kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
-			mod_timer(&device->hang_timer,
-				(jiffies +
-				msecs_to_jiffies(KGSL_TIMEOUT_PART)));
-		}
-		complete_all(&device->ft_gate);
-	}
-done:
-	return result;
-}
-EXPORT_SYMBOL(adreno_dump_and_exec_ft);
-
 /**
  * _ft_sysfs_store() -  Common routine to write to FT sysfs files
  * @buf: value to write
@@ -3217,140 +2250,181 @@
 	return status;
 }
 
-static int adreno_ringbuffer_drain(struct kgsl_device *device,
-	unsigned int *regs)
+/**
+ * adreno_hw_isidle() - Check if the GPU core is idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Return true if the RBBM status register for the GPU type indicates that the
+ * hardware is idle
+ */
+static bool adreno_hw_isidle(struct kgsl_device *device)
+{
+	unsigned int reg_rbbm_status;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	/* Don't consider ourselves idle if there is an IRQ pending */
+	if (adreno_dev->gpudev->irq_pending(adreno_dev))
+		return false;
+
+	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
+		&reg_rbbm_status);
+
+	if (adreno_is_a2xx(adreno_dev)) {
+		if (reg_rbbm_status == 0x110)
+			return true;
+	} else if (adreno_is_a3xx(adreno_dev)) {
+		if (!(reg_rbbm_status & 0x80000000))
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * adreno_soft_reset() -  Do a soft reset of the GPU hardware
+ * @device: KGSL device to soft reset
+ *
+ * "soft reset" the GPU hardware - this is a fast path GPU reset
+ * The GPU hardware is reset but we never pull power so we can skip
+ * a lot of the standard adreno_stop/adreno_start sequence
+ */
+int adreno_soft_reset(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-	unsigned long wait = jiffies;
-	unsigned long timeout = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
-	unsigned int rptr;
+	int ret;
 
-	do {
-		/*
-		 * Wait is "jiffies" first time in the loop to start
-		 * GPU stall detection immediately.
-		 */
-		if (time_after(jiffies, wait)) {
-			/* Check to see if the core is hung */
-			if (adreno_ft_detect(device, regs))
-				return -ETIMEDOUT;
+	/* If the jump table index is 0 soft reset is not supported */
+	if ((!adreno_dev->pm4_jt_idx) || (!adreno_dev->gpudev->soft_reset)) {
+		dev_WARN_ONCE(device->dev, 1, "Soft reset not supported");
+		return -EINVAL;
+	}
 
-			wait = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
-		}
-		rptr = adreno_get_rptr(rb);
-		if (time_after(jiffies, timeout)) {
-			KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n",
-				rptr, rb->wptr);
-			return -ETIMEDOUT;
-		}
-	} while (rptr != rb->wptr);
+	if (adreno_dev->drawctxt_active)
+		kgsl_context_put(&adreno_dev->drawctxt_active->base);
+
+	adreno_dev->drawctxt_active = NULL;
+
+	/* Stop the ringbuffer */
+	adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
+
+	if (kgsl_pwrctrl_isenabled(device))
+		device->ftbl->irqctrl(device, 0);
+
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+	adreno_set_gpu_fault(adreno_dev, 0);
+
+	/* Delete the idle timer */
+	del_timer_sync(&device->idle_timer);
+
+	/* Make sure we are totally awake */
+	kgsl_pwrctrl_enable(device);
+
+	/* Reset the GPU */
+	adreno_dev->gpudev->soft_reset(adreno_dev);
+
+	/* Reinitialize the GPU */
+	adreno_dev->gpudev->start(adreno_dev);
+
+	/* Enable IRQ */
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+	device->ftbl->irqctrl(device, 1);
+
+	/*
+	 * Restart the ringbuffer - we can go down the warm start path because
+	 * power was never yanked
+	 */
+	ret = adreno_ringbuffer_warm_start(&adreno_dev->ringbuffer);
+	if (ret)
+		return ret;
+
+	device->reset_counter++;
 
 	return 0;
 }
 
-/* Caller must hold the device mutex. */
+/**
+ * adreno_isidle() - return true if the GPU hardware is idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Return true if the GPU hardware is idle and there are no commands pending in
+ * the ringbuffer
+ */
+static bool adreno_isidle(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int rptr;
+
+	if (!kgsl_pwrctrl_isenabled(device))
+		return true;
+
+	rptr = adreno_get_rptr(&adreno_dev->ringbuffer);
+
+	if (rptr == adreno_dev->ringbuffer.wptr)
+		return adreno_hw_isidle(device);
+
+	return false;
+}
+
+/**
+ * adreno_idle() - wait for the GPU hardware to go idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
+ */
+
 int adreno_idle(struct kgsl_device *device)
 {
-	unsigned long wait_time;
-	unsigned long wait_time_part;
-	unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned long wait = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
 
-	memset(prev_reg_val, 0, sizeof(prev_reg_val));
+	/*
+	 * Make sure the device mutex is held so the dispatcher can't send any
+	 * more commands to the hardware
+	 */
 
-	kgsl_cffdump_regpoll(device,
-		adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
-		0x00000000, 0x80000000);
+	BUG_ON(!mutex_is_locked(&device->mutex));
 
-retry:
-	/* First, wait for the ringbuffer to drain */
-	if (adreno_ringbuffer_drain(device, prev_reg_val))
-		goto err;
+	if (adreno_is_a3xx(adreno_dev))
+		kgsl_cffdump_regpoll(device,
+			adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+			0x00000000, 0x80000000);
+	else
+		kgsl_cffdump_regpoll(device,
+			adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+			0x110, 0x110);
 
-	/* now, wait for the GPU to finish its operations */
-	wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
-	wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
+	while (time_before(jiffies, wait)) {
+		/*
+		 * If we fault, stop waiting and return an error. The dispatcher
+		 * will clean up the fault from the work queue, but we need to
+		 * make sure we don't block it by waiting for an idle that
+		 * will never come.
+		 */
 
-	while (time_before(jiffies, wait_time)) {
+		if (adreno_gpu_fault(adreno_dev) != 0)
+			return -EDEADLK;
+
 		if (adreno_isidle(device))
 			return 0;
-
-		/* Dont wait for timeout, detect hang faster.  */
-		if (time_after(jiffies, wait_time_part)) {
-			wait_time_part = jiffies +
-				msecs_to_jiffies(KGSL_TIMEOUT_PART);
-			if ((adreno_ft_detect(device, prev_reg_val)))
-				goto err;
-		}
-
 	}
 
-err:
-	KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n");
-	if (KGSL_STATE_DUMP_AND_FT != device->state &&
-		!adreno_dump_and_exec_ft(device)) {
-		wait_time = jiffies + ADRENO_IDLE_TIMEOUT;
-		goto retry;
-	}
 	return -ETIMEDOUT;
 }
 
 /**
- * is_adreno_rbbm_status_idle - Check if GPU core is idle by probing
- * rbbm_status register
- * @device - Pointer to the GPU device whose idle status is to be
- * checked
- * @returns - Returns whether the core is idle (based on rbbm_status)
- * false if the core is active, true if the core is idle
+ * adreno_drain() - Drain the dispatch queue
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Tell the dispatcher to pause - this has the effect of draining the inflight
+ * command batches
  */
-static bool is_adreno_rbbm_status_idle(struct kgsl_device *device)
+static int adreno_drain(struct kgsl_device *device)
 {
-	unsigned int reg_rbbm_status;
-	bool status = false;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
-	/* Is the core idle? */
-	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
-				&reg_rbbm_status);
-
-	if (adreno_is_a2xx(adreno_dev)) {
-		if (reg_rbbm_status == 0x110)
-			status = true;
-	} else {
-		if (!(reg_rbbm_status & 0x80000000))
-			status = true;
-	}
-	return status;
-}
-
-static unsigned int adreno_isidle(struct kgsl_device *device)
-{
-	int status = false;
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-
-	/* If the device isn't active, don't force it on. */
-	if (kgsl_pwrctrl_isenabled(device)) {
-		/* Is the ring buffer is empty? */
-		unsigned int rptr = adreno_get_rptr(rb);
-		if (rptr == rb->wptr) {
-			/*
-			 * Are there interrupts pending? If so then pretend we
-			 * are not idle - this avoids the possiblity that we go
-			 * to a lower power state without handling interrupts
-			 * first.
-			 */
-
-			if (!adreno_dev->gpudev->irq_pending(adreno_dev)) {
-				/* Is the core idle? */
-				status = is_adreno_rbbm_status_idle(device);
-			}
-		}
-	} else {
-		status = true;
-	}
-	return status;
+	adreno_dispatcher_pause(adreno_dev);
+	return 0;
 }
 
 /* Caller must hold the device mutex. */
@@ -3359,6 +2433,9 @@
 	int status = 0;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
+	/* process any profiling results that are available */
+	adreno_profile_process_results(device);
+
 	/* switch to NULL ctxt */
 	if (adreno_dev->drawctxt_active != NULL) {
 		adreno_drawctxt_switch(adreno_dev, NULL, 0);
@@ -3518,342 +2595,6 @@
 	__raw_writel(value, reg);
 }
 
-static unsigned int _get_context_id(struct kgsl_context *k_ctxt)
-{
-	unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
-
-	if (k_ctxt != NULL) {
-		struct adreno_context *a_ctxt = ADRENO_CONTEXT(k_ctxt);
-		if (kgsl_context_detached(k_ctxt))
-			context_id = KGSL_CONTEXT_INVALID;
-		else if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
-			context_id = k_ctxt->id;
-	}
-
-	return context_id;
-}
-
-static unsigned int adreno_check_hw_ts(struct kgsl_device *device,
-		struct kgsl_context *context, unsigned int timestamp)
-{
-	int status = 0;
-	unsigned int ref_ts, enableflag;
-	unsigned int context_id = _get_context_id(context);
-
-	/*
-	 * If the context ID is invalid, we are in a race with
-	 * the context being destroyed by userspace so bail.
-	 */
-	if (context_id == KGSL_CONTEXT_INVALID) {
-		KGSL_DRV_WARN(device, "context was detached");
-		return -EINVAL;
-	}
-
-	status = kgsl_check_timestamp(device, context, timestamp);
-	if (status)
-		return status;
-
-	kgsl_sharedmem_readl(&device->memstore, &enableflag,
-			KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable));
-	/*
-	 * Barrier is needed here to make sure the read from memstore
-	 * has posted
-	 */
-
-	mb();
-
-	if (enableflag) {
-		kgsl_sharedmem_readl(&device->memstore, &ref_ts,
-				KGSL_MEMSTORE_OFFSET(context_id,
-					ref_wait_ts));
-
-		/* Make sure the memstore read has posted */
-		mb();
-		if (timestamp_cmp(ref_ts, timestamp) >= 0) {
-			kgsl_sharedmem_writel(device, &device->memstore,
-					KGSL_MEMSTORE_OFFSET(context_id,
-						ref_wait_ts), timestamp);
-			/* Make sure the memstore write is posted */
-			wmb();
-		}
-	} else {
-		kgsl_sharedmem_writel(device, &device->memstore,
-				KGSL_MEMSTORE_OFFSET(context_id,
-					ref_wait_ts), timestamp);
-		enableflag = 1;
-		kgsl_sharedmem_writel(device, &device->memstore,
-				KGSL_MEMSTORE_OFFSET(context_id,
-					ts_cmp_enable), enableflag);
-
-		/* Make sure the memstore write gets posted */
-		wmb();
-
-		/*
-		 * submit a dummy packet so that even if all
-		 * commands upto timestamp get executed we will still
-		 * get an interrupt
-		 */
-
-		if (context && device->state != KGSL_STATE_SLUMBER) {
-			adreno_ringbuffer_issuecmds(device,
-					ADRENO_CONTEXT(context),
-					KGSL_CMD_FLAGS_GET_INT, NULL, 0);
-		}
-	}
-
-	return 0;
-}
-
-/* Return 1 if the event timestmp has already passed, 0 if it was marked */
-static int adreno_next_event(struct kgsl_device *device,
-		struct kgsl_event *event)
-{
-	return adreno_check_hw_ts(device, event->context, event->timestamp);
-}
-
-static int adreno_check_interrupt_timestamp(struct kgsl_device *device,
-		struct kgsl_context *context, unsigned int timestamp)
-{
-	int status;
-
-	mutex_lock(&device->mutex);
-	status = adreno_check_hw_ts(device, context, timestamp);
-	mutex_unlock(&device->mutex);
-
-	return status;
-}
-
-/*
- wait_event_interruptible_timeout checks for the exit condition before
- placing a process in wait q. For conditional interrupts we expect the
- process to already be in its wait q when its exit condition checking
- function is called.
-*/
-#define kgsl_wait_event_interruptible_timeout(wq, condition, timeout, io)\
-({									\
-	long __ret = timeout;						\
-	if (io)						\
-		__wait_io_event_interruptible_timeout(wq, condition, __ret);\
-	else						\
-		__wait_event_interruptible_timeout(wq, condition, __ret);\
-	__ret;								\
-})
-
-
-
-unsigned int adreno_ft_detect(struct kgsl_device *device,
-						unsigned int *prev_reg_val)
-{
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-	unsigned int curr_reg_val[FT_DETECT_REGS_COUNT];
-	unsigned int fast_hang_detected = 1;
-	unsigned int long_ib_detected = 1;
-	unsigned int i;
-	static unsigned long next_hang_detect_time;
-	static unsigned int prev_global_ts;
-	unsigned int curr_global_ts = 0;
-	unsigned int curr_context_id = 0;
-	static struct adreno_context *curr_context;
-	static struct kgsl_context *context;
-	static char pid_name[TASK_COMM_LEN] = "unknown";
-
-	if (!adreno_dev->fast_hang_detect)
-		fast_hang_detected = 0;
-
-	if (!adreno_dev->long_ib_detect)
-		long_ib_detected = 0;
-
-	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED))
-		return 0;
-
-	if (is_adreno_rbbm_status_idle(device) &&
-		(kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED)
-		 == rb->global_ts)) {
-
-		/*
-		 * On A2XX if the RPTR != WPTR and the device is idle, then
-		 * the last write to WPTR probably failed to latch so write it
-		 * again
-		 */
-
-		if (adreno_is_a2xx(adreno_dev)) {
-			unsigned int rptr;
-			adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
-						&rptr);
-			if (rptr != adreno_dev->ringbuffer.wptr)
-				adreno_writereg(adreno_dev,
-					ADRENO_REG_CP_RB_WPTR,
-					adreno_dev->ringbuffer.wptr);
-		}
-
-		return 0;
-	}
-
-	/*
-	 * Time interval between hang detection should be KGSL_TIMEOUT_PART
-	 * or more, if next hang detection is requested < KGSL_TIMEOUT_PART
-	 * from the last time do nothing.
-	 */
-	if ((next_hang_detect_time) &&
-		(time_before(jiffies, next_hang_detect_time)))
-			return 0;
-	else
-		next_hang_detect_time = (jiffies +
-			msecs_to_jiffies(KGSL_TIMEOUT_PART-1));
-
-	/* Read the current Hang detect reg values here */
-	for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
-		if (ft_detect_regs[i] == 0)
-			continue;
-		kgsl_regread(device, ft_detect_regs[i],
-			&curr_reg_val[i]);
-	}
-
-	/* Read the current global timestamp here */
-	kgsl_sharedmem_readl(&device->memstore,
-			&curr_global_ts,
-			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-			eoptimestamp));
-	/* Make sure the memstore read has posted */
-	mb();
-
-	if (curr_global_ts == prev_global_ts) {
-
-		/* If we don't already have a good context, get it. */
-		if (kgsl_context_detached(context)) {
-			kgsl_context_put(context);
-			context = NULL;
-			curr_context = NULL;
-			strlcpy(pid_name, "unknown", sizeof(pid_name));
-
-			kgsl_sharedmem_readl(&device->memstore,
-				&curr_context_id,
-				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-				current_context));
-			/* Make sure the memstore read has posted */
-			mb();
-
-			context = kgsl_context_get(device, curr_context_id);
-			if (context != NULL) {
-				struct task_struct *task;
-				curr_context = ADRENO_CONTEXT(context);
-				curr_context->ib_gpu_time_used = 0;
-				task = find_task_by_vpid(context->pid);
-				if (task)
-					get_task_comm(pid_name, task);
-			} else {
-				KGSL_DRV_ERR(device,
-					"Fault tolerance no context found\n");
-			}
-		}
-		for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
-			if (curr_reg_val[i] != prev_reg_val[i]) {
-				fast_hang_detected = 0;
-
-				/* Check for long IB here */
-				if ((i >=
-					LONG_IB_DETECT_REG_INDEX_START)
-					&&
-					(i <=
-					LONG_IB_DETECT_REG_INDEX_END))
-					long_ib_detected = 0;
-			}
-		}
-
-		if (fast_hang_detected) {
-			KGSL_FT_ERR(device,
-				"Proc %s, ctxt_id %d ts %d triggered fault tolerance"
-				" on global ts %d\n",
-				pid_name, context ? context->id : 0,
-				(kgsl_readtimestamp(device, context,
-				KGSL_TIMESTAMP_RETIRED) + 1),
-				curr_global_ts + 1);
-			return 1;
-		}
-
-		if (curr_context != NULL) {
-
-			curr_context->ib_gpu_time_used += KGSL_TIMEOUT_PART;
-			KGSL_FT_INFO(device,
-			"Proc %s used GPU Time %d ms on timestamp 0x%X\n",
-			pid_name, curr_context->ib_gpu_time_used,
-			curr_global_ts+1);
-
-			if ((long_ib_detected) &&
-				(!(curr_context->flags &
-				 CTXT_FLAGS_NO_FAULT_TOLERANCE))) {
-				curr_context->ib_gpu_time_used +=
-					KGSL_TIMEOUT_PART;
-				if (curr_context->ib_gpu_time_used >
-					KGSL_TIMEOUT_LONG_IB_DETECTION) {
-					if (adreno_dev->long_ib_ts !=
-						curr_global_ts) {
-						KGSL_FT_ERR(device,
-						"Proc %s, ctxt_id %d ts %d"
-						"used GPU for %d ms long ib "
-						"detected on global ts %d\n",
-						pid_name, context->id,
-						(kgsl_readtimestamp(device,
-						context,
-						KGSL_TIMESTAMP_RETIRED)+1),
-						curr_context->ib_gpu_time_used,
-						curr_global_ts+1);
-						adreno_dev->long_ib = 1;
-						adreno_dev->long_ib_ts =
-								curr_global_ts;
-						curr_context->ib_gpu_time_used =
-								0;
-						return 1;
-					}
-				}
-			}
-		}
-	} else {
-		/* GPU is moving forward */
-		prev_global_ts = curr_global_ts;
-		kgsl_context_put(context);
-		context = NULL;
-		curr_context = NULL;
-		strlcpy(pid_name, "unknown", sizeof(pid_name));
-		adreno_dev->long_ib = 0;
-		adreno_dev->long_ib_ts = 0;
-	}
-
-
-	/* If hangs are not detected copy the current reg values
-	 * to previous values and return no hang */
-	for (i = 0; i < FT_DETECT_REGS_COUNT; i++)
-			prev_reg_val[i] = curr_reg_val[i];
-	return 0;
-}
-
-static int _check_pending_timestamp(struct kgsl_device *device,
-		struct kgsl_context *context, unsigned int timestamp)
-{
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	unsigned int context_id = _get_context_id(context);
-	unsigned int ts_issued;
-
-	if (context_id == KGSL_CONTEXT_INVALID)
-		return -EINVAL;
-
-	ts_issued = adreno_context_timestamp(context, &adreno_dev->ringbuffer);
-
-	if (timestamp_cmp(timestamp, ts_issued) <= 0)
-		return 0;
-
-	if (context && !context->wait_on_invalid_ts) {
-		KGSL_DRV_ERR(device, "Cannot wait for invalid ts <%d:0x%x>, last issued ts <%d:0x%x>\n",
-			context_id, timestamp, context_id, ts_issued);
-
-			/* Only print this message once */
-			context->wait_on_invalid_ts = true;
-	}
-
-	return -EINVAL;
-}
-
 /**
  * adreno_waittimestamp - sleep while waiting for the specified timestamp
  * @device - pointer to a KGSL device structure
@@ -3861,147 +2602,35 @@
  * @timestamp - GPU timestamp to wait for
  * @msecs - amount of time to wait (in milliseconds)
  *
- * Wait 'msecs' milliseconds for the specified timestamp to expire. Wake up
- * every KGSL_TIMEOUT_PART milliseconds to check for a device hang and process
- * one if it happened.  Otherwise, spend most of our time in an interruptible
- * wait for the timestamp interrupt to be processed.  This function must be
- * called with the mutex already held.
+ * Wait up to 'msecs' milliseconds for the specified timestamp to expire.
  */
 static int adreno_waittimestamp(struct kgsl_device *device,
-				struct kgsl_context *context,
-				unsigned int timestamp,
-				unsigned int msecs)
+		struct kgsl_context *context,
+		unsigned int timestamp,
+		unsigned int msecs)
 {
-	static unsigned int io_cnt;
-	struct adreno_context *adreno_ctx = context ? ADRENO_CONTEXT(context) :
-		NULL;
-	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-	unsigned int context_id = _get_context_id(context);
-	unsigned int time_elapsed = 0;
-	unsigned int wait;
-	int ts_compare = 1;
-	int io, ret = -ETIMEDOUT;
+	int ret;
+	struct adreno_context *drawctxt;
 
-	if (context_id == KGSL_CONTEXT_INVALID) {
-		KGSL_DRV_WARN(device, "context was detached");
+	if (context == NULL) {
+		/* If anyone is still using this deprecated path, complain once */
+		dev_WARN_ONCE(device->dev, 1,
+			"IOCTL_KGSL_DEVICE_WAITTIMESTAMP is deprecated\n");
 		return -EINVAL;
 	}
 
-	/*
-	 * Check to see if the requested timestamp is "newer" then the last
-	 * timestamp issued. If it is complain once and return error.  Only
-	 * print the message once per context so that badly behaving
-	 * applications don't spam the logs
-	 */
+	/* Return -EINVAL if the context has been detached */
+	if (kgsl_context_detached(context))
+		return -EINVAL;
 
-	if (adreno_ctx && !(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS)) {
-		if (_check_pending_timestamp(device, context, timestamp))
-			return -EINVAL;
+	ret = adreno_drawctxt_wait(ADRENO_DEVICE(device), context,
+		timestamp, msecs_to_jiffies(msecs));
 
-		/* Reset the invalid timestamp flag on a valid wait */
-		context->wait_on_invalid_ts = false;
-	}
+	/* If the context got invalidated then return a specific error */
+	drawctxt = ADRENO_CONTEXT(context);
 
-	/*
-	 * On the first time through the loop only wait 100ms.
-	 * this gives enough time for the engine to start moving and oddly
-	 * provides better hang detection results than just going the full
-	 * KGSL_TIMEOUT_PART right off the bat. The exception to this rule
-	 * is if msecs happens to be < 100ms then just use 20ms or the msecs,
-	 * whichever is larger because anything less than 20 is unreliable
-	 */
-
-	if (msecs == 0 || msecs >= 100)
-		wait = 100;
-	else
-		wait = (msecs > 20) ? msecs : 20;
-
-	do {
-		long status;
-
-		/*
-		 * if the timestamp happens while we're not
-		 * waiting, there's a chance that an interrupt
-		 * will not be generated and thus the timestamp
-		 * work needs to be queued.
-		 */
-
-		if (kgsl_check_timestamp(device, context, timestamp)) {
-			queue_work(device->work_queue, &device->ts_expired_ws);
-			ret = 0;
-			break;
-		}
-
-		/*
-		 * For proper power accounting sometimes we need to call
-		 * io_wait_interruptible_timeout and sometimes we need to call
-		 * plain old wait_interruptible_timeout. We call the regular
-		 * timeout N times out of 100, where N is a number specified by
-		 * the current power level
-		 */
-
-		io_cnt = (io_cnt + 1) % 100;
-		io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
-			? 0 : 1;
-
-		mutex_unlock(&device->mutex);
-
-		/* Wait for a timestamp event */
-		status = kgsl_wait_event_interruptible_timeout(
-			device->wait_queue,
-			adreno_check_interrupt_timestamp(device, context,
-				timestamp), msecs_to_jiffies(wait), io);
-
-		mutex_lock(&device->mutex);
-
-		/*
-		 * If status is non zero then either the condition was satisfied
-		 * or there was an error.  In either event, this is the end of
-		 * the line for us
-		 */
-
-		if (status != 0) {
-			ret = (status > 0) ? 0 : (int) status;
-			break;
-		}
-		time_elapsed += wait;
-
-		/* If user specified timestamps are being used, wait at least
-		 * KGSL_SYNCOBJ_SERVER_TIMEOUT msecs for the user driver to
-		 * issue a IB for a timestamp before checking to see if the
-		 * current timestamp we are waiting for is valid or not
-		 */
-
-		if (ts_compare && (adreno_ctx &&
-			(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS))) {
-			if (time_elapsed > KGSL_SYNCOBJ_SERVER_TIMEOUT) {
-				ret = _check_pending_timestamp(device, context,
-					timestamp);
-				if (ret)
-					break;
-
-				/* Don't do this check again */
-				ts_compare = 0;
-
-				/*
-				 * Reset the invalid timestamp flag on a valid
-				 * wait
-				 */
-				context->wait_on_invalid_ts = false;
-			}
-		}
-
-		/*
-		 * We want to wait the floor of KGSL_TIMEOUT_PART
-		 * and (msecs - time_elapsed).
-		 */
-
-		if (KGSL_TIMEOUT_PART < (msecs - time_elapsed))
-			wait = KGSL_TIMEOUT_PART;
-		else
-			wait = (msecs - time_elapsed);
-
-	} while (!msecs || time_elapsed < msecs);
+	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+		ret = -EDEADLK;
 
 	return ret;
 }
@@ -4010,13 +2639,13 @@
 		struct kgsl_context *context, enum kgsl_timestamp_type type)
 {
 	unsigned int timestamp = 0;
-	unsigned int context_id = _get_context_id(context);
+	unsigned int id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
 
 	/*
-	 * If the context ID is invalid, we are in a race with
+	 * If the context is detached we are in a race with
 	 * the context being destroyed by userspace so bail.
 	 */
-	if (context_id == KGSL_CONTEXT_INVALID) {
+	if (context && kgsl_context_detached(context)) {
 		KGSL_DRV_WARN(device, "context was detached");
 		return timestamp;
 	}
@@ -4030,11 +2659,11 @@
 	}
 	case KGSL_TIMESTAMP_CONSUMED:
 		kgsl_sharedmem_readl(&device->memstore, &timestamp,
-			KGSL_MEMSTORE_OFFSET(context_id, soptimestamp));
+			KGSL_MEMSTORE_OFFSET(id, soptimestamp));
 		break;
 	case KGSL_TIMESTAMP_RETIRED:
 		kgsl_sharedmem_readl(&device->memstore, &timestamp,
-			KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp));
+			KGSL_MEMSTORE_OFFSET(id, eoptimestamp));
 		break;
 	}
 
@@ -4082,7 +2711,7 @@
 	case IOCTL_KGSL_PERFCOUNTER_PUT: {
 		struct kgsl_perfcounter_put *put = data;
 		result = adreno_perfcounter_put(adreno_dev, put->groupid,
-			put->countable);
+			put->countable, PERFCOUNTER_FLAG_NONE);
 		break;
 	}
 	case IOCTL_KGSL_PERFCOUNTER_QUERY: {
@@ -4194,6 +2823,7 @@
 	.gpuid = adreno_gpuid,
 	.snapshot = adreno_snapshot,
 	.irq_handler = adreno_irq_handler,
+	.drain = adreno_drain,
 	/* Optional functions */
 	.setstate = adreno_setstate,
 	.drawctxt_create = adreno_drawctxt_create,
@@ -4201,7 +2831,7 @@
 	.drawctxt_destroy = adreno_drawctxt_destroy,
 	.setproperty = adreno_setproperty,
 	.postmortem_dump = adreno_dump,
-	.next_event = adreno_next_event,
+	.drawctxt_sched = adreno_drawctxt_sched,
 };
 
 static struct platform_driver adreno_platform_driver = {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index cb75b34..0363739 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -16,6 +16,7 @@
 #include "kgsl_device.h"
 #include "adreno_drawctxt.h"
 #include "adreno_ringbuffer.h"
+#include "adreno_profile.h"
 #include "kgsl_iommu.h"
 #include <mach/ocmem.h>
 
@@ -34,11 +35,11 @@
 #define ADRENO_CHIPID_PATCH(_id) ((_id) & 0xFF)
 
 /* Flags to control command packet settings */
-#define KGSL_CMD_FLAGS_NONE             0x00000000
-#define KGSL_CMD_FLAGS_PMODE		0x00000001
-#define KGSL_CMD_FLAGS_INTERNAL_ISSUE	0x00000002
-#define KGSL_CMD_FLAGS_GET_INT		0x00000004
-#define KGSL_CMD_FLAGS_EOF	        0x00000100
+#define KGSL_CMD_FLAGS_NONE             0
+#define KGSL_CMD_FLAGS_PMODE		BIT(0)
+#define KGSL_CMD_FLAGS_INTERNAL_ISSUE   BIT(1)
+#define KGSL_CMD_FLAGS_WFI              BIT(2)
+#define KGSL_CMD_FLAGS_PROFILE		BIT(3)
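With the command flags now defined as single bits, callers can combine and test them with ordinary bitwise operators. A minimal sketch, illustrative only and not part of this patch (the example_wants_wfi() helper is hypothetical):

	/* Hypothetical sketch - single-bit command flags combine with a bitwise OR */
	static inline bool example_wants_wfi(void)
	{
		unsigned int flags = KGSL_CMD_FLAGS_PMODE | KGSL_CMD_FLAGS_INTERNAL_ISSUE;

		return (flags & KGSL_CMD_FLAGS_WFI) != 0;
	}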
 
 /* Command identifiers */
 #define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0x2EADBEEF
@@ -48,6 +49,8 @@
 #define KGSL_END_OF_IB_IDENTIFIER	0x2ABEDEAD
 #define KGSL_END_OF_FRAME_IDENTIFIER	0x2E0F2E0F
 #define KGSL_NOP_IB_IDENTIFIER	        0x20F20F20
+#define KGSL_START_OF_PROFILE_IDENTIFIER	0x2DEFADE1
+#define KGSL_END_OF_PROFILE_IDENTIFIER	0x2DEFADE2
 
 #ifdef CONFIG_MSM_SCM
 #define ADRENO_DEFAULT_PWRSCALE_POLICY  (&kgsl_pwrscale_policy_tz)
@@ -92,6 +95,51 @@
 	TRACE_BUS_CTL,
 };
 
+#define ADRENO_SOFT_FAULT 1
+#define ADRENO_HARD_FAULT 2
+#define ADRENO_TIMEOUT_FAULT 3
+
+/*
+ * Maximum size of the dispatcher ringbuffer - the actual inflight size will be
+ * smaller than this, but this size allows for a larger range of inflight sizes
+ * to be chosen at runtime
+ */
+
+#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
+
+/**
+ * struct adreno_dispatcher - container for the adreno GPU dispatcher
+ * @mutex: Mutex to protect the structure
+ * @state: Current state of the dispatcher (active or paused)
+ * @timer: Timer to monitor the progress of the command batches
+ * @inflight: Number of command batch operations pending in the ringbuffer
+ * @fault: Non-zero if a fault was detected.
+ * @pending: Priority list of contexts waiting to submit command batches
+ * @plist_lock: Spin lock to protect the pending queue
+ * @cmdqueue: Queue of command batches currently in flight
+ * @head: pointer to the head of the cmdqueue.  This is the oldest pending
+ * operation
+ * @tail: pointer to the tail of the cmdqueue.  This is the most recently
+ * submitted operation
+ * @work: work_struct to put the dispatcher in a work queue
+ * @kobj: kobject for the dispatcher directory in the device sysfs node
+ */
+struct adreno_dispatcher {
+	struct mutex mutex;
+	unsigned int state;
+	struct timer_list timer;
+	struct timer_list fault_timer;
+	unsigned int inflight;
+	atomic_t fault;
+	struct plist_head pending;
+	spinlock_t plist_lock;
+	struct kgsl_cmdbatch *cmdqueue[ADRENO_DISPATCH_CMDQUEUE_SIZE];
+	unsigned int head;
+	unsigned int tail;
+	struct work_struct work;
+	struct kobject kobj;
+};
+
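The cmdqueue behaves as a fixed-size ring: head indexes the oldest inflight command batch and tail the slot for the next submission. The sketch below is illustrative only and not part of this patch; the helper names cmdqueue_next() and cmdqueue_push() are hypothetical, and the wrap-around arithmetic is simply one plausible way to manage the indices.

	/* Hypothetical helpers, shown only to illustrate the head/tail ring */
	static inline unsigned int cmdqueue_next(unsigned int index)
	{
		return (index + 1) % ADRENO_DISPATCH_CMDQUEUE_SIZE;
	}

	static int cmdqueue_push(struct adreno_dispatcher *dispatcher,
			struct kgsl_cmdbatch *cmdbatch)
	{
		/* Refuse to overwrite the oldest entry when the ring is full */
		if (dispatcher->inflight == ADRENO_DISPATCH_CMDQUEUE_SIZE)
			return -EBUSY;

		dispatcher->cmdqueue[dispatcher->tail] = cmdbatch;
		dispatcher->tail = cmdqueue_next(dispatcher->tail);
		dispatcher->inflight++;

		return 0;
	}

In this scheme inflight is the number of occupied slots, so head == tail means either an empty or a completely full queue depending on the count.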
 struct adreno_gpudev;
 
 struct adreno_device {
@@ -131,6 +179,8 @@
 	struct ocmem_buf *ocmem_hdl;
 	unsigned int ocmem_base;
 	unsigned int gpu_cycles;
+	struct adreno_profile profile;
+	struct adreno_dispatcher dispatcher;
 };
 
 #define PERFCOUNTER_FLAG_NONE 0x0
@@ -141,24 +191,27 @@
 /**
  * struct adreno_perfcount_register: register state
  * @countable: countable the register holds
- * @refcount: number of users of the register
+ * @kernelcount: number of kernel users of the register
+ * @usercount: number of user space users of the register
  * @offset: register hardware offset
  */
 struct adreno_perfcount_register {
 	unsigned int countable;
-	unsigned int refcount;
+	unsigned int kernelcount;
+	unsigned int usercount;
 	unsigned int offset;
-	unsigned int flags;
 };
 
 /**
  * struct adreno_perfcount_group: registers for a hardware group
  * @regs: available registers for this group
  * @reg_count: total registers for this group
+ * @name: name of this counter group
  */
 struct adreno_perfcount_group {
 	struct adreno_perfcount_register *regs;
 	unsigned int reg_count;
+	const char *name;
 };
 
 /**
@@ -258,9 +311,9 @@
 
 	/* GPU specific function hooks */
 	int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
-	void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
-	void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
-	void (*ctxt_draw_workaround)(struct adreno_device *,
+	int (*ctxt_save)(struct adreno_device *, struct adreno_context *);
+	int (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
+	int (*ctxt_draw_workaround)(struct adreno_device *,
 					struct adreno_context *);
 	irqreturn_t (*irq_handler)(struct adreno_device *);
 	void (*irq_control)(struct adreno_device *, int);
@@ -283,46 +336,6 @@
 	void (*postmortem_dump)(struct adreno_device *adreno_dev);
 };
 
-/*
- * struct adreno_ft_data - Structure that contains all information to
- * perform gpu fault tolerance
- * @ib1 - IB1 that the GPU was executing when hang happened
- * @context_id - Context which caused the hang
- * @global_eop - eoptimestamp at time of hang
- * @rb_buffer - Buffer that holds the commands from good contexts
- * @rb_size - Number of valid dwords in rb_buffer
- * @bad_rb_buffer - Buffer that holds commands from the hanging context
- * bad_rb_size - Number of valid dwords in bad_rb_buffer
- * @good_rb_buffer - Buffer that holds commands from good contexts
- * good_rb_size - Number of valid dwords in good_rb_buffer
- * @last_valid_ctx_id - The last context from which commands were placed in
- * ringbuffer before the GPU hung
- * @step - Current fault tolerance step being executed
- * @err_code - Fault tolerance error code
- * @fault - Indicates whether the hang was caused due to a pagefault
- * @start_of_replay_cmds - Offset in ringbuffer from where commands can be
- * replayed during fault tolerance
- * @replay_for_snapshot - Offset in ringbuffer where IB's can be saved for
- * replaying with snapshot
- */
-struct adreno_ft_data {
-	unsigned int ib1;
-	unsigned int context_id;
-	unsigned int global_eop;
-	unsigned int *rb_buffer;
-	unsigned int rb_size;
-	unsigned int *bad_rb_buffer;
-	unsigned int bad_rb_size;
-	unsigned int *good_rb_buffer;
-	unsigned int good_rb_size;
-	unsigned int last_valid_ctx_id;
-	unsigned int status;
-	unsigned int ft_policy;
-	unsigned int err_code;
-	unsigned int start_of_replay_cmds;
-	unsigned int replay_for_snapshot;
-};
-
 #define FT_DETECT_REGS_COUNT 12
 
 struct log_field {
@@ -331,13 +344,16 @@
 };
 
 /* Fault Tolerance policy flags */
-#define  KGSL_FT_OFF                      BIT(0)
-#define  KGSL_FT_REPLAY                   BIT(1)
-#define  KGSL_FT_SKIPIB                   BIT(2)
-#define  KGSL_FT_SKIPFRAME                BIT(3)
-#define  KGSL_FT_DISABLE                  BIT(4)
-#define  KGSL_FT_TEMP_DISABLE             BIT(5)
-#define  KGSL_FT_DEFAULT_POLICY           (KGSL_FT_REPLAY + KGSL_FT_SKIPIB)
+#define  KGSL_FT_OFF                      0
+#define  KGSL_FT_REPLAY                   1
+#define  KGSL_FT_SKIPIB                   2
+#define  KGSL_FT_SKIPFRAME                3
+#define  KGSL_FT_DISABLE                  4
+#define  KGSL_FT_TEMP_DISABLE             5
+#define  KGSL_FT_DEFAULT_POLICY (BIT(KGSL_FT_REPLAY) + BIT(KGSL_FT_SKIPIB))
+
+/* This internal bit is used to skip the PM dump on replayed command batches */
+#define  KGSL_FT_SKIP_PMDUMP              31
 
 /* Pagefault policy flags */
 #define KGSL_FT_PAGEFAULT_INT_ENABLE         BIT(0)
@@ -347,6 +363,14 @@
 #define KGSL_FT_PAGEFAULT_DEFAULT_POLICY     (KGSL_FT_PAGEFAULT_INT_ENABLE + \
 					KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
 
+#define ADRENO_FT_TYPES \
+	{ BIT(KGSL_FT_OFF), "off" }, \
+	{ BIT(KGSL_FT_REPLAY), "replay" }, \
+	{ BIT(KGSL_FT_SKIPIB), "skipib" }, \
+	{ BIT(KGSL_FT_SKIPFRAME), "skipframe" }, \
+	{ BIT(KGSL_FT_DISABLE), "disable" }, \
+	{ BIT(KGSL_FT_TEMP_DISABLE), "temp" }
+
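The fault tolerance flags are now bit positions rather than masks, so a policy is a bitmask built and tested with BIT(); KGSL_FT_DEFAULT_POLICY above is exactly BIT(KGSL_FT_REPLAY) plus BIT(KGSL_FT_SKIPIB). A hedged sketch of how such a bitmask is consumed (the example_describe_ft_policy() helper is hypothetical, not part of this patch):

	/* Illustrative only - policies are bitmasks built from the bit numbers above */
	static void example_describe_ft_policy(unsigned long ft_policy)
	{
		if (ft_policy & BIT(KGSL_FT_DISABLE))
			pr_debug("fault tolerance disabled\n");
		else if (ft_policy & BIT(KGSL_FT_REPLAY))
			pr_debug("replay the inflight command batches\n");
	}

Passing KGSL_FT_DEFAULT_POLICY to this sketch would take the replay branch, since the default sets the replay and skip-IB bits.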
 extern struct adreno_gpudev adreno_a2xx_gpudev;
 extern struct adreno_gpudev adreno_a3xx_gpudev;
 
@@ -402,23 +426,39 @@
 void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
 		int hang);
 
-int adreno_dump_and_exec_ft(struct kgsl_device *device);
+void adreno_dispatcher_start(struct adreno_device *adreno_dev);
+int adreno_dispatcher_init(struct adreno_device *adreno_dev);
+void adreno_dispatcher_close(struct adreno_device *adreno_dev);
+int adreno_dispatcher_idle(struct adreno_device *adreno_dev,
+		unsigned int timeout);
+void adreno_dispatcher_irq_fault(struct kgsl_device *device);
+void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
 
-void adreno_dump_rb(struct kgsl_device *device, const void *buf,
-			 size_t len, int start, int size);
+int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
+		struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
+		uint32_t *timestamp);
 
-unsigned int adreno_ft_detect(struct kgsl_device *device,
-						unsigned int *prev_reg_val);
+void adreno_dispatcher_schedule(struct kgsl_device *device);
+void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
+void adreno_dispatcher_queue_context(struct kgsl_device *device,
+	struct adreno_context *drawctxt);
+int adreno_reset(struct kgsl_device *device);
 
 int adreno_ft_init_sysfs(struct kgsl_device *device);
 void adreno_ft_uninit_sysfs(struct kgsl_device *device);
 
+int adreno_perfcounter_get_groupid(struct adreno_device *adreno_dev,
+					const char *name);
+
+const char *adreno_perfcounter_get_name(struct adreno_device
+					*adreno_dev, unsigned int groupid);
+
 int adreno_perfcounter_get(struct adreno_device *adreno_dev,
 	unsigned int groupid, unsigned int countable, unsigned int *offset,
 	unsigned int flags);
 
 int adreno_perfcounter_put(struct adreno_device *adreno_dev,
-	unsigned int groupid, unsigned int countable);
+	unsigned int groupid, unsigned int countable, unsigned int flags);
 
 int adreno_soft_reset(struct kgsl_device *device);
 
@@ -519,9 +559,7 @@
 {
 	if (k_ctxt) {
 		struct adreno_context *a_ctxt = ADRENO_CONTEXT(k_ctxt);
-
-		if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
-			return a_ctxt->timestamp;
+		return a_ctxt->timestamp;
 	}
 	return rb->global_ts;
 }
@@ -720,4 +758,31 @@
 		return ADRENO_REG_REGISTER_MAX;
 	return adreno_dev->gpudev->reg_offsets->offsets[offset_name];
 }
+
+/**
+ * adreno_gpu_fault() - Return the current state of the GPU
+ * @adreno_dev: A pointer to the adreno_device to query
+ *
+ * Return 0 if there is no fault, or a positive value indicating the last type
+ * of fault that occurred
+ */
+static inline unsigned int adreno_gpu_fault(struct adreno_device *adreno_dev)
+{
+	smp_rmb();
+	return atomic_read(&adreno_dev->dispatcher.fault);
+}
+
+/**
+ * adreno_set_gpu_fault() - Set the current fault status of the GPU
+ * @adreno_dev: A pointer to the adreno_device to set
+ * @state: fault state to set
+ *
+ */
+static inline void adreno_set_gpu_fault(struct adreno_device *adreno_dev,
+	int state)
+{
+	atomic_set(&adreno_dev->dispatcher.fault, state);
+	smp_wmb();
+}
+
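The smp_wmb()/smp_rmb() pair in these helpers makes a fault flagged from one context (for example an interrupt handler) visible to code polling from another. A short sketch of the intended pattern, pieced together from the declarations above and the adreno_idle() loop earlier in this patch; the example_* wrappers are hypothetical:

	/* Producer side (e.g. an error handler) - hypothetical example function */
	static void example_flag_fault(struct kgsl_device *device)
	{
		struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

		adreno_set_gpu_fault(adreno_dev, ADRENO_HARD_FAULT);
		adreno_dispatcher_schedule(device);
	}

	/* Consumer side - mirrors the check in adreno_idle() */
	static int example_check_fault(struct adreno_device *adreno_dev)
	{
		if (adreno_gpu_fault(adreno_dev) != 0)
			return -EDEADLK;

		return 0;
	}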
 #endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 3d72c5c..cce4f91 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1451,7 +1451,7 @@
 	return ret;
 }
 
-static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
+static int a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
 					struct adreno_context *context)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
@@ -1468,7 +1468,7 @@
 				ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW)
 			adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
 		else
-			return;
+			return 0;
 		/*
 		 * Issue an empty draw call to avoid possible hangs due to
 		 * repeated idles without intervening draw calls.
@@ -1499,41 +1499,46 @@
 					| adreno_dev->pix_shader_start;
 	}
 
-	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_PMODE,
-			&cmd[0], cmds - cmd);
+	return adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_PMODE, &cmd[0], cmds - cmd);
 }
 
-static void a2xx_drawctxt_save(struct adreno_device *adreno_dev,
+static int a2xx_drawctxt_save(struct adreno_device *adreno_dev,
 			struct adreno_context *context)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
+	int ret;
 
 	if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
-		return;
+		return 0;
 
-	if (context->flags & CTXT_FLAGS_GPU_HANG)
-		KGSL_CTXT_WARN(device,
-			"Current active context has caused gpu hang\n");
+	if (context->state == ADRENO_CONTEXT_STATE_INVALID)
+		return 0;
 
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 		kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
 			context->reg_save[1],
 			context->reg_save[2] << 2, true);
 		/* save registers and constants. */
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_NONE,
 			context->reg_save, 3);
 
+		if (ret)
+			return ret;
+
 		if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
 			kgsl_cffdump_syncmem(context->base.device,
 				&context->gpustate,
 				context->shader_save[1],
 				context->shader_save[2] << 2, true);
 			/* save shader partitioning and instructions. */
-			adreno_ringbuffer_issuecmds(device, context,
+			ret = adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_PMODE,
 				context->shader_save, 3);
 
+			if (ret)
+				return ret;
 			kgsl_cffdump_syncmem(context->base.device,
 				&context->gpustate,
 				context->shader_fixup[1],
@@ -1542,10 +1547,13 @@
 			 * fixup shader partitioning parameter for
 			 *  SET_SHADER_BASES.
 			 */
-			adreno_ringbuffer_issuecmds(device, context,
+			ret = adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_NONE,
 				context->shader_fixup, 3);
 
+			if (ret)
+				return ret;
+
 			context->flags |= CTXT_FLAGS_SHADER_RESTORE;
 		}
 	}
@@ -1558,32 +1566,41 @@
 		/* save gmem.
 		 * (note: changes shader. shader must already be saved.)
 		 */
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_PMODE,
 			context->context_gmem_shadow.gmem_save, 3);
 
+		if (ret)
+			return ret;
+
 		kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
 			context->chicken_restore[1],
 			context->chicken_restore[2] << 2, true);
 
 		/* Restore TP0_CHICKEN */
 		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-			adreno_ringbuffer_issuecmds(device, context,
+			ret = adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_NONE,
 				context->chicken_restore, 3);
+
+			if (ret)
+				return ret;
 		}
 		adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
 
 		context->flags |= CTXT_FLAGS_GMEM_RESTORE;
 	} else if (adreno_is_a2xx(adreno_dev))
-		a2xx_drawctxt_draw_workaround(adreno_dev, context);
+		return a2xx_drawctxt_draw_workaround(adreno_dev, context);
+
+	return 0;
 }
 
-static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
+static int a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
 			struct adreno_context *context)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 	unsigned int cmds[5];
+	int ret = 0;
 
 	if (context == NULL) {
 		/* No context - set the default pagetable and thats it */
@@ -1598,7 +1615,7 @@
 			: KGSL_CONTEXT_INVALID;
 		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
 				  id);
-		return;
+		return 0;
 	}
 
 	cmds[0] = cp_nop_packet(1);
@@ -1607,8 +1624,11 @@
 	cmds[3] = device->memstore.gpuaddr +
 		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
 	cmds[4] = context->base.id;
-	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+	ret = adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
 					cmds, 5);
+	if (ret)
+		return ret;
+
 	kgsl_mmu_setstate(&device->mmu, context->base.pagetable,
 			context->base.id);
 
@@ -1621,9 +1641,11 @@
 			context->context_gmem_shadow.gmem_restore[2] << 2,
 			true);
 
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_PMODE,
 			context->context_gmem_shadow.gmem_restore, 3);
+		if (ret)
+			return ret;
 
 		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 			kgsl_cffdump_syncmem(context->base.device,
@@ -1632,9 +1654,11 @@
 				context->chicken_restore[2] << 2, true);
 
 			/* Restore TP0_CHICKEN */
-			adreno_ringbuffer_issuecmds(device, context,
+			ret = adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_NONE,
 				context->chicken_restore, 3);
+			if (ret)
+				return ret;
 		}
 
 		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
@@ -1646,8 +1670,10 @@
 			context->reg_restore[2] << 2, true);
 
 		/* restore registers and constants. */
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
+		if (ret)
+			return ret;
 
 		/* restore shader instructions & partitioning. */
 		if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
@@ -1656,18 +1682,22 @@
 				context->shader_restore[1],
 				context->shader_restore[2] << 2, true);
 
-			adreno_ringbuffer_issuecmds(device, context,
+			ret = adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_NONE,
 				context->shader_restore, 3);
+			if (ret)
+				return ret;
 		}
 	}
 
 	if (adreno_is_a20x(adreno_dev)) {
 		cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
 		cmds[1] = context->bin_base_offset;
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_NONE, cmds, 2);
 	}
+
+	return ret;
 }
 
 /*
@@ -1734,13 +1764,14 @@
 
 	if (!status) {
 		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
-			/* This indicates that we could not read CP_INT_STAT.
-			 * As a precaution just wake up processes so
-			 * they can check their timestamps. Since, we
-			 * did not ack any interrupts this interrupt will
-			 * be generated again */
+			/*
+			 * This indicates that we could not read CP_INT_STAT.
+			 * As a precaution schedule the dispatcher to check
+			 * things out. Since we did not ack any interrupts this
+			 * interrupt will be generated again
+			 */
 			KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
-			wake_up_interruptible_all(&device->wait_queue);
+			adreno_dispatcher_schedule(device);
 		} else
 			KGSL_DRV_WARN(device, "Spurious interrput detected\n");
 		return;
@@ -1766,7 +1797,7 @@
 
 	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
 		queue_work(device->work_queue, &device->ts_expired_ws);
-		wake_up_interruptible_all(&device->wait_queue);
+		adreno_dispatcher_schedule(device);
 	}
 }
 
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index b1b27f5..c4f81fa 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2382,32 +2382,38 @@
 	return ret;
 }
 
-static void a3xx_drawctxt_save(struct adreno_device *adreno_dev,
+static int a3xx_drawctxt_save(struct adreno_device *adreno_dev,
 			   struct adreno_context *context)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
+	int ret;
 
 	if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
-		return;
+		return 0;
 
-	if (context->flags & CTXT_FLAGS_GPU_HANG)
-		KGSL_CTXT_WARN(device,
-			       "Current active context has caused gpu hang\n");
+	if (context->state == ADRENO_CONTEXT_STATE_INVALID)
+		return 0;
 
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 		/* Fixup self modifying IBs for save operations */
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);
+		if (ret)
+			return ret;
 
 		/* save registers and constants. */
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_NONE,
 			context->regconstant_save, 3);
+		if (ret)
+			return ret;
 
 		if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
 			/* Save shader instructions */
-			adreno_ringbuffer_issuecmds(device, context,
+			ret = adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
+			if (ret)
+				return ret;
 
 			context->flags |= CTXT_FLAGS_SHADER_RESTORE;
 		}
@@ -2425,19 +2431,25 @@
 			context->context_gmem_shadow.gmem_save[1],
 			context->context_gmem_shadow.gmem_save[2] << 2, true);
 
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 					KGSL_CMD_FLAGS_PMODE,
 					    context->context_gmem_shadow.
 					    gmem_save, 3);
+		if (ret)
+			return ret;
+
 		context->flags |= CTXT_FLAGS_GMEM_RESTORE;
 	}
+
+	return 0;
 }
 
-static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
+static int a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
 			      struct adreno_context *context)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 	unsigned int cmds[5];
+	int ret = 0;
 
 	if (context == NULL) {
 		/* No context - set the default pagetable and thats it */
@@ -2452,7 +2464,7 @@
 			: KGSL_CONTEXT_INVALID;
 		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
 				  id);
-		return;
+		return 0;
 	}
 
 	cmds[0] = cp_nop_packet(1);
@@ -2461,8 +2473,11 @@
 	cmds[3] = device->memstore.gpuaddr +
 		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
 	cmds[4] = context->base.id;
-	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+	ret = adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
 					cmds, 5);
+	if (ret)
+		return ret;
+
 	kgsl_mmu_setstate(&device->mmu, context->base.pagetable,
 			context->base.id);
 
@@ -2478,36 +2493,47 @@
 			context->context_gmem_shadow.gmem_restore[2] << 2,
 			true);
 
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 					KGSL_CMD_FLAGS_PMODE,
 					    context->context_gmem_shadow.
 					    gmem_restore, 3);
+		if (ret)
+			return ret;
 		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
 	}
 
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
+		if (ret)
+			return ret;
 
 		/* Fixup self modifying IBs for restore operations */
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_NONE,
 			context->restore_fixup, 3);
+		if (ret)
+			return ret;
 
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_NONE,
 			context->constant_restore, 3);
+		if (ret)
+			return ret;
 
-		if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
-			adreno_ringbuffer_issuecmds(device, context,
-				KGSL_CMD_FLAGS_NONE,
-				context->shader_restore, 3);
-
+		if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
+			ret = adreno_ringbuffer_issuecmds(device, context,
+				KGSL_CMD_FLAGS_NONE,
+				context->shader_restore, 3);
+			if (ret)
+				return ret;
+		}
+
 		/* Restore HLSQ_CONTROL_0 register */
-		adreno_ringbuffer_issuecmds(device, context,
+		ret = adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_NONE,
 			context->hlsqcontrol_restore, 3);
 	}
+
+	return ret;
 }
 
 static int a3xx_rb_init(struct adreno_device *adreno_dev,
@@ -2570,7 +2596,7 @@
 
 		/* Clear the error */
 		kgsl_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));
-		return;
+		goto done;
 	}
 	case A3XX_INT_RBBM_REG_TIMEOUT:
 		err = "RBBM: AHB register timeout";
@@ -2611,21 +2637,23 @@
 	case A3XX_INT_UCHE_OOB_ACCESS:
 		err = "UCHE:  Out of bounds access";
 		break;
+	default:
+		return;
 	}
-
 	KGSL_DRV_CRIT(device, "%s\n", err);
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+done:
+	/* Trigger a fault in the dispatcher - this will effect a restart */
+	adreno_dispatcher_irq_fault(device);
 }
 
 static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 
-	/* Wake up everybody waiting for the interrupt */
-	wake_up_interruptible_all(&device->wait_queue);
-
-	/* Schedule work to free mem and issue ibs */
 	queue_work(device->work_queue, &device->ts_expired_ws);
+	adreno_dispatcher_schedule(device);
 }
 
 /**
@@ -3152,115 +3180,118 @@
  */
 
 static struct adreno_perfcount_register a3xx_perfcounters_cp[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_CP_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_CP_0_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_rbbm[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RBBM_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RBBM_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_1_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_pc[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_1_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_2_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_3_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_2_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_3_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_vfd[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VFD_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VFD_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_1_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_hlsq[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_1_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_2_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_3_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_4_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_5_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_2_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_3_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_4_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_5_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_vpc[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VPC_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VPC_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_1_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_tse[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TSE_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TSE_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_1_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_ras[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RAS_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RAS_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_1_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_uche[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_1_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_2_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_3_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_4_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_5_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_2_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_3_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_4_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_5_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_tp[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_1_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_2_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_3_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_4_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_5_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_2_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_3_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_4_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_5_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_sp[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_1_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_2_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_3_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_4_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_5_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_6_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_7_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_2_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_3_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_4_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_5_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_6_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_7_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_rb[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RB_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RB_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_1_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_pwr[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PWR_0_LO, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PWR_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_1_LO },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_vbif[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_CNT0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_CNT1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT1_LO },
 };
 static struct adreno_perfcount_register a3xx_perfcounters_vbif_pwr[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT1_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT2_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT2_LO },
 };
 
+#define A3XX_PERFCOUNTER_GROUP(name) { a3xx_perfcounters_##name, \
+	ARRAY_SIZE(a3xx_perfcounters_##name), __stringify(name) }
+
 static struct adreno_perfcount_group a3xx_perfcounter_groups[] = {
-	{ a3xx_perfcounters_cp, ARRAY_SIZE(a3xx_perfcounters_cp) },
-	{ a3xx_perfcounters_rbbm, ARRAY_SIZE(a3xx_perfcounters_rbbm) },
-	{ a3xx_perfcounters_pc, ARRAY_SIZE(a3xx_perfcounters_pc) },
-	{ a3xx_perfcounters_vfd, ARRAY_SIZE(a3xx_perfcounters_vfd) },
-	{ a3xx_perfcounters_hlsq, ARRAY_SIZE(a3xx_perfcounters_hlsq) },
-	{ a3xx_perfcounters_vpc, ARRAY_SIZE(a3xx_perfcounters_vpc) },
-	{ a3xx_perfcounters_tse, ARRAY_SIZE(a3xx_perfcounters_tse) },
-	{ a3xx_perfcounters_ras, ARRAY_SIZE(a3xx_perfcounters_ras) },
-	{ a3xx_perfcounters_uche, ARRAY_SIZE(a3xx_perfcounters_uche) },
-	{ a3xx_perfcounters_tp, ARRAY_SIZE(a3xx_perfcounters_tp) },
-	{ a3xx_perfcounters_sp, ARRAY_SIZE(a3xx_perfcounters_sp) },
-	{ a3xx_perfcounters_rb, ARRAY_SIZE(a3xx_perfcounters_rb) },
-	{ a3xx_perfcounters_pwr, ARRAY_SIZE(a3xx_perfcounters_pwr) },
-	{ a3xx_perfcounters_vbif, ARRAY_SIZE(a3xx_perfcounters_vbif) },
-	{ a3xx_perfcounters_vbif_pwr, ARRAY_SIZE(a3xx_perfcounters_vbif_pwr) },
+	A3XX_PERFCOUNTER_GROUP(cp),
+	A3XX_PERFCOUNTER_GROUP(rbbm),
+	A3XX_PERFCOUNTER_GROUP(pc),
+	A3XX_PERFCOUNTER_GROUP(vfd),
+	A3XX_PERFCOUNTER_GROUP(hlsq),
+	A3XX_PERFCOUNTER_GROUP(vpc),
+	A3XX_PERFCOUNTER_GROUP(tse),
+	A3XX_PERFCOUNTER_GROUP(ras),
+	A3XX_PERFCOUNTER_GROUP(uche),
+	A3XX_PERFCOUNTER_GROUP(tp),
+	A3XX_PERFCOUNTER_GROUP(sp),
+	A3XX_PERFCOUNTER_GROUP(rb),
+	A3XX_PERFCOUNTER_GROUP(pwr),
+	A3XX_PERFCOUNTER_GROUP(vbif),
+	A3XX_PERFCOUNTER_GROUP(vbif_pwr),
 };
 
 static struct adreno_perfcounters a3xx_perfcounters = {
@@ -3304,6 +3335,9 @@
 	/* Reserve and start countable 1 in the PWR perfcounter group */
 	adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
 			NULL, PERFCOUNTER_FLAG_KERNEL);
+
+	/* Default performance counter profiling to false */
+	adreno_dev->profile.enabled = false;
 }
 
 /**
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
new file mode 100644
index 0000000..13b4ba3
--- /dev/null
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -0,0 +1,1611 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+
+#include "kgsl.h"
+#include "adreno.h"
+#include "adreno_ringbuffer.h"
+#include "adreno_trace.h"
+
+#define ADRENO_DISPATCHER_ACTIVE 0
+#define ADRENO_DISPATCHER_PAUSE 1
+
+#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+
+/* Number of commands that can be queued in a context before it sleeps */
+static unsigned int _context_cmdqueue_size = 50;
+
+/* Number of milliseconds to wait for the context queue to clear */
+static unsigned int _context_queue_wait = 10000;
+
+/* Number of command batches sent at a time from a single context */
+static unsigned int _context_cmdbatch_burst = 5;
+
+/* Number of command batches inflight in the ringbuffer at any time */
+static unsigned int _dispatcher_inflight = 15;
+
+/* Command batch timeout (in milliseconds) */
+static unsigned int _cmdbatch_timeout = 2000;
+
+/* Interval for reading and comparing fault detection registers */
+static unsigned int _fault_timer_interval = 50;
+
+/* Local array for the current set of fault detect registers */
+static unsigned int fault_detect_regs[FT_DETECT_REGS_COUNT];
+
+/* The last retired global timestamp read during fault detect */
+static unsigned int fault_detect_ts;
+
+/**
+ * fault_detect_read() - Read the set of fault detect registers
+ * @device: Pointer to the KGSL device struct
+ *
+ * Read the set of fault detect registers and store them in the local array.
+ * This is for the initial values that are compared later with
+ * fault_detect_read_compare
+ */
+static void fault_detect_read(struct kgsl_device *device)
+{
+	int i;
+
+	fault_detect_ts = kgsl_readtimestamp(device, NULL,
+		KGSL_TIMESTAMP_RETIRED);
+
+	for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
+		if (ft_detect_regs[i] == 0)
+			continue;
+		kgsl_regread(device, ft_detect_regs[i],
+			&fault_detect_regs[i]);
+	}
+}
+
+/**
+ * fault_detect_read_compare() - Read the fault detect registers and compare
+ * them to the current value
+ * @device: Pointer to the KGSL device struct
+ *
+ * Read the set of fault detect registers and compare them to the current set
+ * of registers.  Return 1 if any of the register values changed
+ */
+static int fault_detect_read_compare(struct kgsl_device *device)
+{
+	int i, ret = 0;
+	unsigned int ts;
+
+	for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
+		unsigned int val;
+
+		if (ft_detect_regs[i] == 0)
+			continue;
+		kgsl_regread(device, ft_detect_regs[i], &val);
+		if (val != fault_detect_regs[i])
+			ret = 1;
+		fault_detect_regs[i] = val;
+	}
+
+	ts = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+	if (ts != fault_detect_ts)
+		ret = 1;
+
+	fault_detect_ts = ts;
+
+	return ret;
+}
+
+/**
+ * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
+ * @drawctxt: Pointer to the adreno draw context
+ *
+ * Dequeue a new command batch from the context list
+ */
+static inline struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
+		struct adreno_context *drawctxt)
+{
+	struct kgsl_cmdbatch *cmdbatch = NULL;
+
+	mutex_lock(&drawctxt->mutex);
+	if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+		cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+		/*
+		 * Don't dequeue a cmdbatch that is still waiting for other
+		 * events
+		 */
+		if (kgsl_cmdbatch_sync_pending(cmdbatch)) {
+			cmdbatch = ERR_PTR(-EAGAIN);
+			goto done;
+		}
+
+		drawctxt->cmdqueue_head =
+			CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
+			ADRENO_CONTEXT_CMDQUEUE_SIZE);
+		drawctxt->queued--;
+	}
+
+done:
+	mutex_unlock(&drawctxt->mutex);
+
+	return cmdbatch;
+}
+
+/**
+ * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context
+ * queue
+ * @drawctxt: Pointer to the adreno draw context
+ * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
+ *
+ * Failure to submit a command to the ringbuffer isn't the fault of the command
+ * being submitted so if a failure happens, push it back on the head of the the
+ * context queue to be reconsidered again
+ */
+static inline void adreno_dispatcher_requeue_cmdbatch(
+		struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
+{
+	int prev;
+	mutex_lock(&drawctxt->mutex);
+
+	if (kgsl_context_detached(&drawctxt->base) ||
+		drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+		mutex_unlock(&drawctxt->mutex);
+		return;
+	}
+
+	prev = drawctxt->cmdqueue_head - 1;
+
+	if (prev < 0)
+		prev = ADRENO_CONTEXT_CMDQUEUE_SIZE - 1;
+
+	/*
+	 * The maximum queue size always needs to be one less than the size of
+	 * the context queue so there is "room" to put the cmdbatch back in
+	 */
+
+	BUG_ON(prev == drawctxt->cmdqueue_tail);
+
+	drawctxt->cmdqueue[prev] = cmdbatch;
+	drawctxt->queued++;
+
+	/* Reset the command queue head to reflect the newly requeued change */
+	drawctxt->cmdqueue_head = prev;
+	mutex_unlock(&drawctxt->mutex);
+}
+
+/**
+ * dispatcher_queue_context() - Queue a context in the dispatcher pending list
+ * @dispatcher: Pointer to the adreno dispatcher struct
+ * @drawctxt: Pointer to the adreno draw context
+ *
+ * Add a context to the dispatcher pending list.
+ */
+static void dispatcher_queue_context(struct adreno_device *adreno_dev,
+		struct adreno_context *drawctxt)
+{
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+	spin_lock(&dispatcher->plist_lock);
+
+	if (plist_node_empty(&drawctxt->pending)) {
+		/* Get a reference to the context while it sits on the list */
+		_kgsl_context_get(&drawctxt->base);
+		trace_dispatch_queue_context(drawctxt);
+		plist_add(&drawctxt->pending, &dispatcher->pending);
+	}
+
+	spin_unlock(&dispatcher->plist_lock);
+}
+
+/**
+ * sendcmd() - Send a command batch to the GPU hardware
+ * @dispatcher: Pointer to the adreno dispatcher struct
+ * @cmdbatch: Pointer to the KGSL cmdbatch being sent
+ *
+ * Send a KGSL command batch to the GPU hardware
+ */
+static int sendcmd(struct adreno_device *adreno_dev,
+	struct kgsl_cmdbatch *cmdbatch)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+	int ret;
+
+	dispatcher->inflight++;
+
+	mutex_lock(&device->mutex);
+
+	if (dispatcher->inflight == 1) {
+		/* Time to make the donuts.  Turn on the GPU */
+		ret = kgsl_active_count_get(device);
+		if (ret) {
+			dispatcher->inflight--;
+			mutex_unlock(&device->mutex);
+			return ret;
+		}
+	}
+
+	ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch);
+
+	/*
+	 * On the first command, if the submission was successful, then read the
+	 * fault registers.  If it failed then turn off the GPU. Sad face.
+	 */
+
+	if (dispatcher->inflight == 1) {
+		if (ret == 0)
+			fault_detect_read(device);
+		else
+			kgsl_active_count_put(device);
+	}
+
+	mutex_unlock(&device->mutex);
+
+	if (ret) {
+		dispatcher->inflight--;
+		KGSL_DRV_ERR(device,
+			"Unable to submit command to the ringbuffer\n");
+		return ret;
+	}
+
+	trace_adreno_cmdbatch_submitted(cmdbatch, dispatcher->inflight);
+
+	dispatcher->cmdqueue[dispatcher->tail] = cmdbatch;
+	dispatcher->tail = (dispatcher->tail + 1) %
+		ADRENO_DISPATCH_CMDQUEUE_SIZE;
+
+	/*
+	 * If this is the first command in the pipe then the GPU will
+	 * immediately start executing it so we can start the expiry timeout on
+	 * the command batch here.  Subsequent command batches will have their
+	 * timer started when the previous command batch is retired
+	 */
+	if (dispatcher->inflight == 1) {
+		cmdbatch->expires = jiffies +
+			msecs_to_jiffies(_cmdbatch_timeout);
+		mod_timer(&dispatcher->timer, cmdbatch->expires);
+
+		/* Start the fault detection timer */
+		if (adreno_dev->fast_hang_detect)
+			mod_timer(&dispatcher->fault_timer,
+				jiffies +
+				msecs_to_jiffies(_fault_timer_interval));
+	}
+
+	return 0;
+}
+
+/**
+ * dispatcher_context_sendcmds() - Send commands from a context to the GPU
+ * @adreno_dev: Pointer to the adreno device struct
+ * @drawctxt: Pointer to the adreno context to dispatch commands from
+ *
+ * Dequeue and send a burst of commands from the specified context to the GPU
+ */
+static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
+		struct adreno_context *drawctxt)
+{
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+	int count = 0;
+	int requeued = 0;
+
+	/*
+	 * Each context can send a specific number of command batches per cycle
+	 */
+	while ((count < _context_cmdbatch_burst) &&
+		(dispatcher->inflight < _dispatcher_inflight)) {
+		int ret;
+		struct kgsl_cmdbatch *cmdbatch;
+
+		if (dispatcher->state != ADRENO_DISPATCHER_ACTIVE)
+			break;
+
+		if (adreno_gpu_fault(adreno_dev) != 0)
+			break;
+
+		cmdbatch = adreno_dispatcher_get_cmdbatch(drawctxt);
+
+		if (cmdbatch == NULL)
+			break;
+
+		/*
+		 * adreno_dispatcher_get_cmdbatch() returns -EAGAIN if the
+		 * current cmdbatch has pending sync points so there is no more
+		 * to do here.  When the sync points are satisfied then the
+		 * context will get requeued
+		 */
+
+		if (IS_ERR(cmdbatch) && PTR_ERR(cmdbatch) == -EAGAIN) {
+			requeued = 1;
+			break;
+		}
+
+		/*
+		 * If this is a synchronization submission then there are no
+		 * commands to submit.  Discard it and get the next item from
+		 * the queue.  Decrement count so this packet doesn't count
+		 * against the burst for the context
+		 */
+
+		if (cmdbatch->flags & KGSL_CONTEXT_SYNC) {
+			kgsl_cmdbatch_destroy(cmdbatch);
+			continue;
+		}
+
+		ret = sendcmd(adreno_dev, cmdbatch);
+
+		/*
+		 * There are various reasons why we can't submit a command (no
+		 * memory for the commands, full ringbuffer, etc) but none of
+		 * these are actually the current command's fault.  Requeue it
+		 * back on the context and let it come back around again if
+		 * conditions improve
+		 */
+		if (ret) {
+			adreno_dispatcher_requeue_cmdbatch(drawctxt, cmdbatch);
+			requeued = 1;
+			break;
+		}
+		count++;
+	}
+
+	/*
+	 * If the context successfully submitted commands, then
+	 * unconditionally put it back on the queue to be considered the
+	 * next time around. This might seem a little wasteful but it is
+	 * reasonable to think that a busy context will stay busy.
+	 */
+
+	if (count || requeued) {
+		dispatcher_queue_context(adreno_dev, drawctxt);
+
+		/*
+		 * If we submitted something there will be room in the
+		 * context queue so ping the context wait queue on the
+		 * chance that the context is snoozing
+		 */
+
+		wake_up_interruptible_all(&drawctxt->wq);
+	}
+
+	/* Return the number of command batches processed */
+	if (count > 0)
+		return count;
+
+	/*
+	 * If we didn't process anything because of a stall or an error
+	 * return -1 so the issuecmds loop knows that we shouldn't
+	 * keep trying to process it
+	 */
+
+	return requeued ? -1 : 0;
+}
+
+/**
+ * _adreno_dispatcher_issuecmds() - Issue commands from pending contexts
+ * @adreno_dev: Pointer to the adreno device struct
+ *
+ * Issue as many commands as possible (up to the inflight limit) from the pending contexts.
+ * This function assumes the dispatcher mutex has been locked.
+ */
+static int _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
+{
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+	int last_id = 0;
+
+	while (dispatcher->inflight < _dispatcher_inflight) {
+		struct adreno_context *drawctxt = NULL;
+
+		/* Don't do anything if the dispatcher is paused */
+		if (dispatcher->state != ADRENO_DISPATCHER_ACTIVE)
+			break;
+
+		if (adreno_gpu_fault(adreno_dev) != 0)
+			break;
+
+		spin_lock(&dispatcher->plist_lock);
+
+		if (!plist_head_empty(&dispatcher->pending)) {
+			drawctxt = plist_first_entry(&dispatcher->pending,
+				struct adreno_context, pending);
+
+			plist_del(&drawctxt->pending, &dispatcher->pending);
+		}
+
+		spin_unlock(&dispatcher->plist_lock);
+
+		if (drawctxt == NULL)
+			break;
+
+		if (kgsl_context_detached(&drawctxt->base) ||
+			drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+			kgsl_context_put(&drawctxt->base);
+			continue;
+		}
+
+		/*
+		 * Don't loop endlessly on the same stalled context - if we see
+		 * the same context twice in a row then assume that there is
+		 * nothing else to do and bail.  But don't forget to put the
+		 * stalled context back on the queue otherwise it will get
+		 * forgotten
+		 */
+
+		if (last_id == drawctxt->base.id) {
+			dispatcher_queue_context(adreno_dev, drawctxt);
+			kgsl_context_put(&drawctxt->base);
+			break;
+		}
+
+		last_id = drawctxt->base.id;
+		dispatcher_context_sendcmds(adreno_dev, drawctxt);
+		kgsl_context_put(&drawctxt->base);
+	}
+
+	return 0;
+}
+
+/**
+ * adreno_dispatcher_issuecmds() - Issue commands from pending contexts
+ * @adreno_dev: Pointer to the adreno device struct
+ *
+ * Lock the dispatcher and call _adreno_dispatcher_issuecmds()
+ */
+int adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
+{
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+	int ret;
+
+	mutex_lock(&dispatcher->mutex);
+	ret = _adreno_dispatcher_issuecmds(adreno_dev);
+	mutex_unlock(&dispatcher->mutex);
+
+	return ret;
+}
+
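+/*
+ * _check_context_queue() - Wait condition helper for the context queue
+ * @drawctxt: Pointer to the adreno draw context
+ *
+ * Return 1 if there is room in the context queue or if the context has been
+ * invalidated, so a waiter in adreno_dispatcher_queue_cmd() can wake up.
+ */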
+static int _check_context_queue(struct adreno_context *drawctxt)
+{
+	int ret;
+
+	mutex_lock(&drawctxt->mutex);
+
+	/*
+	 * Wake up if there is room in the context queue or if the whole thing
+	 * got invalidated while we were asleep
+	 * invalidated while we were asleep
+	 */
+
+	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+		ret = 1;
+	else
+		ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
+
+	mutex_unlock(&drawctxt->mutex);
+
+	return ret;
+}
+
+/**
+ * get_timestamp() - Return the next timestamp for the context
+ * @drawctxt: Pointer to an adreno draw context struct
+ * @cmdbatch: Pointer to a command batch
+ * @timestamp: Pointer to a timestamp value possibly passed from the user
+ *
+ * Assign a timestamp based on the settings of the draw context and the command
+ * batch.
+ */
+static int get_timestamp(struct adreno_context *drawctxt,
+		struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp)
+{
+	/* Synchronization commands don't get a timestamp */
+	if (cmdbatch->flags & KGSL_CONTEXT_SYNC) {
+		*timestamp = 0;
+		return 0;
+	}
+
+	if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
+		/*
+		 * User specified timestamps need to be greater than the last
+		 * issued timestamp in the context
+		 */
+		if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0)
+			return -ERANGE;
+
+		drawctxt->timestamp = *timestamp;
+	} else
+		drawctxt->timestamp++;
+
+	*timestamp = drawctxt->timestamp;
+	return 0;
+}
+
+/**
+ * adreno_dispatcher_queue_cmd() - Queue a new command in the context
+ * @adreno_dev: Pointer to the adreno device struct
+ * @drawctxt: Pointer to the adreno draw context
+ * @cmdbatch: Pointer to the command batch being submitted
+ * @timestamp: Pointer to the requested timestamp
+ *
+ * Queue a command in the context - if there isn't any room in the queue, then
+ * block until there is
+ */
+int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
+		struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
+		uint32_t *timestamp)
+{
+	int ret;
+
+	mutex_lock(&drawctxt->mutex);
+
+	if (drawctxt->flags & CTXT_FLAGS_BEING_DESTROYED) {
+		mutex_unlock(&drawctxt->mutex);
+		return -EINVAL;
+	}
+
+	/*
+	 * After skipping to the end of the frame we need to force the preamble
+	 * to run (if it exists) regardless of the context state.
+	 */
+
+	if (drawctxt->flags & CTXT_FLAGS_FORCE_PREAMBLE) {
+		set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+		drawctxt->flags &= ~CTXT_FLAGS_FORCE_PREAMBLE;
+	}
+
+	/*
+	 * If we are waiting for the end of frame and it hasn't appeared yet,
+	 * then mark the command batch as skipped.  It will still progress
+	 * through the pipeline but it won't actually send any commands
+	 */
+
+	if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
+		set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+
+		/*
+		 * If this command batch represents the EOF then clear the way
+		 * for the dispatcher to continue submitting
+		 */
+
+		if (cmdbatch->flags & KGSL_CONTEXT_END_OF_FRAME) {
+			drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
+
+			/*
+			 * Force the preamble on the next command to ensure that
+			 * the state is correct
+			 */
+
+			drawctxt->flags |= CTXT_FLAGS_FORCE_PREAMBLE;
+		}
+	}
+
+	/* Wait for room in the context queue */
+
+	while (drawctxt->queued >= _context_cmdqueue_size) {
+		trace_adreno_drawctxt_sleep(drawctxt);
+		mutex_unlock(&drawctxt->mutex);
+
+		ret = wait_event_interruptible_timeout(drawctxt->wq,
+			_check_context_queue(drawctxt),
+			msecs_to_jiffies(_context_queue_wait));
+
+		mutex_lock(&drawctxt->mutex);
+		trace_adreno_drawctxt_wake(drawctxt);
+
+		if (ret <= 0) {
+			mutex_unlock(&drawctxt->mutex);
+			return (ret == 0) ? -ETIMEDOUT : (int) ret;
+		}
+
+		/*
+		 * Account for the possibility that the context got invalidated
+		 * while we were sleeping
+		 */
+
+		if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+			mutex_unlock(&drawctxt->mutex);
+			return -EDEADLK;
+		}
+	}
+
+	ret = get_timestamp(drawctxt, cmdbatch, timestamp);
+	if (ret) {
+		mutex_unlock(&drawctxt->mutex);
+		return ret;
+	}
+
+	cmdbatch->timestamp = *timestamp;
+
+	/*
+	 * Set the fault tolerance policy for the command batch - assuming the
+	 * context hasn't disabled FT, use the current device policy
+	 */
+
+	if (drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
+		set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy);
+	else
+		cmdbatch->fault_policy = adreno_dev->ft_policy;
+
+	/* Put the command into the queue */
+	drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
+	drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
+		ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+	drawctxt->queued++;
+	trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
+
+
+	mutex_unlock(&drawctxt->mutex);
+
+	/* Add the context to the dispatcher pending list */
+	dispatcher_queue_context(adreno_dev, drawctxt);
+
+	/*
+	 * Only issue commands if inflight is less than burst - this prevents us
+	 * from sitting around waiting for the mutex on a busy system - the work
+	 * loop will schedule it for us. Inflight is mutex protected but the
+	 * worst that can happen is that it will go to 0 after we check and if
+	 * it goes to 0 it is because the work loop decremented it and the work
+	 * queue will try to schedule new commands anyway.
+	 */
+
+	if (adreno_dev->dispatcher.inflight < _context_cmdbatch_burst)
+		adreno_dispatcher_issuecmds(adreno_dev);
+
+	return 0;
+}
+
+/*
+ * If an IB inside of the command batch has a gpuaddr that matches the base
+ * passed in then zero the size which effectively skips it when it is submitted
+ * in the ringbuffer.
+ */
+static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, unsigned int base)
+{
+	int i;
+
+	for (i = 0; i < cmdbatch->ibcount; i++) {
+		if (cmdbatch->ibdesc[i].gpuaddr == base) {
+			cmdbatch->ibdesc[i].sizedwords = 0;
+			return;
+		}
+	}
+}
+
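+/*
+ * cmdbatch_skip_frame() - Skip the rest of the frame for a faulting context
+ * @cmdbatch: The faulting command batch
+ * @replay: Array of inflight command batches being replayed
+ * @count: Number of entries in the replay array
+ *
+ * Mark every command batch from the faulting context as skipped until the
+ * end-of-frame marker is seen, then force the preamble on the next command
+ * that does run.  If no EOF is found in the replay list, flag the drawctxt so
+ * later submissions keep getting skipped until the EOF arrives.
+ */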
+static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
+	struct kgsl_cmdbatch **replay, int count)
+{
+	struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+	int skip = 1;
+	int i;
+
+	for (i = 0; i < count; i++) {
+
+		/*
+		 * Only operate on command batches that belong to the
+		 * faulting context
+		 */
+
+		if (replay[i]->context->id != cmdbatch->context->id)
+			continue;
+
+		/*
+		 * Skip all the command batches in this context until
+		 * the EOF flag is seen.  If the EOF flag is seen then
+		 * force the preamble for the next command.
+		 */
+
+		if (skip) {
+			set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv);
+
+			if (replay[i]->flags & KGSL_CONTEXT_END_OF_FRAME)
+				skip = 0;
+		} else {
+			set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+			return;
+		}
+	}
+
+	/*
+	 * If the EOF flag hasn't been seen yet then set the flag in the
+	 * drawctxt to keep looking for it
+	 */
+
+	if (skip && drawctxt)
+		drawctxt->flags |= CTXT_FLAGS_SKIP_EOF;
+
+	/*
+	 * If we did see the EOF flag then force the preamble on for the
+	 * next command issued on this context
+	 */
+
+	if (!skip && drawctxt)
+		drawctxt->flags |= CTXT_FLAGS_FORCE_PREAMBLE;
+}
+
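+/*
+ * remove_invalidated_cmdbatches() - Drop command batches from dead contexts
+ * @device: Pointer to the KGSL device struct
+ * @replay: Array of inflight command batches being replayed
+ * @count: Number of entries in the replay array
+ *
+ * Remove any command batch from the replay list whose context has been
+ * detached or invalidated, cancelling its pending events and destroying it.
+ */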
+static void remove_invalidated_cmdbatches(struct kgsl_device *device,
+		struct kgsl_cmdbatch **replay, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		struct kgsl_cmdbatch *cmd = replay[i];
+		struct adreno_context *drawctxt;
+
+		if (cmd == NULL)
+			continue;
+
+		drawctxt = ADRENO_CONTEXT(cmd->context);
+
+		if (kgsl_context_detached(cmd->context) ||
+			drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+			replay[i] = NULL;
+
+			mutex_lock(&device->mutex);
+			kgsl_cancel_events_timestamp(device, cmd->context,
+				cmd->timestamp);
+			mutex_unlock(&device->mutex);
+
+			kgsl_cmdbatch_destroy(cmd);
+		}
+	}
+}
+
+static char _pidname[TASK_COMM_LEN];
+
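+/* Return the comm of the task that owns the context, or "unknown" */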
+static inline const char *_kgsl_context_comm(struct kgsl_context *context)
+{
+	struct task_struct *task = NULL;
+
+	if (context)
+		task = find_task_by_vpid(context->pid);
+
+	if (task)
+		get_task_comm(_pidname, task);
+	else
+		snprintf(_pidname, TASK_COMM_LEN, "unknown");
+
+	return _pidname;
+}
+
+#define pr_fault(_d, _c, fmt, args...) \
+		dev_err((_d)->dev, "%s[%d]: " fmt, \
+		_kgsl_context_comm((_c)->context), \
+		(_c)->context->pid, ##args)
+
+
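+/*
+ * adreno_fault_header() - Log the GPU state at fault time
+ * @device: Pointer to the KGSL device struct
+ * @cmdbatch: Pointer to the faulting command batch
+ *
+ * Read the RBBM status and the ringbuffer/IB registers and print them along
+ * with the faulting context and timestamp.
+ */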
+static void adreno_fault_header(struct kgsl_device *device,
+	struct kgsl_cmdbatch *cmdbatch)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int status, base, rptr, wptr, ib1base, ib2base, ib1sz, ib2sz;
+
+	kgsl_regread(device,
+			adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS),
+			&status);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_BASE),
+		&base);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_RPTR),
+		&rptr);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_WPTR),
+		&wptr);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BASE),
+		&ib1base);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ),
+		&ib1sz);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BASE),
+		&ib2base);
+	kgsl_regread(device,
+		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ),
+		&ib2sz);
+
+	trace_adreno_gpu_fault(cmdbatch->context->id, cmdbatch->timestamp,
+		status, rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
+
+	pr_fault(device, cmdbatch,
+		"gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %8.8x/%4.4x ib2 %8.8x/%4.4x\n",
+		cmdbatch->context->id, cmdbatch->timestamp, status,
+		rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
+}
+
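+/*
+ * dispatcher_do_fault() - Handle a GPU fault and replay inflight commands
+ * @device: Pointer to the KGSL device struct
+ *
+ * Consume the pending fault, dump the GPU state, reset the hardware and then
+ * replay or skip the inflight command batches according to the fault
+ * tolerance policy of the "hung" command batch.  Returns 1 if a fault was
+ * handled and 0 if there was nothing to do.
+ */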
+static int dispatcher_do_fault(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+	unsigned int ptr;
+	unsigned int reg, base;
+	struct kgsl_cmdbatch **replay = NULL;
+	struct kgsl_cmdbatch *cmdbatch;
+	int ret, i, count = 0;
+	int fault, first = 0;
+	bool pagefault = false;
+	BUG_ON(dispatcher->inflight == 0);
+
+	fault = atomic_xchg(&dispatcher->fault, 0);
+	if (fault == 0)
+		return 0;
+
+	/* Turn off all the timers */
+	del_timer_sync(&dispatcher->timer);
+	del_timer_sync(&dispatcher->fault_timer);
+
+	mutex_lock(&device->mutex);
+
+	cmdbatch = dispatcher->cmdqueue[dispatcher->head];
+
+	trace_adreno_cmdbatch_fault(cmdbatch, fault);
+
+	/*
+	 * If the fault was due to a timeout then stop the CP to ensure we don't
+	 * get activity while we are trying to dump the state of the system
+	 */
+
+	if (fault == ADRENO_TIMEOUT_FAULT) {
+		adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, &reg);
+		reg |= (1 << 27) | (1 << 28);
+		adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
+
+		/* Skip the PM dump for a timeout because it confuses people */
+		set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
+	}
+
+	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &base);
+
+	/*
+	 * Dump the postmortem and snapshot information if this is the first
+	 * detected fault for the oldest active command batch
+	 */
+
+	if (!test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy)) {
+		adreno_fault_header(device, cmdbatch);
+
+		if (device->pm_dump_enable)
+			kgsl_postmortem_dump(device, 0);
+
+		kgsl_device_snapshot(device, 1);
+	}
+
+	mutex_unlock(&device->mutex);
+
+	/* Allocate memory to store the inflight commands */
+	replay = kzalloc(sizeof(*replay) * dispatcher->inflight, GFP_KERNEL);
+
+	if (replay == NULL) {
+		unsigned int ptr = dispatcher->head;
+
+		while (ptr != dispatcher->tail) {
+			struct kgsl_context *context =
+				dispatcher->cmdqueue[ptr]->context;
+
+			adreno_drawctxt_invalidate(device, context);
+			kgsl_cmdbatch_destroy(dispatcher->cmdqueue[ptr]);
+
+			ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+		}
+
+		/*
+		 * Set the replay count to zero - this will ensure that the
+		 * hardware gets reset but nothing else gets replayed
+		 */
+
+		count = 0;
+		goto replay;
+	}
+
+	/* Copy the inflight command batches into the temporary storage */
+	ptr = dispatcher->head;
+
+	while (ptr != dispatcher->tail) {
+		replay[count++] = dispatcher->cmdqueue[ptr];
+		ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+	}
+
+	/*
+	 * For the purposes of replay, we assume that the oldest command batch
+	 * that hasn't retired a timestamp is "hung".
+	 */
+
+	cmdbatch = replay[0];
+
+	/*
+	 * If FT is disabled for this cmdbatch invalidate immediately
+	 */
+
+	if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) ||
+		test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) {
+		pr_fault(device, cmdbatch, "gpu skipped ctx %d ts %d\n",
+			cmdbatch->context->id, cmdbatch->timestamp);
+
+		adreno_drawctxt_invalidate(device, cmdbatch->context);
+	}
+
+	/*
+	 * Set a flag so we don't print another PM dump if the cmdbatch fails
+	 * again on replay
+	 */
+
+	set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
+
+	/*
+	 * A hardware fault generally means something was deterministically
+	 * wrong with the command batch - no point in trying to replay it.
+	 * Clear the replay bit and move on to the next policy level
+	 */
+
+	if (fault == ADRENO_HARD_FAULT)
+		clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy));
+
+	/*
+	 * A timeout fault means the IB timed out - clear the policy and
+	 * invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay
+	 * because we won't see this cmdbatch again
+	 */
+
+	if (fault == ADRENO_TIMEOUT_FAULT)
+		bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+
+	/*
+	 * If the context had a GPU page fault then it is likely it would fault
+	 * again if replayed
+	 */
+
+	if (test_bit(KGSL_CONTEXT_PAGEFAULT, &cmdbatch->context->priv)) {
+		/* we'll need to resume the mmu later... */
+		pagefault = true;
+		clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy);
+		clear_bit(KGSL_CONTEXT_PAGEFAULT, &cmdbatch->context->priv);
+	}
+
+	/*
+	 * Execute the fault tolerance policy. Each command batch stores the
+	 * current fault policy that was set when it was queued.
+	 * As the options are tried in descending priority
+	 * (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared
+	 * from the cmdbatch policy so the next option can be tried if the
+	 * cmdbatch comes around again
+	 */
+
+	/* Replay the hanging command batch again */
+	if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) {
+		trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY));
+		set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery);
+		goto replay;
+	}
+
+	/*
+	 * Skip the last IB1 that was played but replay everything else.
+	 * Note that the last IB1 might not be in the "hung" command batch
+	 * because the CP may have caused a page-fault while it was prefetching
+	 * the next IB1/IB2.  Walk all outstanding commands and zap the
+	 * supposedly bad IB1 wherever it lurks.
+	 */
+
+	if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) {
+		trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB));
+		set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery);
+
+		for (i = 0; i < count; i++) {
+			if (replay[i] != NULL)
+				cmdbatch_skip_ib(replay[i], base);
+		}
+
+		goto replay;
+	}
+
+	if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) {
+		trace_adreno_cmdbatch_recovery(cmdbatch,
+			BIT(KGSL_FT_SKIPFRAME));
+		set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery);
+
+		/*
+		 * Skip all the pending command batches for this context until
+		 * the EOF frame is seen
+		 */
+		cmdbatch_skip_frame(cmdbatch, replay, count);
+		goto replay;
+	}
+
+	/* If we get here then all the policies failed */
+
+	pr_fault(device, cmdbatch, "gpu failed ctx %d ts %d\n",
+		cmdbatch->context->id, cmdbatch->timestamp);
+
+	/* Invalidate the context */
+	adreno_drawctxt_invalidate(device, cmdbatch->context);
+
+
+replay:
+	/* Reset the dispatcher queue */
+	dispatcher->inflight = 0;
+	dispatcher->head = dispatcher->tail = 0;
+
+	/* Reset the GPU */
+	mutex_lock(&device->mutex);
+
+	/* resume the MMU if it is stalled */
+	if (pagefault && device->mmu.mmu_ops->mmu_pagefault_resume != NULL)
+		device->mmu.mmu_ops->mmu_pagefault_resume(&device->mmu);
+
+	ret = adreno_reset(device);
+	mutex_unlock(&device->mutex);
+
+	/* If adreno_reset() fails then what hope do we have for the future? */
+	BUG_ON(ret);
+
+	/* Remove any pending command batches that have been invalidated */
+	remove_invalidated_cmdbatches(device, replay, count);
+
+	/* Replay the pending command buffers */
+	for (i = 0; i < count; i++) {
+
+		int ret;
+
+		if (replay[i] == NULL)
+			continue;
+
+		/*
+		 * Force the preamble on the first command (if applicable) to
+		 * avoid any strange stage issues
+		 */
+
+		if (first == 0) {
+			set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+			first = 1;
+		}
+
+		/*
+		 * Force each command batch to wait for idle - this avoids weird
+		 * CP parse issues
+		 */
+
+		set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv);
+
+		ret = sendcmd(adreno_dev, replay[i]);
+
+		/*
+		 * If sending the command fails, then try to recover by
+		 * invalidating the context
+		 */
+
+		if (ret) {
+			pr_fault(device, replay[i],
+				"gpu reset failed ctx %d ts %d\n",
+				replay[i]->context->id, replay[i]->timestamp);
+
+			adreno_drawctxt_invalidate(device, replay[i]->context);
+			remove_invalidated_cmdbatches(device, &replay[i],
+				count - i);
+		}
+	}
+
+	mutex_lock(&device->mutex);
+	kgsl_active_count_put(device);
+	mutex_unlock(&device->mutex);
+
+	kfree(replay);
+
+	return 1;
+}
+
+static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
+		unsigned int consumed, unsigned int retired)
+{
+	return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) &&
+		(timestamp_cmp(retired, cmdbatch->timestamp) < 0));
+}
+
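+/*
+ * _print_recovery() - Log which fault tolerance action recovered a cmdbatch
+ * @device: Pointer to the KGSL device struct
+ * @cmdbatch: Pointer to the recovered command batch
+ *
+ * Translate the first bit set in the fault_recovery bitmap into a human
+ * readable string and print it along with the context and timestamp.
+ */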
+static void _print_recovery(struct kgsl_device *device,
+		struct kgsl_cmdbatch *cmdbatch)
+{
+	static struct {
+		unsigned int mask;
+		const char *str;
+	} flags[] = { ADRENO_FT_TYPES };
+
+	int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG);
+	char *result = "unknown";
+
+	for (i = 0; i < ARRAY_SIZE(flags); i++) {
+		if (flags[i].mask == BIT(nr)) {
+			result = (char *) flags[i].str;
+			break;
+		}
+	}
+
+	pr_fault(device, cmdbatch,
+		"gpu %s ctx %d ts %d policy %lX\n",
+		result, cmdbatch->context->id, cmdbatch->timestamp,
+		cmdbatch->fault_recovery);
+}
+
+/**
+ * adreno_dispatcher_work() - Master work handler for the dispatcher
+ * @work: Pointer to the work struct for the current work queue
+ *
+ * Process expired commands and send new ones.
+ */
+static void adreno_dispatcher_work(struct work_struct *work)
+{
+	struct adreno_dispatcher *dispatcher =
+		container_of(work, struct adreno_dispatcher, work);
+	struct adreno_device *adreno_dev =
+		container_of(dispatcher, struct adreno_device, dispatcher);
+	struct kgsl_device *device = &adreno_dev->dev;
+	int count = 0;
+
+	mutex_lock(&dispatcher->mutex);
+
+	while (dispatcher->head != dispatcher->tail) {
+		uint32_t consumed, retired = 0;
+		struct kgsl_cmdbatch *cmdbatch =
+			dispatcher->cmdqueue[dispatcher->head];
+		struct adreno_context *drawctxt;
+		BUG_ON(cmdbatch == NULL);
+
+		drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+
+		/*
+		 * First try to expire the timestamp. This happens if the
+		 * context is valid and the timestamp expired normally or if the
+		 * context was destroyed before the command batch was finished
+		 * in the GPU.  Either way, retire the command batch, advance the
+		 * pointers and continue processing the queue
+		 */
+
+		if (!kgsl_context_detached(cmdbatch->context))
+			retired = kgsl_readtimestamp(device, cmdbatch->context,
+				KGSL_TIMESTAMP_RETIRED);
+
+		if (kgsl_context_detached(cmdbatch->context) ||
+			(timestamp_cmp(cmdbatch->timestamp, retired) <= 0)) {
+
+			/*
+			 * If the cmdbatch in question had faulted, announce its
+			 * successful completion to the world
+			 */
+
+			if (cmdbatch->fault_recovery != 0)
+				_print_recovery(device, cmdbatch);
+
+			trace_adreno_cmdbatch_retired(cmdbatch,
+				dispatcher->inflight - 1);
+
+			/* Reduce the number of inflight command batches */
+			dispatcher->inflight--;
+
+			/* Zero the old entry */
+			dispatcher->cmdqueue[dispatcher->head] = NULL;
+
+			/* Advance the buffer head */
+			dispatcher->head = CMDQUEUE_NEXT(dispatcher->head,
+				ADRENO_DISPATCH_CMDQUEUE_SIZE);
+
+			/* Destroy the retired command batch */
+			kgsl_cmdbatch_destroy(cmdbatch);
+
+			/* Update the expire time for the next command batch */
+
+			if (dispatcher->inflight > 0) {
+				cmdbatch =
+					dispatcher->cmdqueue[dispatcher->head];
+				cmdbatch->expires = jiffies +
+					msecs_to_jiffies(_cmdbatch_timeout);
+			}
+
+			count++;
+			continue;
+		}
+
+		/*
+		 * If we got a fault from the interrupt handler, this command
+		 * is to blame.  Invalidate it, reset and replay
+		 */
+
+		if (dispatcher_do_fault(device))
+			goto done;
+
+		/* Get the last consumed timestamp */
+		consumed = kgsl_readtimestamp(device, cmdbatch->context,
+			KGSL_TIMESTAMP_CONSUMED);
+
+		/*
+		 * Break here if fault detection is disabled for the context or
+		 * if the long running IB detection is disabled device wide.
+		 * Long running command buffers will be allowed to run to
+		 * completion - but badly behaving command buffers (infinite
+		 * shaders etc) can end up running forever.
+		 */
+
+		if (!adreno_dev->long_ib_detect ||
+			drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
+			break;
+
+		/*
+		 * The last line of defense is to check if the command batch has
+		 * timed out. If we get this far but the timeout hasn't expired
+		 * yet then the GPU is still ticking away
+		 */
+
+		if (time_is_after_jiffies(cmdbatch->expires))
+			break;
+
+		/* Boom goes the dynamite */
+
+		pr_fault(device, cmdbatch,
+			"gpu timeout ctx %d ts %d\n",
+			cmdbatch->context->id, cmdbatch->timestamp);
+
+		adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
+
+		dispatcher_do_fault(device);
+		break;
+	}
+
+	/*
+	 * Decrement the active count to 0 - this will allow the system to go
+	 * into suspend even if there are queued command batches
+	 */
+
+	if (count && dispatcher->inflight == 0) {
+		mutex_lock(&device->mutex);
+		kgsl_active_count_put(device);
+		mutex_unlock(&device->mutex);
+	}
+
+	/* Dispatch new commands if we have the room */
+	if (dispatcher->inflight < _dispatcher_inflight)
+		_adreno_dispatcher_issuecmds(adreno_dev);
+
+done:
+	/* Either update the timer for the next command batch or disable it */
+	if (dispatcher->inflight) {
+		struct kgsl_cmdbatch *cmdbatch
+			= dispatcher->cmdqueue[dispatcher->head];
+
+		/* Update the timeout timer for the next command batch */
+		mod_timer(&dispatcher->timer, cmdbatch->expires);
+	} else {
+		del_timer_sync(&dispatcher->timer);
+		del_timer_sync(&dispatcher->fault_timer);
+	}
+
+	/* Before leaving update the pwrscale information */
+	mutex_lock(&device->mutex);
+	kgsl_pwrscale_idle(device);
+	mutex_unlock(&device->mutex);
+
+	mutex_unlock(&dispatcher->mutex);
+}
+
+void adreno_dispatcher_schedule(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+	queue_work(device->work_queue, &dispatcher->work);
+}
+
+/**
+ * adreno_dispatcher_queue_context() - schedule a drawctxt in the dispatcher
+ * @device: pointer to the KGSL device
+ * @drawctxt: pointer to the drawctxt to schedule
+ *
+ * Put a draw context on the dispatcher pending queue and schedule the
+ * dispatcher. This is used to reschedule contexts that might have been blocked
+ * on sync points or other concerns
+ */
+void adreno_dispatcher_queue_context(struct kgsl_device *device,
+	struct adreno_context *drawctxt)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	dispatcher_queue_context(adreno_dev, drawctxt);
+	adreno_dispatcher_schedule(device);
+}
+
+/*
+ * This is called on a regular basis while command batches are inflight.  Fault
+ * detection registers are read and compared to the existing values - if they
+ * changed then the GPU is still running.  If they are the same between
+ * subsequent calls then the GPU may have faulted
+ */
+
+void adreno_dispatcher_fault_timer(unsigned long data)
+{
+	struct adreno_device *adreno_dev = (struct adreno_device *) data;
+	struct kgsl_device *device = &adreno_dev->dev;
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+	/* Leave if the user decided to turn off fast hang detection */
+	if (adreno_dev->fast_hang_detect == 0)
+		return;
+
+	if (adreno_gpu_fault(adreno_dev)) {
+		adreno_dispatcher_schedule(device);
+		return;
+	}
+
+	/*
+	 * Read the fault registers - if it returns 0 then they haven't changed
+	 * so mark the dispatcher as faulted and schedule the work loop.
+	 */
+
+	if (!fault_detect_read_compare(device)) {
+		adreno_set_gpu_fault(adreno_dev, ADRENO_SOFT_FAULT);
+		adreno_dispatcher_schedule(device);
+	} else {
+		mod_timer(&dispatcher->fault_timer,
+			jiffies + msecs_to_jiffies(_fault_timer_interval));
+	}
+}
+
+/*
+ * This is called when the timer expires - it either means the GPU is hung or
+ * the IB is taking too long to execute
+ */
+void adreno_dispatcher_timer(unsigned long data)
+{
+	struct adreno_device *adreno_dev = (struct adreno_device *) data;
+	struct kgsl_device *device = &adreno_dev->dev;
+
+	adreno_dispatcher_schedule(device);
+}
+/**
+ * adreno_dispatcher_irq_fault() - Trigger a fault in the dispatcher
+ * @device: Pointer to the KGSL device
+ *
+ * Called from an interrupt context this will trigger a fault in the
+ * dispatcher for the oldest pending command batch
+ */
+void adreno_dispatcher_irq_fault(struct kgsl_device *device)
+{
+	adreno_set_gpu_fault(ADRENO_DEVICE(device), ADRENO_HARD_FAULT);
+	adreno_dispatcher_schedule(device);
+}
+
+/**
+ * adreno_dispatcher_pause() - stop the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Pause the dispatcher so it doesn't accept any new commands
+ */
+void adreno_dispatcher_pause(struct adreno_device *adreno_dev)
+{
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+	/*
+	 * This will probably get called while holding other mutexes so don't
+	 * take the dispatcher mutex.  The biggest penalty is that another
+	 * command might be submitted while we are in here but that's okay
+	 * because whoever is waiting for the drain will just have another
+	 * command batch to wait for
+	 */
+
+	dispatcher->state = ADRENO_DISPATCHER_PAUSE;
+}
+
+/**
+ * adreno_dispatcher_start() - activate the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Set the dispatcher active and start the loop once to get things going
+ */
+void adreno_dispatcher_start(struct adreno_device *adreno_dev)
+{
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+	dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
+
+	/* Schedule the work loop to get things going */
+	adreno_dispatcher_schedule(&adreno_dev->dev);
+}
+
+/**
+ * adreno_dispatcher_stop() - stop the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Stop the dispatcher and close all the timers
+ */
+void adreno_dispatcher_stop(struct adreno_device *adreno_dev)
+{
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+	del_timer_sync(&dispatcher->timer);
+	del_timer_sync(&dispatcher->fault_timer);
+
+	dispatcher->state = ADRENO_DISPATCHER_PAUSE;
+}
+
+/**
+ * adreno_dispatcher_close() - close the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Close the dispatcher and free all the outstanding commands and memory
+ */
+void adreno_dispatcher_close(struct adreno_device *adreno_dev)
+{
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+	mutex_lock(&dispatcher->mutex);
+	del_timer_sync(&dispatcher->timer);
+	del_timer_sync(&dispatcher->fault_timer);
+
+	while (dispatcher->head != dispatcher->tail) {
+		kgsl_cmdbatch_destroy(dispatcher->cmdqueue[dispatcher->head]);
+		dispatcher->head = (dispatcher->head + 1)
+			% ADRENO_DISPATCH_CMDQUEUE_SIZE;
+	}
+
+	mutex_unlock(&dispatcher->mutex);
+
+	kobject_put(&dispatcher->kobj);
+}
+
+struct dispatcher_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct adreno_dispatcher *,
+			struct dispatcher_attribute *, char *);
+	ssize_t (*store)(struct adreno_dispatcher *,
+			struct dispatcher_attribute *, const char *buf,
+			size_t count);
+	unsigned int max;
+	unsigned int *value;
+};
+
+#define DISPATCHER_UINT_ATTR(_name, _mode, _max, _value) \
+	struct dispatcher_attribute dispatcher_attr_##_name =  { \
+		.attr = { .name = __stringify(_name), .mode = _mode }, \
+		.show = _show_uint, \
+		.store = _store_uint, \
+		.max = _max, \
+		.value = &(_value), \
+	}
+
+#define to_dispatcher_attr(_a) \
+	container_of((_a), struct dispatcher_attribute, attr)
+#define to_dispatcher(k) container_of(k, struct adreno_dispatcher, kobj)
+
+static ssize_t _store_uint(struct adreno_dispatcher *dispatcher,
+		struct dispatcher_attribute *attr,
+		const char *buf, size_t size)
+{
+	unsigned long val;
+	int ret = kstrtoul(buf, 0, &val);
+
+	if (ret)
+		return ret;
+
+	if (!val || (attr->max && (val > attr->max)))
+		return -EINVAL;
+
+	*((unsigned int *) attr->value) = val;
+	return size;
+}
+
+static ssize_t _show_uint(struct adreno_dispatcher *dispatcher,
+		struct dispatcher_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		*((unsigned int *) attr->value));
+}
+
+static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE,
+	_dispatcher_inflight);
+/*
+ * Our code that "puts back" a command from the context is much cleaner
+ * if we are sure that there will always be enough room in the
+ * ringbuffer so restrict the maximum size of the context queue to
+ * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1
+ */
+static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644,
+	ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size);
+static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0,
+	_context_cmdbatch_burst);
+static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0, _cmdbatch_timeout);
+static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
+static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0,
+	_fault_timer_interval);
+
+static struct attribute *dispatcher_attrs[] = {
+	&dispatcher_attr_inflight.attr,
+	&dispatcher_attr_context_cmdqueue_size.attr,
+	&dispatcher_attr_context_burst_count.attr,
+	&dispatcher_attr_cmdbatch_timeout.attr,
+	&dispatcher_attr_context_queue_wait.attr,
+	&dispatcher_attr_fault_detect_interval.attr,
+	NULL,
+};
+
+static ssize_t dispatcher_sysfs_show(struct kobject *kobj,
+				   struct attribute *attr, char *buf)
+{
+	struct adreno_dispatcher *dispatcher = to_dispatcher(kobj);
+	struct dispatcher_attribute *pattr = to_dispatcher_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (pattr->show)
+		ret = pattr->show(dispatcher, pattr, buf);
+
+	return ret;
+}
+
+static ssize_t dispatcher_sysfs_store(struct kobject *kobj,
+				    struct attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct adreno_dispatcher *dispatcher = to_dispatcher(kobj);
+	struct dispatcher_attribute *pattr = to_dispatcher_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (pattr->store)
+		ret = pattr->store(dispatcher, pattr, buf, count);
+
+	return ret;
+}
+
+static void dispatcher_sysfs_release(struct kobject *kobj)
+{
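+	/* Nothing to free - the dispatcher is embedded in the adreno device */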
+}
+
+static const struct sysfs_ops dispatcher_sysfs_ops = {
+	.show = dispatcher_sysfs_show,
+	.store = dispatcher_sysfs_store
+};
+
+static struct kobj_type ktype_dispatcher = {
+	.sysfs_ops = &dispatcher_sysfs_ops,
+	.default_attrs = dispatcher_attrs,
+	.release = dispatcher_sysfs_release
+};
+
+/**
+ * adreno_dispatcher_init() - Initialize the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Initialize the dispatcher
+ */
+int adreno_dispatcher_init(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+	int ret;
+
+	memset(dispatcher, 0, sizeof(*dispatcher));
+
+	mutex_init(&dispatcher->mutex);
+
+	setup_timer(&dispatcher->timer, adreno_dispatcher_timer,
+		(unsigned long) adreno_dev);
+
+	setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
+		(unsigned long) adreno_dev);
+
+	INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+
+	plist_head_init(&dispatcher->pending);
+	spin_lock_init(&dispatcher->plist_lock);
+
+	dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
+
+	ret = kobject_init_and_add(&dispatcher->kobj, &ktype_dispatcher,
+		&device->dev->kobj, "dispatch");
+
+	return ret;
+}
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index bf173a7..907d41f 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -13,10 +13,12 @@
 
 #include <linux/slab.h>
 #include <linux/msm_kgsl.h>
+#include <linux/sched.h>
 
 #include "kgsl.h"
 #include "kgsl_sharedmem.h"
 #include "adreno.h"
+#include "adreno_trace.h"
 
 #define KGSL_INIT_REFTIMESTAMP		0x7FFFFFFF
 
@@ -132,6 +134,247 @@
 	*incmd = cmd;
 }
 
+static void wait_callback(struct kgsl_device *device, void *priv, u32 id,
+		u32 timestamp, u32 type)
+{
+	struct adreno_context *drawctxt = priv;
+	wake_up_interruptible_all(&drawctxt->waiting);
+}
+
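+/*
+ * Wrappers that pick between the io and non-io wait variants so that the
+ * time spent waiting is attributed correctly for power accounting.
+ */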
+#define adreno_wait_event_interruptible_timeout(wq, condition, timeout, io)   \
+({                                                                            \
+	long __ret = timeout;                                                 \
+	if (io)                                                               \
+		__wait_io_event_interruptible_timeout(wq, condition, __ret);  \
+	else                                                                  \
+		__wait_event_interruptible_timeout(wq, condition, __ret);     \
+	__ret;                                                                \
+})
+
+#define adreno_wait_event_interruptible(wq, condition, io)                    \
+({                                                                            \
+	long __ret;                                                           \
+	if (io)                                                               \
+		__wait_io_event_interruptible(wq, condition, __ret);          \
+	else                                                                  \
+		__wait_event_interruptible(wq, condition, __ret);             \
+	__ret;                                                                \
+})
+
+static int _check_context_timestamp(struct kgsl_device *device,
+		struct adreno_context *drawctxt, unsigned int timestamp)
+{
+	int ret = 0;
+
+	/* Bail if the drawctxt has been invalidated or destroyed */
+	if (kgsl_context_detached(&drawctxt->base) ||
+		drawctxt->state != ADRENO_CONTEXT_STATE_ACTIVE)
+		return 1;
+
+	mutex_lock(&device->mutex);
+	ret = kgsl_check_timestamp(device, &drawctxt->base, timestamp);
+	mutex_unlock(&device->mutex);
+
+	return ret;
+}
+
+/**
+ * adreno_drawctxt_wait() - sleep until a timestamp expires
+ * @adreno_dev: pointer to the adreno_device struct
+ * @context: Pointer to the KGSL context to sleep for
+ * @timestamp: Timestamp to wait on
+ * @timeout: Number of milliseconds to wait (0 for infinite)
+ *
+ * Register an event to wait for a timestamp on a context and sleep until it
+ * has passed.  Returns < 0 on error, -ETIMEDOUT if the timeout expires or 0
+ * on success
+ */
+int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
+		struct kgsl_context *context,
+		uint32_t timestamp, unsigned int timeout)
+{
+	static unsigned int io_cnt;
+	struct kgsl_device *device = &adreno_dev->dev;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+	int ret, io;
+
+	if (kgsl_context_detached(context))
+		return -EINVAL;
+
+	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+		return -EDEADLK;
+
+	/* Needs to hold the device mutex */
+	BUG_ON(!mutex_is_locked(&device->mutex));
+
+	trace_adreno_drawctxt_wait_start(context->id, timestamp);
+
+	ret = kgsl_add_event(device, context->id, timestamp,
+		wait_callback, drawctxt, NULL);
+	if (ret)
+		goto done;
+
+	/*
+	 * For proper power accounting sometimes we need to call
+	 * io_wait_interruptible_timeout and sometimes we need to call
+	 * plain old wait_interruptible_timeout. We call the regular
+	 * timeout N times out of 100, where N is a number specified by
+	 * the current power level
+	 */
+
+	io_cnt = (io_cnt + 1) % 100;
+	io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
+		? 0 : 1;
+
+	mutex_unlock(&device->mutex);
+
+	if (timeout) {
+		ret = (int) adreno_wait_event_interruptible_timeout(
+			drawctxt->waiting,
+			_check_context_timestamp(device, drawctxt, timestamp),
+			msecs_to_jiffies(timeout), io);
+
+		if (ret == 0)
+			ret = -ETIMEDOUT;
+		else if (ret > 0)
+			ret = 0;
+	} else {
+		ret = (int) adreno_wait_event_interruptible(drawctxt->waiting,
+			_check_context_timestamp(device, drawctxt, timestamp),
+				io);
+	}
+
+	mutex_lock(&device->mutex);
+
+	/* -EDEADLK if the context was invalidated while we were waiting */
+	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+		ret = -EDEADLK;
+
+
+	/* Return -EINVAL if the context was detached while we were waiting */
+	if (kgsl_context_detached(context))
+		ret = -EINVAL;
+
+done:
+	trace_adreno_drawctxt_wait_done(context->id, timestamp, ret);
+	return ret;
+}
+
+static void global_wait_callback(struct kgsl_device *device, void *priv, u32 id,
+		u32 timestamp, u32 type)
+{
+	struct adreno_context *drawctxt = priv;
+
+	wake_up_interruptible_all(&drawctxt->waiting);
+	kgsl_context_put(&drawctxt->base);
+}
+
+static int _check_global_timestamp(struct kgsl_device *device,
+		unsigned int timestamp)
+{
+	int ret;
+
+	mutex_lock(&device->mutex);
+	ret = kgsl_check_timestamp(device, NULL, timestamp);
+	mutex_unlock(&device->mutex);
+
+	return ret;
+}
+
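+/**
+ * adreno_drawctxt_wait_global() - wait for a global timestamp to expire
+ * @adreno_dev: pointer to the adreno_device struct
+ * @context: Pointer to the KGSL context that is waiting
+ * @timestamp: Global (device) timestamp to wait on
+ * @timeout: Number of milliseconds to wait (0 for infinite)
+ *
+ * Register an event on the global timestamp and sleep until it expires.  A
+ * reference to the context is held until the callback runs so the waitqueue
+ * stays valid.
+ */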
+int adreno_drawctxt_wait_global(struct adreno_device *adreno_dev,
+		struct kgsl_context *context,
+		uint32_t timestamp, unsigned int timeout)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+	int ret;
+
+	/* Needs to hold the device mutex */
+	BUG_ON(!mutex_is_locked(&device->mutex));
+
+	_kgsl_context_get(context);
+
+	trace_adreno_drawctxt_wait_start(KGSL_MEMSTORE_GLOBAL, timestamp);
+
+	ret = kgsl_add_event(device, KGSL_MEMSTORE_GLOBAL, timestamp,
+		global_wait_callback, drawctxt, NULL);
+	if (ret) {
+		kgsl_context_put(context);
+		goto done;
+	}
+
+	mutex_unlock(&device->mutex);
+
+	if (timeout) {
+		ret = (int) wait_event_interruptible_timeout(drawctxt->waiting,
+			_check_global_timestamp(device, timestamp),
+			msecs_to_jiffies(timeout));
+
+		if (ret == 0)
+			ret = -ETIMEDOUT;
+		else if (ret > 0)
+			ret = 0;
+	} else {
+		ret = (int) wait_event_interruptible(drawctxt->waiting,
+			_check_global_timestamp(device, timestamp));
+	}
+
+	mutex_lock(&device->mutex);
+
+	if (ret)
+		kgsl_cancel_events_timestamp(device, NULL, timestamp);
+
+done:
+	trace_adreno_drawctxt_wait_done(KGSL_MEMSTORE_GLOBAL, timestamp, ret);
+	return ret;
+}
+
+/**
+ * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
+ * @device: Pointer to the KGSL device structure for the GPU
+ * @context: Pointer to the KGSL context structure
+ *
+ * Invalidate the context and remove all queued commands and cancel any pending
+ * waiters
+ */
+void adreno_drawctxt_invalidate(struct kgsl_device *device,
+		struct kgsl_context *context)
+{
+	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+
+	trace_adreno_drawctxt_invalidate(drawctxt);
+
+	drawctxt->state = ADRENO_CONTEXT_STATE_INVALID;
+
+	/* Clear the pending queue */
+	mutex_lock(&drawctxt->mutex);
+
+	while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+		struct kgsl_cmdbatch *cmdbatch =
+			drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+		drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
+			ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+		mutex_unlock(&drawctxt->mutex);
+
+		mutex_lock(&device->mutex);
+		kgsl_cancel_events_timestamp(device, context,
+			cmdbatch->timestamp);
+		mutex_unlock(&device->mutex);
+
+		kgsl_cmdbatch_destroy(cmdbatch);
+		mutex_lock(&drawctxt->mutex);
+	}
+
+	mutex_unlock(&drawctxt->mutex);
+
+	/* Give the bad news to everybody waiting around */
+	wake_up_interruptible_all(&drawctxt->waiting);
+	wake_up_interruptible_all(&drawctxt->wq);
+}
+
 /**
  * adreno_drawctxt_create - create a new adreno draw context
  * @dev_priv: the owner of the context
@@ -149,6 +392,7 @@
 	int ret;
 
 	drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
+
 	if (drawctxt == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -168,22 +412,30 @@
 		KGSL_CONTEXT_NO_FAULT_TOLERANCE |
 		KGSL_CONTEXT_TYPE_MASK);
 
+	/* Always enable per-context timestamps */
+	*flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
+	drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
+
 	if (*flags & KGSL_CONTEXT_PREAMBLE)
 		drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
 
 	if (*flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
 		drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
 
-	if (*flags & KGSL_CONTEXT_PER_CONTEXT_TS)
-		drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
-
-	if (*flags & KGSL_CONTEXT_USER_GENERATED_TS) {
-		if (!(*flags & KGSL_CONTEXT_PER_CONTEXT_TS)) {
-			ret = -EINVAL;
-			goto err;
-		}
+	if (*flags & KGSL_CONTEXT_USER_GENERATED_TS)
 		drawctxt->flags |= CTXT_FLAGS_USER_GENERATED_TS;
-	}
+
+	mutex_init(&drawctxt->mutex);
+	init_waitqueue_head(&drawctxt->wq);
+	init_waitqueue_head(&drawctxt->waiting);
+
+	/*
+	 * Set up the plist node for the dispatcher.  For now all contexts have
+	 * the same priority, but later the priority will be set at create time
+	 * by the user
+	 */
+
+	plist_node_init(&drawctxt->pending, ADRENO_CONTEXT_DEFAULT_PRIORITY);
 
 	if (*flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
 		drawctxt->flags |= CTXT_FLAGS_NO_FAULT_TOLERANCE;
@@ -196,12 +448,6 @@
 		goto err;
 
 	kgsl_sharedmem_writel(device, &device->memstore,
-			KGSL_MEMSTORE_OFFSET(drawctxt->base.id, ref_wait_ts),
-			KGSL_INIT_REFTIMESTAMP);
-	kgsl_sharedmem_writel(device, &device->memstore,
-			KGSL_MEMSTORE_OFFSET(drawctxt->base.id, ts_cmp_enable),
-			0);
-	kgsl_sharedmem_writel(device, &device->memstore,
 			KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
 			0);
 	kgsl_sharedmem_writel(device, &device->memstore,
@@ -215,22 +461,39 @@
 }
 
 /**
+ * adreno_drawctxt_sched() - Schedule a previously blocked context
+ * @device: pointer to a KGSL device
+ * @context: the kgsl context to reschedule
+ *
+ * This function is called by the core when it knows that a previously blocked
+ * context has been unblocked.  The default adreno response is to reschedule the
+ * context on the dispatcher
+ */
+void adreno_drawctxt_sched(struct kgsl_device *device,
+		struct kgsl_context *context)
+{
+	adreno_dispatcher_queue_context(device, ADRENO_CONTEXT(context));
+}
+
+/**
  * adreno_drawctxt_detach(): detach a context from the GPU
  * @context: Generic KGSL context container for the context
  *
  */
-void adreno_drawctxt_detach(struct kgsl_context *context)
+int adreno_drawctxt_detach(struct kgsl_context *context)
 {
 	struct kgsl_device *device;
 	struct adreno_device *adreno_dev;
 	struct adreno_context *drawctxt;
+	int ret;
 
 	if (context == NULL)
-		return;
+		return 0;
 
 	device = context->device;
 	adreno_dev = ADRENO_DEVICE(device);
 	drawctxt = ADRENO_CONTEXT(context);
+
 	/* deactivate context */
 	if (adreno_dev->drawctxt_active == drawctxt) {
 		/* no need to save GMEM or shader, the context is
@@ -246,11 +509,39 @@
 		adreno_drawctxt_switch(adreno_dev, NULL, 0);
 	}
 
-	if (device->state != KGSL_STATE_HUNG)
-		adreno_idle(device);
+	mutex_lock(&drawctxt->mutex);
+
+	while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+		struct kgsl_cmdbatch *cmdbatch =
+			drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+		drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
+			ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+		mutex_unlock(&drawctxt->mutex);
+
+		/*
+		 * Don't hold the drawctxt mutex while the cmdbatch is being
+		 * destroyed because the cmdbatch destroy path takes the device
+		 * mutex, which would invert the lock ordering and deadlock
+		 */
+
+		kgsl_cmdbatch_destroy(cmdbatch);
+		mutex_lock(&drawctxt->mutex);
+	}
+
+	mutex_unlock(&drawctxt->mutex);
+
+	/* Wait for the last global timestamp to pass before continuing */
+	ret = adreno_drawctxt_wait_global(adreno_dev, context,
+		drawctxt->internal_timestamp, 10 * 1000);
+
+	adreno_profile_process_results(device);
 
 	kgsl_sharedmem_free(&drawctxt->gpustate);
 	kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);
+
+	return ret;
 }
 
 
@@ -294,11 +585,12 @@
  * Switch the current draw context
  */
 
-void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
 				struct adreno_context *drawctxt,
 				unsigned int flags)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
+	int ret = 0;
 
 	if (drawctxt) {
 		if (flags & KGSL_CONTEXT_SAVE_GMEM)
@@ -314,18 +606,24 @@
 	if (adreno_dev->drawctxt_active == drawctxt) {
 		if (adreno_dev->gpudev->ctxt_draw_workaround &&
 			adreno_is_a225(adreno_dev))
-				adreno_dev->gpudev->ctxt_draw_workaround(
+				ret = adreno_dev->gpudev->ctxt_draw_workaround(
 					adreno_dev, drawctxt);
-		return;
+		return ret;
 	}
 
-	KGSL_CTXT_INFO(device, "from %d to %d flags %d\n",
-		adreno_dev->drawctxt_active ?
-		adreno_dev->drawctxt_active->base.id : 0,
-		drawctxt ? drawctxt->base.id : 0, flags);
+	trace_adreno_drawctxt_switch(adreno_dev->drawctxt_active,
+		drawctxt, flags);
 
 	/* Save the old context */
-	adreno_dev->gpudev->ctxt_save(adreno_dev, adreno_dev->drawctxt_active);
+	ret = adreno_dev->gpudev->ctxt_save(adreno_dev,
+		adreno_dev->drawctxt_active);
+
+	if (ret) {
+		KGSL_DRV_ERR(device,
+			"Error in GPU context %d save: %d\n",
+			adreno_dev->drawctxt_active->base.id, ret);
+		return ret;
+	}
 
 	/* Put the old instance of the active drawctxt */
 	if (adreno_dev->drawctxt_active) {
@@ -338,6 +636,14 @@
 		_kgsl_context_get(&drawctxt->base);
 
 	/* Set the new context */
-	adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt);
+	ret = adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt);
+	if (ret) {
+		KGSL_DRV_ERR(device,
+			"Error in GPU context %d restore: %d\n",
+			drawctxt->base.id, ret);
+		return ret;
+	}
+
 	adreno_dev->drawctxt_active = drawctxt;
+	return 0;
 }
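
The detach and invalidate paths above both drain the per-context command queue with the same head/tail arithmetic. Below is a stand-alone sketch of that pattern in isolation; it is illustrative only: demo_queue and QUEUE_SIZE are hypothetical stand-ins for the adreno_context cmdqueue and ADRENO_CONTEXT_CMDQUEUE_SIZE, and printf() stands in for kgsl_cmdbatch_destroy(). The queue is empty when head == tail, and both indices advance modulo the queue size.

	#include <stdio.h>

	#define QUEUE_SIZE 8	/* stand-in for ADRENO_CONTEXT_CMDQUEUE_SIZE */

	struct demo_queue {
		int items[QUEUE_SIZE];
		int head;	/* oldest queued entry */
		int tail;	/* next free slot */
	};

	static void drain(struct demo_queue *q)
	{
		/* queue is empty once head catches up with tail */
		while (q->head != q->tail) {
			int item = q->items[q->head];

			q->head = (q->head + 1) % QUEUE_SIZE;
			printf("destroy item %d\n", item);
		}
	}

	int main(void)
	{
		struct demo_queue q = { .head = 0, .tail = 0 };
		int i;

		for (i = 0; i < 3; i++) {
			q.items[q.tail] = i;
			q.tail = (q.tail + 1) % QUEUE_SIZE;
		}

		drain(&q);
		return 0;
	}
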
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 88d1b8c..5c12676 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -54,6 +54,8 @@
 #define CTXT_FLAGS_SKIP_EOF             BIT(15)
 /* Context no fault tolerance */
 #define CTXT_FLAGS_NO_FAULT_TOLERANCE  BIT(16)
+/* Force the preamble for the next submission */
+#define CTXT_FLAGS_FORCE_PREAMBLE      BIT(17)
 
 /* Symbolic table for the adreno draw context type */
 #define ADRENO_DRAWCTXT_TYPES \
@@ -61,7 +63,20 @@
 	{ KGSL_CONTEXT_TYPE_GL, "GL" }, \
 	{ KGSL_CONTEXT_TYPE_CL, "CL" }, \
 	{ KGSL_CONTEXT_TYPE_C2D, "C2D" }, \
-	{ KGSL_CONTEXT_TYPE_RS, "RS" }
+	{ KGSL_CONTEXT_TYPE_RS, "RS" }, \
+	{ KGSL_CONTEXT_TYPE_UNKNOWN, "UNKNOWN" }
+
+struct adreno_context_type {
+	unsigned int type;
+	const char *str;
+};
+
+#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128
+
+#define ADRENO_CONTEXT_DEFAULT_PRIORITY 1
+
+#define ADRENO_CONTEXT_STATE_ACTIVE 0
+#define ADRENO_CONTEXT_STATE_INVALID 1
 
 struct kgsl_device;
 struct adreno_device;
@@ -93,18 +108,58 @@
 	struct kgsl_memdesc quad_vertices_restore;
 };
 
+/**
+ * struct adreno_context - Adreno GPU draw context
+ * @id: Unique integer ID of the context
+ * @timestamp: Last issued context-specific timestamp
+ * @internal_timestamp: Global timestamp of the last issued command
+ * @state: Current state of the context
+ * @flags: Bitfield controlling behavior of the context
+ * @type: Context type (GL, CL, RS)
+ * @mutex: Mutex to protect the cmdqueue
+ * @pagetable: Pointer to the GPU pagetable for the context
+ * @gpustate: Pointer to the GPU scratch memory for context save/restore
+ * @reg_restore: Command buffer for restoring context registers
+ * @shader_save: Command buffer for saving shaders
+ * @shader_restore: Command buffer to restore shaders
+ * @context_gmem_shadow: GMEM shadow structure for save/restore
+ * @reg_save: A2XX command buffer to save context registers
+ * @shader_fixup: A2XX command buffer to "fix" shaders on restore
+ * @chicken_restore: A2XX command buffer to "fix" register restore
+ * @bin_base_offset: Saved value of the A2XX BIN_BASE_OFFSET register
+ * @regconstant_save: A3XX command buffer to save some registers
+ * @constant_restore: A3XX command buffer to restore some registers
+ * @hlsqcontrol_restore: A3XX command buffer to restore HLSQ registers
+ * @save_fixup: A3XX command buffer to "fix" register save
+ * @restore_fixup: A3XX command buffer to restore register save fixes
+ * @shader_load_commands: A3XX GPU memory descriptor for shader load IB
+ * @shader_save_commands: A3XX GPU memory descriptor for shader save IB
+ * @constant_save_commands: A3XX GPU memory descriptor for constant save IB
+ * @constant_load_commands: A3XX GPU memory descriptor for constant load IB
+ * @cond_execs: A3XX GPU memory descriptor for conditional exec IB
+ * @hlsq_restore_commands: A3XX GPU memory descriptor for HLSQ restore IB
+ * @cmdqueue: Queue of command batches waiting to be dispatched for this context
+ * @cmdqueue_head: Head index of the command batch queue
+ * @cmdqueue_tail: Tail index of the command batch queue
+ * @pending: Priority list node for the dispatcher list of pending contexts
+ * @wq: Wait queue for threads waiting for room in the command batch queue
+ * @waiting: Wait queue for threads waiting for a timestamp or event
+ * @queued: Number of commands queued in the cmdqueue
+ */
 struct adreno_context {
 	struct kgsl_context base;
 	unsigned int ib_gpu_time_used;
 	unsigned int timestamp;
+	unsigned int internal_timestamp;
+	int state;
 	uint32_t flags;
 	unsigned int type;
+	struct mutex mutex;
 	struct kgsl_memdesc gpustate;
 	unsigned int reg_restore[3];
 	unsigned int shader_save[3];
 	unsigned int shader_restore[3];
 
-	/* Information of the GMEM shadow that is created in context create */
 	struct gmem_shadow_t context_gmem_shadow;
 
 	/* A2XX specific items */
@@ -125,23 +180,44 @@
 	struct kgsl_memdesc constant_load_commands[3];
 	struct kgsl_memdesc cond_execs[4];
 	struct kgsl_memdesc hlsqcontrol_restore_commands[1];
+
+	/* Dispatcher */
+	struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+	int cmdqueue_head;
+	int cmdqueue_tail;
+
+	struct plist_node pending;
+	wait_queue_head_t wq;
+	wait_queue_head_t waiting;
+
+	int queued;
 };
 
 
 struct kgsl_context *adreno_drawctxt_create(struct kgsl_device_private *,
 			uint32_t *flags);
 
-void adreno_drawctxt_detach(struct kgsl_context *context);
+int adreno_drawctxt_detach(struct kgsl_context *context);
 
 void adreno_drawctxt_destroy(struct kgsl_context *context);
 
-void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+void adreno_drawctxt_sched(struct kgsl_device *device,
+		struct kgsl_context *context);
+
+int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
 				struct adreno_context *drawctxt,
 				unsigned int flags);
 void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
 					struct kgsl_context *context,
 					unsigned int offset);
 
+int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
+		struct kgsl_context *context,
+		uint32_t timestamp, unsigned int timeout);
+
+void adreno_drawctxt_invalidate(struct kgsl_device *device,
+		struct kgsl_context *context);
+
 /* GPU context switch helper functions */
 
 void build_quad_vtxbuff(struct adreno_context *drawctxt,
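
The new struct adreno_context_type pairs each KGSL context type with a printable name, and ADRENO_DRAWCTXT_TYPES (now terminated by an UNKNOWN entry) is meant to initialize an array of those pairs. The following self-contained sketch shows that pairing and the lookup used later by get_api_type_str() in adreno_profile.c; the numeric type values and the abbreviated DEMO_DRAWCTXT_TYPES table are illustrative, not the real KGSL definitions.

	#include <stdio.h>

	/* illustrative type values; the real ones come from the KGSL headers */
	#define KGSL_CONTEXT_TYPE_GL      1
	#define KGSL_CONTEXT_TYPE_CL      2
	#define KGSL_CONTEXT_TYPE_C2D     3
	#define KGSL_CONTEXT_TYPE_RS      4
	#define KGSL_CONTEXT_TYPE_UNKNOWN 30

	/* abbreviated version of ADRENO_DRAWCTXT_TYPES */
	#define DEMO_DRAWCTXT_TYPES \
		{ KGSL_CONTEXT_TYPE_GL, "GL" }, \
		{ KGSL_CONTEXT_TYPE_CL, "CL" }, \
		{ KGSL_CONTEXT_TYPE_C2D, "C2D" }, \
		{ KGSL_CONTEXT_TYPE_RS, "RS" }, \
		{ KGSL_CONTEXT_TYPE_UNKNOWN, "UNKNOWN" }

	struct adreno_context_type {
		unsigned int type;
		const char *str;
	};

	static const struct adreno_context_type types[] = { DEMO_DRAWCTXT_TYPES };

	static const char *type_str(unsigned int type)
	{
		size_t i;

		/* falls back to the final UNKNOWN entry when nothing matches */
		for (i = 0; i < sizeof(types) / sizeof(types[0]) - 1; i++)
			if (types[i].type == type)
				break;
		return types[i].str;
	}

	int main(void)
	{
		printf("%s\n", type_str(KGSL_CONTEXT_TYPE_CL));	/* "CL" */
		printf("%s\n", type_str(99));			/* "UNKNOWN" */
		return 0;
	}
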
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 7a070a6..8fb2830 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -22,6 +22,7 @@
 #include "adreno_ringbuffer.h"
 #include "kgsl_cffdump.h"
 #include "kgsl_pwrctrl.h"
+#include "adreno_trace.h"
 
 #include "a2xx_reg.h"
 #include "a3xx_reg.h"
@@ -79,6 +80,8 @@
 	{KGSL_CMD_INTERNAL_IDENTIFIER,		"CMD__INT"},
 	{KGSL_START_OF_IB_IDENTIFIER,		"IB_START"},
 	{KGSL_END_OF_IB_IDENTIFIER,		"IB___END"},
+	{KGSL_START_OF_PROFILE_IDENTIFIER,	"PRO_STRT"},
+	{KGSL_END_OF_PROFILE_IDENTIFIER,	"PRO__END"},
 };
 
 static uint32_t adreno_is_pm4_len(uint32_t word)
@@ -393,8 +396,8 @@
 
 int adreno_dump(struct kgsl_device *device, int manual)
 {
-	unsigned int cp_ib1_base, cp_ib1_bufsz;
-	unsigned int cp_ib2_base, cp_ib2_bufsz;
+	unsigned int cp_ib1_base;
+	unsigned int cp_ib2_base;
 	phys_addr_t pt_base, cur_pt_base;
 	unsigned int cp_rb_base, cp_rb_ctrl, rb_count;
 	unsigned int cp_rb_wptr, cp_rb_rptr;
@@ -407,7 +410,6 @@
 	unsigned int ts_processed = 0xdeaddead;
 	struct kgsl_context *context;
 	unsigned int context_id;
-	unsigned int rbbm_status;
 
 	static struct ib_list ib_list;
 
@@ -417,16 +419,10 @@
 
 	mb();
 
-	if (device->pm_dump_enable) {
-		msm_clk_dump_debug_info();
+	msm_clk_dump_debug_info();
 
-		if (adreno_dev->gpudev->postmortem_dump)
-			adreno_dev->gpudev->postmortem_dump(adreno_dev);
-	}
-
-	kgsl_regread(device,
-			adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS),
-			&rbbm_status);
+	if (adreno_dev->gpudev->postmortem_dump)
+		adreno_dev->gpudev->postmortem_dump(adreno_dev);
 
 	pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
 	cur_pt_base = pt_base;
@@ -448,26 +444,8 @@
 		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BASE),
 		&cp_ib1_base);
 	kgsl_regread(device,
-		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ),
-		&cp_ib1_bufsz);
-	kgsl_regread(device,
 		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BASE),
 		&cp_ib2_base);
-	kgsl_regread(device,
-		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ),
-		&cp_ib2_bufsz);
-
-	/* If postmortem dump is not enabled, dump minimal set and return */
-	if (!device->pm_dump_enable) {
-
-		KGSL_LOG_DUMP(device,
-			"STATUS %08X | IB1:%08X/%08X | IB2: %08X/%08X"
-			" | RPTR: %04X | WPTR: %04X\n",
-			rbbm_status,  cp_ib1_base, cp_ib1_bufsz, cp_ib2_base,
-			cp_ib2_bufsz, cp_rb_rptr, cp_rb_wptr);
-
-		return 0;
-	}
 
 	kgsl_sharedmem_readl(&device->memstore,
 			(unsigned int *) &context_id,
@@ -642,5 +620,9 @@
 error_vfree:
 	vfree(rb_copy);
 end:
+	/* Restart the dispatcher after a manually triggered dump */
+	if (manual)
+		adreno_dispatcher_start(adreno_dev);
+
 	return result;
 }
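
The dump table above gains symbolic names for the new profiling start/end NOP markers. A rough, self-contained sketch of how a dump pass can label such markers while walking a buffer of dwords follows; the DEMO_* identifier values are placeholders, not the real KGSL_*_PROFILE_IDENTIFIER constants.

	#include <stdio.h>

	/* placeholder marker values; the real constants live in the KGSL headers */
	#define DEMO_START_OF_PROFILE 0xAAAA0001
	#define DEMO_END_OF_PROFILE   0xAAAA0002

	static const struct {
		unsigned int id;
		const char *name;
	} id_names[] = {
		{ DEMO_START_OF_PROFILE, "PRO_STRT" },
		{ DEMO_END_OF_PROFILE,   "PRO__END" },
	};

	static void dump_dwords(const unsigned int *buf, size_t count)
	{
		size_t i, j;

		for (i = 0; i < count; i++) {
			const char *label = "........";

			for (j = 0; j < sizeof(id_names) / sizeof(id_names[0]); j++)
				if (buf[i] == id_names[j].id)
					label = id_names[j].name;

			printf("%04zu: %08x %s\n", i, buf[i], label);
		}
	}

	int main(void)
	{
		unsigned int rb[] = {
			0x0, DEMO_START_OF_PROFILE, 0x1234, DEMO_END_OF_PROFILE
		};

		dump_dwords(rb, sizeof(rb) / sizeof(rb[0]));
		return 0;
	}
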
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
new file mode 100644
index 0000000..896b6e8
--- /dev/null
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -0,0 +1,1161 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+
+#include "adreno.h"
+#include "adreno_profile.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+
+#define ASSIGNS_STR_FORMAT "%.8s:%u "
+
+/*
+ * Raw Data for processing later:
+ *        : 3 - timestamp, count, context id
+ * [per counter] - data for each counter
+ *        : 1 - Register offset
+ *        : 2 - Pre IB register hi/lo value
+ *        : 2 - Post IB register hi/lo value
+ * [per counter end]
+ */
+#define SIZE_DATA(cnt) (3 + (cnt) * 5)
+
+/*
+ * Pre-IB command size (in dwords):
+ *        : 2 - NOP start identifier
+ *        : 3 - timestamp
+ *        : 3 - count
+ *        : 3 - context id
+ * [loop count start] - for each counter to watch
+ *        : 3 - Register offset
+ *        : 3 - Register read lo
+ *        : 3 - Register read high
+ * [loop end]
+ *        : 2 - NOP end identifier
+ */
+#define SIZE_PREIB(cnt) (13 + (cnt) * 9)
+
+/*
+ * Post-IB command size (in dwords):
+ *        : 2 - NOP start identifier
+ * [loop count start] - for each counter to watch
+ *        : 3 - Register read lo
+ *        : 3 - Register read high
+ * [loop end]
+ *        : 2 - NOP end identifier
+ */
+#define SIZE_POSTIB(cnt) (4 + (cnt) * 6)
+
+/* Counter data + Pre size + post size = total size */
+#define SIZE_SHARED_ENTRY(cnt) (SIZE_DATA(cnt) + SIZE_PREIB(cnt) \
+		+ SIZE_POSTIB(cnt))
+
+/*
+ * Space for the following string: "%u %u %u %.5s %u "
+ * [count iterations]: "%.8s:%u %llu %llu%c"
+ */
+#define SIZE_PIPE_ENTRY(cnt) (50 + (cnt) * 62)
+#define SIZE_LOG_ENTRY(cnt) (5 + (cnt) * 5)
+
+static struct adreno_context_type ctxt_type_table[] = {ADRENO_DRAWCTXT_TYPES};
+
+static const char *get_api_type_str(unsigned int type)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(ctxt_type_table) - 1; i++) {
+		if (ctxt_type_table[i].type == type)
+			break;
+	}
+	return ctxt_type_table[i].str;
+}
+
+static inline void _create_ib_ref(struct kgsl_memdesc *memdesc,
+		unsigned int *cmd, unsigned int cnt, unsigned int off)
+{
+	cmd[0] = CP_HDR_INDIRECT_BUFFER_PFD;
+	cmd[1] = memdesc->gpuaddr + off;
+	cmd[2] = cnt;
+}
+
+#define IB_START(cmd) do { \
+		*cmd++ = cp_nop_packet(1); \
+		*cmd++ = KGSL_START_OF_PROFILE_IDENTIFIER; \
+	} while (0);
+
+#define IB_END(cmd) do { \
+		*cmd++ = cp_nop_packet(1); \
+		*cmd++ = KGSL_END_OF_PROFILE_IDENTIFIER; \
+	} while (0);
+
+#define IB_CMD(cmd, type, val1, val2, off) do { \
+		*cmd++ = cp_type3_packet(type, 2); \
+		*cmd++ = val1; \
+		*cmd++ = val2; \
+		off += sizeof(unsigned int); \
+	} while (0);
+
+static void _build_pre_ib_cmds(struct adreno_profile *profile,
+		unsigned int *rbcmds, unsigned int head,
+		unsigned int timestamp, unsigned int ctxt_id)
+{
+	struct adreno_profile_assigns_list *entry;
+	unsigned int *start, *ibcmds;
+	unsigned int count = profile->assignment_count;
+	unsigned int gpuaddr = profile->shared_buffer.gpuaddr;
+	unsigned int ib_offset = head + SIZE_DATA(count);
+	unsigned int data_offset = head * sizeof(unsigned int);
+
+	ibcmds = ib_offset + ((unsigned int *) profile->shared_buffer.hostptr);
+	start = ibcmds;
+
+	/* start of profile identifier */
+	IB_START(ibcmds);
+
+	/* timestamp */
+	IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+			timestamp, data_offset);
+
+	/* count:  number of perf counters pairs GPU will write */
+	IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+			profile->assignment_count, data_offset);
+
+	/* context id */
+	IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+			ctxt_id, data_offset);
+
+	/* loop for each countable assigned */
+	list_for_each_entry(entry, &profile->assignments_list, list) {
+		IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+				entry->offset, data_offset);
+		IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset,
+				gpuaddr + data_offset, data_offset);
+		IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset + 1,
+				gpuaddr + data_offset, data_offset);
+
+		/* skip over post_ib counter data */
+		data_offset += sizeof(unsigned int) * 2;
+	}
+
+	/* end of profile identifier */
+	IB_END(ibcmds);
+
+	_create_ib_ref(&profile->shared_buffer, rbcmds,
+			ibcmds - start, ib_offset * sizeof(unsigned int));
+}
+
+static void _build_post_ib_cmds(struct adreno_profile *profile,
+		unsigned int *rbcmds, unsigned int head)
+{
+	struct adreno_profile_assigns_list *entry;
+	unsigned int *start, *ibcmds;
+	unsigned int count = profile->assignment_count;
+	unsigned int gpuaddr =  profile->shared_buffer.gpuaddr;
+	unsigned int ib_offset = head + SIZE_DATA(count) + SIZE_PREIB(count);
+	unsigned int data_offset = head * sizeof(unsigned int);
+
+	ibcmds = ib_offset + ((unsigned int *) profile->shared_buffer.hostptr);
+	start = ibcmds;
+	/* end of profile identifier */
+	IB_END(ibcmds);
+
+	/* skip over pre_ib preamble */
+	data_offset += sizeof(unsigned int) * 3;
+
+	/* loop for each countable assigned */
+	list_for_each_entry(entry, &profile->assignments_list, list) {
+		/* skip over pre_ib counter data */
+		data_offset += sizeof(unsigned int) * 3;
+
+		IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset,
+				gpuaddr + data_offset, data_offset);
+		IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset + 1,
+				gpuaddr + data_offset, data_offset);
+	}
+
+	/* end of profile identifier */
+	IB_END(ibcmds);
+
+	_create_ib_ref(&profile->shared_buffer, rbcmds,
+			ibcmds - start, ib_offset * sizeof(unsigned int));
+}
+
+static bool shared_buf_empty(struct adreno_profile *profile)
+{
+	if (profile->shared_buffer.hostptr == NULL ||
+			profile->shared_buffer.size == 0)
+		return true;
+
+	if (profile->shared_head == profile->shared_tail)
+		return true;
+
+	return false;
+}
+
+static inline void shared_buf_inc(unsigned int max_size,
+		unsigned int *offset, size_t inc)
+{
+	*offset = (*offset + inc) % max_size;
+}
+
+static inline void log_buf_wrapcnt(unsigned int cnt, unsigned int *off)
+{
+	*off = (*off + cnt) % ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS;
+}
+
+static inline void log_buf_wrapinc(unsigned int *profile_log_buffer,
+		unsigned int **ptr)
+{
+	*ptr += 1;
+	if (*ptr >= (profile_log_buffer +
+				ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS))
+		*ptr -= ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS;
+}
+
+static inline unsigned int log_buf_available(struct adreno_profile *profile,
+		unsigned int *head_ptr)
+{
+	unsigned int tail, head;
+
+	tail = (unsigned int) profile->log_tail -
+		(unsigned int) profile->log_buffer;
+	head = (unsigned int) head_ptr - (unsigned int) profile->log_buffer;
+	if (tail > head)
+		return (tail - head) / sizeof(unsigned int);
+	else
+		return ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS - ((head - tail) /
+				sizeof(unsigned int));
+}
+
+static inline unsigned int shared_buf_available(struct adreno_profile *profile)
+{
+	if (profile->shared_tail > profile->shared_head)
+		return profile->shared_tail - profile->shared_head;
+	else
+		return profile->shared_size -
+			(profile->shared_head - profile->shared_tail);
+}
+
+static struct adreno_profile_assigns_list *_find_assignment_by_offset(
+		struct adreno_profile *profile, unsigned int offset)
+{
+	struct adreno_profile_assigns_list *entry;
+
+	list_for_each_entry(entry, &profile->assignments_list, list) {
+		if (entry->offset == offset)
+			return entry;
+	}
+
+	return NULL;
+}
+
+static bool _in_assignments_list(struct adreno_profile *profile,
+		unsigned int groupid, unsigned int countable)
+{
+	struct adreno_profile_assigns_list *entry;
+
+	list_for_each_entry(entry, &profile->assignments_list, list) {
+		if (entry->groupid == groupid && entry->countable ==
+				countable)
+			return true;
+	}
+
+	return false;
+}
+
+static bool _add_to_assignments_list(struct adreno_profile *profile,
+		const char *str, unsigned int groupid, unsigned int countable,
+		unsigned int offset)
+{
+	struct adreno_profile_assigns_list *entry;
+
+	/* first make sure we can alloc memory */
+	entry = kmalloc(sizeof(struct adreno_profile_assigns_list), GFP_KERNEL);
+	if (!entry)
+		return false;
+
+	list_add_tail(&entry->list, &profile->assignments_list);
+
+	entry->countable = countable;
+	entry->groupid = groupid;
+	entry->offset = offset;
+
+	strlcpy(entry->name, str, sizeof(entry->name));
+
+	profile->assignment_count++;
+
+	return true;
+}
+
+static void check_close_profile(struct adreno_profile *profile)
+{
+	if (profile->log_buffer == NULL)
+		return;
+
+	if (!adreno_profile_enabled(profile) && shared_buf_empty(profile)) {
+		if (profile->log_head == profile->log_tail) {
+			vfree(profile->log_buffer);
+			profile->log_buffer = NULL;
+			profile->log_head = NULL;
+			profile->log_tail = NULL;
+		}
+	}
+}
+
+static bool results_available(struct kgsl_device *device,
+		unsigned int *shared_buf_tail)
+{
+	unsigned int global_eop;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_profile *profile = &adreno_dev->profile;
+	unsigned int off = profile->shared_tail;
+	unsigned int *shared_ptr = (unsigned int *)
+		profile->shared_buffer.hostptr;
+	unsigned int ts, cnt;
+	int ts_cmp;
+
+	/*
+	 * If the shared_buffer is empty, or the memstore EOP timestamp is behind
+	 * the outstanding counter buffer timestamps, then no results are available
+	 */
+	if (shared_buf_empty(profile))
+		return false;
+
+	global_eop = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+	do {
+		cnt = *(shared_ptr + off + 1);
+		if (cnt == 0)
+			return false;
+
+		ts = *(shared_ptr + off);
+		ts_cmp = timestamp_cmp(ts, global_eop);
+		if (ts_cmp >= 0) {
+			*shared_buf_tail = off;
+			if (off == profile->shared_tail)
+				return false;
+			else
+				return true;
+		}
+		shared_buf_inc(profile->shared_size, &off,
+				SIZE_SHARED_ENTRY(cnt));
+	} while (off != profile->shared_head);
+
+	*shared_buf_tail = profile->shared_head;
+
+	return true;
+}
+
+static void transfer_results(struct kgsl_device *device,
+		unsigned int shared_buf_tail)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_profile *profile = &adreno_dev->profile;
+	unsigned int buf_off;
+	unsigned int ts, cnt, ctxt_id, pid, tid, client_type;
+	unsigned int *ptr = (unsigned int *) profile->shared_buffer.hostptr;
+	struct kgsl_context *k_ctxt;
+	unsigned int *log_ptr, *log_base;
+	struct adreno_profile_assigns_list *assigns_list;
+	int i;
+
+	log_ptr = profile->log_head;
+	log_base = profile->log_buffer;
+	if (log_ptr == NULL)
+		return;
+
+	/*
+	 * Go through the counter buffers and format them for writing into the
+	 * log_buffer.  If the log buffer doesn't have space, overwrite it
+	 * circularly.  shared_buf is guaranteed not to wrap within an entry,
+	 * so plain pointer increments can be used.
+	 */
+	while (profile->shared_tail != shared_buf_tail) {
+		buf_off = profile->shared_tail;
+		/*
+		 * format: timestamp, count, context_id
+		 * count entries: pc_off, pc_start, pc_end
+		 */
+		ts = *(ptr + buf_off);
+		cnt = *(ptr + buf_off + 1);
+		ctxt_id = *(ptr + buf_off + 2);
+		/*
+		 * If the entry would overwrite the tail of the log_buffer, advance
+		 * the tail ptr to make room, discarding the oldest entries
+		 */
+		while (log_buf_available(profile, log_ptr) <=
+				SIZE_LOG_ENTRY(cnt)) {
+			unsigned int size_tail, boff;
+			size_tail = SIZE_LOG_ENTRY(0xffff &
+					*(profile->log_tail));
+			boff = ((unsigned int) profile->log_tail -
+				(unsigned int) log_base) / sizeof(unsigned int);
+			log_buf_wrapcnt(size_tail, &boff);
+			profile->log_tail = log_base + boff;
+		}
+
+		/* find Adreno ctxt struct */
+		k_ctxt = idr_find(&device->context_idr, ctxt_id);
+		if (k_ctxt == NULL) {
+			shared_buf_inc(profile->shared_size,
+					&profile->shared_tail,
+					SIZE_SHARED_ENTRY(cnt));
+			continue;
+		} else {
+			struct adreno_context *adreno_ctxt =
+				ADRENO_CONTEXT(k_ctxt);
+			pid = k_ctxt->pid;  /* pid */
+			tid = k_ctxt->tid; /* tid creator */
+			client_type =  adreno_ctxt->type << 16;
+		}
+
+		buf_off += 3;
+		*log_ptr = client_type | cnt;
+		log_buf_wrapinc(log_base, &log_ptr);
+		*log_ptr = pid;
+		log_buf_wrapinc(log_base, &log_ptr);
+		*log_ptr = tid;
+		log_buf_wrapinc(log_base, &log_ptr);
+		*log_ptr = ctxt_id;
+		log_buf_wrapinc(log_base, &log_ptr);
+		*log_ptr = ts;
+		log_buf_wrapinc(log_base, &log_ptr);
+
+		for (i = 0; i < cnt; i++) {
+			assigns_list = _find_assignment_by_offset(
+					profile, *(ptr + buf_off++));
+			if (assigns_list == NULL) {
+				*log_ptr = (unsigned int) -1;
+				goto err;
+			} else {
+				*log_ptr = assigns_list->groupid << 16 |
+					(assigns_list->countable & 0xffff);
+			}
+			log_buf_wrapinc(log_base, &log_ptr);
+			*log_ptr  = *(ptr + buf_off++); /* perf cntr start hi */
+			log_buf_wrapinc(log_base, &log_ptr);
+			*log_ptr = *(ptr + buf_off++);  /* perf cntr start lo */
+			log_buf_wrapinc(log_base, &log_ptr);
+			*log_ptr = *(ptr + buf_off++);  /* perf cntr end hi */
+			log_buf_wrapinc(log_base, &log_ptr);
+			*log_ptr = *(ptr + buf_off++);  /* perf cntr end lo */
+			log_buf_wrapinc(log_base, &log_ptr);
+
+		}
+		shared_buf_inc(profile->shared_size,
+				&profile->shared_tail,
+				SIZE_SHARED_ENTRY(cnt));
+
+	}
+	profile->log_head = log_ptr;
+	return;
+err:
+	/* on error, reset head/tail to the same value so we can recover later */
+	profile->log_head = profile->log_tail;
+}
+
+static int profile_enable_get(void *data, u64 *val)
+{
+	struct kgsl_device *device = data;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	mutex_lock(&device->mutex);
+	*val = adreno_profile_enabled(&adreno_dev->profile);
+	mutex_unlock(&device->mutex);
+
+	return 0;
+}
+
+static int profile_enable_set(void *data, u64 val)
+{
+	struct kgsl_device *device = data;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_profile *profile = &adreno_dev->profile;
+
+	mutex_lock(&device->mutex);
+
+	if (adreno_is_a2xx(adreno_dev)) {
+		mutex_unlock(&device->mutex);
+		return 0;
+	}
+
+	profile->enabled = val;
+
+	check_close_profile(profile);
+
+	mutex_unlock(&device->mutex);
+
+	return 0;
+}
+
+static ssize_t profile_assignments_read(struct file *filep,
+		char __user *ubuf, size_t max, loff_t *ppos)
+{
+	struct kgsl_device *device = (struct kgsl_device *) filep->private_data;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_profile *profile = &adreno_dev->profile;
+	struct adreno_profile_assigns_list *entry;
+	int len = 0, max_size = PAGE_SIZE;
+	char *buf, *pos;
+	ssize_t size = 0;
+
+	if (adreno_is_a2xx(adreno_dev))
+		return -EINVAL;
+
+	mutex_lock(&device->mutex);
+
+	buf = kmalloc(max_size, GFP_KERNEL);
+	if (!buf) {
+		mutex_unlock(&device->mutex);
+		return -ENOMEM;
+	}
+
+	pos = buf;
+
+	/* copy all assignments from the list to the string */
+	list_for_each_entry(entry, &profile->assignments_list, list) {
+		len = snprintf(pos, max_size, ASSIGNS_STR_FORMAT,
+				entry->name, entry->countable);
+
+		max_size -= len;
+		pos += len;
+	}
+
+	size = simple_read_from_buffer(ubuf, max, ppos, buf,
+			strlen(buf));
+
+	kfree(buf);
+
+	mutex_unlock(&device->mutex);
+	return size;
+}
+
+static void _remove_assignment(struct adreno_device *adreno_dev,
+		unsigned int groupid, unsigned int countable)
+{
+	struct adreno_profile *profile = &adreno_dev->profile;
+	struct adreno_profile_assigns_list *entry, *tmp;
+
+	list_for_each_entry_safe(entry, tmp, &profile->assignments_list, list) {
+		if (entry->groupid == groupid &&
+				entry->countable == countable) {
+			list_del(&entry->list);
+
+			profile->assignment_count--;
+
+			kfree(entry);
+
+			/* remove from perf counter allocation */
+			adreno_perfcounter_put(adreno_dev, groupid, countable,
+					PERFCOUNTER_FLAG_KERNEL);
+		}
+	}
+}
+
+static void _add_assignment(struct adreno_device *adreno_dev,
+		unsigned int groupid, unsigned int countable)
+{
+	struct adreno_profile *profile = &adreno_dev->profile;
+	unsigned int offset;
+	const char *name = NULL;
+
+	name = adreno_perfcounter_get_name(adreno_dev, groupid);
+	if (!name)
+		return;
+
+	/* if already in assigned list skip it */
+	if (_in_assignments_list(profile, groupid, countable))
+		return;
+
+	/* add to perf counter allocation, if fail skip it */
+	if (adreno_perfcounter_get(adreno_dev, groupid,
+				countable, &offset, PERFCOUNTER_FLAG_NONE))
+		return;
+
+	/* add to assignments list, put counter back if error */
+	if (!_add_to_assignments_list(profile, name, groupid,
+				countable, offset))
+		adreno_perfcounter_put(adreno_dev, groupid,
+				countable, PERFCOUNTER_FLAG_KERNEL);
+}
+
+static char *_parse_next_assignment(struct adreno_device *adreno_dev,
+		char *str, int *groupid, int *countable, bool *remove)
+{
+	char *groupid_str, *countable_str;
+	int ret;
+
+	*groupid = -EINVAL;
+	*countable = -EINVAL;
+	*remove = false;
+
+	/* remove spaces */
+	while (*str == ' ')
+		str++;
+
+	/* check if it's a remove assignment */
+	if (*str == '-') {
+		*remove = true;
+		str++;
+	}
+
+	/* get the groupid string */
+	groupid_str = str;
+	while (*str != ':') {
+		if (*str == '\0')
+			return NULL;
+		*str = tolower(*str);
+		str++;
+	}
+	if (groupid_str == str)
+		return NULL;
+
+	*str = '\0';
+	str++;
+
+	/* get the countable string */
+	countable_str = str;
+	while (*str != ' ' && *str != '\0')
+		str++;
+	if (countable_str == str)
+		return NULL;
+
+	*str = '\0';
+	str++;
+
+	/* set results */
+	*groupid = adreno_perfcounter_get_groupid(adreno_dev,
+			groupid_str);
+	if (*groupid < 0)
+		return NULL;
+	ret = kstrtou32(countable_str, 10, countable);
+	if (ret)
+		return NULL;
+
+	return str;
+}
+
+static ssize_t profile_assignments_write(struct file *filep,
+		const char __user *user_buf, size_t len, loff_t *off)
+{
+	struct kgsl_device *device = (struct kgsl_device *) filep->private_data;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_profile *profile = &adreno_dev->profile;
+	size_t size = 0;
+	char *buf, *pbuf;
+	bool remove_assignment = false;
+	int groupid, countable;
+
+	if (len >= PAGE_SIZE || len == 0)
+		return -EINVAL;
+
+	if (adreno_is_a2xx(adreno_dev))
+		return -ENOSPC;
+
+	mutex_lock(&device->mutex);
+
+	if (adreno_profile_enabled(profile)) {
+		size = -EINVAL;
+		goto error_unlock;
+	}
+
+	kgsl_active_count_get(device);
+
+	/*
+	 * When adding/removing assignments, ensure that the GPU is done with
+	 * all its work.  This helps to synchronize the work flow to the
+	 * GPU and avoid racy conditions.
+	 */
+	if (adreno_idle(device)) {
+		size = -EINVAL;
+		goto error_put;
+	}
+
+	/* clear all shared buffer results */
+	adreno_profile_process_results(device);
+
+	buf = kmalloc(len + 1, GFP_KERNEL);
+	if (!buf) {
+		size = -EINVAL;
+		goto error_put;
+	}
+
+	pbuf = buf;
+
+	/* clear the log buffer */
+	if (profile->log_buffer != NULL) {
+		profile->log_head = profile->log_buffer;
+		profile->log_tail = profile->log_buffer;
+	}
+
+	if (copy_from_user(buf, user_buf, len)) {
+		size = -EFAULT;
+		goto error_free;
+	}
+
+	/* for sanity and parsing, ensure it is null terminated */
+	buf[len] = '\0';
+
+	/* parse the buffer and add to (or remove from) the appropriate lists */
+	while (1) {
+		pbuf = _parse_next_assignment(adreno_dev, pbuf, &groupid,
+				&countable, &remove_assignment);
+		if (pbuf == NULL)
+			break;
+
+		if (remove_assignment)
+			_remove_assignment(adreno_dev, groupid, countable);
+		else
+			_add_assignment(adreno_dev, groupid, countable);
+	}
+
+	size = len;
+
+error_free:
+	kfree(buf);
+error_put:
+	kgsl_active_count_put(device);
+error_unlock:
+	mutex_unlock(&device->mutex);
+	return size;
+}
+
+static int _pipe_print_pending(char *ubuf, size_t max)
+{
+	loff_t unused = 0;
+	char str[] = "Operation Would Block!";
+
+	return simple_read_from_buffer(ubuf, max,
+			&unused, str, strlen(str));
+}
+
+static int _pipe_print_results(struct adreno_device *adreno_dev,
+		char *ubuf, size_t max)
+{
+	struct adreno_profile *profile = &adreno_dev->profile;
+	const char *grp_name;
+	char *usr_buf = ubuf;
+	unsigned int *log_ptr = NULL;
+	int len, i;
+	int status = 0;
+	ssize_t size, total_size = 0;
+	unsigned int cnt, api_type, ctxt_id, pid, tid, ts, cnt_reg;
+	unsigned long long pc_start, pc_end;
+	const char *api_str;
+	char format_space;
+	loff_t unused = 0;
+	char pipe_hdr_buf[51];   /* 4 uint32 + 5 spaces + 5 API type + '\0' */
+	char pipe_cntr_buf[63];  /* 2 uint64 + 1 uint32 + 4 spaces + 8 group */
+
+	/* convert unread entries to ASCII, copy to user-space */
+	log_ptr = profile->log_tail;
+
+	do {
+		cnt = *log_ptr & 0xffff;
+		if (SIZE_PIPE_ENTRY(cnt) > max) {
+			status = 0;
+			goto err;
+		}
+		if ((max - (usr_buf - ubuf)) < SIZE_PIPE_ENTRY(cnt))
+			break;
+
+		api_type = *log_ptr >> 16;
+		api_str = get_api_type_str(api_type);
+		log_buf_wrapinc(profile->log_buffer, &log_ptr);
+		pid = *log_ptr;
+		log_buf_wrapinc(profile->log_buffer, &log_ptr);
+		tid = *log_ptr;
+		log_buf_wrapinc(profile->log_buffer, &log_ptr);
+		ctxt_id =  *log_ptr;
+		log_buf_wrapinc(profile->log_buffer, &log_ptr);
+		ts = *log_ptr;
+		log_buf_wrapinc(profile->log_buffer, &log_ptr);
+		len = snprintf(pipe_hdr_buf, sizeof(pipe_hdr_buf) - 1,
+				"%u %u %u %.5s %u ",
+				pid, tid, ctxt_id, api_str, ts);
+		size = simple_read_from_buffer(usr_buf,
+				max - (usr_buf - ubuf),
+				&unused, pipe_hdr_buf, len);
+		if (size < 0) {
+			status = -EINVAL;
+			goto err;
+		}
+
+		unused = 0;
+		usr_buf += size;
+		total_size += size;
+
+		for (i = 0; i < cnt; i++) {
+			grp_name = adreno_perfcounter_get_name(
+					adreno_dev, *log_ptr >> 16);
+			if (grp_name == NULL) {
+				status = -EFAULT;
+				goto err;
+			}
+
+			if (i == cnt - 1)
+				format_space = '\n';
+			else
+				format_space = ' ';
+
+			cnt_reg = *log_ptr & 0xffff;
+			log_buf_wrapinc(profile->log_buffer, &log_ptr);
+			pc_start = *((unsigned long long *) log_ptr);
+			log_buf_wrapinc(profile->log_buffer, &log_ptr);
+			log_buf_wrapinc(profile->log_buffer, &log_ptr);
+			pc_end = *((unsigned long long *) log_ptr);
+			log_buf_wrapinc(profile->log_buffer, &log_ptr);
+			log_buf_wrapinc(profile->log_buffer, &log_ptr);
+
+			len = snprintf(pipe_cntr_buf,
+					sizeof(pipe_cntr_buf) - 1,
+					"%.8s:%u %llu %llu%c",
+					grp_name, cnt_reg, pc_start,
+					pc_end, format_space);
+
+			size = simple_read_from_buffer(usr_buf,
+					max - (usr_buf - ubuf),
+					&unused, pipe_cntr_buf, len);
+			if (size < 0) {
+				status = size;
+				goto err;
+			}
+			unused = 0;
+			usr_buf += size;
+			total_size += size;
+		}
+	} while (log_ptr != profile->log_head);
+
+	status = total_size;
+err:
+	profile->log_tail = log_ptr;
+
+	return status;
+}
+
+static int profile_pipe_print(struct file *filep, char __user *ubuf,
+		size_t max, loff_t *ppos)
+{
+	struct kgsl_device *device = (struct kgsl_device *) filep->private_data;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_profile *profile = &adreno_dev->profile;
+	char *usr_buf = ubuf;
+	int status = 0;
+
+	if (adreno_is_a2xx(adreno_dev))
+		return 0;
+
+	/*
+	 * This file is not seekable since it only supports streaming; ignore
+	 * any non-zero ppos
+	 */
+	/*
+	 * format <pid>  <tid> <context id> <cnt<<16 | client type> <timestamp>
+	 * for each perf counter <cntr_reg_off> <start hi & lo> <end hi & low>
+	 */
+
+	mutex_lock(&device->mutex);
+
+	while (1) {
+		/* process any results that are available into the log_buffer */
+		status = adreno_profile_process_results(device);
+		if (status > 0) {
+			/* if we have results, print them and exit */
+			status = _pipe_print_results(adreno_dev, usr_buf, max);
+			break;
+		}
+
+		/* there are no unread results, act accordingly */
+		if (filep->f_flags & O_NONBLOCK) {
+			if (profile->shared_tail != profile->shared_head) {
+				status = _pipe_print_pending(usr_buf, max);
+				break;
+			} else {
+				status = 0;
+				break;
+			}
+		}
+
+		mutex_unlock(&device->mutex);
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ / 10);
+		mutex_lock(&device->mutex);
+
+		if (signal_pending(current)) {
+			status = 0;
+			break;
+		}
+	}
+
+	check_close_profile(profile);
+	mutex_unlock(&device->mutex);
+
+	return status;
+}
+
+static int profile_groups_print(struct seq_file *s, void *unused)
+{
+	struct kgsl_device *device = (struct kgsl_device *) s->private;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+	struct adreno_perfcount_group *group;
+	int i, j, used;
+
+	/* perfcounter list not allowed on a2xx */
+	if (adreno_is_a2xx(adreno_dev))
+		return -EINVAL;
+
+	mutex_lock(&device->mutex);
+
+	for (i = 0; i < counters->group_count; ++i) {
+		group = &(counters->groups[i]);
+		/* get number of counters used for this group */
+		used = 0;
+		for (j = 0; j < group->reg_count; j++) {
+			if (group->regs[j].countable !=
+					KGSL_PERFCOUNTER_NOT_USED)
+				used++;
+		}
+
+		seq_printf(s, "%s %d %d\n", group->name,
+			group->reg_count, used);
+	}
+
+	mutex_unlock(&device->mutex);
+
+	return 0;
+}
+
+static int profile_groups_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, profile_groups_print, inode->i_private);
+}
+
+static const struct file_operations profile_groups_fops = {
+	.owner = THIS_MODULE,
+	.open = profile_groups_open,
+	.read = seq_read,
+	.llseek = noop_llseek,
+	.release = single_release,
+};
+
+static const struct file_operations profile_pipe_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = profile_pipe_print,
+	.llseek = noop_llseek,
+};
+
+static const struct file_operations profile_assignments_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = profile_assignments_read,
+	.write = profile_assignments_write,
+	.llseek = noop_llseek,
+};
+
+DEFINE_SIMPLE_ATTRIBUTE(profile_enable_fops,
+			profile_enable_get,
+			profile_enable_set, "%llu\n");
+
+void adreno_profile_init(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_profile *profile = &adreno_dev->profile;
+	struct dentry *profile_dir;
+	int ret;
+
+	profile->enabled = false;
+
+	/* allocate shared_buffer, which includes pre_ib and post_ib */
+	profile->shared_size = ADRENO_PROFILE_SHARED_BUF_SIZE_DWORDS;
+	ret = kgsl_allocate_contiguous(&profile->shared_buffer,
+			profile->shared_size * sizeof(unsigned int));
+	if (ret) {
+		profile->shared_buffer.hostptr = NULL;
+		profile->shared_size = 0;
+	}
+
+	INIT_LIST_HEAD(&profile->assignments_list);
+
+	/* Create perf counter debugfs */
+	profile_dir = debugfs_create_dir("profiling", device->d_debugfs);
+	if (IS_ERR(profile_dir))
+		return;
+
+	debugfs_create_file("enable",  0644, profile_dir, device,
+			&profile_enable_fops);
+	debugfs_create_file("blocks", 0444, profile_dir, device,
+			&profile_groups_fops);
+	debugfs_create_file("pipe", 0444, profile_dir, device,
+			&profile_pipe_fops);
+	debugfs_create_file("assignments", 0644, profile_dir, device,
+			&profile_assignments_fops);
+}
+
+void adreno_profile_close(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_profile *profile = &adreno_dev->profile;
+	struct adreno_profile_assigns_list *entry, *tmp;
+
+	profile->enabled = false;
+	vfree(profile->log_buffer);
+	profile->log_buffer = NULL;
+	profile->log_head = NULL;
+	profile->log_tail = NULL;
+	profile->shared_head = 0;
+	profile->shared_tail = 0;
+	kgsl_sharedmem_free(&profile->shared_buffer);
+	profile->shared_buffer.hostptr = NULL;
+	profile->shared_size = 0;
+
+	profile->assignment_count = 0;
+
+	list_for_each_entry_safe(entry, tmp, &profile->assignments_list, list) {
+		list_del(&entry->list);
+		kfree(entry);
+	}
+}
+
+int adreno_profile_process_results(struct kgsl_device *device)
+{
+
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_profile *profile = &adreno_dev->profile;
+	unsigned int shared_buf_tail = profile->shared_tail;
+
+	if (!results_available(device, &shared_buf_tail)) {
+		check_close_profile(profile);
+		return 0;
+	}
+
+	/* allocate profile_log_buffer if needed */
+	if (profile->log_buffer == NULL) {
+		profile->log_buffer = vmalloc(ADRENO_PROFILE_LOG_BUF_SIZE);
+		if (profile->log_buffer == NULL)
+			return -ENOMEM;
+		profile->log_tail = profile->log_buffer;
+		profile->log_head = profile->log_buffer;
+	}
+
+	/*
+	 * transfer retired results to log_buffer
+	 * update shared_buffer tail ptr
+	 */
+	transfer_results(device, shared_buf_tail);
+
+	/* check for any cleanup */
+	check_close_profile(profile);
+
+	return 1;
+}
+
+void adreno_profile_preib_processing(struct kgsl_device *device,
+		unsigned int context_id, unsigned int *cmd_flags,
+		unsigned int **rbptr, unsigned int *cmds_gpu)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_profile *profile = &adreno_dev->profile;
+	int count = profile->assignment_count;
+	unsigned int entry_head = profile->shared_head;
+	unsigned int *shared_ptr;
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	unsigned int rbcmds[3] = { cp_nop_packet(2),
+		KGSL_NOP_IB_IDENTIFIER, KGSL_NOP_IB_IDENTIFIER };
+
+	*cmd_flags &= ~KGSL_CMD_FLAGS_PROFILE;
+
+	if (!adreno_profile_assignments_ready(profile))
+		goto done;
+
+	/*
+	 * Check if space is available; include the post_ib in the check so we
+	 * don't have to handle undoing the pre_ib insertion in the ringbuffer
+	 * in the case where only the post_ib runs out of space
+	 */
+	if (SIZE_SHARED_ENTRY(count) >= shared_buf_available(profile))
+		goto done;
+
+	if (entry_head + SIZE_SHARED_ENTRY(count) > profile->shared_size) {
+		/* entry_head would wrap, start entry_head at 0 in buffer */
+		entry_head = 0;
+		profile->shared_size = profile->shared_head;
+		profile->shared_head = 0;
+		if (profile->shared_tail == profile->shared_size)
+			profile->shared_tail = 0;
+
+		/* recheck space available */
+		if (SIZE_SHARED_ENTRY(count) >= shared_buf_available(profile))
+			goto done;
+	}
+
+	/* zero out the counter area of shared_buffer entry_head */
+	shared_ptr = entry_head + ((unsigned int *)
+			profile->shared_buffer.hostptr);
+	memset(shared_ptr, 0, SIZE_SHARED_ENTRY(count) * sizeof(unsigned int));
+
+	/* reserve space for the pre ib shared buffer */
+	shared_buf_inc(profile->shared_size, &profile->shared_head,
+			SIZE_SHARED_ENTRY(count));
+
+	/* create the shared ibdesc */
+	_build_pre_ib_cmds(profile, rbcmds, entry_head,
+			rb->global_ts + 1, context_id);
+
+	/* set flag to sync with post ib commands */
+	*cmd_flags |= KGSL_CMD_FLAGS_PROFILE;
+
+done:
+	/* write the ibdesc to the ringbuffer */
+	GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[0]);
+	GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[1]);
+	GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[2]);
+}
+
+void adreno_profile_postib_processing(struct kgsl_device *device,
+		unsigned int *cmd_flags, unsigned int **rbptr,
+		unsigned int *cmds_gpu)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_profile *profile = &adreno_dev->profile;
+	int count = profile->assignment_count;
+	unsigned int entry_head = profile->shared_head -
+		SIZE_SHARED_ENTRY(count);
+	unsigned int rbcmds[3] = { cp_nop_packet(2),
+		KGSL_NOP_IB_IDENTIFIER, KGSL_NOP_IB_IDENTIFIER };
+
+	if (!adreno_profile_assignments_ready(profile))
+		goto done;
+
+	if (!(*cmd_flags & KGSL_CMD_FLAGS_PROFILE))
+		goto done;
+
+	/* create the shared ibdesc */
+	_build_post_ib_cmds(profile, rbcmds, entry_head);
+
+done:
+	/* write the ibdesc to the ringbuffer */
+	GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[0]);
+	GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[1]);
+	GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[2]);
+
+	/* reset the sync flag */
+	*cmd_flags &= ~KGSL_CMD_FLAGS_PROFILE;
+}
+
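
The per-entry sizing comments near the top of this file (SIZE_DATA, SIZE_PREIB, SIZE_POSTIB) and the wraparound handling in adreno_profile_preib_processing() boil down to the arithmetic in the sketch below. It reuses the same size formulas; DEMO_SHARED_SIZE and the sample values are illustrative only, not the driver's configuration.

	#include <stdio.h>

	/* same dword-count formulas as the macros above */
	#define SIZE_DATA(cnt)   (3 + (cnt) * 5)
	#define SIZE_PREIB(cnt)  (13 + (cnt) * 9)
	#define SIZE_POSTIB(cnt) (4 + (cnt) * 6)
	#define SIZE_SHARED_ENTRY(cnt) \
		(SIZE_DATA(cnt) + SIZE_PREIB(cnt) + SIZE_POSTIB(cnt))

	#define DEMO_SHARED_SIZE 4096	/* illustrative buffer size, in dwords */

	/* circular advance, mirroring shared_buf_inc() */
	static void shared_buf_inc(unsigned int max_size, unsigned int *offset,
				   unsigned int inc)
	{
		*offset = (*offset + inc) % max_size;
	}

	int main(void)
	{
		unsigned int head = 4000;	/* near the end of the buffer */
		unsigned int counters = 8;	/* assigned countables */

		/* data + pre-IB + post-IB dwords reserved for one entry */
		printf("entry size for %u counters: %u dwords\n",
		       counters, (unsigned int) SIZE_SHARED_ENTRY(counters));

		shared_buf_inc(DEMO_SHARED_SIZE, &head, SIZE_SHARED_ENTRY(counters));
		printf("new head: %u\n", head);	/* wrapped modulo the buffer size */
		return 0;
	}
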
diff --git a/drivers/gpu/msm/adreno_profile.h b/drivers/gpu/msm/adreno_profile.h
new file mode 100644
index 0000000..d91b09b
--- /dev/null
+++ b/drivers/gpu/msm/adreno_profile.h
@@ -0,0 +1,92 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_PROFILE_H
+#define __ADRENO_PROFILE_H
+#include <linux/seq_file.h>
+
+/**
+ * struct adreno_profile_assigns_list: linked list for assigned perf counters
+ * @list: linkage  for nodes in list
+ * @name: perf counter group name
+ * @groupid: group id
+ * @countable: countable assigned to perfcounter
+ * @offset: perfcounter register address offset
+ */
+struct adreno_profile_assigns_list {
+	struct list_head list;
+	char name[25];
+	unsigned int groupid;
+	unsigned int countable;
+	unsigned int offset;   /* LO offset,  HI offset is +1 */
+};
+
+struct adreno_profile {
+	struct list_head assignments_list; /* list of all assignments */
+	unsigned int assignment_count;  /* Number of assigned counters */
+	unsigned int *log_buffer;
+	unsigned int *log_head;
+	unsigned int *log_tail;
+	bool enabled;
+	/* counter, pre_ib, and post_ib held in one large circular buffer
+	 * shared between kgsl and GPU
+	 * counter entry 0
+	 * pre_ib entry 0
+	 * post_ib entry 0
+	 * ...
+	 * counter entry N
+	 * pre_ib entry N
+	 * post_ib entry N
+	 */
+	struct kgsl_memdesc shared_buffer;
+	unsigned int shared_head;
+	unsigned int shared_tail;
+	unsigned int shared_size;
+};
+
+#define ADRENO_PROFILE_SHARED_BUF_SIZE_DWORDS (48 * 4096 / sizeof(uint))
+/* sized at 48 pages; allows for at least 50 outstanding IBs, 1755 max */
+
+#define ADRENO_PROFILE_LOG_BUF_SIZE  (1024 * 920)
+/* sized for 1024 log buffer entries with all 45 counters assigned, 230 pages */
+#define ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS  (ADRENO_PROFILE_LOG_BUF_SIZE / \
+						sizeof(unsigned int))
+
+void adreno_profile_init(struct kgsl_device *device);
+void adreno_profile_close(struct kgsl_device *device);
+int adreno_profile_process_results(struct kgsl_device *device);
+void adreno_profile_preib_processing(struct kgsl_device *device,
+		unsigned int context_id, unsigned int *cmd_flags,
+		unsigned int **rbptr, unsigned int *cmds_gpu);
+void adreno_profile_postib_processing(struct kgsl_device *device,
+		unsigned int *cmd_flags, unsigned int **rbptr,
+		unsigned int *cmds_gpu);
+
+static inline bool adreno_profile_enabled(struct adreno_profile *profile)
+{
+	return profile->enabled;
+}
+
+static inline bool adreno_profile_has_assignments(
+	struct adreno_profile *profile)
+{
+	return list_empty(&profile->assignments_list) ? false : true;
+}
+
+static inline bool adreno_profile_assignments_ready(
+	struct adreno_profile *profile)
+{
+	return adreno_profile_enabled(profile) &&
+		adreno_profile_has_assignments(profile);
+}
+
+#endif
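
The debugfs "assignments" file exposed by this interface takes whitespace-separated "group:countable" tokens, optionally prefixed with '-' to remove an assignment (see ASSIGNS_STR_FORMAT and _parse_next_assignment() in adreno_profile.c). The following simplified parsing sketch illustrates that format; it is not the driver's parser, and the group names in the sample line are only examples.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* parse tokens such as "sp:7" or "-tp:3" from a writable buffer */
	static void parse_assignments(char *buf)
	{
		char *tok = strtok(buf, " \n");

		while (tok) {
			int remove = 0;
			char *colon;

			if (*tok == '-') {
				remove = 1;
				tok++;
			}

			colon = strchr(tok, ':');
			if (colon) {
				*colon = '\0';
				printf("%s group=%s countable=%ld\n",
				       remove ? "remove" : "add",
				       tok, strtol(colon + 1, NULL, 10));
			}

			tok = strtok(NULL, " \n");
		}
	}

	int main(void)
	{
		char line[] = "sp:7 tp:6 -sp:7";	/* example assignment string */

		parse_assignments(line);
		return 0;
	}
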
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index e03f708..1ad90fd 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -67,11 +67,8 @@
 	unsigned long wait_time;
 	unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
 	unsigned long wait_time_part;
-	unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
 	unsigned int rptr;
 
-	memset(prev_reg_val, 0, sizeof(prev_reg_val));
-
 	/* if wptr ahead, fill the remaining with NOPs */
 	if (wptr_ahead) {
 		/* -1 for header */
@@ -91,10 +88,6 @@
 			rptr = adreno_get_rptr(rb);
 		} while (!rptr);
 
-		rb->wptr++;
-
-		adreno_ringbuffer_submit(rb);
-
 		rb->wptr = 0;
 	}
 
@@ -109,43 +102,13 @@
 		if (freecmds == 0 || freecmds > numcmds)
 			break;
 
-		/* Dont wait for timeout, detect hang faster.
-		 */
-		if (time_after(jiffies, wait_time_part)) {
-			wait_time_part = jiffies +
-				msecs_to_jiffies(KGSL_TIMEOUT_PART);
-			if ((adreno_ft_detect(rb->device,
-						prev_reg_val))){
-				KGSL_DRV_ERR(rb->device,
-				"Hang detected while waiting for freespace in"
-				"ringbuffer rptr: 0x%x, wptr: 0x%x\n",
-				rptr, rb->wptr);
-				goto err;
-			}
-		}
-
 		if (time_after(jiffies, wait_time)) {
 			KGSL_DRV_ERR(rb->device,
 			"Timed out while waiting for freespace in ringbuffer "
 			"rptr: 0x%x, wptr: 0x%x\n", rptr, rb->wptr);
-			goto err;
+			return -ETIMEDOUT;
 		}
 
-		continue;
-
-err:
-		if (!adreno_dump_and_exec_ft(rb->device)) {
-			if (context && context->flags & CTXT_FLAGS_GPU_HANG) {
-				KGSL_CTXT_WARN(rb->device,
-				"Context %p caused a gpu hang. Will not accept commands for context %d\n",
-				context, context->base.id);
-				return -EDEADLK;
-			}
-			wait_time = jiffies + wait_timeout;
-		} else {
-			/* GPU is hung and fault tolerance failed */
-			BUG();
-		}
 	}
 	return 0;
 }
@@ -184,7 +147,8 @@
 	if (!ret) {
 		ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
 		rb->wptr += numcmds;
-	}
+	} else
+		ptr = ERR_PTR(ret);
 
 	return ptr;
 }
@@ -351,7 +315,6 @@
 int _ringbuffer_start_common(struct adreno_ringbuffer *rb)
 {
 	int status;
-	/*cp_rb_cntl_u cp_rb_cntl; */
 	union reg_cp_rb_cntl cp_rb_cntl;
 	unsigned int rb_cntl;
 	struct kgsl_device *device = rb->device;
@@ -572,28 +535,44 @@
 
 static int
 adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
-				struct adreno_context *context,
+				struct adreno_context *drawctxt,
 				unsigned int flags, unsigned int *cmds,
-				int sizedwords)
+				int sizedwords, uint32_t timestamp)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
 	unsigned int *ringcmds;
 	unsigned int total_sizedwords = sizedwords;
 	unsigned int i;
 	unsigned int rcmd_gpu;
-	unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
+	unsigned int context_id;
 	unsigned int gpuaddr = rb->device->memstore.gpuaddr;
-	unsigned int timestamp;
+	bool profile_ready;
 
 	/*
-	 * if the context was not created with per context timestamp
-	 * support, we must use the global timestamp since issueibcmds
-	 * will be returning that one, or if an internal issue then
-	 * use global timestamp.
+	 * If in-stream IB profiling is enabled and there are counters
+	 * assigned, then space needs to be reserved for profiling.  This
+	 * space in the ringbuffer is always consumed (it might be filled
+	 * with NOPs in the error case).  profile_ready needs to stay
+	 * consistent through the _addcmds call since it determines the
+	 * additional ringbuffer command space that is allocated.
 	 */
-	if ((context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) &&
-		!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
-		context_id = context->base.id;
+	profile_ready = !adreno_is_a2xx(adreno_dev) &&
+		adreno_profile_assignments_ready(&adreno_dev->profile) &&
+		!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE);
+
+	/* The global timestamp always needs to be incremented */
+	rb->global_ts++;
+
+	/* If this is an internal IB, use the global timestamp for it */
+	if (!drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+		timestamp = rb->global_ts;
+		context_id = KGSL_MEMSTORE_GLOBAL;
+	} else {
+		context_id = drawctxt->base.id;
+	}
+
+	if (drawctxt)
+		drawctxt->internal_timestamp = rb->global_ts;
 
 	/* reserve space to temporarily turn off protected mode
 	*  error checking if needed
@@ -604,13 +583,8 @@
 	/* internal ib command identifier for the ringbuffer */
 	total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
 
-	/* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
-	total_sizedwords += context ? 13 : 0;
-
-	if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
-		(flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
-		KGSL_CMD_FLAGS_GET_INT)))
-			total_sizedwords += 2;
+	/* Add two dwords for the CP_INTERRUPT */
+	total_sizedwords += drawctxt ? 2 : 0;
 
 	if (adreno_is_a3xx(adreno_dev))
 		total_sizedwords += 7;
@@ -618,22 +592,31 @@
 	if (adreno_is_a2xx(adreno_dev))
 		total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */
 
-	total_sizedwords += 2; /* scratchpad ts for fault tolerance */
 	total_sizedwords += 3; /* sop timestamp */
 	total_sizedwords += 4; /* eop timestamp */
 
-	if (KGSL_MEMSTORE_GLOBAL != context_id)
-		total_sizedwords += 3; /* global timestamp without cache
-					* flush for non-zero context */
-
 	if (adreno_is_a20x(adreno_dev))
 		total_sizedwords += 2; /* CACHE_FLUSH */
 
-	if (flags & KGSL_CMD_FLAGS_EOF)
-		total_sizedwords += 2;
+	if (drawctxt) {
+		total_sizedwords += 3; /* global timestamp without cache
+					* flush for non-zero context */
+	}
 
-	ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords);
-	if (!ringcmds)
+	if (adreno_is_a20x(adreno_dev))
+		total_sizedwords += 2; /* CACHE_FLUSH */
+
+	if (flags & KGSL_CMD_FLAGS_WFI)
+		total_sizedwords += 2; /* WFI */
+
+	if (profile_ready)
+		total_sizedwords += 6;   /* space for pre_ib and post_ib */
+
+	ringcmds = adreno_ringbuffer_allocspace(rb, drawctxt, total_sizedwords);
+
+	if (IS_ERR(ringcmds))
+		return PTR_ERR(ringcmds);
+	if (ringcmds == NULL)
 		return -ENOSPC;
 
 	rcmd_gpu = rb->buffer_desc.gpuaddr
@@ -648,20 +631,10 @@
 				KGSL_CMD_INTERNAL_IDENTIFIER);
 	}
 
-	/* always increment the global timestamp. once. */
-	rb->global_ts++;
-
-	if (KGSL_MEMSTORE_GLOBAL != context_id)
-		timestamp = context->timestamp;
-	else
-		timestamp = rb->global_ts;
-
-	/* scratchpad ts for fault tolerance */
-	GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
-		cp_type0_packet(adreno_getreg(adreno_dev,
-			ADRENO_REG_CP_TIMESTAMP), 1));
-	GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
-			rb->global_ts);
+	/* Add any IB required for profiling if it is enabled */
+	if (profile_ready)
+		adreno_profile_preib_processing(rb->device, drawctxt->base.id,
+				&flags, &ringcmds, &rcmd_gpu);
 
 	/* start-of-pipeline timestamp */
 	GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
@@ -714,6 +687,12 @@
 		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x00);
 	}
 
+	/* Add any post-IB required for profiling if it is enabled and has
+	   assigned counters */
+	if (profile_ready)
+		adreno_profile_postib_processing(rb->device, &flags,
+						 &ringcmds, &rcmd_gpu);
+
 	/*
 	 * end-of-pipeline timestamp.  If per-context timestamps are not
 	 * enabled, then context_id will be KGSL_MEMSTORE_GLOBAL so all
@@ -726,7 +705,7 @@
 		KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
 	GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp);
 
-	if (KGSL_MEMSTORE_GLOBAL != context_id) {
+	if (drawctxt) {
 		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_MEM_WRITE, 2));
 		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
@@ -742,56 +721,13 @@
 		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, CACHE_FLUSH);
 	}
 
-	if (context) {
-		/* Conditional execution based on memory values */
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
-			cp_type3_packet(CP_COND_EXEC, 4));
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
-			KGSL_MEMSTORE_OFFSET(
-				context_id, ts_cmp_enable)) >> 2);
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
-			KGSL_MEMSTORE_OFFSET(
-				context_id, ref_wait_ts)) >> 2);
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp);
-		/* # of conditional command DWORDs */
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 8);
-
-		/* Clear the ts_cmp_enable for the context */
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
-			cp_type3_packet(CP_MEM_WRITE, 2));
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, gpuaddr +
-			KGSL_MEMSTORE_OFFSET(
-				context_id, ts_cmp_enable));
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x0);
-
-		/* Clear the ts_cmp_enable for the global timestamp */
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
-			cp_type3_packet(CP_MEM_WRITE, 2));
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, gpuaddr +
-			KGSL_MEMSTORE_OFFSET(
-				KGSL_MEMSTORE_GLOBAL, ts_cmp_enable));
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x0);
-
-		/* Trigger the interrupt */
+	if (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
 		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_INTERRUPT, 1));
 		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
 				CP_INT_CNTL__RB_INT_MASK);
 	}
 
-	/*
-	 * If per context timestamps are enabled and any of the kgsl
-	 * internal commands want INT to be generated trigger the INT
-	*/
-	if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
-		(flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
-		KGSL_CMD_FLAGS_GET_INT))) {
-			GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
-				cp_type3_packet(CP_INTERRUPT, 1));
-			GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
-				CP_INT_CNTL__RB_INT_MASK);
-	}
-
 	if (adreno_is_a3xx(adreno_dev)) {
 		/* Dummy set-constant to trigger context rollover */
 		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
@@ -801,10 +737,10 @@
 		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0);
 	}
 
-	if (flags & KGSL_CMD_FLAGS_EOF) {
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_nop_packet(1));
+	if (flags & KGSL_CMD_FLAGS_WFI) {
 		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
-				KGSL_END_OF_FRAME_IDENTIFIER);
+			cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
+		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x00000000);
 	}
 
 	adreno_ringbuffer_submit(rb);
@@ -822,14 +758,10 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
 
-	if (device->state & KGSL_STATE_HUNG)
-		return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
-					KGSL_TIMESTAMP_RETIRED);
-
 	flags |= KGSL_CMD_FLAGS_INTERNAL_ISSUE;
 
 	return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds,
-							sizedwords);
+		sizedwords, 0);
 }
 
 static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
@@ -1022,55 +954,113 @@
 	return ret;
 }
 
-int
-adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
-				struct kgsl_context *context,
-				struct kgsl_ibdesc *ibdesc,
-				unsigned int numibs,
-				uint32_t *timestamp,
-				unsigned int flags)
+/**
+ * _ringbuffer_verify_ib() - parse an IB and verify that it is correct
+ * @dev_priv: Pointer to the process struct
+ * @ibdesc: Pointer to the IB descriptor
+ *
+ * This function only gets called if debugging is enabled - it walks the IB and
+ * does additional levels of parsing and verification above and beyond what the
+ * KGSL core does
+ */
+static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv,
+		struct kgsl_ibdesc *ibdesc)
 {
 	struct kgsl_device *device = dev_priv->device;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	unsigned int *link = 0;
+
+	/* Check that the size of the IB is under the allowable limit */
+	if (ibdesc->sizedwords == 0 || ibdesc->sizedwords > 0xFFFFF) {
+		KGSL_DRV_ERR(device, "Invalid IB size 0x%X\n",
+				ibdesc->sizedwords);
+		return false;
+	}
+
+	if (unlikely(adreno_dev->ib_check_level >= 1) &&
+		!_parse_ibs(dev_priv, ibdesc->gpuaddr, ibdesc->sizedwords)) {
+		KGSL_DRV_ERR(device, "Could not verify the IBs\n");
+		return false;
+	}
+
+	return true;
+}
+
+int
+adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
+				struct kgsl_context *context,
+				struct kgsl_cmdbatch *cmdbatch,
+				uint32_t *timestamp)
+{
+	struct kgsl_device *device = dev_priv->device;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+	int i, ret;
+
+	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+		return -EDEADLK;
+
+	/* Verify the IBs before they get queued */
+
+	for (i = 0; i < cmdbatch->ibcount; i++) {
+		if (!_ringbuffer_verify_ib(dev_priv, &cmdbatch->ibdesc[i]))
+			return -EINVAL;
+	}
+
+	/* Queue the command in the ringbuffer */
+	ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
+		timestamp);
+
+	if (ret)
+		KGSL_DRV_ERR(device,
+			"adreno_dispatcher_queue_cmd returned %d\n", ret);
+
+	return ret;
+}
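
With this change, issueibcmds no longer writes the ringbuffer directly; it validates the IBs and queues the cmdbatch. A sketch of the other half of the flow, assuming the dispatcher (adreno_dispatcher.c, not part of this section) later hands the batch to the function defined below:

/* Sketch only: the real dispatcher logic lives in adreno_dispatcher.c.  Once a
 * queued cmdbatch has no pending sync points, it is handed to the ringbuffer
 * via adreno_ringbuffer_submitcmd().
 */
static int example_dispatcher_submit(struct adreno_device *adreno_dev,
		struct kgsl_cmdbatch *cmdbatch)
{
	return adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch);
}
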
+
+/* adreno_ringbuffer_submitcmd - submit userspace IBs to the GPU */
+int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
+		struct kgsl_cmdbatch *cmdbatch)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	struct kgsl_ibdesc *ibdesc;
+	unsigned int numibs;
+	unsigned int *link;
 	unsigned int *cmds;
 	unsigned int i;
-	struct adreno_context *drawctxt = NULL;
+	struct kgsl_context *context;
+	struct adreno_context *drawctxt;
 	unsigned int start_index = 0;
+	int flags = KGSL_CMD_FLAGS_NONE;
 	int ret;
 
-	if (device->state & KGSL_STATE_HUNG) {
-		ret = -EBUSY;
-		goto done;
-	}
-
-	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
-	      context == NULL || ibdesc == 0 || numibs == 0) {
-		ret = -EINVAL;
-		goto done;
-	}
+	context = cmdbatch->context;
 	drawctxt = ADRENO_CONTEXT(context);
 
-	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
-		ret = -EDEADLK;
-		goto done;
-	}
+	ibdesc = cmdbatch->ibdesc;
+	numibs = cmdbatch->ibcount;
+
+	/* Process any available profiling results into the log_buf */
+	adreno_profile_process_results(device);
 
 	/* When preamble is enabled, the preamble buffer with state restoration
 	commands is stored in the first node of the IB chain. We can skip that
 	if a context switch hasn't occurred */
 
-	if (drawctxt->flags & CTXT_FLAGS_PREAMBLE &&
-		adreno_dev->drawctxt_active == drawctxt)
+	if ((drawctxt->flags & CTXT_FLAGS_PREAMBLE) &&
+		!test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) &&
+		(adreno_dev->drawctxt_active == drawctxt))
 		start_index = 1;
 
-	if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
-		if (flags & KGSL_CMD_FLAGS_EOF)
-			drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
-		if (start_index)
-			numibs = 1;
-		else
-			numibs = 0;
+	/*
+	 * In skip mode don't issue the draw IBs but keep all the other
+	 * accoutrements of a submission (including the interrupt) to keep
+	 * the accounting sane. Set start_index and numibs to 0 to just
+	 * generate the start and end markers and skip everything else
+	 */
+
+	if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) {
+		start_index = 0;
+		numibs = 0;
 	}
 
 	cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
@@ -1091,19 +1081,17 @@
 		*cmds++ = ibdesc[0].sizedwords;
 	}
 	for (i = start_index; i < numibs; i++) {
-		if (unlikely(adreno_dev->ib_check_level >= 1 &&
-		    !_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
-				ibdesc[i].sizedwords))) {
-			ret = -EINVAL;
-			goto done;
-		}
 
-		if (ibdesc[i].sizedwords == 0) {
-			ret = -EINVAL;
-			goto done;
-		}
+		/*
+		 * Skip zero-sized IBs - these are presumed to have been removed
+		 * from consideration by the FT policy
+		 */
 
-		*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
+		if (ibdesc[i].sizedwords == 0)
+			*cmds++ = cp_nop_packet(2);
+		else
+			*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
+
 		*cmds++ = ibdesc[i].gpuaddr;
 		*cmds++ = ibdesc[i].sizedwords;
 	}
@@ -1111,253 +1099,47 @@
 	*cmds++ = cp_nop_packet(1);
 	*cmds++ = KGSL_END_OF_IB_IDENTIFIER;
 
-	kgsl_setstate(&device->mmu, context->id,
+	ret = kgsl_setstate(&device->mmu, context->id,
 		      kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
 					device->id));
 
-	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
-
-	if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
-		if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0) {
-			KGSL_DRV_ERR(device,
-				"Invalid user generated ts <%d:0x%x>, "
-				"less than last issued ts <%d:0x%x>\n",
-				context->id, *timestamp, context->id,
-				drawctxt->timestamp);
-			return -ERANGE;
-		}
-		drawctxt->timestamp = *timestamp;
-	} else
-		drawctxt->timestamp++;
-
-	ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
-					drawctxt,
-					(flags & KGSL_CMD_FLAGS_EOF),
-					&link[0], (cmds - link));
 	if (ret)
 		goto done;
 
-	if (drawctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
-		*timestamp = drawctxt->timestamp;
-	else
-		*timestamp = adreno_dev->ringbuffer.global_ts;
+	ret = adreno_drawctxt_switch(adreno_dev, drawctxt, cmdbatch->flags);
+
+	/*
+	 * In the unlikely event of an error in the drawctxt switch,
+	 * treat it like a hang
+	 */
+	if (ret)
+		goto done;
+
+	if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv))
+		flags = KGSL_CMD_FLAGS_WFI;
+
+	ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
+					drawctxt,
+					flags,
+					&link[0], (cmds - link),
+					cmdbatch->timestamp);
 
 #ifdef CONFIG_MSM_KGSL_CFF_DUMP
+	if (ret)
+		goto done;
 	/*
 	 * insert wait for idle after every IB1
 	 * this is conservative but works reliably and is ok
 	 * even for performance simulations
 	 */
-	adreno_idle(device);
+	ret = adreno_idle(device);
 #endif
 
-	/*
-	 * If context hung and recovered then return error so that the
-	 * application may handle it
-	 */
-	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_FT) {
-		drawctxt->flags &= ~CTXT_FLAGS_GPU_HANG_FT;
-		ret = -EPROTO;
-	} else
-		ret = 0;
-
 done:
-	device->pwrctrl.irq_last = 0;
-	kgsl_trace_issueibcmds(device, context ? context->id : 0, ibdesc,
-		numibs, *timestamp, flags, ret,
-		drawctxt ? drawctxt->type : 0);
+	kgsl_trace_issueibcmds(device, context->id, cmdbatch,
+		cmdbatch->timestamp, cmdbatch->flags, ret,
+		drawctxt->type);
 
 	kfree(link);
 	return ret;
 }
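
For reference, the IB chain assembled above reduces to the loop below; this is a simplified restatement of the code in this function, not new behavior. Each IB contributes three dwords, and zero-sized IBs dropped by the FT policy are padded with a two-dword NOP so the chain keeps its shape.

/* Simplified restatement of the IB chain construction in
 * adreno_ringbuffer_submitcmd(); only types and macros that appear in the
 * surrounding code are used.
 */
static unsigned int *example_build_ib_chain(unsigned int *cmds,
		struct kgsl_ibdesc *ibdesc, unsigned int numibs)
{
	unsigned int i;

	for (i = 0; i < numibs; i++) {
		/* A dropped (zero-sized) IB still occupies three dwords */
		*cmds++ = ibdesc[i].sizedwords ?
			CP_HDR_INDIRECT_BUFFER_PFD : cp_nop_packet(2);
		*cmds++ = ibdesc[i].gpuaddr;
		*cmds++ = ibdesc[i].sizedwords;
	}

	return cmds;
}
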
-
-static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
-				unsigned int rb_rptr)
-{
-	unsigned int temp_rb_rptr = rb_rptr;
-	unsigned int size = rb->buffer_desc.size;
-	unsigned int val[2];
-	int i = 0;
-	bool check = false;
-	bool cmd_start = false;
-
-	/* Go till the start of the ib sequence and turn on preamble */
-	while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
-		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
-		if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) {
-			/* decrement i */
-			i = (i + 1) % 2;
-			if (val[i] == cp_nop_packet(4)) {
-				temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
-						temp_rb_rptr, size);
-				kgsl_sharedmem_writel(rb->device,
-					&rb->buffer_desc,
-					temp_rb_rptr, cp_nop_packet(1));
-			}
-			KGSL_FT_INFO(rb->device,
-			"Turned preamble on at offset 0x%x\n",
-			temp_rb_rptr / 4);
-			break;
-		}
-		/* If you reach beginning of next command sequence then exit
-		 * First command encountered is the current one so don't break
-		 * on that. */
-		if (KGSL_CMD_IDENTIFIER == val[i]) {
-			if (cmd_start)
-				break;
-			cmd_start = true;
-		}
-
-		i = (i + 1) % 2;
-		if (1 == i)
-			check = true;
-		temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
-								size);
-	}
-}
-
-void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
-				struct adreno_ft_data *ft_data)
-{
-	struct kgsl_device *device = rb->device;
-	unsigned int rb_rptr = ft_data->start_of_replay_cmds;
-	unsigned int good_rb_idx = 0, bad_rb_idx = 0, temp_rb_idx = 0;
-	unsigned int last_good_cmd_end_idx = 0, last_bad_cmd_end_idx = 0;
-	unsigned int cmd_start_idx = 0;
-	unsigned int val1 = 0;
-	int copy_rb_contents = 0;
-	unsigned int temp_rb_rptr;
-	struct kgsl_context *k_ctxt;
-	struct adreno_context *a_ctxt;
-	unsigned int size = rb->buffer_desc.size;
-	unsigned int *temp_rb_buffer = ft_data->rb_buffer;
-	int *rb_size = &ft_data->rb_size;
-	unsigned int *bad_rb_buffer = ft_data->bad_rb_buffer;
-	int *bad_rb_size = &ft_data->bad_rb_size;
-	unsigned int *good_rb_buffer = ft_data->good_rb_buffer;
-	int *good_rb_size = &ft_data->good_rb_size;
-
-	/*
-	 * If the start index from where commands need to be copied is invalid
-	 * then no need to save off any commands
-	 */
-	if (0xFFFFFFFF == ft_data->start_of_replay_cmds)
-		return;
-
-	k_ctxt = kgsl_context_get(device, ft_data->context_id);
-
-	if (k_ctxt) {
-		a_ctxt = ADRENO_CONTEXT(k_ctxt);
-		if (a_ctxt->flags & CTXT_FLAGS_PREAMBLE)
-			_turn_preamble_on_for_ib_seq(rb, rb_rptr);
-		kgsl_context_put(k_ctxt);
-	}
-	k_ctxt = NULL;
-
-	/* Walk the rb from the context switch. Omit any commands
-	 * for an invalid context. */
-	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
-		kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
-
-		if (KGSL_CMD_IDENTIFIER == val1) {
-			/* Start is the NOP dword that comes before
-			 * KGSL_CMD_IDENTIFIER */
-			cmd_start_idx = temp_rb_idx - 1;
-			if ((copy_rb_contents) && (good_rb_idx))
-				last_good_cmd_end_idx = good_rb_idx - 1;
-			if ((!copy_rb_contents) && (bad_rb_idx))
-				last_bad_cmd_end_idx = bad_rb_idx - 1;
-		}
-
-		/* check for context switch indicator */
-		if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
-			unsigned int temp_idx, val2;
-			/* increment by 3 to get to the context_id */
-			temp_rb_rptr = rb_rptr + (3 * sizeof(unsigned int)) %
-					size;
-			kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
-						temp_rb_rptr);
-
-			/* if context switches to a context that did not cause
-			 * hang then start saving the rb contents as those
-			 * commands can be executed */
-			k_ctxt = kgsl_context_get(rb->device, val2);
-
-			if (k_ctxt) {
-				a_ctxt = ADRENO_CONTEXT(k_ctxt);
-
-			/* If we are changing to a good context and were not
-			 * copying commands then copy over commands to the good
-			 * context */
-			if (!copy_rb_contents && ((k_ctxt &&
-				!(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
-				!k_ctxt)) {
-				for (temp_idx = cmd_start_idx;
-					temp_idx < temp_rb_idx;
-					temp_idx++)
-					good_rb_buffer[good_rb_idx++] =
-						temp_rb_buffer[temp_idx];
-				ft_data->last_valid_ctx_id = val2;
-				copy_rb_contents = 1;
-				/* remove the good commands from bad buffer */
-				bad_rb_idx = last_bad_cmd_end_idx;
-			} else if (copy_rb_contents && k_ctxt &&
-				(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {
-
-				/* If we are changing back to a bad context
-				 * from good ctxt and were not copying commands
-				 * to bad ctxt then copy over commands to
-				 * the bad context */
-				for (temp_idx = cmd_start_idx;
-					temp_idx < temp_rb_idx;
-					temp_idx++)
-					bad_rb_buffer[bad_rb_idx++] =
-						temp_rb_buffer[temp_idx];
-				/* If we are changing to bad context then
-				 * remove the dwords we copied for this
-				 * sequence from the good buffer */
-				good_rb_idx = last_good_cmd_end_idx;
-				copy_rb_contents = 0;
-			}
-			}
-			kgsl_context_put(k_ctxt);
-		}
-
-		if (copy_rb_contents)
-			good_rb_buffer[good_rb_idx++] = val1;
-		else
-			bad_rb_buffer[bad_rb_idx++] = val1;
-
-		/* Copy both good and bad commands to temp buffer */
-		temp_rb_buffer[temp_rb_idx++] = val1;
-
-		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
-	}
-	*good_rb_size = good_rb_idx;
-	*bad_rb_size = bad_rb_idx;
-	*rb_size = temp_rb_idx;
-}
-
-void
-adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
-			int num_rb_contents)
-{
-	int i;
-	unsigned int *ringcmds;
-	unsigned int rcmd_gpu;
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
-
-	if (!num_rb_contents)
-		return;
-
-	if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
-		adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_RPTR, 0);
-		BUG_ON(num_rb_contents > rb->buffer_desc.size);
-	}
-	ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
-	rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
-	for (i = 0; i < num_rb_contents; i++)
-		GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, rb_buff[i]);
-	rb->wptr += num_rb_contents;
-	adreno_ringbuffer_submit(rb);
-}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 9634e32..3aa0101 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -27,7 +27,6 @@
 
 struct kgsl_device;
 struct kgsl_device_private;
-struct adreno_ft_data;
 
 #define GSL_RB_MEMPTRS_SCRATCH_COUNT	 8
 struct kgsl_rbmemptrs {
@@ -99,10 +98,11 @@
 
 int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
 				struct kgsl_context *context,
-				struct kgsl_ibdesc *ibdesc,
-				unsigned int numibs,
-				uint32_t *timestamp,
-				unsigned int flags);
+				struct kgsl_cmdbatch *cmdbatch,
+				uint32_t *timestamp);
+
+int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
+		struct kgsl_cmdbatch *cmdbatch);
 
 int adreno_ringbuffer_init(struct kgsl_device *device);
 
@@ -124,13 +124,6 @@
 
 void kgsl_cp_intrcallback(struct kgsl_device *device);
 
-void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
-				struct adreno_ft_data *ft_data);
-
-void
-adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
-			int num_rb_contents);
-
 unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
 						struct adreno_context *context,
 						unsigned int numcmds);
diff --git a/arch/arm/boot/dts/apq8074-v2-liquid.dts b/drivers/gpu/msm/adreno_trace.c
similarity index 70%
copy from arch/arm/boot/dts/apq8074-v2-liquid.dts
copy to drivers/gpu/msm/adreno_trace.c
index a0ecb50..607ba8c 100644
--- a/arch/arm/boot/dts/apq8074-v2-liquid.dts
+++ b/drivers/gpu/msm/adreno_trace.c
@@ -8,15 +8,11 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
+ *
  */
 
-/dts-v1/;
+#include "adreno.h"
 
-/include/ "apq8074-v2.dtsi"
-/include/ "msm8974-liquid.dtsi"
-
-/ {
-	model = "Qualcomm APQ 8074v2 LIQUID";
-	compatible = "qcom,apq8074-liquid", "qcom,apq8074", "qcom,liquid";
-	qcom,msm-id = <184 9 0x20000>;
-};
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "adreno_trace.h"
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
new file mode 100644
index 0000000..6079b61
--- /dev/null
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -0,0 +1,270 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_ADRENO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ADRENO_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE adreno_trace
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(adreno_cmdbatch_queued,
+	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int queued),
+	TP_ARGS(cmdbatch, queued),
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(unsigned int, timestamp)
+		__field(unsigned int, queued)
+		__field(unsigned int, flags)
+	),
+	TP_fast_assign(
+		__entry->id = cmdbatch->context->id;
+		__entry->timestamp = cmdbatch->timestamp;
+		__entry->queued = queued;
+		__entry->flags = cmdbatch->flags;
+	),
+	TP_printk(
+		"ctx=%u ts=%u queued=%u flags=%s",
+			__entry->id, __entry->timestamp, __entry->queued,
+			__entry->flags ? __print_flags(__entry->flags, "|",
+				{ KGSL_CONTEXT_SYNC, "SYNC" },
+				{ KGSL_CONTEXT_END_OF_FRAME, "EOF" })
+				: "none"
+	)
+);
+
+DECLARE_EVENT_CLASS(adreno_cmdbatch_template,
+	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+	TP_ARGS(cmdbatch, inflight),
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(unsigned int, timestamp)
+		__field(unsigned int, inflight)
+	),
+	TP_fast_assign(
+		__entry->id = cmdbatch->context->id;
+		__entry->timestamp = cmdbatch->timestamp;
+		__entry->inflight = inflight;
+	),
+	TP_printk(
+		"ctx=%u ts=%u inflight=%u",
+			__entry->id, __entry->timestamp,
+			__entry->inflight
+	)
+);
+
+DEFINE_EVENT(adreno_cmdbatch_template, adreno_cmdbatch_submitted,
+	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+	TP_ARGS(cmdbatch, inflight)
+);
+
+TRACE_EVENT(adreno_cmdbatch_retired,
+	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+	TP_ARGS(cmdbatch, inflight),
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(unsigned int, timestamp)
+		__field(unsigned int, inflight)
+		__field(unsigned int, recovery)
+	),
+	TP_fast_assign(
+		__entry->id = cmdbatch->context->id;
+		__entry->timestamp = cmdbatch->timestamp;
+		__entry->inflight = inflight;
+		__entry->recovery = cmdbatch->fault_recovery;
+	),
+	TP_printk(
+		"ctx=%u ts=%u inflight=%u recovery=%s",
+			__entry->id, __entry->timestamp,
+			__entry->inflight,
+			__entry->recovery ?
+				__print_flags(__entry->recovery, "|",
+				ADRENO_FT_TYPES) : "none"
+	)
+);
+
+TRACE_EVENT(adreno_cmdbatch_fault,
+	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int fault),
+	TP_ARGS(cmdbatch, fault),
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(unsigned int, timestamp)
+		__field(unsigned int, fault)
+	),
+	TP_fast_assign(
+		__entry->id = cmdbatch->context->id;
+		__entry->timestamp = cmdbatch->timestamp;
+		__entry->fault = fault;
+	),
+	TP_printk(
+		"ctx=%u ts=%u type=%s",
+			__entry->id, __entry->timestamp,
+			__print_symbolic(__entry->fault,
+				{ 0, "none" },
+				{ ADRENO_SOFT_FAULT, "soft" },
+				{ ADRENO_HARD_FAULT, "hard" },
+				{ ADRENO_TIMEOUT_FAULT, "timeout" })
+	)
+);
+
+TRACE_EVENT(adreno_cmdbatch_recovery,
+	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int action),
+	TP_ARGS(cmdbatch, action),
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(unsigned int, timestamp)
+		__field(unsigned int, action)
+	),
+	TP_fast_assign(
+		__entry->id = cmdbatch->context->id;
+		__entry->timestamp = cmdbatch->timestamp;
+		__entry->action = action;
+	),
+	TP_printk(
+		"ctx=%u ts=%u action=%s",
+			__entry->id, __entry->timestamp,
+			__print_symbolic(__entry->action, ADRENO_FT_TYPES)
+	)
+);
+
+DECLARE_EVENT_CLASS(adreno_drawctxt_template,
+	TP_PROTO(struct adreno_context *drawctxt),
+	TP_ARGS(drawctxt),
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+	),
+	TP_fast_assign(
+		__entry->id = drawctxt->base.id;
+	),
+	TP_printk("ctx=%u", __entry->id)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_sleep,
+	TP_PROTO(struct adreno_context *drawctxt),
+	TP_ARGS(drawctxt)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_wake,
+	TP_PROTO(struct adreno_context *drawctxt),
+	TP_ARGS(drawctxt)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, dispatch_queue_context,
+	TP_PROTO(struct adreno_context *drawctxt),
+	TP_ARGS(drawctxt)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_invalidate,
+	TP_PROTO(struct adreno_context *drawctxt),
+	TP_ARGS(drawctxt)
+);
+
+TRACE_EVENT(adreno_drawctxt_wait_start,
+	TP_PROTO(unsigned int id, unsigned int ts),
+	TP_ARGS(id, ts),
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(unsigned int, ts)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->ts = ts;
+	),
+	TP_printk(
+		"ctx=%u ts=%u",
+			__entry->id, __entry->ts
+	)
+);
+
+TRACE_EVENT(adreno_drawctxt_wait_done,
+	TP_PROTO(unsigned int id, unsigned int ts, int status),
+	TP_ARGS(id, ts, status),
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(unsigned int, ts)
+		__field(int, status)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->ts = ts;
+		__entry->status = status;
+	),
+	TP_printk(
+		"ctx=%u ts=%u status=%d",
+			__entry->id, __entry->ts, __entry->status
+	)
+);
+
+TRACE_EVENT(adreno_drawctxt_switch,
+	TP_PROTO(struct adreno_context *oldctx,
+		struct adreno_context *newctx,
+		unsigned int flags),
+	TP_ARGS(oldctx, newctx, flags),
+	TP_STRUCT__entry(
+		__field(unsigned int, oldctx)
+		__field(unsigned int, newctx)
+		__field(unsigned int, flags)
+	),
+	TP_fast_assign(
+		__entry->oldctx = oldctx ? oldctx->base.id : 0;
+		__entry->newctx = newctx ? newctx->base.id : 0;
+	),
+	TP_printk(
+		"oldctx=%u newctx=%u flags=%X",
+			__entry->oldctx, __entry->newctx, flags
+	)
+);
+
+TRACE_EVENT(adreno_gpu_fault,
+	TP_PROTO(unsigned int ctx, unsigned int ts,
+		unsigned int status, unsigned int rptr, unsigned int wptr,
+		unsigned int ib1base, unsigned int ib1size,
+		unsigned int ib2base, unsigned int ib2size),
+	TP_ARGS(ctx, ts, status, rptr, wptr, ib1base, ib1size, ib2base,
+		ib2size),
+	TP_STRUCT__entry(
+		__field(unsigned int, ctx)
+		__field(unsigned int, ts)
+		__field(unsigned int, status)
+		__field(unsigned int, rptr)
+		__field(unsigned int, wptr)
+		__field(unsigned int, ib1base)
+		__field(unsigned int, ib1size)
+		__field(unsigned int, ib2base)
+		__field(unsigned int, ib2size)
+	),
+	TP_fast_assign(
+		__entry->ctx = ctx;
+		__entry->ts = ts;
+		__entry->status = status;
+		__entry->rptr = rptr;
+		__entry->wptr = wptr;
+		__entry->ib1base = ib1base;
+		__entry->ib1size = ib1size;
+		__entry->ib2base = ib2base;
+		__entry->ib2size = ib2size;
+	),
+	TP_printk("ctx=%d ts=%d status=%X RB=%X/%X IB1=%X/%X IB2=%X/%X",
+		__entry->ctx, __entry->ts, __entry->status, __entry->wptr,
+		__entry->rptr, __entry->ib1base, __entry->ib1size,
+		__entry->ib2base, __entry->ib2size)
+);
+
+#endif /* _ADRENO_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
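
A hedged sketch of how the tracepoints declared in this new header are consumed; the wrapper function is hypothetical, and only the trace_* symbols generated by the declarations above are real:

#include "adreno.h"
#include "adreno_trace.h"

/* Hypothetical caller: emit the queue tracepoints when a cmdbatch is added to
 * a context queue.  The trace_* names come from the TRACE_EVENT and
 * DEFINE_EVENT declarations in adreno_trace.h.
 */
static void example_trace_queue(struct adreno_context *drawctxt,
		struct kgsl_cmdbatch *cmdbatch, unsigned int queued_count)
{
	trace_adreno_cmdbatch_queued(cmdbatch, queued_count);
	trace_dispatch_queue_context(drawctxt);
}
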
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 1ff989c..35a03de 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -14,6 +14,7 @@
 #include <linux/fb.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/list.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/interrupt.h>
@@ -62,59 +63,10 @@
 static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);
 
 /**
- * kgsl_hang_check() - Check for GPU hang
- * data: KGSL device structure
- *
- * This function is called every KGSL_TIMEOUT_PART time when
- * GPU is active to check for hang. If a hang is detected we
- * trigger fault tolerance.
- */
-void kgsl_hang_check(struct work_struct *work)
-{
-	struct kgsl_device *device = container_of(work, struct kgsl_device,
-							hang_check_ws);
-	static unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
-
-	mutex_lock(&device->mutex);
-
-	if (device->state == KGSL_STATE_ACTIVE) {
-
-		/* Check to see if the GPU is hung */
-		if (adreno_ft_detect(device, prev_reg_val))
-			adreno_dump_and_exec_ft(device);
-
-		mod_timer(&device->hang_timer,
-			(jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
-	}
-
-	mutex_unlock(&device->mutex);
-}
-
-/**
- * hang_timer() - Hang timer function
- * data: KGSL device structure
- *
- * This function is called when hang timer expires, in this
- * function we check if GPU is in active state and queue the
- * work on device workqueue to check for the hang. We restart
- * the timer after KGSL_TIMEOUT_PART time.
- */
-void hang_timer(unsigned long data)
-{
-	struct kgsl_device *device = (struct kgsl_device *) data;
-
-	if (device->state == KGSL_STATE_ACTIVE) {
-		/* Have work run in a non-interrupt context. */
-		queue_work(device->work_queue, &device->hang_check_ws);
-	}
-}
-
-/**
  * kgsl_trace_issueibcmds() - Call trace_issueibcmds by proxy
  * device: KGSL device
  * id: ID of the context submitting the command
- * ibdesc: Pointer to the list of IB descriptors
- * numib: Number of IBs in the list
+ * cmdbatch: Pointer to kgsl_cmdbatch describing these commands
  * timestamp: Timestamp assigned to the command batch
  * flags: Flags sent by the user
  * result: Result of the submission attempt
@@ -124,11 +76,11 @@
  * GPU specific modules.
  */
 void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
-		struct kgsl_ibdesc *ibdesc, int numibs,
+		struct kgsl_cmdbatch *cmdbatch,
 		unsigned int timestamp, unsigned int flags,
 		int result, unsigned int type)
 {
-	trace_kgsl_issueibcmds(device, id, ibdesc, numibs,
+	trace_kgsl_issueibcmds(device, id, cmdbatch,
 		timestamp, flags, result, type);
 }
 EXPORT_SYMBOL(kgsl_trace_issueibcmds);
@@ -499,7 +451,8 @@
 	context->device = dev_priv->device;
 	context->pagetable = dev_priv->process_priv->pagetable;
 	context->dev_priv = dev_priv;
-	context->pid = dev_priv->process_priv->pid;
+	context->pid = task_tgid_nr(current);
+	context->tid = task_pid_nr(current);
 
 	ret = kgsl_sync_timeline_create(context);
 	if (ret)
@@ -529,8 +482,8 @@
 EXPORT_SYMBOL(kgsl_context_init);
 
 /**
- * kgsl_context_detach - Release the "master" context reference
- * @context - The context that will be detached
+ * kgsl_context_detach() - Release the "master" context reference
+ * @context: The context that will be detached
  *
  * This is called when a context becomes unusable, because userspace
  * has requested for it to be destroyed. The context itself may
@@ -539,14 +492,12 @@
  * detached by checking the KGSL_CONTEXT_DETACHED bit in
  * context->priv.
  */
-void
-kgsl_context_detach(struct kgsl_context *context)
+int kgsl_context_detach(struct kgsl_context *context)
 {
-	struct kgsl_device *device;
-	if (context == NULL)
-		return;
+	int ret;
 
-	device = context->device;
+	if (context == NULL)
+		return -EINVAL;
 
 	/*
 	 * Mark the context as detached to keep others from using
@@ -554,19 +505,22 @@
 	 * we don't try to detach twice.
 	 */
 	if (test_and_set_bit(KGSL_CONTEXT_DETACHED, &context->priv))
-		return;
+		return -EINVAL;
 
-	trace_kgsl_context_detach(device, context);
+	trace_kgsl_context_detach(context->device, context);
 
-	device->ftbl->drawctxt_detach(context);
+	ret = context->device->ftbl->drawctxt_detach(context);
+
 	/*
 	 * Cancel events after the device-specific context is
 	 * detached, to avoid possibly freeing memory while
 	 * it is still in use by the GPU.
 	 */
-	kgsl_context_cancel_events(device, context);
+	kgsl_context_cancel_events(context->device, context);
 
 	kgsl_context_put(context);
+
+	return ret;
 }
 
 void
@@ -578,6 +532,8 @@
 
 	trace_kgsl_context_destroy(device, context);
 
+	BUG_ON(!kgsl_context_detached(context));
+
 	write_lock(&device->context_lock);
 	if (context->id != KGSL_CONTEXT_INVALID) {
 		idr_remove(&device->context_idr, context->id);
@@ -648,11 +604,12 @@
 	policy_saved = device->pwrscale.policy;
 	device->pwrscale.policy = NULL;
 	kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
-	/*
-	 * Make sure no user process is waiting for a timestamp
-	 * before supending.
-	 */
-	kgsl_active_count_wait(device);
+
+	/* Tell the device to drain the submission queue */
+	device->ftbl->drain(device);
+
+	/* Wait for the active count to hit zero */
+	kgsl_active_count_wait(device, 0);
 
 	/*
 	 * An interrupt could have snuck in and requested NAP in
@@ -662,13 +619,10 @@
 
 	/* Don't let the timer wake us during suspended sleep. */
 	del_timer_sync(&device->idle_timer);
-	del_timer_sync(&device->hang_timer);
 	switch (device->state) {
 		case KGSL_STATE_INIT:
 			break;
 		case KGSL_STATE_ACTIVE:
-			/* Wait for the device to become idle */
-			device->ftbl->idle(device);
 		case KGSL_STATE_NAP:
 		case KGSL_STATE_SLEEP:
 			/* make sure power is on to stop the device */
@@ -812,7 +766,7 @@
 	list_del(&private->list);
 	mutex_unlock(&kgsl_driver.process_mutex);
 
-	if (private->kobj.ktype)
+	if (private->kobj.state_in_sysfs)
 		kgsl_process_uninit_sysfs(private);
 	if (private->debug_root)
 		debugfs_remove_recursive(private->debug_root);
@@ -926,21 +880,23 @@
 
 		pt_name = task_tgid_nr(current);
 		private->pagetable = kgsl_mmu_getpagetable(mmu, pt_name);
-		if (private->pagetable == NULL) {
-			mutex_unlock(&private->process_private_mutex);
-			kgsl_put_process_private(cur_dev_priv->device,
-						private);
-			return NULL;
-		}
+		if (private->pagetable == NULL)
+			goto error;
 	}
 
-	kgsl_process_init_sysfs(private);
-	kgsl_process_init_debugfs(private);
+	if (kgsl_process_init_sysfs(cur_dev_priv->device, private))
+		goto error;
+	if (kgsl_process_init_debugfs(private))
+		goto error;
 
 done:
 	mutex_unlock(&private->process_private_mutex);
-
 	return private;
+
+error:
+	mutex_unlock(&private->process_private_mutex);
+	kgsl_put_process_private(cur_dev_priv->device, private);
+	return NULL;
 }
 
 int kgsl_close_device(struct kgsl_device *device)
@@ -948,7 +904,13 @@
 	int result = 0;
 	device->open_count--;
 	if (device->open_count == 0) {
+
+		/* Wait for the active count to go to 1 */
+		kgsl_active_count_wait(device, 1);
+
+		/* Fail if the wait times out */
 		BUG_ON(atomic_read(&device->active_cnt) > 1);
+
 		result = device->ftbl->stop(device);
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
 		/*
@@ -986,8 +948,16 @@
 		if (context == NULL)
 			break;
 
-		if (context->dev_priv == dev_priv)
+		if (context->dev_priv == dev_priv) {
+			/*
+			 * Hold a reference to the context in case somebody
+			 * tries to put it while we are detaching
+			 */
+
+			_kgsl_context_get(context);
 			kgsl_context_detach(context);
+			kgsl_context_put(context);
+		}
 
 		next = next + 1;
 	}
@@ -1001,6 +971,7 @@
 
 	result = kgsl_close_device(device);
 	mutex_unlock(&device->mutex);
+
 	kfree(dev_priv);
 
 	kgsl_put_process_private(device, private);
@@ -1033,7 +1004,6 @@
 		 * Make sure the gates are open, so they don't block until
 		 * we start suspend or FT.
 		 */
-		complete_all(&device->ft_gate);
 		complete_all(&device->hwaccess_gate);
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
 		kgsl_active_count_put(device);
@@ -1429,93 +1399,610 @@
 	return result;
 }
 
-static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
-				      unsigned int cmd, void *data)
-{
-	int result = 0;
-	int i = 0;
-	struct kgsl_ringbuffer_issueibcmds *param = data;
-	struct kgsl_ibdesc *ibdesc;
-	struct kgsl_context *context;
+/*
+ * KGSL command batch management
+ * A command batch is a single submission from userland.  The cmdbatch
+ * encapsulates everything about the submission : command buffers, flags and
+ * sync points.
+ *
+ * Sync points are events that need to expire before the
+ * cmdbatch can be queued to the hardware. For each sync point a
+ * kgsl_cmdbatch_sync_event struct is created and added to a list in the
+ * cmdbatch. There can be multiple types of events, both internal ones (GPU
+ * events) and external triggers. As the events expire, the struct is deleted
+ * from the list. The GPU will submit the command batch as soon as the list
+ * goes empty, indicating that all the sync points have been met.
+ */
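
A minimal sketch (not part of the patch) of the readiness check this comment implies, using the cmdbatch lock and synclist fields manipulated by the code below:

/* Sketch only: a cmdbatch is ready for hardware submission once its synclist
 * has drained.  cmdbatch->lock and cmdbatch->synclist are the fields used by
 * the sync-event code in this file.
 */
static bool example_cmdbatch_ready(struct kgsl_cmdbatch *cmdbatch)
{
	bool ready;

	spin_lock(&cmdbatch->lock);
	ready = list_empty(&cmdbatch->synclist);
	spin_unlock(&cmdbatch->lock);

	return ready;
}
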
 
-	context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
-	if (context == NULL) {
-		result = -EINVAL;
+/**
+ * struct kgsl_cmdbatch_sync_event
+ * @type: Syncpoint type
+ * @node: Local list node for the cmdbatch sync point list
+ * @cmdbatch: Pointer to the cmdbatch that owns the sync event
+ * @context: Pointer to the KGSL context that owns the cmdbatch
+ * @timestamp: Pending timestamp for the event
+ * @handle: Pointer to a sync fence handle
+ * @device: Pointer to the KGSL device
+ * @lock: Spin lock to protect the sync event list
+ */
+struct kgsl_cmdbatch_sync_event {
+	int type;
+	struct list_head node;
+	struct kgsl_cmdbatch *cmdbatch;
+	struct kgsl_context *context;
+	unsigned int timestamp;
+	struct kgsl_sync_fence_waiter *handle;
+	struct kgsl_device *device;
+	spinlock_t lock;
+};
+
+/**
+ * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object
+ * @kref: Pointer to the kref structure for this object
+ *
+ * Actually destroy a command batch object.  Called from kgsl_cmdbatch_put
+ */
+void kgsl_cmdbatch_destroy_object(struct kref *kref)
+{
+	struct kgsl_cmdbatch *cmdbatch = container_of(kref,
+		struct kgsl_cmdbatch, refcount);
+
+	kgsl_context_put(cmdbatch->context);
+	kfree(cmdbatch->ibdesc);
+
+	kfree(cmdbatch);
+}
+EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object);
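
kgsl_cmdbatch_put() itself is not shown in this hunk; it is presumably a thin kref wrapper along these lines:

/* Assumed helper (defined elsewhere in the driver, not in this hunk): drop a
 * reference and destroy the cmdbatch when the count reaches zero.
 */
static inline void example_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch)
{
	if (cmdbatch)
		kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object);
}
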
+
+/*
+ * a generic function to retire a pending sync event and (possibly)
+ * kick the dispatcher
+ */
+static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
+	struct kgsl_cmdbatch_sync_event *event)
+{
+	int sched = 0;
+
+	spin_lock(&event->cmdbatch->lock);
+	list_del(&event->node);
+	sched = list_empty(&event->cmdbatch->synclist) ? 1 : 0;
+	spin_unlock(&event->cmdbatch->lock);
+
+	/*
+	 * if this is the last event in the list then tell
+	 * the GPU device that the cmdbatch can be submitted
+	 */
+
+	if (sched && device->ftbl->drawctxt_sched)
+		device->ftbl->drawctxt_sched(device, event->cmdbatch->context);
+}
+
+
+/*
+ * This function is called by the GPU event when the sync event timestamp
+ * expires
+ */
+static void kgsl_cmdbatch_sync_func(struct kgsl_device *device, void *priv,
+		u32 id, u32 timestamp, u32 type)
+{
+	struct kgsl_cmdbatch_sync_event *event = priv;
+
+	kgsl_cmdbatch_sync_expire(device, event);
+
+	kgsl_context_put(event->context);
+	kgsl_cmdbatch_put(event->cmdbatch);
+
+	kfree(event);
+}
+
+/**
+ * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure
+ * @cmdbatch: Pointer to the command batch object to destroy
+ *
+ * Start the process of destroying a command batch.  Cancel any pending events
+ * and decrement the refcount.
+ */
+void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
+{
+	struct kgsl_cmdbatch_sync_event *event, *tmp;
+
+	spin_lock(&cmdbatch->lock);
+
+	/* Delete any pending sync points for this command batch */
+	list_for_each_entry_safe(event, tmp, &cmdbatch->synclist, node) {
+
+		if (event->type == KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP) {
+			/* Cancel the event if it still exists */
+			kgsl_cancel_event(cmdbatch->device, event->context,
+				event->timestamp, kgsl_cmdbatch_sync_func,
+				event);
+		} else if (event->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE) {
+			if (kgsl_sync_fence_async_cancel(event->handle)) {
+				list_del(&event->node);
+				kfree(event);
+				kgsl_cmdbatch_put(cmdbatch);
+			}
+		}
+	}
+
+	spin_unlock(&cmdbatch->lock);
+	kgsl_cmdbatch_put(cmdbatch);
+}
+EXPORT_SYMBOL(kgsl_cmdbatch_destroy);
+
+/*
+ * A callback that gets registered with kgsl_sync_fence_async_wait and is fired
+ * when a fence expires
+ */
+static void kgsl_cmdbatch_sync_fence_func(void *priv)
+{
+	struct kgsl_cmdbatch_sync_event *event = priv;
+
+	spin_lock(&event->lock);
+	kgsl_cmdbatch_sync_expire(event->device, event);
+	kgsl_cmdbatch_put(event->cmdbatch);
+	spin_unlock(&event->lock);
+	kfree(event);
+}
+
+/* kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint
+ * @device: KGSL device
+ * @cmdbatch: KGSL cmdbatch to add the sync point to
+ * @priv: Private structure passed by the user
+ *
+ * Add a new fence syncpoint to the cmdbatch.
+ */
+static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device,
+		struct kgsl_cmdbatch *cmdbatch, void *priv)
+{
+	struct kgsl_cmd_syncpoint_fence *sync = priv;
+	struct kgsl_cmdbatch_sync_event *event;
+
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
+
+	if (event == NULL)
+		return -ENOMEM;
+
+	kref_get(&cmdbatch->refcount);
+
+	event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
+	event->cmdbatch = cmdbatch;
+	event->device = device;
+	spin_lock_init(&event->lock);
+
+	/*
+	 * Add it to the list first to account for the possibility that the
+	 * callback will happen immediately after the call to
+	 * kgsl_sync_fence_async_wait
+	 */
+
+	spin_lock(&cmdbatch->lock);
+	list_add(&event->node, &cmdbatch->synclist);
+	spin_unlock(&cmdbatch->lock);
+
+	/*
+	 * There is a distinct race condition that can occur if the fence
+	 * callback is fired before the function has a chance to return.  The
+	 * event struct would be freed before we could write event->handle and
+	 * hilarity would ensue.  Guard against this by protecting the call to
+	 * kgsl_sync_fence_async_wait and the kfree in the callback with a lock.
+	 */
+
+	spin_lock(&event->lock);
+
+	event->handle = kgsl_sync_fence_async_wait(sync->fd,
+		kgsl_cmdbatch_sync_fence_func, event);
+
+
+	if (IS_ERR_OR_NULL(event->handle)) {
+		int ret = PTR_ERR(event->handle);
+
+		spin_lock(&cmdbatch->lock);
+		list_del(&event->node);
+		spin_unlock(&cmdbatch->lock);
+
+		kgsl_cmdbatch_put(cmdbatch);
+		spin_unlock(&event->lock);
+		kfree(event);
+
+		return ret;
+	}
+
+	spin_unlock(&event->lock);
+	return 0;
+}
+
+/* kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch
+ * @device: KGSL device
+ * @cmdbatch: KGSL cmdbatch to add the sync point to
+ * @priv: Private structure passed by the user
+ *
+ * Add a new sync point timestamp event to the cmdbatch.
+ */
+static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
+		struct kgsl_cmdbatch *cmdbatch, void *priv)
+{
+	struct kgsl_cmd_syncpoint_timestamp *sync = priv;
+	struct kgsl_context *context = kgsl_context_get(cmdbatch->device,
+		sync->context_id);
+	struct kgsl_cmdbatch_sync_event *event;
+	int ret = -EINVAL;
+
+	if (context == NULL)
+		return -EINVAL;
+
+	/* Sanity check - you can't create a sync point on your own context */
+	if (context == cmdbatch->context) {
+		KGSL_DRV_ERR(device,
+			"Cannot create a sync point on your own context %d\n",
+			context->id);
 		goto done;
 	}
 
-	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
-		if (!param->numibs) {
-			result = -EINVAL;
-			goto done;
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
+	if (event == NULL) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	kref_get(&cmdbatch->refcount);
+
+	event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
+	event->cmdbatch = cmdbatch;
+	event->context = context;
+	event->timestamp = sync->timestamp;
+
+	spin_lock(&cmdbatch->lock);
+	list_add(&event->node, &cmdbatch->synclist);
+	spin_unlock(&cmdbatch->lock);
+
+	mutex_lock(&device->mutex);
+	kgsl_active_count_get(device);
+	ret = kgsl_add_event(device, context->id, sync->timestamp,
+		kgsl_cmdbatch_sync_func, event, NULL);
+	kgsl_active_count_put(device);
+	mutex_unlock(&device->mutex);
+
+	if (ret) {
+		spin_lock(&cmdbatch->lock);
+		list_del(&event->node);
+		spin_unlock(&cmdbatch->lock);
+
+		kgsl_cmdbatch_put(cmdbatch);
+		kfree(event);
+	}
+
+done:
+	if (ret)
+		kgsl_context_put(context);
+
+	return ret;
+}
+
+/**
+ * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch
+ * @device: Pointer to the KGSL device struct for the GPU
+ * @cmdbatch: Pointer to the cmdbatch
+ * @sync: Pointer to the user-specified struct defining the syncpoint
+ *
+ * Create a new sync point in the cmdbatch based on the user specified
+ * parameters
+ */
+static int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
+	struct kgsl_cmdbatch *cmdbatch,
+	struct kgsl_cmd_syncpoint *sync)
+{
+	void *priv;
+	int ret, psize;
+	int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch,
+			void *priv);
+
+	switch (sync->type) {
+	case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
+		psize = sizeof(struct kgsl_cmd_syncpoint_timestamp);
+		func = kgsl_cmdbatch_add_sync_timestamp;
+		break;
+	case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
+		psize = sizeof(struct kgsl_cmd_syncpoint_fence);
+		func = kgsl_cmdbatch_add_sync_fence;
+		break;
+	default:
+		KGSL_DRV_ERR(device, "Invalid sync type 0x%x\n", sync->type);
+		return -EINVAL;
+	}
+
+	if (sync->size != psize) {
+		KGSL_DRV_ERR(device, "Invalid sync size %d\n", sync->size);
+		return -EINVAL;
+	}
+
+	priv = kzalloc(sync->size, GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(priv, sync->priv, sync->size)) {
+		kfree(priv);
+		return -EFAULT;
+	}
+
+	ret = func(device, cmdbatch, priv);
+	kfree(priv);
+
+	return ret;
+}
+
+/**
+ * kgsl_cmdbatch_create() - Create a new cmdbatch structure
+ * @device: Pointer to a KGSL device struct
+ * @context: Pointer to a KGSL context struct
+ * @numibs: Number of indirect buffers to make room for in the cmdbatch
+ *
+ * Allocate a new cmdbatch structure and add enough room to store the list of
+ * indirect buffers
+ */
+static struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
+		struct kgsl_context *context, unsigned int flags,
+		unsigned int numibs)
+{
+	struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);
+	if (cmdbatch == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	if (!(flags & KGSL_CONTEXT_SYNC)) {
+		cmdbatch->ibdesc = kzalloc(sizeof(*cmdbatch->ibdesc) * numibs,
+			GFP_KERNEL);
+		if (cmdbatch->ibdesc == NULL) {
+			kfree(cmdbatch);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	kref_init(&cmdbatch->refcount);
+	INIT_LIST_HEAD(&cmdbatch->synclist);
+	spin_lock_init(&cmdbatch->lock);
+
+	cmdbatch->device = device;
+	cmdbatch->ibcount = (flags & KGSL_CONTEXT_SYNC) ? 0 : numibs;
+	cmdbatch->context = context;
+	cmdbatch->flags = flags & ~KGSL_CONTEXT_SUBMIT_IB_LIST;
+
+	/*
+	 * Increase the reference count on the context so it doesn't disappear
+	 * during the lifetime of this command batch
+	 */
+	_kgsl_context_get(context);
+
+	return cmdbatch;
+}
+
+/**
+ * _kgsl_cmdbatch_verify() - Perform a quick sanity check on a command batch
+ * @dev_priv: Pointer to the device private struct for the process that owns
+ * the command batch
+ * @cmdbatch: Pointer to the command batch to verify
+ *
+ * Do a quick sanity test on the list of indirect buffers in a command batch,
+ * verifying that the size and GPU address of each one are valid.
+ */
+static bool _kgsl_cmdbatch_verify(struct kgsl_device_private *dev_priv,
+	struct kgsl_cmdbatch *cmdbatch)
+{
+	int i;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+
+	for (i = 0; i < cmdbatch->ibcount; i++) {
+		if (cmdbatch->ibdesc[i].sizedwords == 0) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"invalid size ctx %d ib(%d) %X/%X\n",
+				cmdbatch->context->id, i,
+				cmdbatch->ibdesc[i].gpuaddr,
+				cmdbatch->ibdesc[i].sizedwords);
+
+			return false;
 		}
 
+		if (!kgsl_mmu_gpuaddr_in_range(private->pagetable,
+			cmdbatch->ibdesc[i].gpuaddr)) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"Invalid address ctx %d ib(%d) %X/%X\n",
+				cmdbatch->context->id, i,
+				cmdbatch->ibdesc[i].gpuaddr,
+				cmdbatch->ibdesc[i].sizedwords);
+
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * _kgsl_cmdbatch_create_legacy() - Create a cmdbatch from a legacy ioctl struct
+ * @device: Pointer to the KGSL device struct for the GPU
+ * @context: Pointer to the KGSL context that issued the command batch
+ * @param: Pointer to the kgsl_ringbuffer_issueibcmds struct that the user sent
+ *
+ * Create a command batch from the legacy issueibcmds format.
+ */
+static struct kgsl_cmdbatch *_kgsl_cmdbatch_create_legacy(
+		struct kgsl_device *device,
+		struct kgsl_context *context,
+		struct kgsl_ringbuffer_issueibcmds *param)
+{
+	struct kgsl_cmdbatch *cmdbatch =
+		kgsl_cmdbatch_create(device, context, param->flags, 1);
+
+	if (IS_ERR(cmdbatch))
+		return cmdbatch;
+
+	cmdbatch->ibdesc[0].gpuaddr = param->ibdesc_addr;
+	cmdbatch->ibdesc[0].sizedwords = param->numibs;
+	cmdbatch->ibcount = 1;
+	cmdbatch->flags = param->flags;
+
+	return cmdbatch;
+}
+
+/**
+ * _kgsl_cmdbatch_create() - Create a cmdbatch from a ioctl struct
+ * @device: Pointer to the KGSL device struct for the GPU
+ * @context: Pointer to the KGSL context that issued the command batch
+ * @flags: Flags passed in from the user command
+ * @cmdlist: Pointer to the list of commands from the user
+ * @numcmds: Number of commands in the list
+ * @synclist: Pointer to the list of syncpoints from the user
+ * @numsyncs: Number of syncpoints in the list
+ *
+ * Create a command batch from the standard issueibcmds format sent by the user.
+ */
+static struct kgsl_cmdbatch *_kgsl_cmdbatch_create(struct kgsl_device *device,
+		struct kgsl_context *context,
+		unsigned int flags,
+		unsigned int cmdlist, unsigned int numcmds,
+		unsigned int synclist, unsigned int numsyncs)
+{
+	struct kgsl_cmdbatch *cmdbatch =
+		kgsl_cmdbatch_create(device, context, flags, numcmds);
+	int ret = 0;
+
+	if (IS_ERR(cmdbatch))
+		return cmdbatch;
+
+	if (!(flags & KGSL_CONTEXT_SYNC)) {
+		if (copy_from_user(cmdbatch->ibdesc, (void __user *) cmdlist,
+			sizeof(struct kgsl_ibdesc) * numcmds)) {
+			ret = -EFAULT;
+			goto done;
+		}
+	}
+
+	if (synclist && numsyncs) {
+		struct kgsl_cmd_syncpoint sync;
+		void  __user *uptr = (void __user *) synclist;
+		int i;
+
+		for (i = 0; i < numsyncs; i++) {
+			memset(&sync, 0, sizeof(sync));
+
+			if (copy_from_user(&sync, uptr, sizeof(sync))) {
+				ret = -EFAULT;
+				break;
+			}
+
+			ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+
+			if (ret)
+				break;
+
+			uptr += sizeof(sync);
+		}
+	}
+
+done:
+	if (ret) {
+		kgsl_cmdbatch_destroy(cmdbatch);
+		return ERR_PTR(ret);
+	}
+
+	return cmdbatch;
+}
+
+static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
+				      unsigned int cmd, void *data)
+{
+	struct kgsl_ringbuffer_issueibcmds *param = data;
+	struct kgsl_device *device = dev_priv->device;
+	struct kgsl_context *context;
+	struct kgsl_cmdbatch *cmdbatch;
+	long result = -EINVAL;
+
+	/* The legacy functions don't support synchronization commands */
+	if (param->flags & KGSL_CONTEXT_SYNC)
+		return -EINVAL;
+
+	/* Get the context */
+	context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
+	if (context == NULL)
+		goto done;
+
+	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
 		/*
-		 * Put a reasonable upper limit on the number of IBs that can be
-		 * submitted
+		 * Do a quick sanity check on the number of IBs in the
+		 * submission
 		 */
 
-		if (param->numibs > 10000) {
-			result = -EINVAL;
+		if (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS)
 			goto done;
-		}
 
-		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
-					GFP_KERNEL);
-		if (!ibdesc) {
-			KGSL_MEM_ERR(dev_priv->device,
-				"kzalloc(%d) failed\n",
-				sizeof(struct kgsl_ibdesc) * param->numibs);
-			result = -ENOMEM;
-			goto done;
-		}
+		cmdbatch = _kgsl_cmdbatch_create(device, context, param->flags,
+				param->ibdesc_addr, param->numibs, 0, 0);
+	} else
+		cmdbatch = _kgsl_cmdbatch_create_legacy(device, context, param);
 
-		if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
-				sizeof(struct kgsl_ibdesc) * param->numibs)) {
-			result = -EFAULT;
-			KGSL_DRV_ERR(dev_priv->device,
-				"copy_from_user failed\n");
-			goto free_ibdesc;
-		}
-	} else {
-		KGSL_DRV_INFO(dev_priv->device,
-			"Using single IB submission mode for ib submission\n");
-		/* If user space driver is still using the old mode of
-		 * submitting single ib then we need to support that as well */
-		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
-		if (!ibdesc) {
-			KGSL_MEM_ERR(dev_priv->device,
-				"kzalloc(%d) failed\n",
-				sizeof(struct kgsl_ibdesc));
-			result = -ENOMEM;
-			goto done;
-		}
-		ibdesc[0].gpuaddr = param->ibdesc_addr;
-		ibdesc[0].sizedwords = param->numibs;
-		param->numibs = 1;
+	if (IS_ERR(cmdbatch)) {
+		result = PTR_ERR(cmdbatch);
+		goto done;
 	}
 
-	for (i = 0; i < param->numibs; i++) {
-		struct kgsl_pagetable *pt = dev_priv->process_priv->pagetable;
+	/* Run basic sanity checking on the command */
+	if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch))
+		goto free_cmdbatch;
 
-		if (!kgsl_mmu_gpuaddr_in_range(pt, ibdesc[i].gpuaddr)) {
-			result = -ERANGE;
-			KGSL_DRV_ERR(dev_priv->device,
-				     "invalid ib base GPU virtual addr %x\n",
-				     ibdesc[i].gpuaddr);
-			goto free_ibdesc;
-		}
+	result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
+		cmdbatch, &param->timestamp);
+
+free_cmdbatch:
+	if (result)
+		kgsl_cmdbatch_destroy(cmdbatch);
+
+done:
+	kgsl_context_put(context);
+	return result;
+}
+
+static long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
+				      unsigned int cmd, void *data)
+{
+	struct kgsl_submit_commands *param = data;
+	struct kgsl_device *device = dev_priv->device;
+	struct kgsl_context *context;
+	struct kgsl_cmdbatch *cmdbatch;
+
+	long result = -EINVAL;
+
+	/* The number of IBs is completely ignored for sync commands */
+	if (!(param->flags & KGSL_CONTEXT_SYNC)) {
+		if (param->numcmds == 0 || param->numcmds > KGSL_MAX_NUMIBS)
+			return -EINVAL;
+	} else if (param->numcmds != 0) {
+		KGSL_DEV_ERR_ONCE(device,
+			"Commands specified with the SYNC flag.  They will be ignored\n");
 	}
 
-	result = dev_priv->device->ftbl->issueibcmds(dev_priv,
-					     context,
-					     ibdesc,
-					     param->numibs,
-					     &param->timestamp,
-					     param->flags);
+	context = kgsl_context_get_owner(dev_priv, param->context_id);
+	if (context == NULL)
+		return -EINVAL;
 
-free_ibdesc:
-	kfree(ibdesc);
+	cmdbatch = _kgsl_cmdbatch_create(device, context, param->flags,
+		(unsigned int) param->cmdlist, param->numcmds,
+		(unsigned int) param->synclist, param->numsyncs);
+
+	if (IS_ERR(cmdbatch)) {
+		result = PTR_ERR(cmdbatch);
+		goto done;
+	}
+
+	/* Run basic sanity checking on the command */
+	if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch))
+		goto free_cmdbatch;
+
+	result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
+		cmdbatch, &param->timestamp);
+
+free_cmdbatch:
+	if (result)
+		kgsl_cmdbatch_destroy(cmdbatch);
+
 done:
 	kgsl_context_put(context);
 	return result;
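
For context, a hedged userspace sketch of driving the new IOCTL_KGSL_SUBMIT_COMMANDS path with one IB and one timestamp sync point. The uapi header location and exact struct layouts are assumptions; only the ioctl name and the field, flag and syncpoint-type names used here appear in the kernel code above. fd is assumed to be an open kgsl device file descriptor with ctx and other_ctx already created.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>	/* assumed uapi header location */

/* Submit one IB on 'ctx' that waits for 'other_ts' to retire on 'other_ctx'.
 * On success the kernel writes the assigned timestamp back into the struct.
 */
static int example_submit(int fd, unsigned int ctx, unsigned int other_ctx,
		unsigned int other_ts, struct kgsl_ibdesc *ib,
		unsigned int *out_ts)
{
	struct kgsl_cmd_syncpoint_timestamp ts_priv;
	struct kgsl_cmd_syncpoint sync;
	struct kgsl_submit_commands param;

	memset(&ts_priv, 0, sizeof(ts_priv));
	ts_priv.context_id = other_ctx;
	ts_priv.timestamp = other_ts;

	memset(&sync, 0, sizeof(sync));
	sync.type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
	sync.priv = &ts_priv;
	sync.size = sizeof(ts_priv);

	memset(&param, 0, sizeof(param));
	param.context_id = ctx;
	param.cmdlist = ib;
	param.numcmds = 1;
	param.synclist = &sync;
	param.numsyncs = 1;

	if (ioctl(fd, IOCTL_KGSL_SUBMIT_COMMANDS, &param))
		return -1;

	*out_ts = param.timestamp;
	return 0;
}
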
@@ -1656,14 +2143,11 @@
 {
 	struct kgsl_drawctxt_destroy *param = data;
 	struct kgsl_context *context;
-	long result = -EINVAL;
+	long result;
 
 	context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
 
-	if (context) {
-		kgsl_context_detach(context);
-		result = 0;
-	}
+	result = kgsl_context_detach(context);
 
 	kgsl_context_put(context);
 	return result;
@@ -2770,8 +3254,9 @@
 			kgsl_ioctl_device_waittimestamp_ctxtid,
 			KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
-			kgsl_ioctl_rb_issueibcmds,
-			KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+			kgsl_ioctl_rb_issueibcmds, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_SUBMIT_COMMANDS,
+			kgsl_ioctl_submit_commands, 0),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
 			kgsl_ioctl_cmdstream_readtimestamp,
 			KGSL_IOCTL_LOCK),
@@ -3453,7 +3938,6 @@
 
 
 	setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
-	setup_timer(&device->hang_timer, hang_timer, (unsigned long) device);
 	status = kgsl_create_device_workqueue(device);
 	if (status)
 		goto error_pwrctrl_close;
@@ -3509,11 +3993,10 @@
 	/* For a manual dump, make sure that the system is idle */
 
 	if (manual) {
-		kgsl_active_count_wait(device);
+		kgsl_active_count_wait(device, 0);
 
 		if (device->state == KGSL_STATE_ACTIVE)
 			kgsl_idle(device);
-
 	}
 
 	if (device->pm_dump_enable) {
@@ -3527,13 +4010,12 @@
 			pwr->power_flags, pwr->active_pwrlevel);
 
 		KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
-			pwr->interval_timeout);
+				pwr->interval_timeout);
 
 	}
 
 	/* Disable the idle timer so we don't get interrupted */
 	del_timer_sync(&device->idle_timer);
-	del_timer_sync(&device->hang_timer);
 
 	/* Force on the clocks */
 	kgsl_pwrctrl_wake(device);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 8d390a9..2e9d52e 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -78,6 +78,8 @@
 
 #define KGSL_MEMFREE_HIST_SIZE	((int)(PAGE_SIZE * 2))
 
+#define KGSL_MAX_NUMIBS 100000
+
 struct kgsl_memfree_hist_elem {
 	unsigned int pid;
 	unsigned int gpuaddr;
@@ -141,6 +143,7 @@
 
 struct kgsl_pagetable;
 struct kgsl_memdesc;
+struct kgsl_cmdbatch;
 
 struct kgsl_memdesc_ops {
 	int (*vmflags)(struct kgsl_memdesc *);
@@ -205,7 +208,6 @@
 #define MMU_CONFIG 1
 #endif
 
-void kgsl_hang_check(struct work_struct *work);
 void kgsl_mem_entry_destroy(struct kref *kref);
 int kgsl_postmortem_dump(struct kgsl_device *device, int manual);
 
@@ -237,7 +239,7 @@
 		unsigned int value);
 
 void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
-		struct kgsl_ibdesc *ibdesc, int numibs,
+		struct kgsl_cmdbatch *cmdbatch,
 		unsigned int timestamp, unsigned int flags,
 		int result, unsigned int type);
 
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 2a77632..110264b 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -123,7 +123,6 @@
 KGSL_DEBUGFS_LOG(ctxt_log);
 KGSL_DEBUGFS_LOG(mem_log);
 KGSL_DEBUGFS_LOG(pwr_log);
-KGSL_DEBUGFS_LOG(ft_log);
 
 static int memfree_hist_print(struct seq_file *s, void *unused)
 {
@@ -185,7 +184,6 @@
 	device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
 	device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
 	device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
-	device->ft_log = KGSL_LOG_LEVEL_DEFAULT;
 
 	debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
 			    &cmd_log_fops);
@@ -199,8 +197,6 @@
 				&pwr_log_fops);
 	debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
 				&memfree_hist_fops);
-	debugfs_create_file("log_level_ft", 0644, device->d_debugfs, device,
-				&ft_log_fops);
 
 	/* Create postmortem dump control files */
 
@@ -323,16 +319,53 @@
 	.release = single_release,
 };
 
-void
+
+/**
+ * kgsl_process_init_debugfs() - Initialize debugfs for a process
+ * @private: Pointer to process private structure created for the process
+ *
+ * @returns: 0 on success, error code otherwise
+ *
+ * kgsl_process_init_debugfs() is called at the time of creating the
+ * process struct when a process opens kgsl device for the first time.
+ * The function creates the debugfs files for the process. If debugfs is
+ * disabled in the kernel, that error is ignored and success is returned.
+ */
+int
 kgsl_process_init_debugfs(struct kgsl_process_private *private)
 {
 	unsigned char name[16];
+	int ret = 0;
+	struct dentry *dentry;
 
 	snprintf(name, sizeof(name), "%d", private->pid);
 
 	private->debug_root = debugfs_create_dir(name, proc_d_debugfs);
-	debugfs_create_file("mem", 0400, private->debug_root, private,
+
+	if (!private->debug_root)
+		return -EINVAL;
+
+	/*
+	 * debugfs_create_dir() and debugfs_create_file() both
+	 * return -ENODEV if debugfs is disabled in the kernel.
+	 * We distinguish between those calls failing outright
+	 * and debugfs simply being disabled: in the first case
+	 * we abort creation of the process private struct, in
+	 * the second we continue unchanged.  So if debugfs is
+	 * disabled in the kernel, return success.
+	 */
+	dentry = debugfs_create_file("mem", 0400, private->debug_root, private,
 			    &process_mem_fops);
+
+	if (IS_ERR(dentry)) {
+		ret = PTR_ERR(dentry);
+
+		if (ret == -ENODEV)
+			ret = 0;
+	}
+
+	return ret;
 }
 
 void kgsl_core_debugfs_init(void)
diff --git a/drivers/gpu/msm/kgsl_debugfs.h b/drivers/gpu/msm/kgsl_debugfs.h
index ae5601f..b2f137c 100644
--- a/drivers/gpu/msm/kgsl_debugfs.h
+++ b/drivers/gpu/msm/kgsl_debugfs.h
@@ -21,7 +21,7 @@
 void kgsl_core_debugfs_init(void);
 void kgsl_core_debugfs_close(void);
 
-void kgsl_device_debugfs_init(struct kgsl_device *device);
+int kgsl_device_debugfs_init(struct kgsl_device *device);
 
 extern struct dentry *kgsl_debugfs_dir;
 static inline struct dentry *kgsl_get_debugfs_dir(void)
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 9629d3f..cd9c4f7 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -13,6 +13,7 @@
 #ifndef __KGSL_DEVICE_H
 #define __KGSL_DEVICE_H
 
+#include <linux/slab.h>
 #include <linux/idr.h>
 #include <linux/pm_qos.h>
 #include <linux/sched.h>
@@ -76,6 +77,7 @@
 struct kgsl_context;
 struct kgsl_power_stats;
 struct kgsl_event;
+struct kgsl_cmdbatch;
 
 struct kgsl_functable {
 	/* Mandatory functions - these functions must be implemented
@@ -87,7 +89,7 @@
 	void (*regwrite) (struct kgsl_device *device,
 		unsigned int offsetwords, unsigned int value);
 	int (*idle) (struct kgsl_device *device);
-	unsigned int (*isidle) (struct kgsl_device *device);
+	bool (*isidle) (struct kgsl_device *device);
 	int (*suspend_context) (struct kgsl_device *device);
 	int (*init) (struct kgsl_device *device);
 	int (*start) (struct kgsl_device *device);
@@ -101,9 +103,8 @@
 	unsigned int (*readtimestamp) (struct kgsl_device *device,
 		struct kgsl_context *context, enum kgsl_timestamp_type type);
 	int (*issueibcmds) (struct kgsl_device_private *dev_priv,
-		struct kgsl_context *context, struct kgsl_ibdesc *ibdesc,
-		unsigned int sizedwords, uint32_t *timestamp,
-		unsigned int flags);
+		struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch,
+		uint32_t *timestamps);
 	int (*setup_pt)(struct kgsl_device *device,
 		struct kgsl_pagetable *pagetable);
 	void (*cleanup_pt)(struct kgsl_device *device,
@@ -115,14 +116,15 @@
 	void * (*snapshot)(struct kgsl_device *device, void *snapshot,
 		int *remain, int hang);
 	irqreturn_t (*irq_handler)(struct kgsl_device *device);
+	int (*drain)(struct kgsl_device *device);
 	/* Optional functions - these functions are not mandatory.  The
 	   driver will check that the function pointer is not NULL before
 	   calling the hook */
-	void (*setstate) (struct kgsl_device *device, unsigned int context_id,
+	int (*setstate) (struct kgsl_device *device, unsigned int context_id,
 			uint32_t flags);
 	struct kgsl_context *(*drawctxt_create) (struct kgsl_device_private *,
 						uint32_t *flags);
-	void (*drawctxt_detach) (struct kgsl_context *context);
+	int (*drawctxt_detach) (struct kgsl_context *context);
 	void (*drawctxt_destroy) (struct kgsl_context *context);
 	long (*ioctl) (struct kgsl_device_private *dev_priv,
 		unsigned int cmd, void *data);
@@ -132,6 +134,8 @@
 	int (*postmortem_dump) (struct kgsl_device *device, int manual);
 	int (*next_event)(struct kgsl_device *device,
 		struct kgsl_event *event);
+	void (*drawctxt_sched)(struct kgsl_device *device,
+		struct kgsl_context *context);
 };
 
 /* MH register values */
@@ -155,6 +159,56 @@
 	unsigned int created;
 };
 
+/**
+ * struct kgsl_cmdbatch - KGSL command descriptor
+ * @device: KGSL GPU device that the command was created for
+ * @context: KGSL context that created the command
+ * @lock: Spinlock protecting the sync point list
+ * @timestamp: Timestamp assigned to the command
+ * @flags: flags
+ * @priv: Internal flags
+ * @fault_policy: Internal policy describing how to handle this command in case
+ * of a fault
+ * @fault_recovery: recovery actions actually tried for this batch
+ * @ibcount: Number of IBs in the command list
+ * @ibdesc: Pointer to the list of IBs
+ * @expires: Point in time when the cmdbatch is considered to be hung
+ * @invalid:  non-zero if the dispatcher determines the command and the owning
+ * context should be invalidated
+ * @refcount: kref structure to maintain the reference count
+ * @synclist: List of context/timestamp tuples to wait for before issuing
+ *
+ * This structure defines an atomic batch of command buffers issued from
+ * userspace.
+ */
+struct kgsl_cmdbatch {
+	struct kgsl_device *device;
+	struct kgsl_context *context;
+	spinlock_t lock;
+	uint32_t timestamp;
+	uint32_t flags;
+	unsigned long priv;
+	unsigned long fault_policy;
+	unsigned long fault_recovery;
+	uint32_t ibcount;
+	struct kgsl_ibdesc *ibdesc;
+	unsigned long expires;
+	int invalid;
+	struct kref refcount;
+	struct list_head synclist;
+};
+
+/**
+ * enum kgsl_cmdbatch_priv - Internal cmdbatch flags
+ * @CMDBATCH_FLAG_SKIP: Skip the entire command batch
+ * @CMDBATCH_FLAG_FORCE_PREAMBLE: Force the preamble on for the cmdbatch
+ * @CMDBATCH_FLAG_WFI: Force wait-for-idle for the submission
+ */
+enum kgsl_cmdbatch_priv {
+	CMDBATCH_FLAG_SKIP = 0,
+	CMDBATCH_FLAG_FORCE_PREAMBLE,
+	CMDBATCH_FLAG_WFI,
+};
 
 struct kgsl_device {
 	struct device *dev;
@@ -190,9 +244,7 @@
 	struct completion hwaccess_gate;
 	const struct kgsl_functable *ftbl;
 	struct work_struct idle_check_ws;
-	struct work_struct hang_check_ws;
 	struct timer_list idle_timer;
-	struct timer_list hang_timer;
 	struct kgsl_pwrctrl pwrctrl;
 	int open_count;
 
@@ -201,12 +253,11 @@
 	uint32_t requested_state;
 
 	atomic_t active_cnt;
-	struct completion suspend_gate;
 
 	wait_queue_head_t wait_queue;
+	wait_queue_head_t active_cnt_wq;
 	struct workqueue_struct *work_queue;
 	struct device *parentdev;
-	struct completion ft_gate;
 	struct dentry *d_debugfs;
 	struct idr context_idr;
 	rwlock_t context_lock;
@@ -233,13 +284,13 @@
 	int drv_log;
 	int mem_log;
 	int pwr_log;
-	int ft_log;
 	int pm_dump_enable;
 	struct kgsl_pwrscale pwrscale;
 	struct kobject pwrscale_kobj;
 	struct work_struct ts_expired_ws;
 	struct list_head events;
 	struct list_head events_pending_list;
+	unsigned int events_last_timestamp;
 	s64 on_time;
 
 	/* Postmortem Control switches */
@@ -254,18 +305,15 @@
 
 #define KGSL_DEVICE_COMMON_INIT(_dev) \
 	.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
-	.suspend_gate = COMPLETION_INITIALIZER((_dev).suspend_gate),\
-	.ft_gate = COMPLETION_INITIALIZER((_dev).ft_gate),\
 	.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
 			kgsl_idle_check),\
-	.hang_check_ws = __WORK_INITIALIZER((_dev).hang_check_ws,\
-			kgsl_hang_check),\
 	.ts_expired_ws  = __WORK_INITIALIZER((_dev).ts_expired_ws,\
 			kgsl_process_events),\
 	.context_idr = IDR_INIT((_dev).context_idr),\
 	.events = LIST_HEAD_INIT((_dev).events),\
 	.events_pending_list = LIST_HEAD_INIT((_dev).events_pending_list), \
 	.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
+	.active_cnt_wq = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).active_cnt_wq),\
 	.mutex = __MUTEX_INITIALIZER((_dev).mutex),\
 	.state = KGSL_STATE_INIT,\
 	.ver_major = DRIVER_VERSION_MAJOR,\
@@ -293,6 +341,7 @@
  * @events: list head of pending events for this context
  * @events_list: list node for the list of all contexts that have pending events
  * @pid: process that owns this context.
+ * @tid: task that created this context.
  * @pagefault: flag set if this context caused a pagefault.
  * @pagefault_ts: global timestamp of the pagefault, if KGSL_CONTEXT_PAGEFAULT
  * is set.
@@ -301,6 +350,7 @@
 	struct kref refcount;
 	uint32_t id;
 	pid_t pid;
+	pid_t tid;
 	struct kgsl_device_private *dev_priv;
 	unsigned long priv;
 	struct kgsl_device *device;
@@ -351,6 +401,9 @@
 int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
 	kgsl_event_func func, void *priv, void *owner);
 
+void kgsl_cancel_event(struct kgsl_device *device, struct kgsl_context *context,
+		unsigned int timestamp, kgsl_event_func func, void *priv);
+
 static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
 	unsigned int type, size_t size)
 {
@@ -438,8 +491,6 @@
 	return 0;
 }
 
-
-
 int kgsl_check_timestamp(struct kgsl_device *device,
 		struct kgsl_context *context, unsigned int timestamp);
 
@@ -595,4 +646,40 @@
 {
 	kgsl_signal_event(device, context, timestamp, KGSL_EVENT_CANCELLED);
 }
+
+void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch);
+
+void kgsl_cmdbatch_destroy_object(struct kref *kref);
+
+/**
+ * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object
+ * @cmdbatch: Pointer to the command batch object
+ */
+static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch)
+{
+	if (cmdbatch)
+		kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object);
+}
+
+/**
+ * kgsl_cmdbatch_sync_pending() - return true if the cmdbatch is waiting
+ * @cmdbatch: Pointer to the command batch object to check
+ *
+ * Return non-zero if the specified command batch is still waiting for sync
+ * point dependencies to be satisfied
+ */
+static inline int kgsl_cmdbatch_sync_pending(struct kgsl_cmdbatch *cmdbatch)
+{
+	int ret;
+
+	if (cmdbatch == NULL)
+		return 0;
+
+	spin_lock(&cmdbatch->lock);
+	ret = list_empty(&cmdbatch->synclist) ? 0 : 1;
+	spin_unlock(&cmdbatch->lock);
+
+	return ret;
+}
+
 #endif  /* __KGSL_DEVICE_H */
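
The cmdbatch declarations above imply a simple ownership contract: the creator
holds one kref, any additional user takes its own reference, and
kgsl_cmdbatch_put() drops it.  A minimal sketch of a consumer under those
assumptions (the dispatcher itself is not part of this patch; the example_*
name is hypothetical):

    /* Illustrative sketch only; assumes the kgsl_device.h declarations above. */
    static int example_issue_cmdbatch(struct kgsl_device *device,
    				  struct kgsl_cmdbatch *cmdbatch)
    {
    	/* Don't issue while sync-point dependencies are still outstanding */
    	if (kgsl_cmdbatch_sync_pending(cmdbatch))
    		return -EBUSY;

    	kref_get(&cmdbatch->refcount);	/* keep the batch alive while in use */

    	/* ... submit cmdbatch->ibdesc[0..ibcount-1] to the hardware ... */

    	kgsl_cmdbatch_put(cmdbatch);	/* frees via destroy_object at zero */
    	return 0;
    }
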
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index e4f502a..c7ac0ad 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -48,7 +48,8 @@
 {
 	int id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
 
-	trace_kgsl_fire_event(id, timestamp, type, jiffies - event->created);
+	trace_kgsl_fire_event(id, timestamp, type, jiffies - event->created,
+		event->func);
 
 	if (event->func)
 		event->func(device, event->priv, id, timestamp, type);
@@ -235,7 +236,7 @@
 	 */
 
 	if (timestamp_cmp(cur_ts, ts) >= 0) {
-		trace_kgsl_fire_event(id, cur_ts, ts, 0);
+		trace_kgsl_fire_event(id, cur_ts, ts, 0, func);
 
 		func(device, priv, id, ts, KGSL_EVENT_TIMESTAMP_RETIRED);
 		kgsl_context_put(context);
@@ -266,7 +267,7 @@
 	event->owner = owner;
 	event->created = jiffies;
 
-	trace_kgsl_register_event(id, ts);
+	trace_kgsl_register_event(id, ts, func);
 
 	/* Add the event to either the owning context or the global list */
 
@@ -333,7 +334,11 @@
 		void *priv)
 {
 	struct kgsl_event *event;
-	struct list_head *head = _get_list_head(device, context);
+	struct list_head *head;
+
+	BUG_ON(!mutex_is_locked(&device->mutex));
+
+	head = _get_list_head(device, context);
 
 	event = _find_event(device, head, timestamp, func, priv);
 
@@ -400,10 +405,19 @@
 	struct kgsl_context *context, *tmp;
 	uint32_t timestamp;
 
+	/*
+	 * Bail unless the global timestamp has advanced.  We can safely do this
+	 * outside of the mutex for speed
+	 */
+
+	timestamp = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+	if (timestamp == device->events_last_timestamp)
+		return;
+
 	mutex_lock(&device->mutex);
 
-	/* Process expired global events */
-	timestamp = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+	device->events_last_timestamp = timestamp;
+
 	_retire_events(device, &device->events, timestamp);
 	_mark_next_event(device, &device->events);
 
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 68052b1..2634e4f 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -482,15 +482,17 @@
 	return NULL;
 }
 
-static void kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
+static int kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
 					uint32_t flags)
 {
 	struct kgsl_gpummu_pt *gpummu_pt;
 	if (!kgsl_mmu_enabled())
-		return;
+		return 0;
 
 	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
-		kgsl_idle(mmu->device);
+		int ret = kgsl_idle(mmu->device);
+		if (ret)
+			return ret;
 		gpummu_pt = mmu->hwpagetable->priv;
 		kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
 			gpummu_pt->base.gpuaddr);
@@ -500,12 +502,16 @@
 		/* Invalidate all and tc */
 		kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE,  0x00000003);
 	}
+
+	return 0;
 }
 
-static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
+static int kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
 				struct kgsl_pagetable *pagetable,
 				unsigned int context_id)
 {
+	int ret = 0;
+
 	if (mmu->flags & KGSL_FLAGS_STARTED) {
 		/* page table not current, then setup mmu to use new
 		 *  specified page table
@@ -518,10 +524,13 @@
 			kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);
 
 			/* call device specific set page table */
-			kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_TLBFLUSH |
+			ret = kgsl_setstate(mmu, context_id,
+				KGSL_MMUFLAGS_TLBFLUSH |
 				KGSL_MMUFLAGS_PTUPDATE);
 		}
 	}
+
+	return ret;
 }
 
 static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
@@ -563,6 +572,7 @@
 
 	struct kgsl_device *device = mmu->device;
 	struct kgsl_gpummu_pt *gpummu_pt;
+	int ret;
 
 	if (mmu->flags & KGSL_FLAGS_STARTED)
 		return 0;
@@ -574,9 +584,6 @@
 	/* setup MMU and sub-client behavior */
 	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
 
-	/* idle device */
-	kgsl_idle(device);
-
 	/* enable axi interrupts */
 	kgsl_regwrite(device, MH_INTERRUPT_MASK,
 			GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
@@ -607,10 +614,12 @@
 	kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
 		      (KGSL_PAGETABLE_BASE |
 		      (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
-	kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
-	mmu->flags |= KGSL_FLAGS_STARTED;
 
-	return 0;
+	ret = kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
+	if (!ret)
+		mmu->flags |= KGSL_FLAGS_STARTED;
+
+	return ret;
 }
 
 static int
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index ecda5a7..e296784 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -423,8 +423,12 @@
 	 * the GPU and trigger a snapshot. To stall the transaction return
 	 * EBUSY error.
 	 */
-	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) {
+		/* turn off GPU IRQ so we don't get faults from it too */
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+		adreno_dispatcher_irq_fault(device);
 		ret = -EBUSY;
+	}
 done:
 	return ret;
 }
@@ -1205,10 +1209,12 @@
 	return 0;
 }
 
-static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
+static int kgsl_iommu_setstate(struct kgsl_mmu *mmu,
 				struct kgsl_pagetable *pagetable,
 				unsigned int context_id)
 {
+	int ret = 0;
+
 	if (mmu->flags & KGSL_FLAGS_STARTED) {
 		/* page table not current, then setup mmu to use new
 		 *  specified page table
@@ -1219,10 +1225,12 @@
 			flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
 							mmu->device->id) |
 							KGSL_MMUFLAGS_TLBFLUSH;
-			kgsl_setstate(mmu, context_id,
+			ret = kgsl_setstate(mmu, context_id,
 				KGSL_MMUFLAGS_PTUPDATE | flags);
 		}
 	}
+
+	return ret;
 }
 
 /*
@@ -1892,31 +1900,40 @@
  * cpu
- * Return - void
+ * Return - 0 on success else error code
  */
-static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
+static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
 					uint32_t flags)
 {
 	struct kgsl_iommu *iommu = mmu->priv;
 	int temp;
 	int i;
+	int ret = 0;
 	phys_addr_t pt_base = kgsl_iommu_get_pt_base_addr(mmu,
 						mmu->hwpagetable);
 	phys_addr_t pt_val;
 
-	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
+	ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+
+	if (ret) {
 		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
-		return;
+		return ret;
 	}
 
 	/* For v0 SMMU GPU needs to be idle for tlb invalidate as well */
-	if (msm_soc_version_supports_iommu_v0())
-		kgsl_idle(mmu->device);
+	if (msm_soc_version_supports_iommu_v0()) {
+		ret = kgsl_idle(mmu->device);
+		if (ret)
+			return ret;
+	}
 
 	/* Acquire GPU-CPU sync Lock here */
 	_iommu_lock();
 
 	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
-		if (!msm_soc_version_supports_iommu_v0())
-			kgsl_idle(mmu->device);
+		if (!msm_soc_version_supports_iommu_v0()) {
+			ret = kgsl_idle(mmu->device);
+			if (ret)
+				goto unlock;
+		}
 		for (i = 0; i < iommu->unit_count; i++) {
 			/* get the lsb value which should not change when
 			 * changing ttbr0 */
@@ -1977,12 +1994,13 @@
 			}
 		}
 	}
-
+unlock:
 	/* Release GPU-CPU sync Lock here */
 	_iommu_unlock();
 
 	/* Disable smmu clock */
 	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+	return ret;
 }
 
 /*
@@ -2039,6 +2057,7 @@
 	.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
 	.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
 	.mmu_enable_clk = kgsl_iommu_enable_clk,
+	.mmu_disable_clk = kgsl_iommu_disable_clk,
 	.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
 	.mmu_get_default_ttbr0 = kgsl_iommu_get_default_ttbr0,
 	.mmu_get_reg_gpuaddr = kgsl_iommu_get_reg_gpuaddr,
diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h
index a7832e4..3a32953 100644
--- a/drivers/gpu/msm/kgsl_log.h
+++ b/drivers/gpu/msm/kgsl_log.h
@@ -103,15 +103,6 @@
 #define KGSL_PWR_CRIT(_dev, fmt, args...) \
 KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
 
-#define KGSL_FT_INFO(_dev, fmt, args...) \
-KGSL_LOG_INFO(_dev->dev, _dev->ft_log, fmt, ##args)
-#define KGSL_FT_WARN(_dev, fmt, args...) \
-KGSL_LOG_WARN(_dev->dev, _dev->ft_log, fmt, ##args)
-#define KGSL_FT_ERR(_dev, fmt, args...) \
-KGSL_LOG_ERR(_dev->dev, _dev->ft_log, fmt, ##args)
-#define KGSL_FT_CRIT(_dev, fmt, args...) \
-KGSL_LOG_CRIT(_dev->dev, _dev->ft_log, fmt, ##args)
-
 /* Core error messages - these are for core KGSL functions that have
    no device associated with them (such as memory) */
 
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 952019f..6635a7c 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -566,7 +566,7 @@
 }
 EXPORT_SYMBOL(kgsl_mmu_putpagetable);
 
-void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
 			uint32_t flags)
 {
 	struct kgsl_device *device = mmu->device;
@@ -574,14 +574,16 @@
 
 	if (!(flags & (KGSL_MMUFLAGS_TLBFLUSH | KGSL_MMUFLAGS_PTUPDATE))
 		&& !adreno_is_a2xx(adreno_dev))
-		return;
+		return 0;
 
 	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
-		return;
+		return 0;
 	else if (device->ftbl->setstate)
-		device->ftbl->setstate(device, context_id, flags);
+		return device->ftbl->setstate(device, context_id, flags);
 	else if (mmu->mmu_ops->mmu_device_setstate)
-		mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+		return mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+
+	return 0;
 }
 EXPORT_SYMBOL(kgsl_setstate);
 
@@ -590,7 +592,6 @@
 	struct kgsl_mh *mh = &device->mh;
 	/* force mmu off to for now*/
 	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
-	kgsl_idle(device);
 
 	/* define physical memory range accessible by the core */
 	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index faba81e..a30ee3f 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -133,10 +133,10 @@
 	int (*mmu_close) (struct kgsl_mmu *mmu);
 	int (*mmu_start) (struct kgsl_mmu *mmu);
 	void (*mmu_stop) (struct kgsl_mmu *mmu);
-	void (*mmu_setstate) (struct kgsl_mmu *mmu,
+	int (*mmu_setstate) (struct kgsl_mmu *mmu,
 		struct kgsl_pagetable *pagetable,
 		unsigned int context_id);
-	void (*mmu_device_setstate) (struct kgsl_mmu *mmu,
+	int (*mmu_device_setstate) (struct kgsl_mmu *mmu,
 					uint32_t flags);
 	void (*mmu_pagefault) (struct kgsl_mmu *mmu);
 	phys_addr_t (*mmu_get_current_ptbase)
@@ -147,6 +147,8 @@
 		(struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
 	int (*mmu_enable_clk)
 		(struct kgsl_mmu *mmu, int ctx_id);
+	void (*mmu_disable_clk)
+		(struct kgsl_mmu *mmu);
 	phys_addr_t (*mmu_get_default_ttbr0)(struct kgsl_mmu *mmu,
 				unsigned int unit_id,
 				enum kgsl_iommu_context_id ctx_id);
@@ -231,7 +233,7 @@
 int kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
 		 struct kgsl_memdesc *memdesc);
 unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
-void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
 			uint32_t flags);
 int kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu,
 					phys_addr_t pt_base);
@@ -260,19 +262,23 @@
 		return 0;
 }
 
-static inline void kgsl_mmu_setstate(struct kgsl_mmu *mmu,
+static inline int kgsl_mmu_setstate(struct kgsl_mmu *mmu,
 			struct kgsl_pagetable *pagetable,
 			unsigned int context_id)
 {
 	if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
-		mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
+		return mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
+
+	return 0;
 }
 
-static inline void kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
+static inline int kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
 						uint32_t flags)
 {
 	if (mmu->mmu_ops && mmu->mmu_ops->mmu_device_setstate)
-		mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+		return mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+
+	return 0;
 }
 
 static inline void kgsl_mmu_stop(struct kgsl_mmu *mmu)
@@ -320,6 +326,12 @@
 		return 0;
 }
 
+static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
+{
+	if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
+		mmu->mmu_ops->mmu_disable_clk(mmu);
+}
+
 static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
 						unsigned int ts, bool ts_valid)
 {
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 5479ae9..07131f7 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1215,9 +1215,6 @@
 		} else {
 			device->pwrctrl.irq_last = 0;
 		}
-	} else if (device->state & (KGSL_STATE_HUNG |
-					KGSL_STATE_DUMP_AND_FT)) {
-		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
 	}
 
 	mutex_unlock(&device->mutex);
@@ -1273,7 +1270,6 @@
 			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
 			return -EBUSY;
 		}
-		del_timer_sync(&device->hang_timer);
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
 		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
@@ -1343,7 +1339,6 @@
 	case KGSL_STATE_NAP:
 	case KGSL_STATE_SLEEP:
 		del_timer_sync(&device->idle_timer);
-		del_timer_sync(&device->hang_timer);
 		/* make sure power is on to stop the device*/
 		kgsl_pwrctrl_enable(device);
 		device->ftbl->suspend_context(device);
@@ -1435,8 +1430,6 @@
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
 
-		mod_timer(&device->hang_timer,
-			(jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
 		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
 				device->pwrctrl.pm_qos_latency);
 	case KGSL_STATE_ACTIVE:
@@ -1504,10 +1497,6 @@
 		return "SLEEP";
 	case KGSL_STATE_SUSPEND:
 		return "SUSPEND";
-	case KGSL_STATE_HUNG:
-		return "HUNG";
-	case KGSL_STATE_DUMP_AND_FT:
-		return "DNR";
 	case KGSL_STATE_SLUMBER:
 		return "SLUMBER";
 	default:
@@ -1539,7 +1528,6 @@
 		(device->state != KGSL_STATE_ACTIVE)) {
 		mutex_unlock(&device->mutex);
 		wait_for_completion(&device->hwaccess_gate);
-		wait_for_completion(&device->ft_gate);
 		mutex_lock(&device->mutex);
 
 		/* Stop the idle timer */
@@ -1595,8 +1583,6 @@
 	kgsl_pwrscale_idle(device);
 
 	if (atomic_dec_and_test(&device->active_cnt)) {
-		INIT_COMPLETION(device->suspend_gate);
-
 		if (device->state == KGSL_STATE_ACTIVE &&
 			device->requested_state == KGSL_STATE_NONE) {
 			kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
@@ -1605,29 +1591,41 @@
 
 		mod_timer(&device->idle_timer,
 			jiffies + device->pwrctrl.interval_timeout);
-
-		complete(&device->suspend_gate);
 	}
 
 	trace_kgsl_active_count(device,
 		(unsigned long) __builtin_return_address(0));
+
+	wake_up(&device->active_cnt_wq);
 }
 EXPORT_SYMBOL(kgsl_active_count_put);
 
+static int _check_active_count(struct kgsl_device *device, int count)
+{
+	/* Return 0 if the active count is greater than the desired value */
+	return atomic_read(&device->active_cnt) > count ? 0 : 1;
+}
+
 /**
  * kgsl_active_count_wait() - Wait for activity to finish.
  * @device: Pointer to a KGSL device
+ * @count: Active count value to wait for
  *
- * Block until all active_cnt users put() their reference.
+ * Block until active_cnt drops to @count.  Returns -ETIMEDOUT if the
+ * count does not drop within one second.
  */
-void kgsl_active_count_wait(struct kgsl_device *device)
+int kgsl_active_count_wait(struct kgsl_device *device, int count)
 {
+	int ret = 0;
+
 	BUG_ON(!mutex_is_locked(&device->mutex));
 
-	if (atomic_read(&device->active_cnt) != 0) {
+	if (atomic_read(&device->active_cnt) > count) {
 		mutex_unlock(&device->mutex);
-		wait_for_completion(&device->suspend_gate);
+		ret = wait_event_timeout(device->active_cnt_wq,
+			_check_active_count(device, count), HZ);
+		/* wait_event_timeout() returns 0 on timeout */
+		ret = (ret == 0) ? -ETIMEDOUT : 0;
 		mutex_lock(&device->mutex);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(kgsl_active_count_wait);
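
Unlike the old suspend_gate completion, kgsl_active_count_wait() now takes a
target count and can time out, so callers must check its return value.  A
sketch of a caller, mirroring the manual-dump path in kgsl_postmortem_dump()
above (the example_* name is hypothetical):

    /* Sketch only; assumes device->mutex is held, as the BUG_ON above requires. */
    static int example_drain_to_idle(struct kgsl_device *device)
    {
    	int ret;

    	/* Wait up to HZ jiffies for all active_count holders to put() */
    	ret = kgsl_active_count_wait(device, 0);
    	if (ret)
    		return ret;	/* -ETIMEDOUT: the GPU never went quiet */

    	if (device->state == KGSL_STATE_ACTIVE)
    		ret = kgsl_idle(device);

    	return ret;
    }
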
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index b7d9226..71a0fdd 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -123,6 +123,6 @@
 int kgsl_active_count_get(struct kgsl_device *device);
 int kgsl_active_count_get_light(struct kgsl_device *device);
 void kgsl_active_count_put(struct kgsl_device *device);
-void kgsl_active_count_wait(struct kgsl_device *device);
+int kgsl_active_count_wait(struct kgsl_device *device, int count);
 
 #endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index e5e23f0..47554c4 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -48,9 +48,6 @@
 #ifdef CONFIG_MSM_SLEEP_STATS_DEVICE
 	&kgsl_pwrscale_policy_idlestats,
 #endif
-#ifdef CONFIG_MSM_DCVS
-	&kgsl_pwrscale_policy_msm,
-#endif
 	NULL
 };
 
diff --git a/drivers/gpu/msm/kgsl_pwrscale_msm.c b/drivers/gpu/msm/kgsl_pwrscale_msm.c
deleted file mode 100644
index 073e474..0000000
--- a/drivers/gpu/msm/kgsl_pwrscale_msm.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/slab.h>
-#include <mach/msm_dcvs.h>
-#include "kgsl.h"
-#include "kgsl_pwrscale.h"
-#include "kgsl_device.h"
-#include "a2xx_reg.h"
-#include "kgsl_trace.h"
-
-struct msm_priv {
-	struct kgsl_device		*device;
-	int				enabled;
-	unsigned int			cur_freq;
-	unsigned int			req_level;
-	int				floor_level;
-	struct msm_dcvs_core_info	*core_info;
-	int				gpu_busy;
-	int				dcvs_core_id;
-};
-
-/* reference to be used in idle and freq callbacks */
-static struct msm_priv *the_msm_priv;
-
-static int msm_idle_enable(int type_core_num,
-		enum msm_core_control_event event)
-{
-	struct msm_priv *priv = the_msm_priv;
-
-	switch (event) {
-	case MSM_DCVS_ENABLE_IDLE_PULSE:
-		priv->enabled = true;
-		break;
-	case MSM_DCVS_DISABLE_IDLE_PULSE:
-		priv->enabled = false;
-		break;
-	case MSM_DCVS_ENABLE_HIGH_LATENCY_MODES:
-	case MSM_DCVS_DISABLE_HIGH_LATENCY_MODES:
-		break;
-	}
-	return 0;
-}
-
-/* Set the requested frequency if it is within 5MHz (delta) of a
- * supported frequency.
- */
-static int msm_set_freq(int core_num, unsigned int freq)
-{
-	int i, delta = 5000000;
-	struct msm_priv *priv = the_msm_priv;
-	struct kgsl_device *device = priv->device;
-	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-
-	/* msm_dcvs manager uses frequencies in kHz */
-	freq *= 1000;
-	for (i = 0; i < pwr->num_pwrlevels; i++)
-		if (abs(pwr->pwrlevels[i].gpu_freq - freq) < delta)
-			break;
-	if (i == pwr->num_pwrlevels)
-		return 0;
-
-	mutex_lock(&device->mutex);
-	priv->req_level = i;
-	if (priv->req_level <= priv->floor_level) {
-		kgsl_pwrctrl_pwrlevel_change(device, priv->req_level);
-		priv->cur_freq = pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq;
-	}
-	mutex_unlock(&device->mutex);
-
-	/* return current frequency in kHz */
-	return priv->cur_freq / 1000;
-}
-
-static int msm_set_min_freq(int core_num, unsigned int freq)
-{
-	int i, delta = 5000000;
-	struct msm_priv *priv = the_msm_priv;
-	struct kgsl_device *device = priv->device;
-	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-
-	/* msm_dcvs manager uses frequencies in kHz */
-	freq *= 1000;
-	for (i = 0; i < pwr->num_pwrlevels; i++)
-		if (abs(pwr->pwrlevels[i].gpu_freq - freq) < delta)
-			break;
-	if (i == pwr->num_pwrlevels)
-		return 0;
-
-	mutex_lock(&device->mutex);
-	priv->floor_level = i;
-	if (priv->floor_level <= priv->req_level)
-		kgsl_pwrctrl_pwrlevel_change(device, priv->floor_level);
-	else if (priv->floor_level > priv->req_level)
-		kgsl_pwrctrl_pwrlevel_change(device, priv->req_level);
-
-	priv->cur_freq = pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq;
-	mutex_unlock(&device->mutex);
-
-	/* return current frequency in kHz */
-	return priv->cur_freq / 1000;
-}
-
-static unsigned int msm_get_freq(int core_num)
-{
-	struct msm_priv *priv = the_msm_priv;
-
-	/* return current frequency in kHz */
-	return priv->cur_freq / 1000;
-}
-
-static void msm_busy(struct kgsl_device *device,
-			struct kgsl_pwrscale *pwrscale)
-{
-	struct msm_priv *priv = pwrscale->priv;
-	if (priv->enabled && !priv->gpu_busy) {
-		msm_dcvs_idle(priv->dcvs_core_id, MSM_DCVS_IDLE_EXIT, 0);
-		trace_kgsl_mpdcvs(device, 1);
-		priv->gpu_busy = 1;
-	}
-	return;
-}
-
-static void msm_idle(struct kgsl_device *device,
-		struct kgsl_pwrscale *pwrscale)
-{
-	struct msm_priv *priv = pwrscale->priv;
-
-	if (priv->enabled && priv->gpu_busy)
-		if (device->ftbl->isidle(device)) {
-			msm_dcvs_idle(priv->dcvs_core_id,
-					MSM_DCVS_IDLE_ENTER, 0);
-			trace_kgsl_mpdcvs(device, 0);
-			priv->gpu_busy = 0;
-		}
-	return;
-}
-
-static void msm_sleep(struct kgsl_device *device,
-			struct kgsl_pwrscale *pwrscale)
-{
-	struct msm_priv *priv = pwrscale->priv;
-
-	if (priv->enabled && priv->gpu_busy) {
-		msm_dcvs_idle(priv->dcvs_core_id, MSM_DCVS_IDLE_ENTER, 0);
-		trace_kgsl_mpdcvs(device, 0);
-		priv->gpu_busy = 0;
-	}
-
-	return;
-}
-
-static void msm_set_io_fraction(struct kgsl_device *device,
-				unsigned int value)
-{
-	int i;
-	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-
-	for (i = 0; i < pwr->num_pwrlevels; i++)
-		pwr->pwrlevels[i].io_fraction = value;
-
-}
-
-static void msm_restore_io_fraction(struct kgsl_device *device)
-{
-	int i;
-	struct kgsl_device_platform_data *pdata =
-				kgsl_device_get_drvdata(device);
-	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-
-	for (i = 0; i < pdata->num_levels; i++)
-		pwr->pwrlevels[i].io_fraction =
-			pdata->pwrlevel[i].io_fraction;
-}
-
-static int msm_init(struct kgsl_device *device,
-		     struct kgsl_pwrscale *pwrscale)
-{
-	struct msm_priv *priv;
-	struct msm_dcvs_freq_entry *tbl;
-	int i, ret = -EINVAL, low_level;
-	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-	struct platform_device *pdev =
-		container_of(device->parentdev, struct platform_device, dev);
-	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
-
-	if (the_msm_priv) {
-		priv = pwrscale->priv = the_msm_priv;
-	} else {
-		priv = pwrscale->priv = kzalloc(sizeof(struct msm_priv),
-			GFP_KERNEL);
-		if (pwrscale->priv == NULL)
-			return -ENOMEM;
-
-		priv->core_info = pdata->core_info;
-		tbl = priv->core_info->freq_tbl;
-		priv->floor_level = pwr->num_pwrlevels - 1;
-		/* Fill in frequency table from low to high, reversing order. */
-		low_level = pwr->num_pwrlevels - KGSL_PWRLEVEL_LAST_OFFSET;
-		for (i = 0; i <= low_level; i++)
-			tbl[i].freq =
-				pwr->pwrlevels[low_level - i].gpu_freq / 1000;
-		priv->dcvs_core_id =
-				msm_dcvs_register_core(MSM_DCVS_CORE_TYPE_GPU,
-				0,
-				priv->core_info,
-				msm_set_freq, msm_get_freq, msm_idle_enable,
-				msm_set_min_freq,
-				priv->core_info->sensors[0]);
-		if (priv->dcvs_core_id < 0) {
-			KGSL_PWR_ERR(device, "msm_dcvs_register_core failed");
-			goto err;
-		}
-		the_msm_priv = priv;
-	}
-	priv->device = device;
-	ret = msm_dcvs_freq_sink_start(priv->dcvs_core_id);
-	if (ret >= 0) {
-		if (device->ftbl->isidle(device)) {
-			priv->gpu_busy = 0;
-			msm_dcvs_idle(priv->dcvs_core_id,
-					MSM_DCVS_IDLE_ENTER, 0);
-		} else {
-			priv->gpu_busy = 1;
-		}
-		msm_set_io_fraction(device, 0);
-		return 0;
-	}
-
-	KGSL_PWR_ERR(device, "msm_dcvs_freq_sink_register failed\n");
-
-err:
-	if (!the_msm_priv)
-		kfree(pwrscale->priv);
-	pwrscale->priv = NULL;
-
-	return ret;
-}
-
-static void msm_close(struct kgsl_device *device,
-		      struct kgsl_pwrscale *pwrscale)
-{
-	struct msm_priv *priv = pwrscale->priv;
-
-	if (pwrscale->priv == NULL)
-		return;
-	msm_dcvs_freq_sink_stop(priv->dcvs_core_id);
-	pwrscale->priv = NULL;
-	msm_restore_io_fraction(device);
-}
-
-struct kgsl_pwrscale_policy kgsl_pwrscale_policy_msm = {
-	.name = "msm",
-	.init = msm_init,
-	.idle = msm_idle,
-	.busy = msm_busy,
-	.sleep = msm_sleep,
-	.close = msm_close,
-};
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index 40649d2..8fc1753 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -235,8 +235,10 @@
 	tz_pwrlevels[0] = j;
 	ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
 				sizeof(tz_pwrlevels), NULL, 0);
-	if (ret)
+	if (ret) {
+		KGSL_DRV_ERR(device, "Fall back to idle based GPU DCVS algo");
 		priv->idle_dcvs = 1;
+	}
 	return 0;
 }
 #else
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 2939df6..5950451 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -170,17 +170,32 @@
 	kobject_put(&private->kobj);
 }
 
-void
-kgsl_process_init_sysfs(struct kgsl_process_private *private)
+/**
+ * kgsl_process_init_sysfs() - Initialize and create sysfs files for a process
+ *
+ * @device: Pointer to kgsl device struct
+ * @private: Pointer to the structure for the process
+ *
+ * @returns: 0 on success, error code otherwise
+ *
+ * kgsl_process_init_sysfs() is called at the time of creating the
+ * process struct when a process opens the kgsl device for the first time.
+ * This function creates the sysfs files for the process.
+ */
+int
+kgsl_process_init_sysfs(struct kgsl_device *device,
+		struct kgsl_process_private *private)
 {
 	unsigned char name[16];
-	int i, ret;
+	int i, ret = 0;
 
 	snprintf(name, sizeof(name), "%d", private->pid);
 
-	if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
-		kgsl_driver.prockobj, name))
-		return;
+	ret = kobject_init_and_add(&private->kobj, &ktype_mem_entry,
+		kgsl_driver.prockobj, name);
+
+	if (ret)
+		return ret;
 
 	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
 		/* We need to check the value of sysfs_create_file, but we
@@ -191,6 +206,7 @@
 		ret = sysfs_create_file(&private->kobj,
 			&mem_stats[i].max_attr.attr);
 	}
+	return ret;
 }
 
 static int kgsl_drv_memstat_show(struct device *dev,
@@ -589,13 +605,16 @@
 
 	/*
 	 * Allocate space to store the list of pages to send to vmap.
-	 * This is an array of pointers so we can track 1024 pages per page of
-	 * allocation which means we can handle up to a 8MB buffer request with
-	 * two pages; well within the acceptable limits for using kmalloc.
+	 * This is an array of pointers so we can track 1024 pages per page
+	 * of allocation.  Since allocations can be as large as the user dares,
+	 * we have to use the kmalloc/vmalloc trick here to make sure we can
+	 * get the memory we need.
 	 */
 
-	pages = kmalloc(memdesc->sglen_alloc * sizeof(struct page *),
-		GFP_KERNEL);
+	if ((memdesc->sglen_alloc * sizeof(struct page *)) > PAGE_SIZE)
+		pages = vmalloc(memdesc->sglen_alloc * sizeof(struct page *));
+	else
+		pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
 
 	if (pages == NULL) {
 		ret = -ENOMEM;
@@ -706,7 +725,10 @@
 		kgsl_driver.stats.histogram[order]++;
 
 done:
-	kfree(pages);
+	if ((memdesc->sglen_alloc * sizeof(struct page *)) > PAGE_SIZE)
+		vfree(pages);
+	else
+		kfree(pages);
 
 	if (ret)
 		kgsl_sharedmem_free(memdesc);
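
The allocation change above picks kmalloc() for page arrays that fit in one
page and vmalloc() for anything larger, and the free path has to repeat the
same size test.  Factored out, the pattern looks like the sketch below (newer
kernels provide kvmalloc()/kvfree() for exactly this; the sketch sticks to the
primitives already used in this file, and the example_* names are hypothetical):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *example_alloc_page_array(unsigned int nents)
    {
    	size_t bytes = nents * sizeof(struct page *);

    	/* Large requests would strain kmalloc; fall back to vmalloc */
    	if (bytes > PAGE_SIZE)
    		return vmalloc(bytes);

    	return kmalloc(bytes, GFP_KERNEL);
    }

    static void example_free_page_array(void *pages, unsigned int nents)
    {
    	/* The free path must make the same decision as the alloc path */
    	if (nents * sizeof(struct page *) > PAGE_SIZE)
    		vfree(pages);
    	else
    		kfree(pages);
    }
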
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 9f84690..3986c61 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -67,7 +67,8 @@
 
 void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);
 
-void kgsl_process_init_sysfs(struct kgsl_process_private *private);
+int kgsl_process_init_sysfs(struct kgsl_device *device,
+		struct kgsl_process_private *private);
 void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
 
 int kgsl_sharedmem_init_sysfs(void);
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 5379670..dc3ad21 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/err.h>
 #include <linux/file.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -220,6 +221,15 @@
 	snprintf(str, size, "%u", kpt->timestamp);
 }
 
+static void kgsl_sync_timeline_release_obj(struct sync_timeline *sync_timeline)
+{
+	/*
+	 * Make sure the timeline is freed only after the destroy flag is
+	 * set.  This avoids further accesses to the timeline from KGSL and
+	 * also catches any unbalanced kref on the timeline.
+	 */
+	BUG_ON(sync_timeline && (sync_timeline->destroyed != true));
+}
 static const struct sync_timeline_ops kgsl_sync_timeline_ops = {
 	.driver_name = "kgsl-timeline",
 	.dup = kgsl_sync_pt_dup,
@@ -227,6 +237,7 @@
 	.compare = kgsl_sync_pt_compare,
 	.timeline_value_str = kgsl_sync_timeline_value_str,
 	.pt_value_str = kgsl_sync_pt_value_str,
+	.release_obj = kgsl_sync_timeline_release_obj,
 };
 
 int kgsl_sync_timeline_create(struct kgsl_context *context)
@@ -271,3 +282,65 @@
 {
 	sync_timeline_destroy(context->timeline);
 }
+
+static void kgsl_sync_callback(struct sync_fence *fence,
+	struct sync_fence_waiter *waiter)
+{
+	struct kgsl_sync_fence_waiter *kwaiter =
+		(struct kgsl_sync_fence_waiter *) waiter;
+	kwaiter->func(kwaiter->priv);
+	sync_fence_put(kwaiter->fence);
+	kfree(kwaiter);
+}
+
+struct kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
+	void (*func)(void *priv), void *priv)
+{
+	struct kgsl_sync_fence_waiter *kwaiter;
+	struct sync_fence *fence;
+	int status;
+
+	fence = sync_fence_fdget(fd);
+	if (fence == NULL)
+		return ERR_PTR(-EINVAL);
+
+	/* create the waiter */
+	kwaiter = kzalloc(sizeof(*kwaiter), GFP_ATOMIC);
+	if (kwaiter == NULL) {
+		sync_fence_put(fence);
+		return ERR_PTR(-ENOMEM);
+	}
+	kwaiter->fence = fence;
+	kwaiter->priv = priv;
+	kwaiter->func = func;
+	sync_fence_waiter_init((struct sync_fence_waiter *) kwaiter,
+		kgsl_sync_callback);
+
+	/* if status then error or signaled */
+	status = sync_fence_wait_async(fence,
+		(struct sync_fence_waiter *) kwaiter);
+	if (status) {
+		kfree(kwaiter);
+		sync_fence_put(fence);
+		if (status < 0)
+			kwaiter = ERR_PTR(status);
+		else
+			kwaiter = NULL;
+	}
+
+	return kwaiter;
+}
+
+int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *kwaiter)
+{
+	if (kwaiter == NULL)
+		return 0;
+
+	if (sync_fence_cancel_async(kwaiter->fence,
+		(struct sync_fence_waiter *) kwaiter) == 0) {
+		sync_fence_put(kwaiter->fence);
+		kfree(kwaiter);
+		return 1;
+	}
+	return 0;
+}
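
A consumer of the new asynchronous fence API would typically look like the
sketch below.  Note the three outcomes of kgsl_sync_fence_async_wait(): an
ERR_PTR on failure, NULL when the fence has already signalled (the callback is
not invoked), and a valid waiter that is freed by kgsl_sync_callback() when
the fence fires.  The example_* names are hypothetical.

    static void example_fence_expired(void *priv)
    {
    	/* e.g. drop the sync point and reschedule the owning context */
    }

    static int example_wait_on_fence(int fd, void *priv)
    {
    	struct kgsl_sync_fence_waiter *waiter;

    	waiter = kgsl_sync_fence_async_wait(fd, example_fence_expired, priv);
    	if (IS_ERR(waiter))
    		return PTR_ERR(waiter);		/* bad fd or -ENOMEM */

    	if (waiter == NULL)
    		example_fence_expired(priv);	/* already signalled */

    	/*
    	 * On teardown, kgsl_sync_fence_async_cancel(waiter) returns 1 only
    	 * if it beat the callback; the waiter is already freed there, and
    	 * the caller must then finish the callback's work itself.
    	 */
    	return 0;
    }
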
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index 63adf06..275eaf0 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,13 @@
 	unsigned int timestamp;
 };
 
+struct kgsl_sync_fence_waiter {
+	struct sync_fence_waiter waiter;
+	struct sync_fence *fence;
+	void (*func)(void *priv);
+	void *priv;
+};
+
 #if defined(CONFIG_SYNC)
 struct sync_pt *kgsl_sync_pt_create(struct sync_timeline *timeline,
 	unsigned int timestamp);
@@ -39,6 +46,9 @@
 void kgsl_sync_timeline_signal(struct sync_timeline *timeline,
 	unsigned int timestamp);
 void kgsl_sync_timeline_destroy(struct kgsl_context *context);
+struct kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
+	void (*func)(void *priv), void *priv);
+int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *waiter);
 #else
 static inline struct sync_pt
 *kgsl_sync_pt_create(struct sync_timeline *timeline, unsigned int timestamp)
@@ -72,6 +82,20 @@
 static inline void kgsl_sync_timeline_destroy(struct kgsl_context *context)
 {
 }
+
+static inline struct
+kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
+	void (*func)(void *priv), void *priv)
+{
+	return NULL;
+}
+
+static inline int
+kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *waiter)
+{
+	return 1;
+}
+
 #endif
 
 #endif /* __KGSL_SYNC_H */
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 831b13f..5f39b8b 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -37,14 +37,13 @@
 
 	TP_PROTO(struct kgsl_device *device,
 			int drawctxt_id,
-			struct kgsl_ibdesc *ibdesc,
-			int numibs,
+			struct kgsl_cmdbatch *cmdbatch,
 			int timestamp,
 			int flags,
 			int result,
 			unsigned int type),
 
-	TP_ARGS(device, drawctxt_id, ibdesc, numibs, timestamp, flags,
+	TP_ARGS(device, drawctxt_id, cmdbatch, timestamp, flags,
 		result, type),
 
 	TP_STRUCT__entry(
@@ -61,8 +60,8 @@
 	TP_fast_assign(
 		__assign_str(device_name, device->name);
 		__entry->drawctxt_id = drawctxt_id;
-		__entry->ibdesc_addr = ibdesc[0].gpuaddr;
-		__entry->numibs = numibs;
+		__entry->ibdesc_addr = cmdbatch->ibdesc[0].gpuaddr;
+		__entry->numibs = cmdbatch->ibcount;
 		__entry->timestamp = timestamp;
 		__entry->flags = flags;
 		__entry->result = result;
@@ -262,29 +261,6 @@
 	)
 );
 
-TRACE_EVENT(kgsl_mpdcvs,
-
-	TP_PROTO(struct kgsl_device *device, unsigned int state),
-
-	TP_ARGS(device, state),
-
-	TP_STRUCT__entry(
-		__string(device_name, device->name)
-		__field(unsigned int, state)
-	),
-
-	TP_fast_assign(
-		__assign_str(device_name, device->name);
-		__entry->state = state;
-	),
-
-	TP_printk(
-		"d_name=%s %s",
-		__get_str(device_name),
-		__entry->state ? "BUSY" : "IDLE"
-	)
-);
-
 TRACE_EVENT(kgsl_gpubusy,
 	TP_PROTO(struct kgsl_device *device, unsigned int busy,
 		unsigned int elapsed),
@@ -754,42 +730,46 @@
 );
 
 TRACE_EVENT(kgsl_register_event,
-		TP_PROTO(unsigned int id, unsigned int timestamp),
-		TP_ARGS(id, timestamp),
+		TP_PROTO(unsigned int id, unsigned int timestamp, void *func),
+		TP_ARGS(id, timestamp, func),
 		TP_STRUCT__entry(
 			__field(unsigned int, id)
 			__field(unsigned int, timestamp)
+			__field(void *, func)
 		),
 		TP_fast_assign(
 			__entry->id = id;
 			__entry->timestamp = timestamp;
+			__entry->func = func;
 		),
 		TP_printk(
-			"ctx=%u ts=%u",
-			__entry->id, __entry->timestamp)
+			"ctx=%u ts=%u cb=%pF",
+			__entry->id, __entry->timestamp, __entry->func)
 );
 
 TRACE_EVENT(kgsl_fire_event,
 		TP_PROTO(unsigned int id, unsigned int ts,
-			unsigned int type, unsigned int age),
-		TP_ARGS(id, ts, type, age),
+			unsigned int type, unsigned int age, void *func),
+		TP_ARGS(id, ts, type, age, func),
 		TP_STRUCT__entry(
 			__field(unsigned int, id)
 			__field(unsigned int, ts)
 			__field(unsigned int, type)
 			__field(unsigned int, age)
+			__field(void *, func)
 		),
 		TP_fast_assign(
 			__entry->id = id;
 			__entry->ts = ts;
 			__entry->type = type;
 			__entry->age = age;
+			__entry->func = func;
 		),
 		TP_printk(
-			"ctx=%u ts=%u type=%s age=%u",
+			"ctx=%u ts=%u type=%s age=%u cb=%pF",
 			__entry->id, __entry->ts,
 			__print_symbolic(__entry->type, KGSL_EVENT_TYPES),
-			__entry->age)
+			__entry->age, __entry->func)
 );
 
 TRACE_EVENT(kgsl_active_count,
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index 883417f..0af57aa 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -353,7 +353,13 @@
 	return ts_diff < Z180_PACKET_COUNT;
 }
 
-static int z180_idle(struct kgsl_device *device)
+/**
+ * z180_idle() - Idle the 2D device
+ * @device: Pointer to the KGSL device struct for the Z180
+ *
+ * Wait until the Z180 submission queue is idle.
+ */
+int z180_idle(struct kgsl_device *device)
 {
 	int status = 0;
 	struct z180_device *z180_dev = Z180_DEVICE(device);
@@ -373,10 +379,8 @@
 int
 z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
 			struct kgsl_context *context,
-			struct kgsl_ibdesc *ibdesc,
-			unsigned int numibs,
-			uint32_t *timestamp,
-			unsigned int ctrl)
+			struct kgsl_cmdbatch *cmdbatch,
+			uint32_t *timestamp)
 {
 	long result = 0;
 	unsigned int ofs        = PACKETSIZE_STATESTREAM * sizeof(unsigned int);
@@ -389,6 +393,20 @@
 	struct kgsl_pagetable *pagetable = dev_priv->process_priv->pagetable;
 	struct z180_device *z180_dev = Z180_DEVICE(device);
 	unsigned int sizedwords;
+	unsigned int numibs;
+	struct kgsl_ibdesc *ibdesc;
+
+	/* Check before locking: the error path dereferences cmdbatch */
+	if (cmdbatch == NULL)
+		return -EINVAL;
+
+	mutex_lock(&device->mutex);
+
+	kgsl_active_count_get(device);
+
+	ibdesc = cmdbatch->ibdesc;
+	numibs = cmdbatch->ibcount;
 
 	if (device->state & KGSL_STATE_HUNG) {
 		result = -EINVAL;
@@ -430,7 +448,7 @@
 		context->id, cmd, sizedwords);
 	/* context switch */
 	if ((context->id != (int)z180_dev->ringbuffer.prevctx) ||
-	    (ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
+	    (cmdbatch->flags & KGSL_CONTEXT_CTX_SWITCH)) {
 		KGSL_CMD_INFO(device, "context switch %d -> %d\n",
 			context->id, z180_dev->ringbuffer.prevctx);
 		kgsl_mmu_setstate(&device->mmu, pagetable,
@@ -438,10 +456,13 @@
 		cnt = PACKETSIZE_STATESTREAM;
 		ofs = 0;
 	}
-	kgsl_setstate(&device->mmu,
+
+	result = kgsl_setstate(&device->mmu,
 			KGSL_MEMSTORE_GLOBAL,
 			kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
 			device->id));
+	if (result < 0)
+		goto error;
 
 	result = wait_event_interruptible_timeout(device->wait_queue,
 				  room_in_rb(z180_dev),
@@ -482,9 +503,12 @@
 	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
 	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
 error:
+	kgsl_trace_issueibcmds(device, context->id, cmdbatch,
+		*timestamp, cmdbatch->flags, result, 0);
 
-	kgsl_trace_issueibcmds(device, context->id, ibdesc, numibs,
-		*timestamp, ctrl, result, 0);
+	kgsl_active_count_put(device);
+
+	mutex_unlock(&device->mutex);
 
 	return (int)result;
 }
@@ -595,8 +619,12 @@
 
 static int z180_stop(struct kgsl_device *device)
 {
+	int ret;
+
 	device->ftbl->irqctrl(device, 0);
-	z180_idle(device);
+	ret = z180_idle(device);
+	if (ret)
+		return ret;
 
 	del_timer_sync(&device->idle_timer);
 
@@ -662,7 +690,7 @@
 	return status;
 }
 
-static unsigned int z180_isidle(struct kgsl_device *device)
+static bool z180_isidle(struct kgsl_device *device)
 {
 	struct z180_device *z180_dev = Z180_DEVICE(device);
 
@@ -875,7 +903,7 @@
 	return context;
 }
 
-static void
+static int
 z180_drawctxt_detach(struct kgsl_context *context)
 {
 	struct kgsl_device *device;
@@ -889,9 +917,13 @@
 	if (z180_dev->ringbuffer.prevctx == context->id) {
 		z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
 		device->mmu.hwpagetable = device->mmu.defaultpagetable;
+
+		/* Ignore the result - we are going down anyway */
 		kgsl_setstate(&device->mmu, KGSL_MEMSTORE_GLOBAL,
 				KGSL_MMUFLAGS_PTUPDATE);
 	}
+
+	return 0;
 }
 
 static void
@@ -965,6 +997,7 @@
 	.irqctrl = z180_irqctrl,
 	.gpuid = z180_gpuid,
 	.irq_handler = z180_irq_handler,
+	.drain = z180_idle, /* drain == idle for the z180 */
 	/* Optional functions */
 	.drawctxt_create = z180_drawctxt_create,
 	.drawctxt_detach = z180_drawctxt_detach,
diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h
index 1be0870..a36e92d 100644
--- a/drivers/gpu/msm/z180.h
+++ b/drivers/gpu/msm/z180.h
@@ -45,5 +45,6 @@
 };
 
 int z180_dump(struct kgsl_device *, int);
+int z180_idle(struct kgsl_device *);
 
 #endif /* __Z180_H */
diff --git a/drivers/gpu/msm/z180_postmortem.c b/drivers/gpu/msm/z180_postmortem.c
index 5d929cf..bc53c0e 100644
--- a/drivers/gpu/msm/z180_postmortem.c
+++ b/drivers/gpu/msm/z180_postmortem.c
@@ -58,6 +58,8 @@
 	unsigned int i;
 	unsigned int reg_val;
 
+	z180_idle(device);
+
 	KGSL_LOG_DUMP(device, "Z180 Register Dump\n");
 	for (i = 0; i < ARRAY_SIZE(regs_to_dump); i++) {
 		kgsl_regread(device,
diff --git a/drivers/gud/mobicore_driver/api.c b/drivers/gud/mobicore_driver/api.c
index 871f6cc..b47383a0 100644
--- a/drivers/gud/mobicore_driver/api.c
+++ b/drivers/gud/mobicore_driver/api.c
@@ -98,7 +98,11 @@
  */
 struct mc_instance *mobicore_open(void)
 {
-	return mc_alloc_instance();
+	struct mc_instance *instance = mc_alloc_instance();
+	if (instance)
+		instance->admin = true;
+	return instance;
 }
 EXPORT_SYMBOL(mobicore_open);
 
diff --git a/drivers/gud/mobicore_driver/build_tag.h b/drivers/gud/mobicore_driver/build_tag.h
index 2a7772e..4a24275 100644
--- a/drivers/gud/mobicore_driver/build_tag.h
+++ b/drivers/gud/mobicore_driver/build_tag.h
@@ -26,4 +26,4 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #define MOBICORE_COMPONENT_BUILD_TAG \
-		"*** GC_MSM8960_Release_V019 ###"
+		"*** t-base-202_V001 ###"
diff --git a/drivers/gud/mobicore_driver/main.c b/drivers/gud/mobicore_driver/main.c
index 6f91974..0451452 100644
--- a/drivers/gud/mobicore_driver/main.c
+++ b/drivers/gud/mobicore_driver/main.c
@@ -47,7 +47,7 @@
 
 /* Define a MobiCore device structure for use with dev_debug() etc */
 struct device_driver mcd_debug_name = {
-	.name = "mcdrvkmod"
+	.name = "MobiCore"
 };
 
 struct device mcd_debug_subname = {
diff --git a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
index 4768f39..7854fc5 100644
--- a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
+++ b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
@@ -41,7 +41,15 @@
 /* Enable the use of vm_unamp instead of the deprecated do_munmap
  * and other 3.7 features
  */
+#ifndef CONFIG_ARCH_MSM8960
 #define MC_VM_UNMAP
+#endif
+
+
+#if defined(CONFIG_ARCH_MSM8974) || defined(CONFIG_ARCH_MSM8226)
+/* Perform clock enable/disable */
+#define MC_CRYPTO_CLOCK_MANAGEMENT
+#endif
 
 /* Enable Power Management for Crypto Engine */
 #define MC_CRYPTO_CLOCK_MANAGEMENT
diff --git a/drivers/gud/mobicore_driver/pm.c b/drivers/gud/mobicore_driver/pm.c
index 3ad2015..55a1ef7 100644
--- a/drivers/gud/mobicore_driver/pm.c
+++ b/drivers/gud/mobicore_driver/pm.c
@@ -67,8 +67,8 @@
 	MCDRV_DBG(mcd, "MobiCore IDLE=%d!", flags->schedule);
 	MCDRV_DBG(mcd,
 		  "MobiCore Request Sleep=%d!", flags->sleep_mode.SleepReq);
-	MCDRV_DBG(mcd, "MobiCore Sleep Ready=%d!",
-		  flags->sleep_mode.ReadyToSleep);
+	MCDRV_DBG(mcd,
+		  "MobiCore Sleep Ready=%d!", flags->sleep_mode.ReadyToSleep);
 }
 
 static int mc_suspend_notifier(struct notifier_block *nb,
diff --git a/drivers/gud/mobicore_driver/public/mc_linux.h b/drivers/gud/mobicore_driver/public/mc_linux.h
index 9c49aef..af027dc 100644
--- a/drivers/gud/mobicore_driver/public/mc_linux.h
+++ b/drivers/gud/mobicore_driver/public/mc_linux.h
@@ -43,6 +43,10 @@
 
 #include "version.h"
 
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
+
 #define MC_ADMIN_DEVNODE	"mobicore"
 #define MC_USER_DEVNODE		"mobicore-user"
 
diff --git a/drivers/gud/mobicore_kernelapi/clientlib.c b/drivers/gud/mobicore_kernelapi/clientlib.c
index 7038e02..16b52e5 100644
--- a/drivers/gud/mobicore_kernelapi/clientlib.c
+++ b/drivers/gud/mobicore_kernelapi/clientlib.c
@@ -299,7 +299,8 @@
 			{
 				session->device_id,
 				*uuid,
-				(uint32_t)wsm->phys_addr,
+				(uint32_t)(wsm->phys_addr) & 0xFFF,
+				wsm->handle,
 				len
 			}
 		};
@@ -926,7 +927,8 @@
 				{
 					session->session_id,
 					handle,
-					(uint32_t)(map_info->secure_virt_addr)
+					(uint32_t)(map_info->secure_virt_addr),
+					map_info->secure_virt_len
 				}
 			};
 
@@ -956,11 +958,11 @@
 			break;
 		}
 
-		struct mc_drv_rsp_unmap_bulk_mem_payload_t
+		/*struct mc_drv_rsp_unmap_bulk_mem_payload_t
 						rsp_unmap_bulk_mem_payload;
 		connection_read_datablock(dev_con,
 					  &rsp_unmap_bulk_mem_payload,
-					  sizeof(rsp_unmap_bulk_mem_payload));
+					  sizeof(rsp_unmap_bulk_mem_payload));*/
 
 		/*
 		 * Unregister mapped bulk buffer from Kernel Module and
diff --git a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h b/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
index 3b8eb4b..eaf7e6c 100644
--- a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
+++ b/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
@@ -109,6 +109,7 @@
 	uint32_t device_id;
 	struct mc_uuid_t uuid;
 	uint32_t tci;
+	uint32_t handle;
 	uint32_t len;
 };
 
@@ -119,10 +120,8 @@
 
 
 struct mc_drv_rsp_open_session_payload_t {
-	uint32_t device_id;
 	uint32_t session_id;
 	uint32_t device_session_id;
-	uint32_t mc_result;
 	uint32_t session_magic;
 };
 
@@ -186,7 +185,6 @@
 struct mc_drv_rsp_map_bulk_mem_payload_t {
 	uint32_t session_id;
 	uint32_t secure_virtual_adr;
-	uint32_t mc_result;
 };
 
 struct mc_drv_rsp_map_bulk_mem_t {
@@ -210,7 +208,6 @@
 struct mc_drv_rsp_unmap_bulk_mem_payload_t {
 	uint32_t response_id;
 	uint32_t session_id;
-	uint32_t mc_result;
 };
 
 struct mc_drv_rsp_unmap_bulk_mem_t {
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index 4e77ca2..8b0fcf4 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -377,7 +377,8 @@
 	return adc_voltage;
 }
 
-int32_t qpnp_adc_scale_pmic_therm(int32_t adc_code,
+int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
 		const struct qpnp_adc_properties *adc_properties,
 		const struct qpnp_vadc_chan_properties *chan_properties,
 		struct qpnp_vadc_result *adc_chan_result)
@@ -421,7 +422,7 @@
 }
 EXPORT_SYMBOL(qpnp_adc_scale_pmic_therm);
 
-int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(
+int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(struct qpnp_vadc_chip *chip,
 		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold)
 {
@@ -429,7 +430,7 @@
 	int64_t low_output = 0, high_output = 0;
 	int rc = 0, sign = 0;
 
-	rc = qpnp_get_vadc_gain_and_offset(&btm_param, CALIB_ABSOLUTE);
+	rc = qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_ABSOLUTE);
 	if (rc < 0) {
 		pr_err("Could not acquire gain and offset\n");
 		return rc;
@@ -476,7 +477,8 @@
 /* Scales the ADC code to degC using the mapping
  * table for the XO thermistor.
  */
-int32_t qpnp_adc_tdkntcg_therm(int32_t adc_code,
+int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
 		const struct qpnp_adc_properties *adc_properties,
 		const struct qpnp_vadc_chan_properties *chan_properties,
 		struct qpnp_vadc_result *adc_chan_result)
@@ -499,7 +501,8 @@
 }
 EXPORT_SYMBOL(qpnp_adc_tdkntcg_therm);
 
-int32_t qpnp_adc_scale_batt_therm(int32_t adc_code,
+int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
 		const struct qpnp_adc_properties *adc_properties,
 		const struct qpnp_vadc_chan_properties *chan_properties,
 		struct qpnp_vadc_result *adc_chan_result)
@@ -517,7 +520,8 @@
 }
 EXPORT_SYMBOL(qpnp_adc_scale_batt_therm);
 
-int32_t qpnp_adc_scale_qrd_batt_therm(int32_t adc_code,
+int32_t qpnp_adc_scale_qrd_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
 		const struct qpnp_adc_properties *adc_properties,
 		const struct qpnp_vadc_chan_properties *chan_properties,
 		struct qpnp_vadc_result *adc_chan_result)
@@ -535,7 +539,8 @@
 }
 EXPORT_SYMBOL(qpnp_adc_scale_qrd_batt_therm);
 
-int32_t qpnp_adc_scale_therm_pu1(int32_t adc_code,
+int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
 		const struct qpnp_adc_properties *adc_properties,
 		const struct qpnp_vadc_chan_properties *chan_properties,
 		struct qpnp_vadc_result *adc_chan_result)
@@ -553,7 +558,8 @@
 }
 EXPORT_SYMBOL(qpnp_adc_scale_therm_pu1);
 
-int32_t qpnp_adc_scale_therm_pu2(int32_t adc_code,
+int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
 		const struct qpnp_adc_properties *adc_properties,
 		const struct qpnp_vadc_chan_properties *chan_properties,
 		struct qpnp_vadc_result *adc_chan_result)
@@ -571,13 +577,14 @@
 }
 EXPORT_SYMBOL(qpnp_adc_scale_therm_pu2);
 
-int32_t qpnp_adc_tm_scale_voltage_therm_pu2(uint32_t reg, int64_t *result)
+int32_t qpnp_adc_tm_scale_voltage_therm_pu2(struct qpnp_vadc_chip *chip,
+					uint32_t reg, int64_t *result)
 {
 	int64_t adc_voltage = 0;
 	struct qpnp_vadc_linear_graph param1;
 	int negative_offset;
 
-	qpnp_get_vadc_gain_and_offset(&param1, CALIB_RATIOMETRIC);
+	qpnp_get_vadc_gain_and_offset(chip, &param1, CALIB_RATIOMETRIC);
 
 	adc_voltage = (reg - param1.adc_gnd) * param1.adc_vref;
 	if (adc_voltage < 0) {
@@ -597,12 +604,13 @@
 }
 EXPORT_SYMBOL(qpnp_adc_tm_scale_voltage_therm_pu2);
 
-int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_adc_tm_config *param)
+int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *chip,
+				struct qpnp_adc_tm_config *param)
 {
 	struct qpnp_vadc_linear_graph param1;
 	int rc;
 
-	qpnp_get_vadc_gain_and_offset(&param1, CALIB_RATIOMETRIC);
+	qpnp_get_vadc_gain_and_offset(chip, &param1, CALIB_RATIOMETRIC);
 
 	rc = qpnp_adc_map_temp_voltage(adcmap_100k_104ef_104fb,
 		ARRAY_SIZE(adcmap_100k_104ef_104fb),
@@ -628,7 +636,8 @@
 }
 EXPORT_SYMBOL(qpnp_adc_tm_scale_therm_voltage_pu2);
 
-int32_t qpnp_adc_scale_batt_id(int32_t adc_code,
+int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
 		const struct qpnp_adc_properties *adc_properties,
 		const struct qpnp_vadc_chan_properties *chan_properties,
 		struct qpnp_vadc_result *adc_chan_result)
@@ -644,7 +653,8 @@
 }
 EXPORT_SYMBOL(qpnp_adc_scale_batt_id);
 
-int32_t qpnp_adc_scale_default(int32_t adc_code,
+int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
 		const struct qpnp_adc_properties *adc_properties,
 		const struct qpnp_vadc_chan_properties *chan_properties,
 		struct qpnp_vadc_result *adc_chan_result)
@@ -701,12 +711,13 @@
 }
 EXPORT_SYMBOL(qpnp_adc_scale_default);
 
-int32_t qpnp_adc_usb_scaler(struct qpnp_adc_tm_btm_param *param,
+int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold)
 {
 	struct qpnp_vadc_linear_graph usb_param;
 
-	qpnp_get_vadc_gain_and_offset(&usb_param, CALIB_RATIOMETRIC);
+	qpnp_get_vadc_gain_and_offset(chip, &usb_param, CALIB_RATIOMETRIC);
 
 	*low_threshold = param->low_thr * usb_param.dy;
 	do_div(*low_threshold, usb_param.adc_vref);
@@ -722,14 +733,15 @@
 }
 EXPORT_SYMBOL(qpnp_adc_usb_scaler);
 
-int32_t qpnp_adc_vbatt_rscaler(struct qpnp_adc_tm_btm_param *param,
+int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold)
 {
 	struct qpnp_vadc_linear_graph vbatt_param;
 	int rc = 0, sign = 0;
 	int64_t low_thr = 0, high_thr = 0;
 
-	rc = qpnp_get_vadc_gain_and_offset(&vbatt_param, CALIB_ABSOLUTE);
+	rc = qpnp_get_vadc_gain_and_offset(chip, &vbatt_param, CALIB_ABSOLUTE);
 	if (rc < 0)
 		return rc;
 
@@ -764,14 +776,15 @@
 }
 EXPORT_SYMBOL(qpnp_adc_vbatt_rscaler);
 
-int32_t qpnp_adc_btm_scaler(struct qpnp_adc_tm_btm_param *param,
+int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold)
 {
 	struct qpnp_vadc_linear_graph btm_param;
 	int64_t low_output = 0, high_output = 0;
 	int rc = 0;
 
-	qpnp_get_vadc_gain_and_offset(&btm_param, CALIB_RATIOMETRIC);
+	qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
 
 	pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
 				param->low_temp);
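
Every scaling helper in qpnp-adc-common.c now takes the VADC chip handle as its first argument instead of relying on the removed global, so external callers have to thread through the handle they obtained from qpnp_get_vadc(). A minimal sketch of the updated call pattern, assuming the qpnp-adc header is included; the helper name, the chip handle and the raw code value are illustrative, not part of this patch.

	/* Sketch only: "chip" is the handle a client obtained from qpnp_get_vadc() */
	static int example_scale_threshold(struct qpnp_vadc_chip *chip, uint32_t raw_code)
	{
		int64_t scaled = 0;
		int rc;

		/* helper now takes the chip handle as its first argument */
		rc = qpnp_adc_tm_scale_voltage_therm_pu2(chip, raw_code, &scaled);
		if (rc < 0)
			pr_err("therm_pu2 scaling failed, rc=%d\n", rc);
		else
			pr_debug("scaled thermistor reading: %lld\n", scaled);
		return rc;
	}
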
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
index f0793b2..8e6afc1 100644
--- a/drivers/hwmon/qpnp-adc-current.c
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -134,25 +134,28 @@
 	u8	revision;
 };
 
-struct qpnp_iadc_drv {
+struct qpnp_iadc_chip {
 	struct qpnp_adc_drv			*adc;
 	int32_t					rsense;
 	bool					external_rsense;
 	struct device				*iadc_hwmon;
-	bool					iadc_initialized;
+	struct list_head			list;
 	int64_t					die_temp;
 	struct delayed_work			iadc_work;
 	struct mutex				iadc_vadc_lock;
 	bool					iadc_mode_sel;
 	struct qpnp_iadc_comp			iadc_comp;
+	struct qpnp_vadc_chip			*vadc_dev;
+	struct work_struct			trigger_completion_work;
 	struct sensor_device_attribute		sens_attr[0];
+	bool					skip_auto_calibrations;
 };
 
-struct qpnp_iadc_drv	*qpnp_iadc;
+LIST_HEAD(qpnp_iadc_device_list);
 
-static int32_t qpnp_iadc_read_reg(uint32_t reg, u8 *data)
+static int32_t qpnp_iadc_read_reg(struct qpnp_iadc_chip *iadc,
+						uint32_t reg, u8 *data)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int rc;
 
 	rc = spmi_ext_register_readl(iadc->adc->spmi->ctrl, iadc->adc->slave,
@@ -165,9 +168,9 @@
 	return 0;
 }
 
-static int32_t qpnp_iadc_write_reg(uint32_t reg, u8 data)
+static int32_t qpnp_iadc_write_reg(struct qpnp_iadc_chip *iadc,
+						uint32_t reg, u8 data)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int rc;
 	u8 *buf;
 
@@ -182,41 +185,54 @@
 	return 0;
 }
 
-static void trigger_iadc_completion(struct work_struct *work)
+static int qpnp_iadc_is_valid(struct qpnp_iadc_chip *iadc)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
+	struct qpnp_iadc_chip *iadc_chip = NULL;
 
-	if (!iadc || !iadc->iadc_initialized)
+	list_for_each_entry(iadc_chip, &qpnp_iadc_device_list, list)
+		if (iadc == iadc_chip)
+			return 0;
+
+	return -EINVAL;
+}
+
+static void qpnp_iadc_trigger_completion(struct work_struct *work)
+{
+	struct qpnp_iadc_chip *iadc = container_of(work,
+			struct qpnp_iadc_chip, trigger_completion_work);
+
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return;
 
 	complete(&iadc->adc->adc_rslt_completion);
 
 	return;
 }
-DECLARE_WORK(trigger_iadc_completion_work, trigger_iadc_completion);
 
 static irqreturn_t qpnp_iadc_isr(int irq, void *dev_id)
 {
-	schedule_work(&trigger_iadc_completion_work);
+	struct qpnp_iadc_chip *iadc = dev_id;
+
+	schedule_work(&iadc->trigger_completion_work);
 
 	return IRQ_HANDLED;
 }
 
-static int32_t qpnp_iadc_enable(bool state)
+static int32_t qpnp_iadc_enable(struct qpnp_iadc_chip *dev, bool state)
 {
 	int rc = 0;
 	u8 data = 0;
 
 	data = QPNP_IADC_ADC_EN;
 	if (state) {
-		rc = qpnp_iadc_write_reg(QPNP_IADC_EN_CTL1,
+		rc = qpnp_iadc_write_reg(dev, QPNP_IADC_EN_CTL1,
 					data);
 		if (rc < 0) {
 			pr_err("IADC enable failed\n");
 			return rc;
 		}
 	} else {
-		rc = qpnp_iadc_write_reg(QPNP_IADC_EN_CTL1,
+		rc = qpnp_iadc_write_reg(dev, QPNP_IADC_EN_CTL1,
 					(~data & QPNP_IADC_ADC_EN));
 		if (rc < 0) {
 			pr_err("IADC disable failed\n");
@@ -227,36 +243,36 @@
 	return 0;
 }
 
-static int32_t qpnp_iadc_status_debug(void)
+static int32_t qpnp_iadc_status_debug(struct qpnp_iadc_chip *dev)
 {
 	int rc = 0;
 	u8 mode = 0, status1 = 0, chan = 0, dig = 0, en = 0;
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_MODE_CTL, &mode);
+	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_MODE_CTL, &mode);
 	if (rc < 0) {
 		pr_err("mode ctl register read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_ADC_DIG_PARAM, &dig);
+	rc = qpnp_iadc_read_reg(dev, QPNP_ADC_DIG_PARAM, &dig);
 	if (rc < 0) {
 		pr_err("digital param read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_ADC_CH_SEL_CTL, &chan);
+	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_ADC_CH_SEL_CTL, &chan);
 	if (rc < 0) {
 		pr_err("channel read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_STATUS1, &status1);
+	rc = qpnp_iadc_read_reg(dev, QPNP_STATUS1, &status1);
 	if (rc < 0) {
 		pr_err("status1 read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_EN_CTL1, &en);
+	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_EN_CTL1, &en);
 	if (rc < 0) {
 		pr_err("en read failed with %d\n", rc);
 		return rc;
@@ -265,7 +281,7 @@
 	pr_debug("EOC not set with status:%x, dig:%x, ch:%x, mode:%x, en:%x\n",
 			status1, dig, chan, mode, en);
 
-	rc = qpnp_iadc_enable(false);
+	rc = qpnp_iadc_enable(dev, false);
 	if (rc < 0) {
 		pr_err("IADC disable failed with %d\n", rc);
 		return rc;
@@ -274,19 +290,20 @@
 	return 0;
 }
 
-static int32_t qpnp_iadc_read_conversion_result(uint16_t *data)
+static int32_t qpnp_iadc_read_conversion_result(struct qpnp_iadc_chip *iadc,
+								int16_t *data)
 {
 	uint8_t rslt_lsb, rslt_msb;
 	uint16_t rslt;
 	int32_t rc;
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_DATA0, &rslt_lsb);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_DATA0, &rslt_lsb);
 	if (rc < 0) {
 		pr_err("qpnp adc result read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_DATA1, &rslt_msb);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_DATA1, &rslt_msb);
 	if (rc < 0) {
 		pr_err("qpnp adc result read failed with %d\n", rc);
 		return rc;
@@ -295,7 +312,7 @@
 	rslt = (rslt_msb << 8) | rslt_lsb;
 	*data = rslt;
 
-	rc = qpnp_iadc_enable(false);
+	rc = qpnp_iadc_enable(iadc, false);
 	if (rc)
 		return rc;
 
@@ -362,32 +379,30 @@
 	return 0;
 }
 
-int32_t qpnp_iadc_comp_result(int64_t *result)
+int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *iadc, int64_t *result)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
-
 	return qpnp_iadc_comp(result, iadc->iadc_comp, iadc->die_temp);
 }
 EXPORT_SYMBOL(qpnp_iadc_comp_result);
 
-static int32_t qpnp_iadc_comp_info(void)
+static int32_t qpnp_iadc_comp_info(struct qpnp_iadc_chip *iadc)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int rc = 0;
 
-	rc = qpnp_iadc_read_reg(QPNP_INT_TEST_VAL, &iadc->iadc_comp.id);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_INT_TEST_VAL, &iadc->iadc_comp.id);
 	if (rc < 0) {
 		pr_err("qpnp adc comp id failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_REVISION2, &iadc->iadc_comp.revision);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION2,
+						&iadc->iadc_comp.revision);
 	if (rc < 0) {
 		pr_err("qpnp adc revision read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_ATE_GAIN_CALIB_OFFSET,
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_ATE_GAIN_CALIB_OFFSET,
 						&iadc->iadc_comp.sys_gain);
 	if (rc < 0) {
 		pr_err("full scale read failed with %d\n", rc);
@@ -405,10 +420,10 @@
 	return rc;
 }
 
-static int32_t qpnp_iadc_configure(enum qpnp_iadc_channels channel,
+static int32_t qpnp_iadc_configure(struct qpnp_iadc_chip *iadc,
+					enum qpnp_iadc_channels channel,
 					uint16_t *raw_code, uint32_t mode_sel)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	u8 qpnp_iadc_mode_reg = 0, qpnp_iadc_ch_sel_reg = 0;
 	u8 qpnp_iadc_conv_req = 0, qpnp_iadc_dig_param_reg = 0;
 	int32_t rc = 0;
@@ -424,34 +439,34 @@
 
 	qpnp_iadc_conv_req = QPNP_IADC_CONV_REQ;
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_MODE_CTL, qpnp_iadc_mode_reg);
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MODE_CTL, qpnp_iadc_mode_reg);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_ADC_CH_SEL_CTL,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_ADC_CH_SEL_CTL,
 						qpnp_iadc_ch_sel_reg);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_ADC_DIG_PARAM,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_ADC_DIG_PARAM,
 						qpnp_iadc_dig_param_reg);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_HW_SETTLE_DELAY,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_HW_SETTLE_DELAY,
 				iadc->adc->amux_prop->hw_settle_time);
 	if (rc < 0) {
 		pr_err("qpnp adc configure error for hw settling time setup\n");
 		return rc;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_FAST_AVG_CTL,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_FAST_AVG_CTL,
 					iadc->adc->amux_prop->fast_avg_setup);
 	if (rc < 0) {
 		pr_err("qpnp adc fast averaging configure error\n");
@@ -460,11 +475,11 @@
 
 	INIT_COMPLETION(iadc->adc->adc_rslt_completion);
 
-	rc = qpnp_iadc_enable(true);
+	rc = qpnp_iadc_enable(iadc, true);
 	if (rc)
 		return rc;
 
-	rc = qpnp_iadc_write_reg(QPNP_CONV_REQ, qpnp_iadc_conv_req);
+	rc = qpnp_iadc_write_reg(iadc, QPNP_CONV_REQ, qpnp_iadc_conv_req);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		return rc;
@@ -474,14 +489,14 @@
 				QPNP_ADC_COMPLETION_TIMEOUT);
 	if (!rc) {
 		u8 status1 = 0;
-		rc = qpnp_iadc_read_reg(QPNP_STATUS1, &status1);
+		rc = qpnp_iadc_read_reg(iadc, QPNP_STATUS1, &status1);
 		if (rc < 0)
 			return rc;
 		status1 &= (QPNP_STATUS1_REQ_STS | QPNP_STATUS1_EOC);
 		if (status1 == QPNP_STATUS1_EOC)
 			pr_debug("End of conversion status set\n");
 		else {
-			rc = qpnp_iadc_status_debug();
+			rc = qpnp_iadc_status_debug(iadc);
 			if (rc < 0) {
 				pr_err("status1 read failed with %d\n", rc);
 				return rc;
@@ -490,7 +505,7 @@
 		}
 	}
 
-	rc = qpnp_iadc_read_conversion_result(raw_code);
+	rc = qpnp_iadc_read_conversion_result(iadc, raw_code);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		return rc;
@@ -502,9 +517,8 @@
 #define IADC_CENTER	0xC000
 #define IADC_READING_RESOLUTION_N	542535
 #define IADC_READING_RESOLUTION_D	100000
-static int32_t qpnp_convert_raw_offset_voltage(void)
+static int32_t qpnp_convert_raw_offset_voltage(struct qpnp_iadc_chip *iadc)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	s64 numerator;
 
 	if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
@@ -529,20 +543,20 @@
 	return 0;
 }
 
-int32_t qpnp_iadc_calibrate_for_trim(bool batfet_closed)
+int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
+							bool batfet_closed)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	uint8_t rslt_lsb, rslt_msb;
 	int32_t rc = 0;
 	uint16_t raw_data;
 	uint32_t mode_sel = 0;
 
-	if (!iadc || !iadc->iadc_initialized)
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
 
 	mutex_lock(&iadc->adc->adc_lock);
 
-	rc = qpnp_iadc_configure(GAIN_CALIBRATION_17P857MV,
+	rc = qpnp_iadc_configure(iadc, GAIN_CALIBRATION_17P857MV,
 						&raw_data, mode_sel);
 	if (rc < 0) {
 		pr_err("qpnp adc result read failed with %d\n", rc);
@@ -560,7 +574,7 @@
 	 */
 	if (!batfet_closed || iadc->external_rsense) {
 		/* external offset calculation */
-		rc = qpnp_iadc_configure(OFFSET_CALIBRATION_CSP_CSN,
+		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP_CSN,
 						&raw_data, mode_sel);
 		if (rc < 0) {
 			pr_err("qpnp adc result read failed with %d\n", rc);
@@ -568,7 +582,7 @@
 		}
 	} else {
 		/* internal offset calculation */
-		rc = qpnp_iadc_configure(OFFSET_CALIBRATION_CSP2_CSN2,
+		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP2_CSN2,
 						&raw_data, mode_sel);
 		if (rc < 0) {
 			pr_err("qpnp adc result read failed with %d\n", rc);
@@ -585,7 +599,7 @@
 	pr_debug("raw gain:0x%x, raw offset:0x%x\n",
 		iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);
 
-	rc = qpnp_convert_raw_offset_voltage();
+	rc = qpnp_convert_raw_offset_voltage(iadc);
 	if (rc < 0) {
 		pr_err("qpnp raw_voltage conversion failed\n");
 		goto fail;
@@ -597,28 +611,28 @@
 
 	pr_debug("trim values:lsb:0x%x and msb:0x%x\n", rslt_lsb, rslt_msb);
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_SEC_ACCESS,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
 					QPNP_IADC_SEC_ACCESS_DATA);
 	if (rc < 0) {
 		pr_err("qpnp iadc configure error for sec access\n");
 		goto fail;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_MSB_OFFSET,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MSB_OFFSET,
 						rslt_msb);
 	if (rc < 0) {
 		pr_err("qpnp iadc configure error for MSB write\n");
 		goto fail;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_SEC_ACCESS,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
 					QPNP_IADC_SEC_ACCESS_DATA);
 	if (rc < 0) {
 		pr_err("qpnp iadc configure error for sec access\n");
 		goto fail;
 	}
 
-	rc = qpnp_iadc_write_reg(QPNP_IADC_LSB_OFFSET,
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_LSB_OFFSET,
 						rslt_lsb);
 	if (rc < 0) {
 		pr_err("qpnp iadc configure error for LSB write\n");
@@ -632,25 +646,28 @@
 
 static void qpnp_iadc_work(struct work_struct *work)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
+	struct qpnp_iadc_chip *iadc = container_of(work,
+			struct qpnp_iadc_chip, iadc_work.work);
 	int rc = 0;
 
-	rc = qpnp_iadc_calibrate_for_trim(true);
-	if (rc)
-		pr_debug("periodic IADC calibration failed\n");
-	else
-		schedule_delayed_work(&iadc->iadc_work,
-			round_jiffies_relative(msecs_to_jiffies
-					(QPNP_IADC_CALIB_SECONDS)));
+	if (!iadc->skip_auto_calibrations) {
+		rc = qpnp_iadc_calibrate_for_trim(iadc, true);
+		if (rc)
+			pr_debug("periodic IADC calibration failed\n");
+	}
+
+	schedule_delayed_work(&iadc->iadc_work,
+		round_jiffies_relative(msecs_to_jiffies
+				(QPNP_IADC_CALIB_SECONDS)));
 	return;
 }
 
-static int32_t qpnp_iadc_version_check(void)
+static int32_t qpnp_iadc_version_check(struct qpnp_iadc_chip *iadc)
 {
 	uint8_t revision;
 	int rc;
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_REVISION2, &revision);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION2, &revision);
 	if (rc < 0) {
 		pr_err("qpnp adc result read failed with %d\n", rc);
 		return rc;
@@ -664,24 +681,31 @@
 	return 0;
 }
 
-int32_t qpnp_iadc_is_ready(void)
+struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev, const char *name)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
+	struct qpnp_iadc_chip *iadc;
+	struct device_node *node = NULL;
+	char prop_name[QPNP_MAX_PROP_NAME_LEN];
 
-	if (!iadc || !iadc->iadc_initialized)
-		return -EPROBE_DEFER;
-	else
-		return 0;
+	snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-iadc", name);
+
+	node = of_parse_phandle(dev->of_node, prop_name, 0);
+	if (node == NULL)
+		return ERR_PTR(-ENODEV);
+
+	list_for_each_entry(iadc, &qpnp_iadc_device_list, list)
+		if (iadc->adc->spmi->dev.of_node == node)
+			return iadc;
+	return ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL(qpnp_iadc_is_ready);
+EXPORT_SYMBOL(qpnp_get_iadc);
 
-int32_t qpnp_iadc_get_rsense(int32_t *rsense)
+int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc, int32_t *rsense)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	uint8_t	rslt_rsense;
 	int32_t	rc = 0, sign_bit = 0;
 
-	if (!iadc || !iadc->iadc_initialized)
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
 
 	if (iadc->external_rsense) {
@@ -689,7 +713,7 @@
 		return rc;
 	}
 
-	rc = qpnp_iadc_read_reg(QPNP_IADC_NOMINAL_RSENSE, &rslt_rsense);
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE, &rslt_rsense);
 	if (rc < 0) {
 		pr_err("qpnp adc rsense read failed with %d\n", rc);
 		return rc;
@@ -713,14 +737,13 @@
 }
 EXPORT_SYMBOL(qpnp_iadc_get_rsense);
 
-static int32_t qpnp_check_pmic_temp(void)
+static int32_t qpnp_check_pmic_temp(struct qpnp_iadc_chip *iadc)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	struct qpnp_vadc_result result_pmic_therm;
 	int64_t die_temp_offset;
 	int rc = 0;
 
-	rc = qpnp_vadc_read(DIE_TEMP, &result_pmic_therm);
+	rc = qpnp_vadc_read(iadc->vadc_dev, DIE_TEMP, &result_pmic_therm);
 	if (rc < 0)
 		return rc;
 
@@ -731,28 +754,30 @@
 
 	if (die_temp_offset > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
 		iadc->die_temp = result_pmic_therm.physical;
-		rc = qpnp_iadc_calibrate_for_trim(true);
-		if (rc)
-			pr_err("periodic IADC calibration failed\n");
+		if (!iadc->skip_auto_calibrations) {
+			rc = qpnp_iadc_calibrate_for_trim(iadc, true);
+			if (rc)
+				pr_err("IADC calibration failed rc = %d\n", rc);
+		}
 	}
 
 	return rc;
 }
 
-int32_t qpnp_iadc_read(enum qpnp_iadc_channels channel,
+int32_t qpnp_iadc_read(struct qpnp_iadc_chip *iadc,
+				enum qpnp_iadc_channels channel,
 				struct qpnp_iadc_result *result)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int32_t rc, rsense_n_ohms, sign = 0, num, mode_sel = 0;
 	int32_t rsense_u_ohms = 0;
 	int64_t result_current;
 	uint16_t raw_data;
 
-	if (!iadc || !iadc->iadc_initialized)
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
 
 	if (!iadc->iadc_mode_sel) {
-		rc = qpnp_check_pmic_temp();
+		rc = qpnp_check_pmic_temp(iadc);
 		if (rc) {
 			pr_err("Error checking pmic therm temp\n");
 			return rc;
@@ -761,13 +786,13 @@
 
 	mutex_lock(&iadc->adc->adc_lock);
 
-	rc = qpnp_iadc_configure(channel, &raw_data, mode_sel);
+	rc = qpnp_iadc_configure(iadc, channel, &raw_data, mode_sel);
 	if (rc < 0) {
 		pr_err("qpnp adc result read failed with %d\n", rc);
 		goto fail;
 	}
 
-	rc = qpnp_iadc_get_rsense(&rsense_n_ohms);
+	rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms);
 	pr_debug("current raw:0%x and rsense:%d\n",
 			raw_data, rsense_n_ohms);
 	rsense_u_ohms = rsense_n_ohms/1000;
@@ -788,7 +813,7 @@
 		result->result_uv = -result->result_uv;
 		result_current = -result_current;
 	}
-	rc = qpnp_iadc_comp_result(&result_current);
+	rc = qpnp_iadc_comp_result(iadc, &result_current);
 	if (rc < 0)
 		pr_err("Error during compensating the IADC\n");
 	rc = 0;
@@ -801,15 +826,15 @@
 }
 EXPORT_SYMBOL(qpnp_iadc_read);
 
-int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_calib *result)
+int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *iadc,
+					struct qpnp_iadc_calib *result)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int rc;
 
-	if (!iadc || !iadc->iadc_initialized)
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
 
-	rc = qpnp_check_pmic_temp();
+	rc = qpnp_check_pmic_temp(iadc);
 	if (rc) {
 		pr_err("Error checking pmic therm temp\n");
 		return rc;
@@ -833,19 +858,32 @@
 }
 EXPORT_SYMBOL(qpnp_iadc_get_gain_and_offset);
 
-int32_t qpnp_iadc_vadc_sync_read(
+int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *iadc)
+{
+	iadc->skip_auto_calibrations = true;
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_skip_calibration);
+
+int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *iadc)
+{
+	iadc->skip_auto_calibrations = false;
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_resume_calibration);
+
+int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
 	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
 	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	int rc = 0;
 
-	if (!iadc || !iadc->iadc_initialized)
+	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
 
 	mutex_lock(&iadc->iadc_vadc_lock);
 
-	rc = qpnp_check_pmic_temp();
+	rc = qpnp_check_pmic_temp(iadc);
 	if (rc) {
 		pr_err("PMIC die temp check failed\n");
 		goto fail;
@@ -853,18 +891,18 @@
 
 	iadc->iadc_mode_sel = true;
 
-	rc = qpnp_vadc_iadc_sync_request(v_channel);
+	rc = qpnp_vadc_iadc_sync_request(iadc->vadc_dev, v_channel);
 	if (rc) {
 		pr_err("Configuring VADC failed\n");
 		goto fail;
 	}
 
-	rc = qpnp_iadc_read(i_channel, i_result);
+	rc = qpnp_iadc_read(iadc, i_channel, i_result);
 	if (rc)
 		pr_err("Configuring IADC failed\n");
 	/* Intentional fall through to release VADC */
 
-	rc = qpnp_vadc_iadc_sync_complete_request(v_channel,
+	rc = qpnp_vadc_iadc_sync_complete_request(iadc->vadc_dev, v_channel,
 							v_result);
 	if (rc)
 		pr_err("Releasing VADC failed\n");
@@ -881,10 +919,11 @@
 			struct device_attribute *devattr, char *buf)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct qpnp_iadc_chip *iadc = dev_get_drvdata(dev);
 	struct qpnp_iadc_result result;
 	int rc = -1;
 
-	rc = qpnp_iadc_read(attr->index, &result);
+	rc = qpnp_iadc_read(iadc, attr->index, &result);
 
 	if (rc)
 		return 0;
@@ -896,9 +935,9 @@
 static struct sensor_device_attribute qpnp_adc_attr =
 	SENSOR_ATTR(NULL, S_IRUGO, qpnp_iadc_show, NULL, 0);
 
-static int32_t qpnp_iadc_init_hwmon(struct spmi_device *spmi)
+static int32_t qpnp_iadc_init_hwmon(struct qpnp_iadc_chip *iadc,
+						struct spmi_device *spmi)
 {
-	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	struct device_node *child;
 	struct device_node *node = spmi->dev.of_node;
 	int rc = 0, i = 0, channel;
@@ -930,19 +969,11 @@
 
 static int __devinit qpnp_iadc_probe(struct spmi_device *spmi)
 {
-	struct qpnp_iadc_drv *iadc;
+	struct qpnp_iadc_chip *iadc;
 	struct qpnp_adc_drv *adc_qpnp;
 	struct device_node *node = spmi->dev.of_node;
 	struct device_node *child;
-	int rc, count_adc_channel_list = 0;
-
-	if (!node)
-		return -EINVAL;
-
-	if (qpnp_iadc) {
-		pr_err("IADC already in use\n");
-		return -EBUSY;
-	}
+	int rc, count_adc_channel_list = 0, i = 0;
 
 	for_each_child_of_node(node, child)
 		count_adc_channel_list++;
@@ -952,7 +983,7 @@
 		return -EINVAL;
 	}
 
-	iadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_iadc_drv) +
+	iadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_iadc_chip) +
 		(sizeof(struct sensor_device_attribute) *
 				count_adc_channel_list), GFP_KERNEL);
 	if (!iadc) {
@@ -976,6 +1007,14 @@
 		goto fail;
 	}
 
+	iadc->vadc_dev = qpnp_get_vadc(&spmi->dev, "iadc");
+	if (IS_ERR(iadc->vadc_dev)) {
+		rc = PTR_ERR(iadc->vadc_dev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("vadc property missing, rc=%d\n", rc);
+		goto fail;
+	}
+
 	mutex_init(&iadc->adc->adc_lock);
 
 	rc = of_property_read_u32(node, "qcom,rsense",
@@ -992,50 +1031,56 @@
 	IRQF_TRIGGER_RISING, "qpnp_iadc_interrupt", iadc);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to request adc irq\n");
-		goto fail;
+		return rc;
 	} else
 		enable_irq_wake(iadc->adc->adc_irq_eoc);
 
-	dev_set_drvdata(&spmi->dev, iadc);
-	qpnp_iadc = iadc;
-
-	rc = qpnp_iadc_init_hwmon(spmi);
+	rc = qpnp_iadc_init_hwmon(iadc, spmi);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to initialize qpnp hwmon adc\n");
-		goto fail;
+		return rc;
 	}
 	iadc->iadc_hwmon = hwmon_device_register(&iadc->adc->spmi->dev);
 
-	rc = qpnp_iadc_version_check();
+	rc = qpnp_iadc_version_check(iadc);
 	if (rc) {
 		dev_err(&spmi->dev, "IADC version not supported\n");
 		goto fail;
 	}
 
 	mutex_init(&iadc->iadc_vadc_lock);
+	INIT_WORK(&iadc->trigger_completion_work, qpnp_iadc_trigger_completion);
 	INIT_DELAYED_WORK(&iadc->iadc_work, qpnp_iadc_work);
-	rc = qpnp_iadc_comp_info();
+	rc = qpnp_iadc_comp_info(iadc);
 	if (rc) {
 		dev_err(&spmi->dev, "abstracting IADC comp info failed!\n");
 		goto fail;
 	}
-	iadc->iadc_initialized = true;
 
-	rc = qpnp_iadc_calibrate_for_trim(true);
+	dev_set_drvdata(&spmi->dev, iadc);
+	list_add(&iadc->list, &qpnp_iadc_device_list);
+	rc = qpnp_iadc_calibrate_for_trim(iadc, true);
 	if (rc)
 		dev_err(&spmi->dev, "failed to calibrate for USR trim\n");
+
 	schedule_delayed_work(&iadc->iadc_work,
 			round_jiffies_relative(msecs_to_jiffies
 					(QPNP_IADC_CALIB_SECONDS)));
 	return 0;
 fail:
-	qpnp_iadc = NULL;
+	for_each_child_of_node(node, child) {
+		device_remove_file(&spmi->dev,
+			&iadc->sens_attr[i].dev_attr);
+		i++;
+	}
+	hwmon_device_unregister(iadc->iadc_hwmon);
+
 	return rc;
 }
 
 static int __devexit qpnp_iadc_remove(struct spmi_device *spmi)
 {
-	struct qpnp_iadc_drv *iadc = dev_get_drvdata(&spmi->dev);
+	struct qpnp_iadc_chip *iadc = dev_get_drvdata(&spmi->dev);
 	struct device_node *node = spmi->dev.of_node;
 	struct device_node *child;
 	int i = 0;
@@ -1047,6 +1092,7 @@
 			&iadc->sens_attr[i].dev_attr);
 		i++;
 	}
+	hwmon_device_unregister(iadc->iadc_hwmon);
 	dev_set_drvdata(&spmi->dev, NULL);
 
 	return 0;
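
Because the driver no longer keeps a single global qpnp_iadc pointer, each consumer now resolves its own handle through qpnp_get_iadc() -- which looks up the "qcom,<name>-iadc" phandle built above -- and passes that handle to every read and calibration call. A minimal client sketch follows, assuming the qpnp-adc header is included; the consumer name is illustrative, and the EXTERNAL_RSENSE channel and result_ua field are assumed from the qpnp-adc header rather than shown in this diff.

	/* Sketch only: hypothetical client probe; "client" must match a
	 * qcom,client-iadc phandle in the consumer's DT node.
	 */
	static int example_iadc_client_probe(struct spmi_device *spmi)
	{
		struct qpnp_iadc_chip *iadc;
		struct qpnp_iadc_result i_result;
		int rc;

		iadc = qpnp_get_iadc(&spmi->dev, "client");
		if (IS_ERR(iadc))
			return PTR_ERR(iadc);	/* -ENODEV or -EPROBE_DEFER */

		rc = qpnp_iadc_read(iadc, EXTERNAL_RSENSE, &i_result);
		if (!rc)
			pr_debug("current: %lld uA, %d uV\n",
				 i_result.result_ua, i_result.result_uv);
		return rc;
	}
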
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index 53e43d1..2fe69fb 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -101,19 +101,21 @@
 #define QPNP_ADC_COMPLETION_TIMEOUT				HZ
 #define QPNP_VADC_ERR_COUNT					5
 
-struct qpnp_vadc_drv {
+struct qpnp_vadc_chip {
+	struct device			*dev;
 	struct qpnp_adc_drv		*adc;
+	struct list_head		list;
 	struct dentry			*dent;
 	struct device			*vadc_hwmon;
 	bool				vadc_init_calib;
-	bool				vadc_initialized;
 	int				max_channels_available;
 	bool				vadc_iadc_sync_lock;
 	u8				id;
+	struct work_struct		trigger_completion_work;
 	struct sensor_device_attribute	sens_attr[0];
 };
 
-struct qpnp_vadc_drv *qpnp_vadc;
+LIST_HEAD(qpnp_vadc_device_list);
 
 static struct qpnp_vadc_scale_fn vadc_scale_fn[] = {
 	[SCALE_DEFAULT] = {qpnp_adc_scale_default},
@@ -125,9 +127,9 @@
 	[SCALE_QRD_BATT_THERM] = {qpnp_adc_scale_qrd_batt_therm},
 };
 
-static int32_t qpnp_vadc_read_reg(int16_t reg, u8 *data)
+static int32_t qpnp_vadc_read_reg(struct qpnp_vadc_chip *vadc, int16_t reg,
+								u8 *data)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
 	int rc;
 
 	rc = spmi_ext_register_readl(vadc->adc->spmi->ctrl, vadc->adc->slave,
@@ -140,9 +142,9 @@
 	return 0;
 }
 
-static int32_t qpnp_vadc_write_reg(int16_t reg, u8 data)
+static int32_t qpnp_vadc_write_reg(struct qpnp_vadc_chip *vadc, int16_t reg,
+								u8 data)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
 	int rc;
 	u8 *buf;
 
@@ -158,24 +160,24 @@
 	return 0;
 }
 
-static int32_t qpnp_vadc_warm_rst_configure(void)
+static int32_t qpnp_vadc_warm_rst_configure(struct qpnp_vadc_chip *vadc)
 {
 	int rc = 0;
 	u8 data = 0;
 
-	rc = qpnp_vadc_write_reg(QPNP_VADC_ACCESS, QPNP_VADC_ACCESS_DATA);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_ACCESS, QPNP_VADC_ACCESS_DATA);
 	if (rc < 0) {
 		pr_err("VADC write access failed\n");
 		return rc;
 	}
 
-	rc = qpnp_vadc_read_reg(QPNP_VADC_PERH_RESET_CTL3, &data);
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_PERH_RESET_CTL3, &data);
 	if (rc < 0) {
 		pr_err("VADC perh reset ctl3 read failed\n");
 		return rc;
 	}
 
-	rc = qpnp_vadc_write_reg(QPNP_VADC_ACCESS, QPNP_VADC_ACCESS_DATA);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_ACCESS, QPNP_VADC_ACCESS_DATA);
 	if (rc < 0) {
 		pr_err("VADC write access failed\n");
 		return rc;
@@ -183,7 +185,7 @@
 
 	data |= QPNP_FOLLOW_WARM_RB;
 
-	rc = qpnp_vadc_write_reg(QPNP_VADC_PERH_RESET_CTL3, data);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_PERH_RESET_CTL3, data);
 	if (rc < 0) {
 		pr_err("VADC perh reset ctl3 write failed\n");
 		return rc;
@@ -192,21 +194,21 @@
 	return 0;
 }
 
-static int32_t qpnp_vadc_enable(bool state)
+static int32_t qpnp_vadc_enable(struct qpnp_vadc_chip *vadc, bool state)
 {
 	int rc = 0;
 	u8 data = 0;
 
 	data = QPNP_VADC_ADC_EN;
 	if (state) {
-		rc = qpnp_vadc_write_reg(QPNP_VADC_EN_CTL1,
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_EN_CTL1,
 					data);
 		if (rc < 0) {
 			pr_err("VADC enable failed\n");
 			return rc;
 		}
 	} else {
-		rc = qpnp_vadc_write_reg(QPNP_VADC_EN_CTL1,
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_EN_CTL1,
 					(~data & QPNP_VADC_ADC_EN));
 		if (rc < 0) {
 			pr_err("VADC disable failed\n");
@@ -217,42 +219,42 @@
 	return 0;
 }
 
-static int32_t qpnp_vadc_status_debug(void)
+static int32_t qpnp_vadc_status_debug(struct qpnp_vadc_chip *vadc)
 {
 	int rc = 0;
 	u8 mode = 0, status1 = 0, chan = 0, dig = 0, en = 0, status2 = 0;
 
-	rc = qpnp_vadc_read_reg(QPNP_VADC_MODE_CTL, &mode);
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_MODE_CTL, &mode);
 	if (rc < 0) {
 		pr_err("mode ctl register read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_vadc_read_reg(QPNP_VADC_ADC_DIG_PARAM, &dig);
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_ADC_DIG_PARAM, &dig);
 	if (rc < 0) {
 		pr_err("digital param read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_vadc_read_reg(QPNP_VADC_ADC_CH_SEL_CTL, &chan);
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_ADC_CH_SEL_CTL, &chan);
 	if (rc < 0) {
 		pr_err("channel read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1);
 	if (rc < 0) {
 		pr_err("status1 read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS2, &status2);
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS2, &status2);
 	if (rc < 0) {
 		pr_err("status2 read failed with %d\n", rc);
 		return rc;
 	}
 
-	rc = qpnp_vadc_read_reg(QPNP_VADC_EN_CTL1, &en);
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_EN_CTL1, &en);
 	if (rc < 0) {
 		pr_err("en read failed with %d\n", rc);
 		return rc;
@@ -261,7 +263,7 @@
 	pr_err("EOC not set - status1/2:%x/%x, dig:%x, ch:%x, mode:%x, en:%x\n",
 			status1, status2, dig, chan, mode, en);
 
-	rc = qpnp_vadc_enable(false);
+	rc = qpnp_vadc_enable(vadc, false);
 	if (rc < 0) {
 		pr_err("VADC disable failed with %d\n", rc);
 		return rc;
@@ -269,10 +271,9 @@
 
 	return 0;
 }
-static int32_t qpnp_vadc_configure(
+static int32_t qpnp_vadc_configure(struct qpnp_vadc_chip *vadc,
 			struct qpnp_adc_amux_properties *chan_prop)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
 	u8 decimation = 0, conv_sequence = 0, conv_sequence_trig = 0;
 	u8 mode_ctrl = 0;
 	int rc = 0;
@@ -280,7 +281,7 @@
 	/* Mode selection */
 	mode_ctrl |= ((chan_prop->mode_sel << QPNP_VADC_OP_MODE_SHIFT) |
 			(QPNP_VADC_ADC_TRIM_EN | QPNP_VADC_AMUX_TRIM_EN));
-	rc = qpnp_vadc_write_reg(QPNP_VADC_MODE_CTL, mode_ctrl);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MODE_CTL, mode_ctrl);
 	if (rc < 0) {
 		pr_err("Mode configure write error\n");
 		return rc;
@@ -288,7 +289,7 @@
 
 
 	/* Channel selection */
-	rc = qpnp_vadc_write_reg(QPNP_VADC_ADC_CH_SEL_CTL,
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_ADC_CH_SEL_CTL,
 						chan_prop->amux_channel);
 	if (rc < 0) {
 		pr_err("Channel configure error\n");
@@ -298,14 +299,14 @@
 	/* Digital parameter setup */
 	decimation = chan_prop->decimation <<
 				QPNP_VADC_ADC_DIG_DEC_RATIO_SEL_SHIFT;
-	rc = qpnp_vadc_write_reg(QPNP_VADC_ADC_DIG_PARAM, decimation);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_ADC_DIG_PARAM, decimation);
 	if (rc < 0) {
 		pr_err("Digital parameter configure write error\n");
 		return rc;
 	}
 
 	/* HW settling time delay */
-	rc = qpnp_vadc_write_reg(QPNP_VADC_HW_SETTLE_DELAY,
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HW_SETTLE_DELAY,
 						chan_prop->hw_settle_time);
 	if (rc < 0) {
 		pr_err("HW settling time setup error\n");
@@ -315,7 +316,7 @@
 	if (chan_prop->mode_sel == (ADC_OP_NORMAL_MODE <<
 					QPNP_VADC_OP_MODE_SHIFT)) {
 		/* Normal measurement mode */
-		rc = qpnp_vadc_write_reg(QPNP_VADC_FAST_AVG_CTL,
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_FAST_AVG_CTL,
 						chan_prop->fast_avg_setup);
 		if (rc < 0) {
 			pr_err("Fast averaging configure error\n");
@@ -327,7 +328,7 @@
 		conv_sequence = ((ADC_SEQ_HOLD_100US <<
 				QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT) |
 				ADC_CONV_SEQ_TIMEOUT_5MS);
-		rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_SEQ_CTL,
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_SEQ_CTL,
 							conv_sequence);
 		if (rc < 0) {
 			pr_err("Conversion sequence error\n");
@@ -337,7 +338,7 @@
 		conv_sequence_trig = ((QPNP_VADC_CONV_SEQ_RISING_EDGE <<
 				QPNP_VADC_CONV_SEQ_EDGE_SHIFT) |
 				chan_prop->trigger_channel);
-		rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_SEQ_TRIG_CTL,
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_SEQ_TRIG_CTL,
 							conv_sequence_trig);
 		if (rc < 0) {
 			pr_err("Conversion trigger error\n");
@@ -347,13 +348,13 @@
 
 	INIT_COMPLETION(vadc->adc->adc_rslt_completion);
 
-	rc = qpnp_vadc_enable(true);
+	rc = qpnp_vadc_enable(vadc, true);
 	if (rc)
 		return rc;
 
 	if (!vadc->vadc_iadc_sync_lock) {
 		/* Request conversion */
-		rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_REQ,
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_REQ,
 					QPNP_VADC_CONV_REQ_SET);
 		if (rc < 0) {
 			pr_err("Request conversion failed\n");
@@ -364,18 +365,19 @@
 	return 0;
 }
 
-static int32_t qpnp_vadc_read_conversion_result(int32_t *data)
+static int32_t qpnp_vadc_read_conversion_result(struct qpnp_vadc_chip *vadc,
+								int32_t *data)
 {
 	uint8_t rslt_lsb, rslt_msb;
 	int rc = 0, status = 0;
 
-	status = qpnp_vadc_read_reg(QPNP_VADC_DATA0, &rslt_lsb);
+	status = qpnp_vadc_read_reg(vadc, QPNP_VADC_DATA0, &rslt_lsb);
 	if (status < 0) {
 		pr_err("qpnp adc result read failed for data0\n");
 		goto fail;
 	}
 
-	status = qpnp_vadc_read_reg(QPNP_VADC_DATA1, &rslt_msb);
+	status = qpnp_vadc_read_reg(vadc, QPNP_VADC_DATA1, &rslt_msb);
 	if (status < 0) {
 		pr_err("qpnp adc result read failed for data1\n");
 		goto fail;
@@ -390,14 +392,14 @@
 	}
 
 fail:
-	rc = qpnp_vadc_enable(false);
+	rc = qpnp_vadc_enable(vadc, false);
 	if (rc)
 		return rc;
 
 	return status;
 }
 
-static int32_t qpnp_vadc_read_status(int mode_sel)
+static int32_t qpnp_vadc_read_status(struct qpnp_vadc_chip *vadc, int mode_sel)
 {
 	u8 status1, status2, status2_conv_seq_state;
 	u8 status_err = QPNP_VADC_CONV_TIMEOUT_ERR;
@@ -405,13 +407,13 @@
 
 	switch (mode_sel) {
 	case (ADC_OP_CONVERSION_SEQUENCER << QPNP_VADC_OP_MODE_SHIFT):
-		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1);
 		if (rc) {
 			pr_err("qpnp_vadc read mask interrupt failed\n");
 			return rc;
 		}
 
-		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS2, &status2);
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS2, &status2);
 		if (rc) {
 			pr_err("qpnp_vadc read mask interrupt failed\n");
 			return rc;
@@ -437,32 +439,45 @@
 	return 0;
 }
 
+static int qpnp_vadc_is_valid(struct qpnp_vadc_chip *vadc)
+{
+	struct qpnp_vadc_chip *vadc_chip = NULL;
+
+	list_for_each_entry(vadc_chip, &qpnp_vadc_device_list, list)
+		if (vadc == vadc_chip)
+			return 0;
+
+	return -EINVAL;
+}
+
 static void qpnp_vadc_work(struct work_struct *work)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
+	struct qpnp_vadc_chip *vadc = container_of(work,
+			struct qpnp_vadc_chip, trigger_completion_work);
 
-	if (!vadc || !vadc->vadc_initialized)
+	if (qpnp_vadc_is_valid(vadc) < 0)
 		return;
 
 	complete(&vadc->adc->adc_rslt_completion);
 
 	return;
 }
-DECLARE_WORK(trigger_completion_work, qpnp_vadc_work);
 
 static irqreturn_t qpnp_vadc_isr(int irq, void *dev_id)
 {
-	schedule_work(&trigger_completion_work);
+	struct qpnp_vadc_chip *vadc = dev_id;
+
+	schedule_work(&vadc->trigger_completion_work);
 
 	return IRQ_HANDLED;
 }
 
-static int32_t qpnp_vadc_version_check(void)
+static int32_t qpnp_vadc_version_check(struct qpnp_vadc_chip *dev)
 {
 	uint8_t revision;
 	int rc;
 
-	rc = qpnp_vadc_read_reg(QPNP_VADC_REVISION2, &revision);
+	rc = qpnp_vadc_read_reg(dev, QPNP_VADC_REVISION2, &revision);
 	if (rc < 0) {
 		pr_err("qpnp adc result read failed with %d\n", rc);
 		return rc;
@@ -510,13 +525,17 @@
 	return 0;
 }
 
-int32_t qpnp_vbat_sns_comp_result(int64_t *result)
+int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *vadc,
+						int64_t *result)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
 	struct qpnp_vadc_result die_temp_result;
 	int rc = 0;
 
-	rc = qpnp_vadc_conv_seq_request(ADC_SEQ_NONE,
+	rc = qpnp_vadc_is_valid(vadc);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
 			DIE_TEMP, &die_temp_result);
 	if (rc < 0) {
 		pr_err("Error reading die_temp\n");
@@ -532,9 +551,9 @@
 }
 EXPORT_SYMBOL(qpnp_vbat_sns_comp_result);
 
-static void qpnp_vadc_625mv_channel_sel(uint32_t *ref_channel_sel)
+static void qpnp_vadc_625mv_channel_sel(struct qpnp_vadc_chip *vadc,
+				uint32_t *ref_channel_sel)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
 	uint32_t dt_index = 0;
 
 	/* Check if the buffered 625mV channel exists */
@@ -551,9 +570,8 @@
 	}
 }
 
-static int32_t qpnp_vadc_calib_device(void)
+static int32_t qpnp_vadc_calib_device(struct qpnp_vadc_chip *vadc)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
 	struct qpnp_adc_amux_properties conv;
 	int rc, calib_read_1, calib_read_2, count = 0;
 	u8 status1 = 0;
@@ -565,14 +583,14 @@
 	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
 	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
 
-	rc = qpnp_vadc_configure(&conv);
+	rc = qpnp_vadc_configure(vadc, &conv);
 	if (rc) {
 		pr_err("qpnp_vadc configure failed with %d\n", rc);
 		goto calib_fail;
 	}
 
 	while (status1 != QPNP_VADC_STATUS1_EOC) {
-		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1);
 		if (rc < 0)
 			return rc;
 		status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
@@ -585,19 +603,19 @@
 		}
 	}
 
-	rc = qpnp_vadc_read_conversion_result(&calib_read_1);
+	rc = qpnp_vadc_read_conversion_result(vadc, &calib_read_1);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		goto calib_fail;
 	}
 
-	qpnp_vadc_625mv_channel_sel(&ref_channel_sel);
+	qpnp_vadc_625mv_channel_sel(vadc, &ref_channel_sel);
 	conv.amux_channel = ref_channel_sel;
 	conv.decimation = DECIMATION_TYPE2;
 	conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
 	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
 	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
-	rc = qpnp_vadc_configure(&conv);
+	rc = qpnp_vadc_configure(vadc, &conv);
 	if (rc) {
 		pr_err("qpnp adc configure failed with %d\n", rc);
 		goto calib_fail;
@@ -606,7 +624,7 @@
 	status1 = 0;
 	count = 0;
 	while (status1 != QPNP_VADC_STATUS1_EOC) {
-		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1);
 		if (rc < 0)
 			return rc;
 		status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
@@ -619,7 +637,7 @@
 		}
 	}
 
-	rc = qpnp_vadc_read_conversion_result(&calib_read_2);
+	rc = qpnp_vadc_read_conversion_result(vadc, &calib_read_2);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		goto calib_fail;
@@ -643,7 +661,7 @@
 	conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
 	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
 	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
-	rc = qpnp_vadc_configure(&conv);
+	rc = qpnp_vadc_configure(vadc, &conv);
 	if (rc) {
 		pr_err("qpnp adc configure failed with %d\n", rc);
 		goto calib_fail;
@@ -652,7 +670,7 @@
 	status1 = 0;
 	count = 0;
 	while (status1 != QPNP_VADC_STATUS1_EOC) {
-		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1);
 		if (rc < 0)
 			return rc;
 		status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
@@ -665,7 +683,7 @@
 		}
 	}
 
-	rc = qpnp_vadc_read_conversion_result(&calib_read_1);
+	rc = qpnp_vadc_read_conversion_result(vadc, &calib_read_1);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		goto calib_fail;
@@ -676,7 +694,7 @@
 	conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
 	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
 	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
-	rc = qpnp_vadc_configure(&conv);
+	rc = qpnp_vadc_configure(vadc, &conv);
 	if (rc) {
 		pr_err("qpnp adc configure failed with %d\n", rc);
 		goto calib_fail;
@@ -685,7 +703,7 @@
 	status1 = 0;
 	count = 0;
 	while (status1 != QPNP_VADC_STATUS1_EOC) {
-		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1);
 		if (rc < 0)
 			return rc;
 		status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
@@ -698,7 +716,7 @@
 		}
 	}
 
-	rc = qpnp_vadc_read_conversion_result(&calib_read_2);
+	rc = qpnp_vadc_read_conversion_result(vadc, &calib_read_2);
 	if (rc) {
 		pr_err("qpnp adc read adc failed with %d\n", rc);
 		goto calib_fail;
@@ -719,11 +737,15 @@
 	return rc;
 }
 
-int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_linear_graph *param,
+int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *vadc,
+				struct qpnp_vadc_linear_graph *param,
 				enum qpnp_adc_calib_type calib_type)
 {
+	int rc = 0;
 
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
+	rc = qpnp_vadc_is_valid(vadc);
+	if (rc < 0)
+		return rc;
 
 	switch (calib_type) {
 	case CALIB_RATIOMETRIC:
@@ -752,36 +774,44 @@
 }
 EXPORT_SYMBOL(qpnp_get_vadc_gain_and_offset);
 
-int32_t qpnp_vadc_is_ready(void)
+struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev, const char *name)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
+	struct qpnp_vadc_chip *vadc;
+	struct device_node *node = NULL;
+	char prop_name[QPNP_MAX_PROP_NAME_LEN];
 
-	if (!vadc || !vadc->vadc_initialized)
-		return -EPROBE_DEFER;
-	else
-		return 0;
+	snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-vadc", name);
+
+	node = of_parse_phandle(dev->of_node, prop_name, 0);
+	if (node == NULL)
+		return ERR_PTR(-ENODEV);
+
+	list_for_each_entry(vadc, &qpnp_vadc_device_list, list)
+		if (vadc->adc->spmi->dev.of_node == node)
+			return vadc;
+	return ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL(qpnp_vadc_is_ready);
+EXPORT_SYMBOL(qpnp_get_vadc);
 
-int32_t qpnp_vadc_conv_seq_request(enum qpnp_vadc_trigger trigger_channel,
+int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *vadc,
+				enum qpnp_vadc_trigger trigger_channel,
 					enum qpnp_vadc_channels channel,
 					struct qpnp_vadc_result *result)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
 	int rc = 0, scale_type, amux_prescaling, dt_index = 0;
 	uint32_t ref_channel;
 
-	if (!vadc || !vadc->vadc_initialized)
+	if (qpnp_vadc_is_valid(vadc))
 		return -EPROBE_DEFER;
 
 	mutex_lock(&vadc->adc->adc_lock);
 
 	if (!vadc->vadc_init_calib) {
-		rc = qpnp_vadc_version_check();
+		rc = qpnp_vadc_version_check(vadc);
 		if (rc)
 			goto fail_unlock;
 
-		rc = qpnp_vadc_calib_device();
+		rc = qpnp_vadc_calib_device(vadc);
 		if (rc) {
 			pr_err("Calibration failed\n");
 			goto fail_unlock;
@@ -790,7 +820,7 @@
 	}
 
 	if (channel == REF_625MV) {
-		qpnp_vadc_625mv_channel_sel(&ref_channel);
+		qpnp_vadc_625mv_channel_sel(vadc, &ref_channel);
 		channel = ref_channel;
 	}
 
@@ -826,7 +856,7 @@
 
 	vadc->adc->amux_prop->trigger_channel = trigger_channel;
 
-	rc = qpnp_vadc_configure(vadc->adc->amux_prop);
+	rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
 	if (rc) {
 		pr_err("qpnp vadc configure failed with %d\n", rc);
 		goto fail_unlock;
@@ -836,14 +866,14 @@
 					QPNP_ADC_COMPLETION_TIMEOUT);
 	if (!rc) {
 		u8 status1 = 0;
-		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1);
 		if (rc < 0)
 			goto fail_unlock;
 		status1 &= (QPNP_VADC_STATUS1_REQ_STS | QPNP_VADC_STATUS1_EOC);
 		if (status1 == QPNP_VADC_STATUS1_EOC)
 			pr_debug("End of conversion status set\n");
 		else {
-			rc = qpnp_vadc_status_debug();
+			rc = qpnp_vadc_status_debug(vadc);
 			if (rc < 0)
 				pr_err("VADC disable failed\n");
 			rc = -EINVAL;
@@ -852,12 +882,13 @@
 	}
 
 	if (trigger_channel < ADC_SEQ_NONE) {
-		rc = qpnp_vadc_read_status(vadc->adc->amux_prop->mode_sel);
+		rc = qpnp_vadc_read_status(vadc,
+					vadc->adc->amux_prop->mode_sel);
 		if (rc)
 			pr_debug("Conversion sequence timed out - %d\n", rc);
 	}
 
-	rc = qpnp_vadc_read_conversion_result(&result->adc_code);
+	rc = qpnp_vadc_read_conversion_result(vadc, &result->adc_code);
 	if (rc) {
 		pr_err("qpnp vadc read adc code failed with %d\n", rc);
 		goto fail_unlock;
@@ -882,7 +913,7 @@
 		goto fail_unlock;
 	}
 
-	vadc_scale_fn[scale_type].chan(result->adc_code,
+	vadc_scale_fn[scale_type].chan(vadc, result->adc_code,
 		vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
 
 fail_unlock:
@@ -892,23 +923,22 @@
 }
 EXPORT_SYMBOL(qpnp_vadc_conv_seq_request);
 
-int32_t qpnp_vadc_read(enum qpnp_vadc_channels channel,
+int32_t qpnp_vadc_read(struct qpnp_vadc_chip *vadc,
+				enum qpnp_vadc_channels channel,
 				struct qpnp_vadc_result *result)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
-	enum qpnp_vadc_channels;
 	struct qpnp_vadc_result die_temp_result;
 	int rc = 0;
 
 	if (channel == VBAT_SNS) {
-		rc = qpnp_vadc_conv_seq_request(ADC_SEQ_NONE,
+		rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
 				channel, result);
 		if (rc < 0) {
 			pr_err("Error reading vbatt\n");
 			return rc;
 		}
 
-		rc = qpnp_vadc_conv_seq_request(ADC_SEQ_NONE,
+		rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
 				DIE_TEMP, &die_temp_result);
 		if (rc < 0) {
 			pr_err("Error reading die_temp\n");
@@ -922,41 +952,37 @@
 
 		return 0;
 	} else
-		return qpnp_vadc_conv_seq_request(ADC_SEQ_NONE,
+		return qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
 				channel, result);
 }
 EXPORT_SYMBOL(qpnp_vadc_read);
 
-static void qpnp_vadc_lock(void)
+static void qpnp_vadc_lock(struct qpnp_vadc_chip *vadc)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
-
 	mutex_lock(&vadc->adc->adc_lock);
 }
 
-static void qpnp_vadc_unlock(void)
+static void qpnp_vadc_unlock(struct qpnp_vadc_chip *vadc)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
-
 	mutex_unlock(&vadc->adc->adc_lock);
 }
 
-int32_t qpnp_vadc_iadc_sync_request(enum qpnp_vadc_channels channel)
+int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *vadc,
+				enum qpnp_vadc_channels channel)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
 	int rc = 0, dt_index = 0;
 
-	if (!vadc || !vadc->vadc_initialized)
+	if (qpnp_vadc_is_valid(vadc))
 		return -EPROBE_DEFER;
 
-	qpnp_vadc_lock();
+	qpnp_vadc_lock(vadc);
 
 	if (!vadc->vadc_init_calib) {
-		rc = qpnp_vadc_version_check();
+		rc = qpnp_vadc_version_check(vadc);
 		if (rc)
 			goto fail;
 
-		rc = qpnp_vadc_calib_device();
+		rc = qpnp_vadc_calib_device(vadc);
 		if (rc) {
 			pr_err("Calibration failed\n");
 			goto fail;
@@ -986,7 +1012,7 @@
 					<< QPNP_VADC_OP_MODE_SHIFT);
 	vadc->vadc_iadc_sync_lock = true;
 
-	rc = qpnp_vadc_configure(vadc->adc->amux_prop);
+	rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
 	if (rc) {
 		pr_err("qpnp vadc configure failed with %d\n", rc);
 		goto fail;
@@ -995,15 +1021,15 @@
 	return rc;
 fail:
 	vadc->vadc_iadc_sync_lock = false;
-	qpnp_vadc_unlock();
+	qpnp_vadc_unlock(vadc);
 	return rc;
 }
 EXPORT_SYMBOL(qpnp_vadc_iadc_sync_request);
 
-int32_t qpnp_vadc_iadc_sync_complete_request(enum qpnp_vadc_channels channel,
+int32_t qpnp_vadc_iadc_sync_complete_request(struct qpnp_vadc_chip *vadc,
+					enum qpnp_vadc_channels channel,
 						struct qpnp_vadc_result *result)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
 	int rc = 0, scale_type, amux_prescaling, dt_index = 0;
 
 	vadc->adc->amux_prop->amux_channel = channel;
@@ -1012,7 +1038,7 @@
 		!= channel) && (dt_index < vadc->max_channels_available))
 		dt_index++;
 
-	rc = qpnp_vadc_read_conversion_result(&result->adc_code);
+	rc = qpnp_vadc_read_conversion_result(vadc, &result->adc_code);
 	if (rc) {
 		pr_err("qpnp vadc read adc code failed with %d\n", rc);
 		goto fail;
@@ -1037,12 +1063,12 @@
 		goto fail;
 	}
 
-	vadc_scale_fn[scale_type].chan(result->adc_code,
+	vadc_scale_fn[scale_type].chan(vadc, result->adc_code,
 		vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
 
 fail:
 	vadc->vadc_iadc_sync_lock = false;
-	qpnp_vadc_unlock();
+	qpnp_vadc_unlock(vadc);
 	return rc;
 }
 EXPORT_SYMBOL(qpnp_vadc_iadc_sync_complete_request);
@@ -1051,10 +1077,11 @@
 			struct device_attribute *devattr, char *buf)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(dev);
 	struct qpnp_vadc_result result;
 	int rc = -1;
 
-	rc = qpnp_vadc_read(attr->index, &result);
+	rc = qpnp_vadc_read(vadc, attr->index, &result);
 
 	if (rc) {
 		pr_err("VADC read error with %d\n", rc);
@@ -1068,9 +1095,9 @@
 static struct sensor_device_attribute qpnp_adc_attr =
 	SENSOR_ATTR(NULL, S_IRUGO, qpnp_adc_show, NULL, 0);
 
-static int32_t qpnp_vadc_init_hwmon(struct spmi_device *spmi)
+static int32_t qpnp_vadc_init_hwmon(struct qpnp_vadc_chip *vadc,
+					struct spmi_device *spmi)
 {
-	struct qpnp_vadc_drv *vadc = qpnp_vadc;
 	struct device_node *child;
 	struct device_node *node = spmi->dev.of_node;
 	int rc = 0, i = 0, channel;
@@ -1102,21 +1129,13 @@
 
 static int __devinit qpnp_vadc_probe(struct spmi_device *spmi)
 {
-	struct qpnp_vadc_drv *vadc;
+	struct qpnp_vadc_chip *vadc;
 	struct qpnp_adc_drv *adc_qpnp;
 	struct device_node *node = spmi->dev.of_node;
 	struct device_node *child;
-	int rc, count_adc_channel_list = 0;
+	int rc, count_adc_channel_list = 0, i = 0;
 	u8 fab_id = 0;
 
-	if (!node)
-		return -EINVAL;
-
-	if (qpnp_vadc) {
-		pr_err("VADC already in use\n");
-		return -EBUSY;
-	}
-
 	for_each_child_of_node(node, child)
 		count_adc_channel_list++;
 
@@ -1125,7 +1144,7 @@
 		return -EINVAL;
 	}
 
-	vadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_vadc_drv) +
+	vadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_vadc_chip) +
 		(sizeof(struct sensor_device_attribute) *
 				count_adc_channel_list), GFP_KERNEL);
 	if (!vadc) {
@@ -1133,20 +1152,19 @@
 		return -ENOMEM;
 	}
 
+	vadc->dev = &(spmi->dev);
 	adc_qpnp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_drv),
 			GFP_KERNEL);
 	if (!adc_qpnp) {
 		dev_err(&spmi->dev, "Unable to allocate memory\n");
-		rc = -ENOMEM;
-		goto fail;
+		return -ENOMEM;
 	}
 
 	vadc->adc = adc_qpnp;
-
 	rc = qpnp_adc_get_devicetree_data(spmi, vadc->adc);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to read device tree\n");
-		goto fail;
+		return rc;
 	}
 	mutex_init(&vadc->adc->adc_lock);
 
@@ -1156,46 +1174,54 @@
 	if (rc) {
 		dev_err(&spmi->dev,
 			"failed to request adc irq with error %d\n", rc);
-		goto fail;
+		return rc;
 	} else {
 		enable_irq_wake(vadc->adc->adc_irq_eoc);
 	}
 
-	qpnp_vadc = vadc;
-	dev_set_drvdata(&spmi->dev, vadc);
-	rc = qpnp_vadc_init_hwmon(spmi);
+	rc = qpnp_vadc_init_hwmon(vadc, spmi);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to initialize qpnp hwmon adc\n");
-		goto fail;
+		return rc;
 	}
 	vadc->vadc_hwmon = hwmon_device_register(&vadc->adc->spmi->dev);
 	vadc->vadc_init_calib = false;
 	vadc->max_channels_available = count_adc_channel_list;
-	rc = qpnp_vadc_read_reg(QPNP_INT_TEST_VAL, &fab_id);
+	rc = qpnp_vadc_read_reg(vadc, QPNP_INT_TEST_VAL, &fab_id);
 	if (rc < 0) {
 		pr_err("qpnp adc comp id failed with %d\n", rc);
-		goto fail;
+		goto err_setup;
 	}
 	vadc->id = fab_id;
 
-	rc = qpnp_vadc_warm_rst_configure();
+	rc = qpnp_vadc_warm_rst_configure(vadc);
 	if (rc < 0) {
 		pr_err("Setting perp reset on warm reset failed %d\n", rc);
-		goto fail;
+		goto err_setup;
 	}
 
-	vadc->vadc_initialized = true;
+	INIT_WORK(&vadc->trigger_completion_work, qpnp_vadc_work);
 	vadc->vadc_iadc_sync_lock = false;
 
+	dev_set_drvdata(&spmi->dev, vadc);
+	list_add(&vadc->list, &qpnp_vadc_device_list);
+
 	return 0;
-fail:
-	qpnp_vadc = NULL;
+
+err_setup:
+	for_each_child_of_node(node, child) {
+		device_remove_file(&spmi->dev,
+			&vadc->sens_attr[i].dev_attr);
+		i++;
+	}
+	hwmon_device_unregister(vadc->vadc_hwmon);
+
 	return rc;
 }
 
 static int __devexit qpnp_vadc_remove(struct spmi_device *spmi)
 {
-	struct qpnp_vadc_drv *vadc = dev_get_drvdata(&spmi->dev);
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(&spmi->dev);
 	struct device_node *node = spmi->dev.of_node;
 	struct device_node *child;
 	int i = 0;
@@ -1205,7 +1231,8 @@
 			&vadc->sens_attr[i].dev_attr);
 		i++;
 	}
-	vadc->vadc_initialized = false;
+	hwmon_device_unregister(vadc->vadc_hwmon);
+	list_del(&vadc->list);
 	dev_set_drvdata(&spmi->dev, NULL);
 
 	return 0;
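
The VADC conversion mirrors the IADC one: qpnp_vadc_is_ready() is replaced by the per-chip handle returned from qpnp_get_vadc(), and that handle is passed to qpnp_vadc_read() and the sync-request calls. A minimal client sketch, again with an illustrative consumer name; the DIE_TEMP channel and the physical result field are already used elsewhere in this patch.

	/* Sketch only: hypothetical client probe; "client" must match a
	 * qcom,client-vadc phandle in the consumer's DT node.
	 */
	static int example_vadc_client_probe(struct spmi_device *spmi)
	{
		struct qpnp_vadc_chip *vadc;
		struct qpnp_vadc_result v_result;
		int rc;

		vadc = qpnp_get_vadc(&spmi->dev, "client");
		if (IS_ERR(vadc))
			return PTR_ERR(vadc);	/* -ENODEV or -EPROBE_DEFER */

		rc = qpnp_vadc_read(vadc, DIE_TEMP, &v_result);
		if (!rc)
			pr_debug("die temp: %lld\n", v_result.physical);
		return rc;
	}
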
diff --git a/drivers/input/misc/cm36283.c b/drivers/input/misc/cm36283.c
new file mode 100644
index 0000000..6280013
--- /dev/null
+++ b/drivers/input/misc/cm36283.c
@@ -0,0 +1,1658 @@
+/* drivers/input/misc/cm36283.c - cm36283 optical sensors driver
+ *
+ * Copyright (C) 2012 Capella Microsystems Inc.
+ * Author: Frank Hsieh <pengyueh@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/miscdevice.h>
+#include <linux/lightsensor.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/mach-types.h>
+#include <linux/cm36283.h>
+#include <linux/capella_cm3602.h>
+#include <asm/setup.h>
+#include <linux/wakelock.h>
+#include <linux/jiffies.h>
+
+#define D(x...) pr_info(x)
+
+#define I2C_RETRY_COUNT 10
+
+#define NEAR_DELAY_TIME ((100 * HZ) / 1000)
+
+#define CONTROL_INT_ISR_REPORT        0x00
+#define CONTROL_ALS                   0x01
+#define CONTROL_PS                    0x02
+
+static int record_init_fail = 0;
+static void sensor_irq_do_work(struct work_struct *work);
+static DECLARE_WORK(sensor_irq_work, sensor_irq_do_work);
+
+struct cm36283_info {
+	struct class *cm36283_class;
+	struct device *ls_dev;
+	struct device *ps_dev;
+
+	struct input_dev *ls_input_dev;
+	struct input_dev *ps_input_dev;
+
+	struct early_suspend early_suspend;
+	struct i2c_client *i2c_client;
+	struct workqueue_struct *lp_wq;
+
+	int intr_pin;
+	int als_enable;
+	int ps_enable;
+	int ps_irq_flag;
+
+	uint16_t *adc_table;
+	uint16_t cali_table[10];
+	int irq;
+
+	int ls_calibrate;
+	
+	int (*power)(int, uint8_t); /* power to the chip */
+
+	uint32_t als_kadc;
+	uint32_t als_gadc;
+	uint16_t golden_adc;
+
+	struct wake_lock ps_wake_lock;
+	int psensor_opened;
+	int lightsensor_opened;
+	uint8_t slave_addr;
+
+	uint8_t ps_close_thd_set;
+	uint8_t ps_away_thd_set;	
+	int current_level;
+	uint16_t current_adc;
+
+	uint16_t ps_conf1_val;
+	uint16_t ps_conf3_val;
+
+	uint16_t ls_cmd;
+	uint8_t record_clear_int_fail;
+};
+struct cm36283_info *lp_info;
+int fLevel=-1;
+static struct mutex als_enable_mutex, als_disable_mutex, als_get_adc_mutex;
+static struct mutex ps_enable_mutex, ps_disable_mutex, ps_get_adc_mutex;
+static struct mutex CM36283_control_mutex;
+static int lightsensor_enable(struct cm36283_info *lpi);
+static int lightsensor_disable(struct cm36283_info *lpi);
+static int initial_cm36283(struct cm36283_info *lpi);
+static void psensor_initial_cmd(struct cm36283_info *lpi);
+
+int32_t als_kadc;
+
+static int control_and_report(struct cm36283_info *lpi, uint8_t mode, uint16_t param);
+
+static int I2C_RxData(uint16_t slaveAddr, uint8_t cmd, uint8_t *rxData, int length)
+{
+	uint8_t loop_i;
+	int val;
+	struct cm36283_info *lpi = lp_info;
+  uint8_t subaddr[1];
+
+	struct i2c_msg msgs[] = {
+		{
+		 .addr = slaveAddr,
+		 .flags = 0,
+		 .len = 1,
+		 .buf = subaddr,
+		 },
+		{
+		 .addr = slaveAddr,
+		 .flags = I2C_M_RD,
+		 .len = length,
+		 .buf = rxData,
+		 },		 
+	};
+  subaddr[0] = cmd;
+
+	for (loop_i = 0; loop_i < I2C_RETRY_COUNT; loop_i++) {
+
+		if (i2c_transfer(lp_info->i2c_client->adapter, msgs, 2) > 0)
+			break;
+
+		val = gpio_get_value(lpi->intr_pin);
+		/*check intr GPIO when i2c error*/
+		if (loop_i == 0 || loop_i == I2C_RETRY_COUNT -1)
+			D("[PS][CM36283 error] %s, i2c err, slaveAddr 0x%x ISR gpio %d  = %d, record_init_fail %d \n",
+				__func__, slaveAddr, lpi->intr_pin, val, record_init_fail);
+
+		msleep(10);
+	}
+	if (loop_i >= I2C_RETRY_COUNT) {
+		printk(KERN_ERR "[PS_ERR][CM36283 error] %s retry over %d\n",
+			__func__, I2C_RETRY_COUNT);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int I2C_TxData(uint16_t slaveAddr, uint8_t *txData, int length)
+{
+	uint8_t loop_i;
+	int val;
+	struct cm36283_info *lpi = lp_info;
+	struct i2c_msg msg[] = {
+		{
+		 .addr = slaveAddr,
+		 .flags = 0,
+		 .len = length,
+		 .buf = txData,
+		 },
+	};
+
+	for (loop_i = 0; loop_i < I2C_RETRY_COUNT; loop_i++) {
+		if (i2c_transfer(lp_info->i2c_client->adapter, msg, 1) > 0)
+			break;
+
+		val = gpio_get_value(lpi->intr_pin);
+		/*check intr GPIO when i2c error*/
+		if (loop_i == 0 || loop_i == I2C_RETRY_COUNT -1)
+			D("[PS][CM36283 error] %s, i2c err, slaveAddr 0x%x, value 0x%x, ISR gpio%d  = %d, record_init_fail %d\n",
+				__func__, slaveAddr, txData[0], lpi->intr_pin, val, record_init_fail);
+
+		msleep(10);
+	}
+
+	if (loop_i >= I2C_RETRY_COUNT) {
+		printk(KERN_ERR "[PS_ERR][CM36283 error] %s retry over %d\n",
+			__func__, I2C_RETRY_COUNT);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int _cm36283_I2C_Read_Word(uint16_t slaveAddr, uint8_t cmd, uint16_t *pdata)
+{
+	uint8_t buffer[2];
+	int ret = 0;
+
+	if (pdata == NULL)
+		return -EFAULT;
+
+	ret = I2C_RxData(slaveAddr, cmd, buffer, 2);
+	if (ret < 0) {
+		pr_err(
+			"[PS_ERR][CM3218 error]%s: I2C_RxData fail [0x%x, 0x%x]\n",
+			__func__, slaveAddr, cmd);
+		return ret;
+	}
+
+	*pdata = (buffer[1]<<8)|buffer[0];
+#if 0
+	/* Debug use */
+	printk(KERN_DEBUG "[CM3218] %s: I2C_RxData[0x%x, 0x%x] = 0x%x\n",
+		__func__, slaveAddr, cmd, *pdata);
+#endif
+	return ret;
+}
+
+static int _cm36283_I2C_Write_Word(uint16_t SlaveAddress, uint8_t cmd, uint16_t data)
+{
+	char buffer[3];
+	int ret = 0;
+#if 0
+	/* Debug use */
+	printk(KERN_DEBUG
+	"[CM3218] %s: _cm36283_I2C_Write_Word[0x%x, 0x%x, 0x%x]\n",
+		__func__, SlaveAddress, cmd, data);
+#endif
+	buffer[0] = cmd;
+	buffer[1] = (uint8_t)(data&0xff);
+	buffer[2] = (uint8_t)((data&0xff00)>>8);	
+	
+	ret = I2C_TxData(SlaveAddress, buffer, 3);
+	if (ret < 0) {
+		pr_err("[PS_ERR][CM3218 error]%s: I2C_TxData fail\n", __func__);
+		return -EIO;
+	}
+
+	return ret;
+}
+
+static int get_ls_adc_value(uint16_t *als_step, bool resume)
+{
+	struct cm36283_info *lpi = lp_info;
+	uint32_t tmpResult;
+	int ret = 0;
+
+	if (als_step == NULL)
+		return -EFAULT;
+
+	/* Read ALS data: */
+	ret = _cm36283_I2C_Read_Word(lpi->slave_addr, ALS_DATA, als_step);
+	if (ret < 0) {
+		pr_err(
+			"[LS][CM3218 error]%s: _cm36283_I2C_Read_Word fail\n",
+			__func__);
+		return -EIO;
+	}
+
+  if (!lpi->ls_calibrate) {
+		tmpResult = (uint32_t)(*als_step) * lpi->als_gadc / lpi->als_kadc;
+		if (tmpResult > 0xFFFF)
+			*als_step = 0xFFFF;
+		else
+		  *als_step = tmpResult;  			
+	}
+
+	D("[LS][CM3218] %s: raw adc = 0x%X, ls_calibrate = %d\n",
+		__func__, *als_step, lpi->ls_calibrate);
+
+	return ret;
+}
+
+static int set_lsensor_range(uint16_t low_thd, uint16_t high_thd)
+{
+	int ret = 0;
+	struct cm36283_info *lpi = lp_info;
+
+	_cm36283_I2C_Write_Word(lpi->slave_addr, ALS_THDH, high_thd);
+	_cm36283_I2C_Write_Word(lpi->slave_addr, ALS_THDL, low_thd);
+
+	return ret;
+}
+
+static int get_ps_adc_value(uint16_t *data)
+{
+	int ret = 0;
+	struct cm36283_info *lpi = lp_info;
+
+	if (data == NULL)
+		return -EFAULT;	
+
+	ret = _cm36283_I2C_Read_Word(lpi->slave_addr, PS_DATA, data);
+	
+	(*data) &= 0xFF;
+	
+	if (ret < 0) {
+		pr_err(
+			"[PS][CM36283 error]%s: _cm36283_I2C_Read_Word fail\n",
+			__func__);
+		return -EIO;
+	} else {
+		pr_err(
+			"[PS][CM36283 OK]%s: _cm36283_I2C_Read_Word OK 0x%x\n",
+			__func__, *data);
+	}
+
+	return ret;
+}
+
+static uint16_t mid_value(uint16_t value[], uint8_t size)
+{
+	int i = 0, j = 0;
+	uint16_t temp = 0;
+
+	if (size < 3)
+		return 0;
+
+	for (i = 0; i < (size - 1); i++)
+		for (j = (i + 1); j < size; j++)
+			if (value[i] > value[j]) {
+				temp = value[i];
+				value[i] = value[j];
+				value[j] = temp;
+			}
+	return value[((size - 1) / 2)];
+}
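+/* Example: mid_value({7, 2, 5}, 3) bubble-sorts the array in place to
+ * {2, 5, 7} and returns the middle element, 5 (values are illustrative). */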
+
+static int get_stable_ps_adc_value(uint16_t *ps_adc)
+{
+	uint16_t value[3] = {0, 0, 0}, mid_val = 0;
+	int ret = 0;
+	int i = 0;
+	int wait_count = 0;
+	struct cm36283_info *lpi = lp_info;
+
+	for (i = 0; i < 3; i++) {
+		/*wait interrupt GPIO high*/
+		while (gpio_get_value(lpi->intr_pin) == 0) {
+			msleep(10);
+			wait_count++;
+			if (wait_count > 12) {
+				pr_err("[PS_ERR][CM36283 error]%s: interrupt GPIO low,"
+					" get_ps_adc_value\n", __func__);
+				return -EIO;
+			}
+		}
+
+		ret = get_ps_adc_value(&value[i]);
+		if (ret < 0) {
+			pr_err("[PS_ERR][CM36283 error]%s: get_ps_adc_value\n",
+				__func__);
+			return -EIO;
+		}
+
+		if (wait_count < 60/10) {/*wait gpio less than 60ms*/
+			msleep(60 - (10*wait_count));
+		}
+		wait_count = 0;
+	}
+
+	/*D("Sta_ps: Before sort, value[0, 1, 2] = [0x%x, 0x%x, 0x%x]",
+		value[0], value[1], value[2]);*/
+	mid_val = mid_value(value, 3);
+	D("Sta_ps: After sort, value[0, 1, 2] = [0x%x, 0x%x, 0x%x]",
+		value[0], value[1], value[2]);
+	*ps_adc = (mid_val & 0xFF);
+
+	return 0;
+}
+
+static void sensor_irq_do_work(struct work_struct *work)
+{
+	struct cm36283_info *lpi = lp_info;
+	uint16_t intFlag;
+  _cm36283_I2C_Read_Word(lpi->slave_addr, INT_FLAG, &intFlag);
+	control_and_report(lpi, CONTROL_INT_ISR_REPORT, intFlag);  
+	  
+	enable_irq(lpi->irq);
+}
+
+static irqreturn_t cm36283_irq_handler(int irq, void *data)
+{
+	struct cm36283_info *lpi = data;
+
+	disable_irq_nosync(lpi->irq);
+	queue_work(lpi->lp_wq, &sensor_irq_work);
+
+	return IRQ_HANDLED;
+}
+
+static int als_power(int enable)
+{
+	struct cm36283_info *lpi = lp_info;
+
+	if (lpi->power)
+		lpi->power(LS_PWR_ON, 1);
+
+	return 0;
+}
+
+static void ls_initial_cmd(struct cm36283_info *lpi)
+{	
+	/*must disable l-sensor interrupt before IST create*/ /*disable ALS func*/
+	lpi->ls_cmd &= CM36283_ALS_INT_MASK;
+  lpi->ls_cmd |= CM36283_ALS_SD;
+  _cm36283_I2C_Write_Word(lpi->slave_addr, ALS_CONF, lpi->ls_cmd);  
+}
+
+static void psensor_initial_cmd(struct cm36283_info *lpi)
+{
+	/*must disable p-sensor interrupt before IST create*/ /*disable PS func*/
+  lpi->ps_conf1_val |= CM36283_PS_SD;
+  lpi->ps_conf1_val &= CM36283_PS_INT_MASK;  
+  _cm36283_I2C_Write_Word(lpi->slave_addr, PS_CONF1, lpi->ps_conf1_val);   
+  _cm36283_I2C_Write_Word(lpi->slave_addr, PS_CONF3, lpi->ps_conf3_val);
+  _cm36283_I2C_Write_Word(lpi->slave_addr, PS_THD, (lpi->ps_close_thd_set <<8)| lpi->ps_away_thd_set);
+
+	D("[PS][CM36283] %s, finish\n", __func__);	
+}
+
+static int psensor_enable(struct cm36283_info *lpi)
+{
+	int ret = -EIO;
+	
+	mutex_lock(&ps_enable_mutex);
+	D("[PS][CM36283] %s\n", __func__);
+
+	if ( lpi->ps_enable ) {
+		D("[PS][CM36283] %s: already enabled\n", __func__);
+		ret = 0;
+	} else
+  	ret = control_and_report(lpi, CONTROL_PS, 1);
+	
+	mutex_unlock(&ps_enable_mutex);
+	return ret;
+}
+
+static int psensor_disable(struct cm36283_info *lpi)
+{
+	int ret = -EIO;
+	
+	mutex_lock(&ps_disable_mutex);
+	D("[PS][CM36283] %s\n", __func__);
+
+	if ( lpi->ps_enable == 0 ) {
+		D("[PS][CM36283] %s: already disabled\n", __func__);
+		ret = 0;
+	} else
+  	ret = control_and_report(lpi, CONTROL_PS,0);
+	
+	mutex_unlock(&ps_disable_mutex);
+	return ret;
+}
+
+static int psensor_open(struct inode *inode, struct file *file)
+{
+	struct cm36283_info *lpi = lp_info;
+
+	D("[PS][CM36283] %s\n", __func__);
+
+	if (lpi->psensor_opened)
+		return -EBUSY;
+
+	lpi->psensor_opened = 1;
+
+	return 0;
+}
+
+static int psensor_release(struct inode *inode, struct file *file)
+{
+	struct cm36283_info *lpi = lp_info;
+
+	D("[PS][CM36283] %s\n", __func__);
+
+	lpi->psensor_opened = 0;
+
+	return psensor_disable(lpi);
+	//return 0;
+}
+
+static long psensor_ioctl(struct file *file, unsigned int cmd,
+			unsigned long arg)
+{
+	int val;
+	struct cm36283_info *lpi = lp_info;
+
+	D("[PS][CM36283] %s cmd %d\n", __func__, _IOC_NR(cmd));
+
+	switch (cmd) {
+	case CAPELLA_CM3602_IOCTL_ENABLE:
+		if (get_user(val, (unsigned long __user *)arg))
+			return -EFAULT;
+		if (val)
+			return psensor_enable(lpi);
+		else
+			return psensor_disable(lpi);
+		break;
+	case CAPELLA_CM3602_IOCTL_GET_ENABLED:
+		return put_user(lpi->ps_enable, (unsigned long __user *)arg);
+		break;
+	default:
+		pr_err("[PS][CM36283 error]%s: invalid cmd %d\n",
+			__func__, _IOC_NR(cmd));
+		return -EINVAL;
+	}
+}
+
+static const struct file_operations psensor_fops = {
+	.owner = THIS_MODULE,
+	.open = psensor_open,
+	.release = psensor_release,
+	.unlocked_ioctl = psensor_ioctl
+};
+
+struct miscdevice psensor_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "proximity",
+	.fops = &psensor_fops
+};
+
+void lightsensor_set_kvalue(struct cm36283_info *lpi)
+{
+	if (!lpi) {
+		pr_err("[LS][CM36283 error]%s: ls_info is empty\n", __func__);
+		return;
+	}
+
+	D("[LS][CM36283] %s: ALS calibrated als_kadc=0x%x\n",
+			__func__, als_kadc);
+
+	if (als_kadc >> 16 == ALS_CALIBRATED)
+		lpi->als_kadc = als_kadc & 0xFFFF;
+	else {
+		lpi->als_kadc = 0;
+		D("[LS][CM36283] %s: no ALS calibrated\n", __func__);
+	}
+
+	if (lpi->als_kadc && lpi->golden_adc > 0) {
+		lpi->als_kadc = (lpi->als_kadc > 0 && lpi->als_kadc < 0x1000) ?
+				lpi->als_kadc : lpi->golden_adc;
+		lpi->als_gadc = lpi->golden_adc;
+	} else {
+		lpi->als_kadc = 1;
+		lpi->als_gadc = 1;
+	}
+	D("[LS][CM36283] %s: als_kadc=0x%x, als_gadc=0x%x\n",
+		__func__, lpi->als_kadc, lpi->als_gadc);
+}
+
+
+static int lightsensor_update_table(struct cm36283_info *lpi)
+{
+	uint32_t tmpData[10];
+	int i;
+	for (i = 0; i < 10; i++) {
+		tmpData[i] = (uint32_t)(*(lpi->adc_table + i))
+				* lpi->als_kadc / lpi->als_gadc ;
+		if( tmpData[i] <= 0xFFFF ){
+      lpi->cali_table[i] = (uint16_t) tmpData[i];		
+    } else {
+      lpi->cali_table[i] = 0xFFFF;    
+    }         
+		D("[LS][CM36283] %s: Calibrated adc_table: data[%d], %x\n",
+			__func__, i, lpi->cali_table[i]);
+	}
+
+	return 0;
+}
+
+
+static int lightsensor_enable(struct cm36283_info *lpi)
+{
+	int ret = -EIO;
+	
+	mutex_lock(&als_enable_mutex);
+	D("[LS][CM36283] %s\n", __func__);
+
+	if (lpi->als_enable) {
+		D("[LS][CM36283] %s: already enabled\n", __func__);
+		ret = 0;
+	} else
+  	ret = control_and_report(lpi, CONTROL_ALS, 1);
+	
+	mutex_unlock(&als_enable_mutex);
+	return ret;
+}
+
+static int lightsensor_disable(struct cm36283_info *lpi)
+{
+	int ret = -EIO;
+	mutex_lock(&als_disable_mutex);
+	D("[LS][CM36283] %s\n", __func__);
+
+	if ( lpi->als_enable == 0 ) {
+		D("[LS][CM36283] %s: already disabled\n", __func__);
+		ret = 0;
+	} else
+    ret = control_and_report(lpi, CONTROL_ALS, 0);
+	
+	mutex_unlock(&als_disable_mutex);
+	return ret;
+}
+
+static int lightsensor_open(struct inode *inode, struct file *file)
+{
+	struct cm36283_info *lpi = lp_info;
+	int rc = 0;
+
+	D("[LS][CM36283] %s\n", __func__);
+	if (lpi->lightsensor_opened) {
+		pr_err("[LS][CM36283 error]%s: already opened\n", __func__);
+		rc = -EBUSY;
+	}
+	lpi->lightsensor_opened = 1;
+	return rc;
+}
+
+static int lightsensor_release(struct inode *inode, struct file *file)
+{
+	struct cm36283_info *lpi = lp_info;
+
+	D("[LS][CM36283] %s\n", __func__);
+	lpi->lightsensor_opened = 0;
+	return 0;
+}
+
+static long lightsensor_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	int rc, val;
+	struct cm36283_info *lpi = lp_info;
+
+	/*D("[CM36283] %s cmd %d\n", __func__, _IOC_NR(cmd));*/
+
+	switch (cmd) {
+	case LIGHTSENSOR_IOCTL_ENABLE:
+		if (get_user(val, (unsigned long __user *)arg)) {
+			rc = -EFAULT;
+			break;
+		}
+		D("[LS][CM36283] %s LIGHTSENSOR_IOCTL_ENABLE, value = %d\n",
+			__func__, val);
+		rc = val ? lightsensor_enable(lpi) : lightsensor_disable(lpi);
+		break;
+	case LIGHTSENSOR_IOCTL_GET_ENABLED:
+		val = lpi->als_enable;
+		D("[LS][CM36283] %s LIGHTSENSOR_IOCTL_GET_ENABLED, enabled %d\n",
+			__func__, val);
+		rc = put_user(val, (unsigned long __user *)arg);
+		break;
+	default:
+		pr_err("[LS][CM36283 error]%s: invalid cmd %d\n",
+			__func__, _IOC_NR(cmd));
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static const struct file_operations lightsensor_fops = {
+	.owner = THIS_MODULE,
+	.open = lightsensor_open,
+	.release = lightsensor_release,
+	.unlocked_ioctl = lightsensor_ioctl
+};
+
+static struct miscdevice lightsensor_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "lightsensor",
+	.fops = &lightsensor_fops
+};
+
+
+static ssize_t ps_adc_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+
+	uint16_t value;
+	int ret;
+	struct cm36283_info *lpi = lp_info;
+	int intr_val;
+
+	intr_val = gpio_get_value(lpi->intr_pin);
+
+	get_ps_adc_value(&value);
+
+	ret = sprintf(buf, "ADC[0x%04X], ENABLE = %d, intr_pin = %d\n", value, lpi->ps_enable, intr_val);
+
+	return ret;
+}
+
+static ssize_t ps_enable_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int ps_en;
+	struct cm36283_info *lpi = lp_info;
+
+	ps_en = -1;
+	sscanf(buf, "%d", &ps_en);
+
+	if (ps_en != 0 && ps_en != 1
+		&& ps_en != 10 && ps_en != 13 && ps_en != 16)
+		return -EINVAL;
+
+	if (ps_en) {
+		D("[PS][CM36283] %s: ps_en=%d\n",
+			__func__, ps_en);
+		psensor_enable(lpi);
+	} else
+		psensor_disable(lpi);
+
+	D("[PS][CM36283] %s\n", __func__);
+
+	return count;
+}
+
+static DEVICE_ATTR(ps_adc, 0664, ps_adc_show, ps_enable_store);
+
+unsigned PS_cmd_test_value;
+static ssize_t ps_parameters_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct cm36283_info *lpi = lp_info;
+
+	ret = sprintf(buf, "PS_close_thd_set = 0x%x, PS_away_thd_set = 0x%x, PS_cmd_cmd:value = 0x%x\n",
+		lpi->ps_close_thd_set, lpi->ps_away_thd_set, PS_cmd_test_value);
+
+	return ret;
+}
+
+static ssize_t ps_parameters_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+
+	struct cm36283_info *lpi = lp_info;
+	char *token[10];
+	int i;
+
+	printk(KERN_INFO "[PS][CM36283] %s\n", buf);
+	for (i = 0; i < 3; i++)
+		token[i] = strsep((char **)&buf, " ");
+
+	lpi->ps_close_thd_set = simple_strtoul(token[0], NULL, 16);
+	lpi->ps_away_thd_set = simple_strtoul(token[1], NULL, 16);	
+	PS_cmd_test_value = simple_strtoul(token[2], NULL, 16);
+	printk(KERN_INFO
+		"[PS][CM36283]Set PS_close_thd_set = 0x%x, PS_away_thd_set = 0x%x, PS_cmd_cmd:value = 0x%x\n",
+		lpi->ps_close_thd_set, lpi->ps_away_thd_set, PS_cmd_test_value);
+
+	D("[PS][CM36283] %s\n", __func__);
+
+	return count;
+}
+
+static DEVICE_ATTR(ps_parameters, 0664,
+	ps_parameters_show, ps_parameters_store);
+
+
+static ssize_t ps_conf_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct cm36283_info *lpi = lp_info;
+	return sprintf(buf, "PS_CONF1 = 0x%x, PS_CONF3 = 0x%x\n", lpi->ps_conf1_val, lpi->ps_conf3_val);
+}
+static ssize_t ps_conf_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int code1, code2;
+	struct cm36283_info *lpi = lp_info;
+
+	sscanf(buf, "0x%x 0x%x", &code1, &code2);
+
+	D("[PS]%s: store value PS conf1 reg = 0x%x PS conf3 reg = 0x%x\n", __func__, code1, code2);
+
+  lpi->ps_conf1_val = code1;
+  lpi->ps_conf3_val = code2;
+
+	_cm36283_I2C_Write_Word(lpi->slave_addr, PS_CONF3, lpi->ps_conf3_val );  
+	_cm36283_I2C_Write_Word(lpi->slave_addr, PS_CONF1, lpi->ps_conf1_val );
+
+	return count;
+}
+static DEVICE_ATTR(ps_conf, 0664, ps_conf_show, ps_conf_store);
+
+static ssize_t ps_thd_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct cm36283_info *lpi = lp_info;
+  ret = sprintf(buf, "%s ps_close_thd_set = 0x%x, ps_away_thd_set = 0x%x\n", __func__, lpi->ps_close_thd_set, lpi->ps_away_thd_set);
+  return ret;	
+}
+static ssize_t ps_thd_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int code;
+	struct cm36283_info *lpi = lp_info;
+
+	sscanf(buf, "0x%x", &code);
+
+	D("[PS]%s: store value = 0x%x\n", __func__, code);
+
+	lpi->ps_away_thd_set = code &0xFF;
+	lpi->ps_close_thd_set = (code &0xFF00)>>8;	
+
+	D("[PS]%s: ps_close_thd_set = 0x%x, ps_away_thd_set = 0x%x\n", __func__, lpi->ps_close_thd_set, lpi->ps_away_thd_set);
+
+	return count;
+}
+static DEVICE_ATTR(ps_thd, 0664, ps_thd_show, ps_thd_store);
+
+static ssize_t ps_hw_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int ret = 0;
+	struct cm36283_info *lpi = lp_info;
+
+	ret = sprintf(buf, "PS1: reg = 0x%x, PS3: reg = 0x%x, ps_close_thd_set = 0x%x, ps_away_thd_set = 0x%x\n",
+		lpi->ps_conf1_val, lpi->ps_conf3_val, lpi->ps_close_thd_set, lpi->ps_away_thd_set);
+
+	return ret;
+}
+static ssize_t ps_hw_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int code;
+//	struct cm36283_info *lpi = lp_info;
+
+	sscanf(buf, "0x%x", &code);
+
+	D("[PS]%s: store value = 0x%x\n", __func__, code);
+
+	return count;
+}
+static DEVICE_ATTR(ps_hw, 0664, ps_hw_show, ps_hw_store);
+
+static ssize_t ls_adc_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct cm36283_info *lpi = lp_info;
+
+	D("[LS][CM36283] %s: ADC = 0x%04X, Level = %d \n",
+		__func__, lpi->current_adc, lpi->current_level);
+	ret = sprintf(buf, "ADC[0x%04X] => level %d\n",
+		lpi->current_adc, lpi->current_level);
+
+	return ret;
+}
+
+static DEVICE_ATTR(ls_adc, 0664, ls_adc_show, NULL);
+
+static ssize_t ls_enable_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+
+	int ret = 0;
+	struct cm36283_info *lpi = lp_info;
+
+	ret = sprintf(buf, "Light sensor Auto Enable = %d\n",
+			lpi->als_enable);
+
+	return ret;
+}
+
+static ssize_t ls_enable_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	int ret = 0;
+	int ls_auto;
+	struct cm36283_info *lpi = lp_info;
+
+	ls_auto = -1;
+	sscanf(buf, "%d", &ls_auto);
+
+	if (ls_auto != 0 && ls_auto != 1 && ls_auto != 147)
+		return -EINVAL;
+
+	if (ls_auto) {
+		lpi->ls_calibrate = (ls_auto == 147) ? 1 : 0;
+		ret = lightsensor_enable(lpi);
+	} else {
+		lpi->ls_calibrate = 0;
+		ret = lightsensor_disable(lpi);
+	}
+
+	D("[LS][CM36283] %s: lpi->als_enable = %d, lpi->ls_calibrate = %d, ls_auto=%d\n",
+		__func__, lpi->als_enable, lpi->ls_calibrate, ls_auto);
+
+	if (ret < 0)
+		pr_err(
+		"[LS][CM36283 error]%s: set auto light sensor fail\n",
+		__func__);
+
+	return count;
+}
+
+static DEVICE_ATTR(ls_auto, 0664,
+	ls_enable_show, ls_enable_store);
+
+static ssize_t ls_kadc_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct cm36283_info *lpi = lp_info;
+	int ret;
+
+	ret = sprintf(buf, "kadc = 0x%x",
+			lpi->als_kadc);
+
+	return ret;
+}
+
+static ssize_t ls_kadc_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct cm36283_info *lpi = lp_info;
+	int kadc_temp = 0;
+
+	sscanf(buf, "%d", &kadc_temp);
+
+	mutex_lock(&als_get_adc_mutex);
+  if(kadc_temp != 0) {
+		lpi->als_kadc = kadc_temp;
+		if(  lpi->als_gadc != 0){
+  		if (lightsensor_update_table(lpi) < 0)
+				printk(KERN_ERR "[LS][CM36283 error] %s: update ls table fail\n", __func__);
+  	} else {
+			printk(KERN_INFO "[LS]%s: als_gadc =0x%x wait to be set\n",
+					__func__, lpi->als_gadc);
+  	}		
+	} else {
+		printk(KERN_INFO "[LS]%s: als_kadc can't be set to zero\n",
+				__func__);
+	}
+				
+	mutex_unlock(&als_get_adc_mutex);
+	return count;
+}
+
+static DEVICE_ATTR(ls_kadc, 0664, ls_kadc_show, ls_kadc_store);
+
+static ssize_t ls_gadc_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct cm36283_info *lpi = lp_info;
+	int ret;
+
+	ret = sprintf(buf, "gadc = 0x%x\n", lpi->als_gadc);
+
+	return ret;
+}
+
+static ssize_t ls_gadc_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct cm36283_info *lpi = lp_info;
+	int gadc_temp = 0;
+
+	sscanf(buf, "%d", &gadc_temp);
+	
+	mutex_lock(&als_get_adc_mutex);
+  if(gadc_temp != 0) {
+		lpi->als_gadc = gadc_temp;
+		if(  lpi->als_kadc != 0){
+  		if (lightsensor_update_table(lpi) < 0)
+				printk(KERN_ERR "[LS][CM36283 error] %s: update ls table fail\n", __func__);
+  	} else {
+			printk(KERN_INFO "[LS]%s: als_kadc =0x%x wait to be set\n",
+					__func__, lpi->als_kadc);
+  	}		
+	} else {
+		printk(KERN_INFO "[LS]%s: als_gadc can't be set to zero\n",
+				__func__);
+	}
+	mutex_unlock(&als_get_adc_mutex);
+	return count;
+}
+
+static DEVICE_ATTR(ls_gadc, 0664, ls_gadc_show, ls_gadc_store);
+
+static ssize_t ls_adc_table_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	unsigned length = 0;
+	int i;
+
+	for (i = 0; i < 10; i++) {
+		length += sprintf(buf + length,
+			"[CM36283]Get adc_table[%d] =  0x%x ; %d, Get cali_table[%d] =  0x%x ; %d, \n",
+			i, *(lp_info->adc_table + i),
+			*(lp_info->adc_table + i),
+			i, *(lp_info->cali_table + i),
+			*(lp_info->cali_table + i));
+	}
+	return length;
+}
+
+static ssize_t ls_adc_table_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+
+	struct cm36283_info *lpi = lp_info;
+	char *token[10];
+	uint16_t tempdata[10];
+	int i;
+
+	printk(KERN_INFO "[LS][CM36283]%s\n", buf);
+	for (i = 0; i < 10; i++) {
+		token[i] = strsep((char **)&buf, " ");
+		tempdata[i] = simple_strtoul(token[i], NULL, 16);
+		if (tempdata[i] < 1 || tempdata[i] > 0xffff) {
+			printk(KERN_ERR
+			"[LS][CM36283 error] adc_table[%d] =  0x%x Err\n",
+			i, tempdata[i]);
+			return count;
+		}
+	}
+	mutex_lock(&als_get_adc_mutex);
+	for (i = 0; i < 10; i++) {
+		lpi->adc_table[i] = tempdata[i];
+		printk(KERN_INFO
+		"[LS][CM36283]Set lpi->adc_table[%d] =  0x%x\n",
+		i, *(lp_info->adc_table + i));
+	}
+	if (lightsensor_update_table(lpi) < 0)
+		printk(KERN_ERR "[LS][CM36283 error] %s: update ls table fail\n",
+		__func__);
+	mutex_unlock(&als_get_adc_mutex);
+	D("[LS][CM36283] %s\n", __func__);
+
+	return count;
+}
+
+static DEVICE_ATTR(ls_adc_table, 0664,
+	ls_adc_table_show, ls_adc_table_store);
+
+static ssize_t ls_conf_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct cm36283_info *lpi = lp_info;
+	return sprintf(buf, "ALS_CONF = %x\n", lpi->ls_cmd);
+}
+static ssize_t ls_conf_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct cm36283_info *lpi = lp_info;
+	int value = 0;
+	sscanf(buf, "0x%x", &value);
+
+	lpi->ls_cmd = value;
+	printk(KERN_INFO "[LS]set ALS_CONF = %x\n", lpi->ls_cmd);
+	
+	_cm36283_I2C_Write_Word(lpi->slave_addr, ALS_CONF, lpi->ls_cmd);
+	return count;
+}
+static DEVICE_ATTR(ls_conf, 0664, ls_conf_show, ls_conf_store);
+
+static ssize_t ls_fLevel_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "fLevel = %d\n", fLevel);
+}
+static ssize_t ls_fLevel_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct cm36283_info *lpi = lp_info;
+	int value=0;
+	sscanf(buf, "%d", &value);
+	(value>=0)?(value=min(value,10)):(value=max(value,-1));
+	fLevel=value;
+	input_report_abs(lpi->ls_input_dev, ABS_MISC, fLevel);
+	input_sync(lpi->ls_input_dev);
+	printk(KERN_INFO "[LS]set fLevel = %d\n", fLevel);
+
+	msleep(1000);
+	fLevel=-1;
+	return count;
+}
+static DEVICE_ATTR(ls_flevel, 0664, ls_fLevel_show, ls_fLevel_store);
+
+static int lightsensor_setup(struct cm36283_info *lpi)
+{
+	int ret;
+
+	lpi->ls_input_dev = input_allocate_device();
+	if (!lpi->ls_input_dev) {
+		pr_err(
+			"[LS][CM36283 error]%s: could not allocate ls input device\n",
+			__func__);
+		return -ENOMEM;
+	}
+	lpi->ls_input_dev->name = "cm36283-ls";
+	set_bit(EV_ABS, lpi->ls_input_dev->evbit);
+	input_set_abs_params(lpi->ls_input_dev, ABS_MISC, 0, 9, 0, 0);
+
+	ret = input_register_device(lpi->ls_input_dev);
+	if (ret < 0) {
+		pr_err("[LS][CM36283 error]%s: can not register ls input device\n",
+				__func__);
+		goto err_free_ls_input_device;
+	}
+
+	ret = misc_register(&lightsensor_misc);
+	if (ret < 0) {
+		pr_err("[LS][CM36283 error]%s: can not register ls misc device\n",
+				__func__);
+		goto err_unregister_ls_input_device;
+	}
+
+	return ret;
+
+err_unregister_ls_input_device:
+	input_unregister_device(lpi->ls_input_dev);
+err_free_ls_input_device:
+	input_free_device(lpi->ls_input_dev);
+	return ret;
+}
+
+static int psensor_setup(struct cm36283_info *lpi)
+{
+	int ret;
+
+	lpi->ps_input_dev = input_allocate_device();
+	if (!lpi->ps_input_dev) {
+		pr_err(
+			"[PS][CM36283 error]%s: could not allocate ps input device\n",
+			__func__);
+		return -ENOMEM;
+	}
+	lpi->ps_input_dev->name = "cm36283-ps";
+	set_bit(EV_ABS, lpi->ps_input_dev->evbit);
+	input_set_abs_params(lpi->ps_input_dev, ABS_DISTANCE, 0, 1, 0, 0);
+
+	ret = input_register_device(lpi->ps_input_dev);
+	if (ret < 0) {
+		pr_err(
+			"[PS][CM36283 error]%s: could not register ps input device\n",
+			__func__);
+		goto err_free_ps_input_device;
+	}
+
+	ret = misc_register(&psensor_misc);
+	if (ret < 0) {
+		pr_err(
+			"[PS][CM36283 error]%s: could not register ps misc device\n",
+			__func__);
+		goto err_unregister_ps_input_device;
+	}
+
+	return ret;
+
+err_unregister_ps_input_device:
+	input_unregister_device(lpi->ps_input_dev);
+err_free_ps_input_device:
+	input_free_device(lpi->ps_input_dev);
+	return ret;
+}
+
+
+static int initial_cm36283(struct cm36283_info *lpi)
+{
+	int val, ret;
+	uint16_t idReg;
+
+	val = gpio_get_value(lpi->intr_pin);
+	D("[PS][CM36283] %s, INTERRUPT GPIO val = %d\n", __func__, val);
+
+	ret = _cm36283_I2C_Read_Word(lpi->slave_addr, ID_REG, &idReg);
+	if ((ret < 0) || (idReg != 0xC082)) {
+  		if (record_init_fail == 0)
+  			record_init_fail = 1;
+  		return -ENOMEM; /* no cm36283 chip detected; abort the probe */
+  }
+  
+	return 0;
+}
+
+static int cm36283_setup(struct cm36283_info *lpi)
+{
+	int ret = 0;
+
+	als_power(1);
+	msleep(5);
+	ret = gpio_request(lpi->intr_pin, "gpio_cm36283_intr");
+	if (ret < 0) {
+		pr_err("[PS][CM36283 error]%s: gpio %d request failed (%d)\n",
+			__func__, lpi->intr_pin, ret);
+		return ret;
+	}
+
+	ret = gpio_direction_input(lpi->intr_pin);
+	if (ret < 0) {
+		pr_err(
+			"[PS][CM36283 error]%s: fail to set gpio %d as input (%d)\n",
+			__func__, lpi->intr_pin, ret);
+		goto fail_free_intr_pin;
+	}
+
+
+	ret = initial_cm36283(lpi);
+	if (ret < 0) {
+		pr_err(
+			"[PS_ERR][CM36283 error]%s: fail to initial cm36283 (%d)\n",
+			__func__, ret);
+		goto fail_free_intr_pin;
+	}
+	
+	/*Default disable P sensor and L sensor*/
+  ls_initial_cmd(lpi);
+	psensor_initial_cmd(lpi);
+
+	ret = request_any_context_irq(lpi->irq,
+			cm36283_irq_handler,
+			IRQF_TRIGGER_LOW,
+			"cm36283",
+			lpi);
+	if (ret < 0) {
+		pr_err(
+			"[PS][CM36283 error]%s: req_irq(%d) fail for gpio %d (%d)\n",
+			__func__, lpi->irq,
+			lpi->intr_pin, ret);
+		goto fail_free_intr_pin;
+	}
+
+	return ret;
+
+fail_free_intr_pin:
+	gpio_free(lpi->intr_pin);
+	return ret;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void cm36283_early_suspend(struct early_suspend *h)
+{
+	struct cm36283_info *lpi = lp_info;
+
+	D("[LS][CM36283] %s\n", __func__);
+
+	if (lpi->als_enable)
+		lightsensor_disable(lpi);
+
+}
+
+static void cm36283_late_resume(struct early_suspend *h)
+{
+	struct cm36283_info *lpi = lp_info;
+
+	D("[LS][CM36283] %s\n", __func__);
+
+	if (!lpi->als_enable)
+		lightsensor_enable(lpi);
+}
+#endif
+
+static int cm36283_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int ret = 0;
+	struct cm36283_info *lpi;
+	struct cm36283_platform_data *pdata;
+
+	D("[PS][CM36283] %s\n", __func__);
+
+
+	lpi = kzalloc(sizeof(struct cm36283_info), GFP_KERNEL);
+	if (!lpi)
+		return -ENOMEM;
+
+	/*D("[CM36283] %s: client->irq = %d\n", __func__, client->irq);*/
+
+	lpi->i2c_client = client;
+	pdata = client->dev.platform_data;
+	if (!pdata) {
+		pr_err("[PS][CM36283 error]%s: Assign platform_data error!!\n",
+			__func__);
+		ret = -EBUSY;
+		goto err_platform_data_null;
+	}
+
+	lpi->irq = client->irq;
+
+	i2c_set_clientdata(client, lpi);
+	
+  lpi->intr_pin = pdata->intr;
+	lpi->adc_table = pdata->levels;
+	lpi->power = pdata->power;
+	
+	lpi->slave_addr = pdata->slave_addr;
+	
+	lpi->ps_away_thd_set = pdata->ps_away_thd_set;
+	lpi->ps_close_thd_set = pdata->ps_close_thd_set;	
+	lpi->ps_conf1_val = pdata->ps_conf1_val;
+	lpi->ps_conf3_val = pdata->ps_conf3_val;
+	
+	lpi->ls_cmd  = pdata->ls_cmd;
+	
+	lpi->record_clear_int_fail=0;
+	
+	D("[PS][CM36283] %s: ls_cmd 0x%x\n",
+		__func__, lpi->ls_cmd);
+	
+	if (pdata->ls_cmd == 0) {
+		lpi->ls_cmd  = CM36283_ALS_IT_160ms | CM36283_ALS_GAIN_2;
+	}
+
+	lp_info = lpi;
+
+	mutex_init(&CM36283_control_mutex);
+
+	mutex_init(&als_enable_mutex);
+	mutex_init(&als_disable_mutex);
+	mutex_init(&als_get_adc_mutex);
+
+	ret = lightsensor_setup(lpi);
+	if (ret < 0) {
+		pr_err("[LS][CM36283 error]%s: lightsensor_setup error!!\n",
+			__func__);
+		goto err_lightsensor_setup;
+	}
+
+	mutex_init(&ps_enable_mutex);
+	mutex_init(&ps_disable_mutex);
+	mutex_init(&ps_get_adc_mutex);
+
+	ret = psensor_setup(lpi);
+	if (ret < 0) {
+		pr_err("[PS][CM36283 error]%s: psensor_setup error!!\n",
+			__func__);
+		goto err_psensor_setup;
+	}
+
+  /* Set the lux step factor here:
+   * if one raw ADC count corresponds to 5/100 = 1/20 = 0.05 lux,
+   * encode that 1/20 factor by setting lpi->golden_adc = 1 and
+   * als_kadc = (ALS_CALIBRATED << 16) | 20.
+   */
+
+  als_kadc = (ALS_CALIBRATED <<16) | 20;
+  lpi->golden_adc = 1;
+
+  //ls calibrate always set to 1 
+  lpi->ls_calibrate = 1;
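+  /* Worked example with the values just set above: lightsensor_set_kvalue()
+   * leaves als_kadc = 20 and als_gadc = golden_adc = 1.  On the uncalibrated
+   * path, get_ls_adc_value() scales a raw reading by als_gadc/als_kadc, e.g.
+   * raw 400 -> 400 * 1 / 20 = 20; with ls_calibrate set, as it is here,
+   * lightsensor_update_table() instead scales each adc_table threshold up by
+   * als_kadc/als_gadc, e.g. a threshold of 50 becomes 50 * 20 = 1000. */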
+
+	lightsensor_set_kvalue(lpi);
+	ret = lightsensor_update_table(lpi);
+	if (ret < 0) {
+		pr_err("[LS][CM36283 error]%s: update ls table fail\n",
+			__func__);
+		goto err_lightsensor_update_table;
+	}
+
+	lpi->lp_wq = create_singlethread_workqueue("cm36283_wq");
+	if (!lpi->lp_wq) {
+		pr_err("[PS][CM36283 error]%s: can't create workqueue\n", __func__);
+		ret = -ENOMEM;
+		goto err_create_singlethread_workqueue;
+	}
+	wake_lock_init(&(lpi->ps_wake_lock), WAKE_LOCK_SUSPEND, "proximity");
+
+	ret = cm36283_setup(lpi);
+	if (ret < 0) {
+		pr_err("[PS_ERR][CM36283 error]%s: cm36283_setup error!\n", __func__);
+		goto err_cm36283_setup;
+	}
+	lpi->cm36283_class = class_create(THIS_MODULE, "optical_sensors");
+	if (IS_ERR(lpi->cm36283_class)) {
+		ret = PTR_ERR(lpi->cm36283_class);
+		lpi->cm36283_class = NULL;
+		goto err_create_class;
+	}
+
+	lpi->ls_dev = device_create(lpi->cm36283_class,
+				NULL, 0, "%s", "lightsensor");
+	if (unlikely(IS_ERR(lpi->ls_dev))) {
+		ret = PTR_ERR(lpi->ls_dev);
+		lpi->ls_dev = NULL;
+		goto err_create_ls_device;
+	}
+
+	/* register the attributes */
+	ret = device_create_file(lpi->ls_dev, &dev_attr_ls_adc);
+	if (ret)
+		goto err_create_ls_device_file;
+
+	/* register the attributes */
+	ret = device_create_file(lpi->ls_dev, &dev_attr_ls_auto);
+	if (ret)
+		goto err_create_ls_device_file;
+
+	/* register the attributes */
+	ret = device_create_file(lpi->ls_dev, &dev_attr_ls_kadc);
+	if (ret)
+		goto err_create_ls_device_file;
+
+	ret = device_create_file(lpi->ls_dev, &dev_attr_ls_gadc);
+	if (ret)
+		goto err_create_ls_device_file;
+
+	ret = device_create_file(lpi->ls_dev, &dev_attr_ls_adc_table);
+	if (ret)
+		goto err_create_ls_device_file;
+
+	ret = device_create_file(lpi->ls_dev, &dev_attr_ls_conf);
+	if (ret)
+		goto err_create_ls_device_file;
+
+	ret = device_create_file(lpi->ls_dev, &dev_attr_ls_flevel);
+	if (ret)
+		goto err_create_ls_device_file;
+
+	lpi->ps_dev = device_create(lpi->cm36283_class,
+				NULL, 0, "%s", "proximity");
+	if (unlikely(IS_ERR(lpi->ps_dev))) {
+		ret = PTR_ERR(lpi->ps_dev);
+		lpi->ps_dev = NULL;
+		goto err_create_ls_device_file;
+	}
+
+	/* register the attributes */
+	ret = device_create_file(lpi->ps_dev, &dev_attr_ps_adc);
+	if (ret)
+		goto err_create_ps_device;
+
+	ret = device_create_file(lpi->ps_dev,
+		&dev_attr_ps_parameters);
+	if (ret)
+		goto err_create_ps_device;
+
+	/* register the attributes */
+	ret = device_create_file(lpi->ps_dev, &dev_attr_ps_conf);
+	if (ret)
+		goto err_create_ps_device;
+
+	/* register the attributes */
+	ret = device_create_file(lpi->ps_dev, &dev_attr_ps_thd);
+	if (ret)
+		goto err_create_ps_device;
+
+	ret = device_create_file(lpi->ps_dev, &dev_attr_ps_hw);
+	if (ret)
+		goto err_create_ps_device;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	lpi->early_suspend.level =
+			EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	lpi->early_suspend.suspend = cm36283_early_suspend;
+	lpi->early_suspend.resume = cm36283_late_resume;
+	register_early_suspend(&lpi->early_suspend);
+#endif
+
+	D("[PS][CM36283] %s: Probe success!\n", __func__);
+
+	return ret;
+
+err_create_ps_device:
+	device_unregister(lpi->ps_dev);
+err_create_ls_device_file:
+	device_unregister(lpi->ls_dev);
+err_create_ls_device:
+	class_destroy(lpi->cm36283_class);
+err_create_class:
+err_cm36283_setup:
+	destroy_workqueue(lpi->lp_wq);
+	wake_lock_destroy(&(lpi->ps_wake_lock));
+
+	input_unregister_device(lpi->ls_input_dev);
+	input_free_device(lpi->ls_input_dev);
+	input_unregister_device(lpi->ps_input_dev);
+	input_free_device(lpi->ps_input_dev);
+err_create_singlethread_workqueue:
+err_lightsensor_update_table:
+	misc_deregister(&psensor_misc);
+err_psensor_setup:
+	mutex_destroy(&CM36283_control_mutex);
+	mutex_destroy(&ps_enable_mutex);
+	mutex_destroy(&ps_disable_mutex);
+	mutex_destroy(&ps_get_adc_mutex);
+	misc_deregister(&lightsensor_misc);
+err_lightsensor_setup:
+	mutex_destroy(&als_enable_mutex);
+	mutex_destroy(&als_disable_mutex);
+	mutex_destroy(&als_get_adc_mutex);
+err_platform_data_null:
+	kfree(lpi);
+	return ret;
+}
+   
+static int control_and_report( struct cm36283_info *lpi, uint8_t mode, uint16_t param ) {
+  int ret=0;
+	uint16_t adc_value = 0;
+	uint16_t ps_data = 0;	
+	int level = 0, i, val;
+	
+  mutex_lock(&CM36283_control_mutex);
+
+  if( mode == CONTROL_ALS ){
+    if(param){
+      lpi->ls_cmd &= CM36283_ALS_SD_MASK;      
+    } else {
+      lpi->ls_cmd |= CM36283_ALS_SD;
+    }
+    _cm36283_I2C_Write_Word(lpi->slave_addr, ALS_CONF, lpi->ls_cmd);
+    lpi->als_enable=param;
+  } else if( mode == CONTROL_PS ){
+    if(param){ 
+      lpi->ps_conf1_val &= CM36283_PS_SD_MASK;
+      lpi->ps_conf1_val |= CM36283_PS_INT_IN_AND_OUT;      
+    } else {
+      lpi->ps_conf1_val |= CM36283_PS_SD;
+      lpi->ps_conf1_val &= CM36283_PS_INT_MASK;
+    }
+    _cm36283_I2C_Write_Word(lpi->slave_addr, PS_CONF1, lpi->ps_conf1_val);    
+    lpi->ps_enable=param;  
+  }
+  if((mode == CONTROL_ALS)||(mode == CONTROL_PS)){  
+    if( param==1 ){
+		  msleep(100);  
+    }
+  }
+     	
+  if(lpi->als_enable){
+    if( mode == CONTROL_ALS ||
+      ( mode == CONTROL_INT_ISR_REPORT && 
+      ((param&INT_FLAG_ALS_IF_L)||(param&INT_FLAG_ALS_IF_H)))){
+    
+    	  lpi->ls_cmd &= CM36283_ALS_INT_MASK;
+    	  ret = _cm36283_I2C_Write_Word(lpi->slave_addr, ALS_CONF, lpi->ls_cmd);  
+      
+        get_ls_adc_value(&adc_value, 0);
+          
+        if( lpi->ls_calibrate ) {
+        	for (i = 0; i < 10; i++) {
+      	  	if (adc_value <= (*(lpi->cali_table + i))) {
+      		  	level = i;
+      			  if (*(lpi->cali_table + i))
+      				  break;
+      		  }
+      		  if ( i == 9) {/* avoid i = 10; cali_table has only 10 entries */
+      			  level = i;
+      			  break;
+      		  }
+      	  }
+        } else {
+      	  for (i = 0; i < 10; i++) {
+      		  if (adc_value <= (*(lpi->adc_table + i))) {
+      			  level = i;
+      			  if (*(lpi->adc_table + i))
+      				  break;
+      		  }
+      		  if ( i == 9) {/* avoid i = 10; adc_table has only 10 entries */
+      			  level = i;
+      			  break;
+      		  }
+      	  }
+    	  }
+    
+    	  ret = set_lsensor_range(((i == 0) || (adc_value == 0)) ? 0 :
+    		   	*(lpi->cali_table + (i - 1)) + 1,
+    		    *(lpi->cali_table + i));
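+    	  /* Example: if adc_value falls into bucket i = 3, the window passed to
+    	   * set_lsensor_range() is [cali_table[2] + 1, cali_table[3]], so the
+    	   * next ALS interrupt fires only once the reading leaves that level. */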
+    	  
+        lpi->ls_cmd |= CM36283_ALS_INT_EN;
+    	  
+        ret = _cm36283_I2C_Write_Word(lpi->slave_addr, ALS_CONF, lpi->ls_cmd);  
+    	  
+    		if ((i == 0) || (adc_value == 0))
+    			D("[LS][CM3628] %s: ADC=0x%03X, Level=%d, l_thd equal 0, h_thd = 0x%x \n",
+    				__func__, adc_value, level, *(lpi->cali_table + i));
+    		else
+    			D("[LS][CM3628] %s: ADC=0x%03X, Level=%d, l_thd = 0x%x, h_thd = 0x%x \n",
+    				__func__, adc_value, level, *(lpi->cali_table + (i - 1)) + 1, *(lpi->cali_table + i));
+    		lpi->current_level = level;
+    		lpi->current_adc = adc_value;    
+        input_report_abs(lpi->ls_input_dev, ABS_MISC, level);
+        input_sync(lpi->ls_input_dev);
+    }
+  }
+
+#define PS_CLOSE 1
+#define PS_AWAY  (1<<1)
+#define PS_CLOSE_AND_AWAY (PS_CLOSE + PS_AWAY)
+
+  if(lpi->ps_enable){
+    int ps_status = 0;
+    if( mode == CONTROL_PS )
+      ps_status = PS_CLOSE_AND_AWAY;   
+    else if(mode == CONTROL_INT_ISR_REPORT ){  
+      if ( param & INT_FLAG_PS_IF_CLOSE )
+        ps_status |= PS_CLOSE;      
+      if ( param & INT_FLAG_PS_IF_AWAY )
+        ps_status |= PS_AWAY;
+    }
+      
+    if (ps_status!=0){
+      switch(ps_status){
+        case PS_CLOSE_AND_AWAY:
+          get_stable_ps_adc_value(&ps_data);
+          val = (ps_data >= lpi->ps_close_thd_set) ? 0 : 1;
+          break;
+        case PS_AWAY:
+          val = 1;
+          break;
+        case PS_CLOSE:
+          val = 0;
+          break;
+        };
+      input_report_abs(lpi->ps_input_dev, ABS_DISTANCE, val);      
+      input_sync(lpi->ps_input_dev);        
+    }
+  }
+
+  mutex_unlock(&CM36283_control_mutex);
+  return ret;
+}
+
+
+static const struct i2c_device_id cm36283_i2c_id[] = {
+	{CM36283_I2C_NAME, 0},
+	{}
+};
+
+static struct i2c_driver cm36283_driver = {
+	.id_table = cm36283_i2c_id,
+	.probe = cm36283_probe,
+	.driver = {
+		.name = CM36283_I2C_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init cm36283_init(void)
+{
+	return i2c_add_driver(&cm36283_driver);
+}
+
+static void __exit cm36283_exit(void)
+{
+	i2c_del_driver(&cm36283_driver);
+}
+
+module_init(cm36283_init);
+module_exit(cm36283_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CM36283 Driver");
+MODULE_AUTHOR("Frank Hsieh <pengyueh@gmail.com>");
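
The cm36283 driver above exposes the proximity sensor to userspace as the misc device "proximity"; its ioctl handler accepts CAPELLA_CM3602_IOCTL_ENABLE and CAPELLA_CM3602_IOCTL_GET_ENABLED, with the argument passed as a pointer to an unsigned long. The userspace sketch below is only illustrative: it assumes the node appears as /dev/proximity and that <linux/capella_cm3602.h> (which supplies the ioctl codes used in psensor_ioctl()) is exported to userspace; both assumptions depend on the particular tree.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/capella_cm3602.h>   /* assumed to be exported to userspace */

int main(void)
{
	unsigned long on = 1, enabled = 0;
	int fd = open("/dev/proximity", O_RDWR);   /* device path is an assumption */

	if (fd < 0) {
		perror("open /dev/proximity");
		return 1;
	}

	/* Enable the proximity sensor; the driver routes this to psensor_enable(). */
	if (ioctl(fd, CAPELLA_CM3602_IOCTL_ENABLE, &on) < 0)
		perror("CAPELLA_CM3602_IOCTL_ENABLE");

	/* Read back the enable state the driver reports. */
	if (ioctl(fd, CAPELLA_CM3602_IOCTL_GET_ENABLED, &enabled) == 0)
		printf("proximity enabled: %lu\n", enabled);

	close(fd);
	return 0;
}
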
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 519b7e4..ded2b6e 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -302,9 +302,6 @@
 
 /**
  *  Set/get enable function is just needed by sensor HAL.
- *  Normally, the open function does all the initialization
- *  and power work. And close undo that open does.
- *  Just keeping the function simple.
  */
 
 static ssize_t mpu3050_attr_set_enable(struct device *dev,
@@ -316,8 +313,25 @@
 
 	if (kstrtoul(buf, 10, &val))
 		return -EINVAL;
-	sensor->enable = (u32)val;
-
+	sensor->enable = (u32)val == 0 ? 0 : 1;
+	if (sensor->enable) {
+		pm_runtime_get_sync(sensor->dev);
+		gpio_set_value(sensor->enable_gpio, 1);
+		if (sensor->use_poll)
+			schedule_delayed_work(&sensor->input_work,
+				msecs_to_jiffies(sensor->poll_interval));
+		else
+			i2c_smbus_write_byte_data(sensor->client,
+					MPU3050_INT_CFG,
+					MPU3050_ACTIVE_LOW |
+					MPU3050_OPEN_DRAIN |
+					MPU3050_RAW_RDY_EN);
+	} else {
+		if (sensor->use_poll)
+			cancel_delayed_work_sync(&sensor->input_work);
+		gpio_set_value(sensor->enable_gpio, 0);
+		pm_runtime_put(sensor->dev);
+	}
 	return count;
 }
 
@@ -792,14 +806,8 @@
 		goto err_input_cleanup;
 	}
 
-	if (sensor->use_poll)
-		schedule_delayed_work(&sensor->input_work,
-				msecs_to_jiffies(sensor->poll_interval));
-	else
-		i2c_smbus_write_byte_data(sensor->client, MPU3050_INT_CFG,
-				MPU3050_ACTIVE_LOW |
-				MPU3050_OPEN_DRAIN |
-				MPU3050_RAW_RDY_EN);
+	pm_runtime_enable(&client->dev);
+	pm_runtime_set_autosuspend_delay(&client->dev, MPU3050_AUTO_DELAY);
 
 	return 0;
 
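
With the mpu3050 change above, polling (or the data-ready interrupt) is no longer armed unconditionally at probe; it is started and stopped from the HAL-facing enable attribute, bracketed by pm_runtime_get_sync()/pm_runtime_put() and the enable GPIO. From userspace that attribute is an ordinary sysfs file, so a HAL would do roughly the following; the attribute name and path are assumptions here (they depend on how the device enumerates on the I2C bus), not something this hunk shows.

#include <stdio.h>

/* Hypothetical helper: write 1/0 to the gyro's sysfs enable attribute. */
static int set_gyro_enable(int on)
{
	/* Assumed path; the real node depends on the adapter number and address. */
	const char *path = "/sys/bus/i2c/devices/0-0068/enable";
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", on ? 1 : 0);
	fclose(f);
	return 0;
}

int main(void)
{
	if (set_gyro_enable(1) < 0)
		perror("enable gyro");
	return 0;
}
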
diff --git a/drivers/input/touchscreen/ft5x06_ts.c b/drivers/input/touchscreen/ft5x06_ts.c
index 8de6b1e..83b2aa1 100644
--- a/drivers/input/touchscreen/ft5x06_ts.c
+++ b/drivers/input/touchscreen/ft5x06_ts.c
@@ -76,12 +76,14 @@
 #define FT_PMODE_HIBERNATE	0x03
 #define FT_FACTORYMODE_VALUE	0x40
 #define FT_WORKMODE_VALUE	0x00
-#define FT_RST_CMD_REG		0xFC
+#define FT_RST_CMD_REG1		0xFC
+#define FT_RST_CMD_REG2		0xBC
 #define FT_READ_ID_REG		0x90
 #define FT_ERASE_APP_REG	0x61
 #define FT_ERASE_PANEL_REG	0x63
 #define FT_FW_START_REG		0xBF
 
+#define FT_STATUS_NUM_TP_MASK	0x0F
 
 #define FT_VTG_MIN_UV		2600000
 #define FT_VTG_MAX_UV		3300000
@@ -97,6 +99,7 @@
 
 #define FT5316_ID		0x0A
 #define FT5306I_ID		0x55
+#define FT6X06_ID		0x06
 
 #define FT_UPGRADE_AA		0xAA
 #define FT_UPGRADE_55		0x55
@@ -133,6 +136,14 @@
 #define FT6208_UPGRADE_READID_DELAY	10
 #define FT6208_UPGRADE_EARSE_DELAY	2000
 
+/*upgrade config of FT6x06*/
+#define FT6X06_UPGRADE_AA_DELAY		100
+#define FT6X06_UPGRADE_55_DELAY		30
+#define FT6X06_UPGRADE_ID_1		0x79
+#define FT6X06_UPGRADE_ID_2		0x08
+#define FT6X06_UPGRADE_READID_DELAY	10
+#define FT6X06_UPGRADE_EARSE_DELAY	2000
+
 #define FT_UPGRADE_INFO(x, y)	do { \
 		x->delay_55 = y##_UPGRADE_55_DELAY; \
 		x->delay_aa = y##_UPGRADE_AA_DELAY; \
@@ -161,7 +172,7 @@
 #define FT_FW_LAST_PKT		0x6ffa
 #define FT_EARSE_DLY_MS		100
 
-#define FT_UPGRADE_LOOP		3
+#define FT_UPGRADE_LOOP		10
 #define FT_CAL_START		0x04
 #define FT_CAL_FIN		0x00
 #define FT_CAL_STORE		0x05
@@ -329,6 +340,7 @@
 static int ft5x06_handle_touchdata(struct ft5x06_ts_data *data)
 {
 	struct ts_event *event = &data->event;
+	int num_points;
 	int ret, i;
 	u8 buf[POINT_READ_BUF] = { 0 };
 	u8 pointid = FT_MAX_ID;
@@ -342,7 +354,9 @@
 	memset(event, 0, sizeof(struct ts_event));
 
 	event->touch_point = 0;
-	for (i = 0; i < CFG_MAX_TOUCH_POINTS; i++) {
+	num_points = buf[2] & FT_STATUS_NUM_TP_MASK;
+
+	for (i = 0; i < num_points; i++) {
 		pointid = (buf[FT_TOUCH_ID_POS + FT_TOUCH_STEP * i]) >> 4;
 		if (pointid >= FT_MAX_ID)
 			break;
@@ -490,6 +504,7 @@
 {
 	struct ft5x06_ts_data *data = dev_get_drvdata(dev);
 	char txbuf[2], i;
+	int err;
 
 	if (data->loading_fw) {
 		dev_info(dev, "Firmware loading in process...\n");
@@ -517,25 +532,66 @@
 		ft5x06_i2c_write(data->client, txbuf, sizeof(txbuf));
 	}
 
+	if (data->pdata->power_on) {
+		err = data->pdata->power_on(false);
+		if (err) {
+			dev_err(dev, "power off failed");
+			goto pwr_off_fail;
+		}
+	} else {
+		err = ft5x06_power_on(data, false);
+		if (err) {
+			dev_err(dev, "power off failed");
+			goto pwr_off_fail;
+		}
+	}
+
 	data->suspended = true;
 
 	return 0;
+
+pwr_off_fail:
+	if (gpio_is_valid(data->pdata->reset_gpio)) {
+		gpio_set_value_cansleep(data->pdata->reset_gpio, 0);
+		msleep(FT_RESET_DLY);
+		gpio_set_value_cansleep(data->pdata->reset_gpio, 1);
+	}
+	enable_irq(data->client->irq);
+	return err;
 }
 
 static int ft5x06_ts_resume(struct device *dev)
 {
 	struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+	int err;
 
 	if (!data->suspended) {
 		dev_info(dev, "Already in awake state\n");
 		return 0;
 	}
 
+	if (data->pdata->power_on) {
+		err = data->pdata->power_on(true);
+		if (err) {
+			dev_err(dev, "power on failed");
+			return err;
+		}
+	} else {
+		err = ft5x06_power_on(data, true);
+		if (err) {
+			dev_err(dev, "power on failed");
+			return err;
+		}
+	}
+
 	if (gpio_is_valid(data->pdata->reset_gpio)) {
 		gpio_set_value_cansleep(data->pdata->reset_gpio, 0);
 		msleep(FT_RESET_DLY);
 		gpio_set_value_cansleep(data->pdata->reset_gpio, 1);
 	}
+
+	msleep(FT_STARTUP_DLY);
+
 	enable_irq(data->client->irq);
 
 	data->suspended = false;
@@ -635,6 +691,9 @@
 	case FT5316_ID:
 		FT_UPGRADE_INFO(info, FT5316);
 		break;
+	case FT6X06_ID:
+		FT_UPGRADE_INFO(info, FT6X06);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -647,6 +706,7 @@
 {
 	struct ft5x06_ts_data *ts_data = i2c_get_clientdata(client);
 	struct upgrade_info info;
+	u8 reset_reg;
 	u8 w_buf[FT_MAX_WR_BUF] = {0}, r_buf[FT_MAX_RD_BUF] = {0};
 	u8 pkt_buf[FT_FW_PKT_LEN + FT_FW_PKT_META_LEN];
 	int rc, i, j, temp;
@@ -659,22 +719,27 @@
 		return -EINVAL;
 	}
 
-	for (i = 0; i < FT_UPGRADE_LOOP; i++) {
-		/* reset - write 0xaa and 0x55 to register 0xfc */
-		ft5x0x_write_reg(client, FT_RST_CMD_REG, FT_UPGRADE_AA);
+	for (i = 0, j = 0; i < FT_UPGRADE_LOOP; i++) {
+		/* reset - write 0xaa and 0x55 to reset register */
+		if (ts_data->family_id == FT6X06_ID)
+			reset_reg = FT_RST_CMD_REG2;
+		else
+			reset_reg = FT_RST_CMD_REG1;
+
+		ft5x0x_write_reg(client, reset_reg, FT_UPGRADE_AA);
 		msleep(info.delay_aa);
 
-		ft5x0x_write_reg(client, FT_RST_CMD_REG, FT_UPGRADE_55);
+		ft5x0x_write_reg(client, reset_reg, FT_UPGRADE_55);
 		msleep(info.delay_55);
 
 		/* Enter upgrade mode */
 		w_buf[0] = FT_UPGRADE_55;
 		w_buf[1] = FT_UPGRADE_AA;
 		do {
-			i++;
+			j++;
 			rc = ft5x06_i2c_write(client, w_buf, 2);
 			msleep(FT_RETRY_DLY);
-		} while (rc <= 0 && i < FT_MAX_TRIES);
+		} while (rc <= 0 && j < FT_MAX_TRIES);
 
 		/* check READ_ID */
 		msleep(info.delay_readid);
@@ -777,6 +842,8 @@
 	ft5x06_i2c_write(client, w_buf, 1);
 	msleep(FT_STARTUP_DLY);
 
+	dev_info(&client->dev, "Firmware upgrade successful\n");
+
 	return 0;
 }
 
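
The ft5x06 touchdata change above stops walking all CFG_MAX_TOUCH_POINTS slots and instead trusts the controller's status byte: the low nibble of buf[2], masked with FT_STATUS_NUM_TP_MASK (0x0F), gives the number of points valid in the current frame. A small standalone sketch of that decode follows; the frame contents are made up purely for illustration.

#include <stdio.h>
#include <stdint.h>

#define FT_STATUS_NUM_TP_MASK	0x0F

int main(void)
{
	/* Fake packet: byte 2 carries the touch count in its low nibble. */
	uint8_t buf[8] = { 0x00, 0x00, 0x02, 0x80, 0x10, 0x90, 0x20, 0x00 };
	int num_points = buf[2] & FT_STATUS_NUM_TP_MASK;
	int i;

	for (i = 0; i < num_points; i++)
		printf("touch %d reported by the controller\n", i);

	return 0;
}
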
diff --git a/drivers/input/touchscreen/gt9xx/goodix_tool.c b/drivers/input/touchscreen/gt9xx/goodix_tool.c
new file mode 100644
index 0000000..3dfe4e1
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/goodix_tool.c
@@ -0,0 +1,615 @@
+/* drivers/input/touchscreen/goodix_tool.c

+ * 

+ * 2010 - 2012 Goodix Technology.

+ * 

+ * This program is free software; you can redistribute it and/or modify

+ * it under the terms of the GNU General Public License as published by

+ * the Free Software Foundation; either version 2 of the License, or

+ * (at your option) any later version.

+ * 

+ * This program is distributed in the hope that it will be a reference 

+ * to you, when you are integrating the GOODiX's CTP IC into your system, 

+ * but WITHOUT ANY WARRANTY; without even the implied warranty of 

+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 

+ * General Public License for more details.

+ * 

+ * Version:1.6

+ *        V1.0:2012/05/01,create file.

+ *        V1.2:2012/06/08,modify some warning.

+ *        V1.4:2012/08/28,modified to support GT9XX

+ *        V1.6:new proc name

+ */

+

+#include "gt9xx.h"

+

+#define DATA_LENGTH_UINT    512

+#define CMD_HEAD_LENGTH     (sizeof(st_cmd_head) - sizeof(u8*))

+static char procname[20] = {0};

+

+#define UPDATE_FUNCTIONS

+

+#ifdef UPDATE_FUNCTIONS

+extern s32 gup_enter_update_mode(struct i2c_client *client);

+extern void gup_leave_update_mode(void);

+extern s32 gup_update_proc(void *dir);

+#endif

+

+extern void gtp_irq_disable(struct goodix_ts_data *);

+extern void gtp_irq_enable(struct goodix_ts_data *);

+

+#pragma pack(1)

+typedef struct{

+    u8  wr;         //write read flag£¬0:R  1:W  2:PID 3:

+    u8  flag;       //0:no need flag/int 1: need flag  2:need int

+    u8 flag_addr[2];  //flag address

+    u8  flag_val;   //flag val

+    u8  flag_relation;  //flag_val:flag 0:not equal 1:equal 2:> 3:<

+    u16 circle;     //polling cycle 

+    u8  times;      //plling times

+    u8  retry;      //I2C retry times

+    u16 delay;      //delay befor read or after write

+    u16 data_len;   //data length

+    u8  addr_len;   //address length

+    u8  addr[2];    //address

+    u8  res[3];     //reserved

+    u8* data;       //data pointer

+}st_cmd_head;

+#pragma pack()

+st_cmd_head cmd_head;

+

+static struct i2c_client *gt_client = NULL;

+

+static struct proc_dir_entry *goodix_proc_entry;

+

+static s32 goodix_tool_write(struct file *filp, const char __user *buff, unsigned long len, void *data);

+static s32 goodix_tool_read( char *page, char **start, off_t off, int count, int *eof, void *data );

+static s32 (*tool_i2c_read)(u8 *, u16);

+static s32 (*tool_i2c_write)(u8 *, u16);

+

+#if GTP_ESD_PROTECT

+extern void gtp_esd_switch(struct i2c_client *, s32);

+#endif

+s32 DATA_LENGTH = 0;

+s8 IC_TYPE[16] = {0};

+

+static void tool_set_proc_name(char * procname)

+{

+    char *months[12] = {"Jan", "Feb", "Mar", "Apr", "May", 

+        "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};

+    char date[20] = {0};

+    char month[4] = {0};

+    int i = 0, n_month = 1, n_day = 0, n_year = 0;

+    

+    sprintf(date, "%s", __DATE__);

+    

+    //GTP_DEBUG("compile date: %s", date);

+    

+    sscanf(date, "%s %d %d", month, &n_day, &n_year);

+    

+    for (i = 0; i < 12; ++i)

+    {

+        if (!memcmp(months[i], month, 3))

+        {

+            n_month = i+1;

+            break;

+        }

+    }

+    

+    sprintf(procname, "gmnode%04d%02d%02d", n_year, n_month, n_day);    

+    

+    //GTP_DEBUG("procname = %s", procname);

+}

+

+

+static s32 tool_i2c_read_no_extra(u8* buf, u16 len)

+{

+    s32 ret = -1;

+    s32 i = 0;

+    struct i2c_msg msgs[2];

+    

+    msgs[0].flags = !I2C_M_RD;

+    msgs[0].addr  = gt_client->addr;

+    msgs[0].len   = cmd_head.addr_len;

+    msgs[0].buf   = &buf[0];

+    

+    msgs[1].flags = I2C_M_RD;

+    msgs[1].addr  = gt_client->addr;

+    msgs[1].len   = len;

+    msgs[1].buf   = &buf[GTP_ADDR_LENGTH];

+

+    for (i = 0; i < cmd_head.retry; i++)

+    {

+        ret=i2c_transfer(gt_client->adapter, msgs, 2);

+        if (ret > 0)

+        {

+            break;

+        }

+    }

+    return ret;

+}

+

+static s32 tool_i2c_write_no_extra(u8* buf, u16 len)

+{

+    s32 ret = -1;

+    s32 i = 0;

+    struct i2c_msg msg;

+

+    msg.flags = !I2C_M_RD;

+    msg.addr  = gt_client->addr;

+    msg.len   = len;

+    msg.buf   = buf;

+

+    for (i = 0; i < cmd_head.retry; i++)

+    {

+        ret=i2c_transfer(gt_client->adapter, &msg, 1);

+        if (ret > 0)

+        {

+            break;

+        }

+    }

+    return ret;

+}

+

+static s32 tool_i2c_read_with_extra(u8* buf, u16 len)

+{

+    s32 ret = -1;

+    u8 pre[2] = {0x0f, 0xff};

+    u8 end[2] = {0x80, 0x00};

+

+    tool_i2c_write_no_extra(pre, 2);

+    ret = tool_i2c_read_no_extra(buf, len);

+    tool_i2c_write_no_extra(end, 2);

+

+    return ret;

+}

+

+static s32 tool_i2c_write_with_extra(u8* buf, u16 len)

+{

+    s32 ret = -1;

+    u8 pre[2] = {0x0f, 0xff};

+    u8 end[2] = {0x80, 0x00};

+

+    tool_i2c_write_no_extra(pre, 2);

+    ret = tool_i2c_write_no_extra(buf, len);

+    tool_i2c_write_no_extra(end, 2);

+

+    return ret;

+}

+

+static void register_i2c_func(void)

+{

+//    if (!strncmp(IC_TYPE, "GT818", 5) || !strncmp(IC_TYPE, "GT816", 5) 

+//        || !strncmp(IC_TYPE, "GT811", 5) || !strncmp(IC_TYPE, "GT818F", 6) 

+//        || !strncmp(IC_TYPE, "GT827", 5) || !strncmp(IC_TYPE,"GT828", 5)

+//        || !strncmp(IC_TYPE, "GT813", 5))

+    if (strncmp(IC_TYPE, "GT8110", 6) && strncmp(IC_TYPE, "GT8105", 6)

+        && strncmp(IC_TYPE, "GT801", 5) && strncmp(IC_TYPE, "GT800", 5)

+        && strncmp(IC_TYPE, "GT801PLUS", 9) && strncmp(IC_TYPE, "GT811", 5)

+        && strncmp(IC_TYPE, "GTxxx", 5))

+    {

+        tool_i2c_read = tool_i2c_read_with_extra;

+        tool_i2c_write = tool_i2c_write_with_extra;

+        GTP_DEBUG("I2C function: with pre and end cmd!");

+    }

+    else

+    {

+        tool_i2c_read = tool_i2c_read_no_extra;

+        tool_i2c_write = tool_i2c_write_no_extra;

+        GTP_INFO("I2C function: without pre and end cmd!");

+    }

+}

+

+static void unregister_i2c_func(void)

+{

+    tool_i2c_read = NULL;

+    tool_i2c_write = NULL;

+    GTP_INFO("I2C function: unregister i2c transfer function!");

+}

+

+

+s32 init_wr_node(struct i2c_client *client)

+{

+    s32 i;

+

+    gt_client = client;

+    memset(&cmd_head, 0, sizeof(cmd_head));

+    cmd_head.data = NULL;

+

+    i = 5;

+    while ((!cmd_head.data) && i)

+    {

+        cmd_head.data = kzalloc(i * DATA_LENGTH_UINT, GFP_KERNEL);

+        if (NULL != cmd_head.data)

+        {

+            break;

+        }

+        i--;

+    }

+    if (i)

+    {

+        DATA_LENGTH = i * DATA_LENGTH_UINT + GTP_ADDR_LENGTH;

+        GTP_INFO("Applied memory size:%d.", DATA_LENGTH);

+    }

+    else

+    {

+        GTP_ERROR("Apply for memory failed.");

+        return FAIL;

+    }

+

+    cmd_head.addr_len = 2;

+    cmd_head.retry = 5;

+

+    register_i2c_func();

+

+    tool_set_proc_name(procname);

+    goodix_proc_entry = create_proc_entry(procname, 0666, NULL);

+    if (goodix_proc_entry == NULL)

+    {

+        GTP_ERROR("Couldn't create proc entry!");

+        return FAIL;

+    }

+    else

+    {

+        GTP_INFO("Create proc entry success!");

+        goodix_proc_entry->write_proc = goodix_tool_write;

+        goodix_proc_entry->read_proc = goodix_tool_read;

+    }

+

+    return SUCCESS;

+}

+

+void uninit_wr_node(void)

+{

+    kfree(cmd_head.data);

+    cmd_head.data = NULL;

+    unregister_i2c_func();

+    remove_proc_entry(procname, NULL);

+}

+

+static u8 relation(u8 src, u8 dst, u8 rlt)

+{

+    u8 ret = 0;

+    

+    switch (rlt)

+    {

+    case 0:

+        ret = (src != dst) ? true : false;

+        break;

+

+    case 1:

+        ret = (src == dst) ? true : false;

+        GTP_DEBUG("equal:src:0x%02x   dst:0x%02x   ret:%d.", src, dst, (s32)ret);

+        break;

+

+    case 2:

+        ret = (src > dst) ? true : false;

+        break;

+

+    case 3:

+        ret = (src < dst) ? true : false;

+        break;

+

+    case 4:

+        ret = (src & dst) ? true : false;

+        break;

+

+    case 5:

+        ret = (!(src | dst)) ? true : false;

+        break;

+

+    default:

+        ret = false;

+        break;    

+    }

+

+    return ret;

+}

+

+/*******************************************************    

+Function:

+    Confirm function: wait for the flag register to reach the expected state.

+Input:

+  None.

+Output:

+    SUCCESS if the expected flag value is read within cmd_head.times tries, otherwise FAIL.

+********************************************************/

+static u8 comfirm(void)

+{

+    s32 i = 0;

+    u8 buf[32];

+    

+//    memcpy(&buf[GTP_ADDR_LENGTH - cmd_head.addr_len], &cmd_head.flag_addr, cmd_head.addr_len);

+//    memcpy(buf, &cmd_head.flag_addr, cmd_head.addr_len);//Modified by Scott, 2012-02-17

+    memcpy(buf, cmd_head.flag_addr, cmd_head.addr_len);
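+    /* poll the flag register up to cmd_head.times times, sleeping cmd_head.circle ms
+       between reads, until relation() reports the expected comparison with flag_val */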

+   

+    for (i = 0; i < cmd_head.times; i++)

+    {

+        if (tool_i2c_read(buf, 1) <= 0)

+        {

+            GTP_ERROR("Read flag data failed!");

+            return FAIL;

+        }

+        if (true == relation(buf[GTP_ADDR_LENGTH], cmd_head.flag_val, cmd_head.flag_relation))

+        {

+            GTP_DEBUG("value at flag addr:0x%02x.", buf[GTP_ADDR_LENGTH]);

+            GTP_DEBUG("flag value:0x%02x.", cmd_head.flag_val);

+            break;

+        }

+

+        msleep(cmd_head.circle);

+    }

+

+    if (i >= cmd_head.times)

+    {

+        GTP_ERROR("Didn't get the flag to continue!");

+        return FAIL;

+    }

+

+    return SUCCESS;

+}

+

+/*******************************************************    

+Function:

+    Goodix tool write function.

+Input:

+  Standard proc write function parameters.

+Output:

+    Return write length.

+********************************************************/

+static s32 goodix_tool_write(struct file *filp, const char __user *buff, unsigned long len, void *data)

+{

+    s32 ret = 0;

+    GTP_DEBUG_FUNC();

+    GTP_DEBUG_ARRAY((u8*)buff, len);

+    

+    ret = copy_from_user(&cmd_head, buff, CMD_HEAD_LENGTH);

+    if(ret)

+    {

+        GTP_ERROR("copy_from_user failed.");

+    }

+

+    GTP_DEBUG("wr  :0x%02x.", cmd_head.wr);

+    GTP_DEBUG("flag:0x%02x.", cmd_head.flag);

+    GTP_DEBUG("flag addr:0x%02x%02x.", cmd_head.flag_addr[0], cmd_head.flag_addr[1]);

+    GTP_DEBUG("flag val:0x%02x.", cmd_head.flag_val);

+    GTP_DEBUG("flag rel:0x%02x.", cmd_head.flag_relation);

+    GTP_DEBUG("circle  :%d.", (s32)cmd_head.circle);

+    GTP_DEBUG("times   :%d.", (s32)cmd_head.times);

+    GTP_DEBUG("retry   :%d.", (s32)cmd_head.retry);

+    GTP_DEBUG("delay   :%d.", (s32)cmd_head.delay);

+    GTP_DEBUG("data len:%d.", (s32)cmd_head.data_len);

+    GTP_DEBUG("addr len:%d.", (s32)cmd_head.addr_len);

+    GTP_DEBUG("addr:0x%02x%02x.", cmd_head.addr[0], cmd_head.addr[1]);

+    GTP_DEBUG("len:%d.", (s32)len);

+    GTP_DEBUG("buf[20]:0x%02x.", buff[CMD_HEAD_LENGTH]);

+    

+    if (1 == cmd_head.wr)

+    {

+      //  copy_from_user(&cmd_head.data[cmd_head.addr_len], &buff[CMD_HEAD_LENGTH], cmd_head.data_len);

+        ret = copy_from_user(&cmd_head.data[GTP_ADDR_LENGTH], &buff[CMD_HEAD_LENGTH], cmd_head.data_len);

+        if(ret)

+        {

+            GTP_ERROR("copy_from_user failed.");

+        }

+        memcpy(&cmd_head.data[GTP_ADDR_LENGTH - cmd_head.addr_len], cmd_head.addr, cmd_head.addr_len);

+

+        GTP_DEBUG_ARRAY(cmd_head.data, cmd_head.data_len + cmd_head.addr_len);

+        GTP_DEBUG_ARRAY((u8*)&buff[CMD_HEAD_LENGTH], cmd_head.data_len);

+

+        if (1 == cmd_head.flag)

+        {

+            if (FAIL == comfirm())

+            {

+                GTP_ERROR("[WRITE]Comfirm fail!");

+                return FAIL;

+            }

+        }

+        else if (2 == cmd_head.flag)

+        {

+            //Need interrupt!

+        }

+        if (tool_i2c_write(&cmd_head.data[GTP_ADDR_LENGTH - cmd_head.addr_len],

+            cmd_head.data_len + cmd_head.addr_len) <= 0)

+        {

+            GTP_ERROR("[WRITE]Write data failed!");

+            return FAIL;

+        }

+

+        GTP_DEBUG_ARRAY(&cmd_head.data[GTP_ADDR_LENGTH - cmd_head.addr_len],cmd_head.data_len + cmd_head.addr_len);

+        if (cmd_head.delay)

+        {

+            msleep(cmd_head.delay);

+        }

+

+        return cmd_head.data_len + CMD_HEAD_LENGTH;

+    }

+    else if (3 == cmd_head.wr)  //Write ic type

+    {

+    ret = copy_from_user(&cmd_head.data[0], &buff[CMD_HEAD_LENGTH], cmd_head.data_len);

+        if(ret)

+        {

+            GTP_ERROR("copy_from_user failed.");

+        }

+        memcpy(IC_TYPE, cmd_head.data, cmd_head.data_len);

+

+        register_i2c_func();

+

+        return cmd_head.data_len + CMD_HEAD_LENGTH;

+    }

+    else if (5 == cmd_head.wr)

+    {

+        //memcpy(IC_TYPE, cmd_head.data, cmd_head.data_len);

+

+        return cmd_head.data_len + CMD_HEAD_LENGTH;

+    }

+    else if (7 == cmd_head.wr)//disable irq!

+    {

+        gtp_irq_disable(i2c_get_clientdata(gt_client));

+        

+    #if GTP_ESD_PROTECT

+        gtp_esd_switch(gt_client, SWITCH_OFF);

+    #endif

+        return CMD_HEAD_LENGTH;

+    }

+    else if (9 == cmd_head.wr) //enable irq!

+    {

+        gtp_irq_enable(i2c_get_clientdata(gt_client));

+

+    #if GTP_ESD_PROTECT

+        gtp_esd_switch(gt_client, SWITCH_ON);

+    #endif

+        return CMD_HEAD_LENGTH;

+    }

+    else if(17 == cmd_head.wr)

+    {

+        struct goodix_ts_data *ts = i2c_get_clientdata(gt_client);

+        ret = copy_from_user(&cmd_head.data[GTP_ADDR_LENGTH], &buff[CMD_HEAD_LENGTH], cmd_head.data_len);

+        if(ret)

+        {

+            GTP_DEBUG("copy_from_user failed.");

+        }

+        if(cmd_head.data[GTP_ADDR_LENGTH])

+        {

+            GTP_DEBUG("gtp enter rawdiff.");

+            ts->gtp_rawdiff_mode = true;

+        }

+        else

+        {

+            ts->gtp_rawdiff_mode = false;

+            GTP_DEBUG("gtp leave rawdiff.");

+        }

+        return CMD_HEAD_LENGTH;

+    }

+#ifdef UPDATE_FUNCTIONS

+    else if (11 == cmd_head.wr)//Enter update mode!

+    {

+        if (FAIL == gup_enter_update_mode(gt_client))

+        {

+            return FAIL;

+        }

+    }

+    else if (13 == cmd_head.wr)//Leave update mode!

+    {

+        gup_leave_update_mode();

+    }

+    else if (15 == cmd_head.wr) //Update firmware!

+    {

+        show_len = 0;

+        total_len = 0;

+        memset(cmd_head.data, 0, cmd_head.data_len + 1);

+        memcpy(cmd_head.data, &buff[CMD_HEAD_LENGTH], cmd_head.data_len);

+

+        if (FAIL == gup_update_proc((void*)cmd_head.data))

+        {

+            return FAIL;

+        }

+    }

+#endif

+

+    return CMD_HEAD_LENGTH;

+}

+

+/*******************************************************    

+Function:

+    Goodix tool read function.

+Input:

+  Standard proc read function parameters.

+Output:

+    Return read length.

+********************************************************/

+static s32 goodix_tool_read( char *page, char **start, off_t off, int count, int *eof, void *data )

+{

+    GTP_DEBUG_FUNC();
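+    /* even cmd_head.wr codes are read commands: 0 = register read, 2 = IC type,
+       4 = firmware update progress, 6 = error code, 8 = driver version;
+       odd codes are write-only, so reads for them are rejected below */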

+    

+    if (cmd_head.wr % 2)

+    {

+        return FAIL;

+    }

+    else if (!cmd_head.wr)

+    {

+        u16 len = 0;

+        s16 data_len = 0;

+        u16 loc = 0;

+        

+        if (1 == cmd_head.flag)

+        {

+            if (FAIL == comfirm())

+            {

+                GTP_ERROR("[READ]Comfirm fail!");

+                return FAIL;

+            }

+        }

+        else if (2 == cmd_head.flag)

+        {

+            //Need interrupt!

+        }

+

+        memcpy(cmd_head.data, cmd_head.addr, cmd_head.addr_len);

+

+        GTP_DEBUG("[CMD HEAD DATA] ADDR:0x%02x%02x.", cmd_head.data[0], cmd_head.data[1]);

+        GTP_DEBUG("[CMD HEAD ADDR] ADDR:0x%02x%02x.", cmd_head.addr[0], cmd_head.addr[1]);

+        

+        if (cmd_head.delay)

+        {

+            msleep(cmd_head.delay);

+        }

+        

+        data_len = cmd_head.data_len;
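+        /* read in chunks of at most DATA_LENGTH bytes, copying the payload that
+           follows the 2 address bytes into the proc page buffer */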

+        while(data_len > 0)

+        {

+            if (data_len > DATA_LENGTH)

+            {

+                len = DATA_LENGTH;

+            }

+            else

+            {

+                len = data_len;

+            }

+            data_len -= DATA_LENGTH;

+

+            if (tool_i2c_read(cmd_head.data, len) <= 0)

+            {

+                GTP_ERROR("[READ]Read data failed!");

+                return FAIL;

+            }

+            memcpy(&page[loc], &cmd_head.data[GTP_ADDR_LENGTH], len);

+            loc += len;

+

+            GTP_DEBUG_ARRAY(&cmd_head.data[GTP_ADDR_LENGTH], len);

+            GTP_DEBUG_ARRAY(page, len);

+        }

+    }

+    else if (2 == cmd_head.wr)

+    {

+    //    memcpy(page, "gt8", cmd_head.data_len);

+       // memcpy(page, "GT818", 5);

+      //  page[5] = 0;

+

+        GTP_DEBUG("Return ic type:%s len:%d.", page, (s32)cmd_head.data_len);

+        return cmd_head.data_len;

+        //return sizeof(IC_TYPE_NAME);

+    }

+    else if (4 == cmd_head.wr)

+    {

+        page[0] = show_len >> 8;

+        page[1] = show_len & 0xff;

+        page[2] = total_len >> 8;

+        page[3] = total_len & 0xff;

+

+        return cmd_head.data_len;

+    }

+    else if (6 == cmd_head.wr)

+    {

+        //Read error code!

+    }

+    else if (8 == cmd_head.wr)  //Read driver version

+    {

+       // memcpy(page, GTP_DRIVER_VERSION, strlen(GTP_DRIVER_VERSION));

+       s32 tmp_len;

+       tmp_len = strlen(GTP_DRIVER_VERSION);

+       memcpy(page, GTP_DRIVER_VERSION, tmp_len);

+       page[tmp_len] = 0;

+    }

+

+    return cmd_head.data_len;

+}

diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.c b/drivers/input/touchscreen/gt9xx/gt9xx.c
new file mode 100644
index 0000000..b1dc08b
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.c
@@ -0,0 +1,1810 @@
+/* drivers/input/touchscreen/gt9xx.c
+ * 
+ * 2010 - 2013 Goodix Technology.
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * 
+ * This program is distributed in the hope that it will be a reference 
+ * to you, when you are integrating the GOODiX's CTP IC into your system, 
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of 
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
+ * General Public License for more details.
+ * 
+ * Version: 1.8
+ * Authors: andrew@goodix.com, meta@goodix.com
+ * Release Date: 2013/04/25
+ * Revision record:
+ *      V1.0:   
+ *          first Release. By Andrew, 2012/08/31 
+ *      V1.2:
+ *          modify gtp_reset_guitar,slot report,tracking_id & 0x0F. By Andrew, 2012/10/15
+ *      V1.4:
+ *          modify gt9xx_update.c. By Andrew, 2012/12/12
+ *      V1.6: 
+ *          1. new heartbeat/esd_protect mechanism(add external watchdog)
+ *          2. doze mode, sliding wakeup 
+ *          3. 3 more cfg_group(GT9 Sensor_ID: 0~5) 
+ *          4. config length verification
+ *          5. names & comments
+ *                  By Meta, 2013/03/11
+ *      V1.8:
+ *          1. pen/stylus identification 
+ *          2. read double check & fixed config support
+ *          3. new esd & slide wakeup optimization
+ *                  By Meta, 2013/06/08
+ */
+
+#include <linux/irq.h>
+#include "gt9xx.h"
+
+#if GTP_ICS_SLOT_REPORT
+    #include <linux/input/mt.h>
+#endif
+
+static const char *goodix_ts_name = "Goodix Capacitive TouchScreen";
+static struct workqueue_struct *goodix_wq;
+struct i2c_client * i2c_connect_client = NULL; 
+u8 config[GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH]
+                = {GTP_REG_CONFIG_DATA >> 8, GTP_REG_CONFIG_DATA & 0xff};
+
+#if GTP_HAVE_TOUCH_KEY
+    static const u16 touch_key_array[] = GTP_KEY_TAB;
+    #define GTP_MAX_KEY_NUM  (sizeof(touch_key_array)/sizeof(touch_key_array[0]))
+    
+#if GTP_DEBUG_ON
+    static const int  key_codes[] = {KEY_HOME, KEY_BACK, KEY_MENU, KEY_SEARCH};
+    static const char *key_names[] = {"Key_Home", "Key_Back", "Key_Menu", "Key_Search"};
+#endif
+    
+#endif
+
+static s8 gtp_i2c_test(struct i2c_client *client);
+void gtp_reset_guitar(struct i2c_client *client, s32 ms);
+void gtp_int_sync(s32 ms);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void goodix_ts_early_suspend(struct early_suspend *h);
+static void goodix_ts_late_resume(struct early_suspend *h);
+#endif
+ 
+#if GTP_CREATE_WR_NODE
+extern s32 init_wr_node(struct i2c_client*);
+extern void uninit_wr_node(void);
+#endif
+
+#if GTP_AUTO_UPDATE
+extern u8 gup_init_update_proc(struct goodix_ts_data *);
+#endif
+
+#if GTP_ESD_PROTECT
+static struct delayed_work gtp_esd_check_work;
+static struct workqueue_struct * gtp_esd_check_workqueue = NULL;
+static void gtp_esd_check_func(struct work_struct *);
+static s32 gtp_init_ext_watchdog(struct i2c_client *client);
+void gtp_esd_switch(struct i2c_client *, s32);
+#endif
+
+
+#if GTP_SLIDE_WAKEUP
+typedef enum
+{
+    DOZE_DISABLED = 0,
+    DOZE_ENABLED = 1,
+    DOZE_WAKEUP = 2,
+}DOZE_T;
+static DOZE_T doze_status = DOZE_DISABLED;
+static s8 gtp_enter_doze(struct goodix_ts_data *ts);
+#endif
+
+static u8 chip_gt9xxs = 0;  // true if ic is gt9xxs, like gt915s
+u8 grp_cfg_version = 0;
+
+/*******************************************************
+Function:
+    Read data from the i2c slave device.
+Input:
+    client:     i2c device.
+    buf[0~1]:   read start address.
+    buf[2~len-1]:   read data buffer.
+    len:    GTP_ADDR_LENGTH + read bytes count
+Output:
+    number of i2c_msgs transferred:
+      2: succeed, otherwise: failed
+*********************************************************/
+s32 gtp_i2c_read(struct i2c_client *client, u8 *buf, s32 len)
+{
+    struct i2c_msg msgs[2];
+    s32 ret=-1;
+    s32 retries = 0;
+
+    GTP_DEBUG_FUNC();
+
+    msgs[0].flags = !I2C_M_RD;
+    msgs[0].addr  = client->addr;
+    msgs[0].len   = GTP_ADDR_LENGTH;
+    msgs[0].buf   = &buf[0];
+    //msgs[0].scl_rate = 300 * 1000;    // for Rockchip
+    
+    msgs[1].flags = I2C_M_RD;
+    msgs[1].addr  = client->addr;
+    msgs[1].len   = len - GTP_ADDR_LENGTH;
+    msgs[1].buf   = &buf[GTP_ADDR_LENGTH];
+    //msgs[1].scl_rate = 300 * 1000;
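+    /* combined transfer: msgs[0] writes the 16-bit register address, msgs[1] reads the data back;
+       retried up to 5 times below, with a chip reset on persistent failure (unless dozing) */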
+
+    while(retries < 5)
+    {
+        ret = i2c_transfer(client->adapter, msgs, 2);
+        if(ret == 2)break;
+        retries++;
+    }
+    if((retries >= 5))
+    {
+    #if GTP_SLIDE_WAKEUP
+        // reset chip would quit doze mode
+        if (DOZE_ENABLED == doze_status)
+        {
+            return ret;
+        }
+    #endif
+        GTP_DEBUG("I2C communication timeout, resetting chip...");
+        gtp_reset_guitar(client, 10);
+    }
+    return ret;
+}
+
+/*******************************************************
+Function:
+    Write data to the i2c slave device.
+Input:
+    client:     i2c device.
+    buf[0~1]:   write start address.
+    buf[2~len-1]:   data buffer
+    len:    GTP_ADDR_LENGTH + write bytes count
+Output:
+    number of i2c_msgs transferred:
+        1: succeed, otherwise: failed
+*********************************************************/
+s32 gtp_i2c_write(struct i2c_client *client,u8 *buf,s32 len)
+{
+    struct i2c_msg msg;
+    s32 ret = -1;
+    s32 retries = 0;
+
+    GTP_DEBUG_FUNC();
+
+    msg.flags = !I2C_M_RD;
+    msg.addr  = client->addr;
+    msg.len   = len;
+    msg.buf   = buf;
+    //msg.scl_rate = 300 * 1000;    // for Rockchip
+
+    while(retries < 5)
+    {
+        ret = i2c_transfer(client->adapter, &msg, 1);
+        if (ret == 1)break;
+        retries++;
+    }
+    if((retries >= 5))
+    {
+    #if GTP_SLIDE_WAKEUP
+        if (DOZE_ENABLED == doze_status)
+        {
+            return ret;
+        }
+    #endif
+        GTP_DEBUG("I2C communication timeout, resetting chip...");
+        gtp_reset_guitar(client, 10);
+    }
+    return ret;
+}
+/*******************************************************
+Function:
+    i2c read twice, compare the results
+Input:
+    client:  i2c device
+    addr:    operate address
+    rxbuf:   read data to store, if compare successful
+    len:     bytes to read
+Output:
+    FAIL:    read failed
+    SUCCESS: read successful
+*********************************************************/
+s32 gtp_i2c_read_dbl_check(struct i2c_client *client, u16 addr, u8 *rxbuf, int len)
+{
+    u8 buf[16] = {0};
+    u8 confirm_buf[16] = {0};
+    u8 retry = 0;
+    
+    while (retry++ < 3)
+    {
+        memset(buf, 0xAA, 16);
+        buf[0] = (u8)(addr >> 8);
+        buf[1] = (u8)(addr & 0xFF);
+        gtp_i2c_read(client, buf, len + 2);
+        
+        memset(confirm_buf, 0xAB, 16);
+        confirm_buf[0] = (u8)(addr >> 8);
+        confirm_buf[1] = (u8)(addr & 0xFF);
+        gtp_i2c_read(client, confirm_buf, len + 2);
+        
+        if (!memcmp(buf, confirm_buf, len+2))
+        {
+            break;
+        }
+    }    
+    if (retry < 3)
+    {
+        memcpy(rxbuf, confirm_buf+2, len);
+        return SUCCESS;
+    }
+    else
+    {
+        GTP_ERROR("i2c read 0x%04X, %d bytes, double check failed!", addr, len);
+        return FAIL;
+    }
+}
+
+/*******************************************************
+Function:
+    Send config.
+Input:
+    client: i2c device.
+Output:
+    result of i2c write operation. 
+        1: succeed, otherwise: failed
+*********************************************************/
+s32 gtp_send_cfg(struct i2c_client *client)
+{
+    s32 ret = 2;
+    
+#if GTP_DRIVER_SEND_CFG
+    s32 retry = 0;
+    struct goodix_ts_data *ts = i2c_get_clientdata(client);
+    
+    if (ts->fixed_cfg)
+    {
+        GTP_INFO("Ic fixed config, no config sent!");
+        return 2;
+    }
+    GTP_INFO("driver send config");
+    for (retry = 0; retry < 5; retry++)
+    {
+        ret = gtp_i2c_write(client, config , GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH);
+        if (ret > 0)
+        {
+            break;
+        }
+    }
+#endif
+
+    return ret;
+}
+
+/*******************************************************
+Function:
+    Disable irq function
+Input:
+    ts: goodix i2c_client private data
+Output:
+    None.
+*********************************************************/
+void gtp_irq_disable(struct goodix_ts_data *ts)
+{
+    unsigned long irqflags;
+
+    GTP_DEBUG_FUNC();
+
+    spin_lock_irqsave(&ts->irq_lock, irqflags);
+    if (!ts->irq_is_disable)
+    {
+        ts->irq_is_disable = 1; 
+        disable_irq_nosync(ts->client->irq);
+    }
+    spin_unlock_irqrestore(&ts->irq_lock, irqflags);
+}
+
+/*******************************************************
+Function:
+    Enable irq function
+Input:
+    ts: goodix i2c_client private data
+Output:
+    None.
+*********************************************************/
+void gtp_irq_enable(struct goodix_ts_data *ts)
+{
+    unsigned long irqflags = 0;
+
+    GTP_DEBUG_FUNC();
+    
+    spin_lock_irqsave(&ts->irq_lock, irqflags);
+    if (ts->irq_is_disable) 
+    {
+        enable_irq(ts->client->irq);
+        ts->irq_is_disable = 0; 
+    }
+    spin_unlock_irqrestore(&ts->irq_lock, irqflags);
+}
+
+
+/*******************************************************
+Function:
+    Report touch point event 
+Input:
+    ts: goodix i2c_client private data
+    id: trackId
+    x:  input x coordinate
+    y:  input y coordinate
+    w:  input pressure
+Output:
+    None.
+*********************************************************/
+static void gtp_touch_down(struct goodix_ts_data* ts,s32 id,s32 x,s32 y,s32 w)
+{
+#if GTP_CHANGE_X2Y
+    GTP_SWAP(x, y);
+#endif
+
+#if GTP_ICS_SLOT_REPORT
+    input_mt_slot(ts->input_dev, id);
+    input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, id);
+    input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x);
+    input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y);
+    input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, w);
+    input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+#else
+    input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x);
+    input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y);
+    input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, w);
+    input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+    input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, id);
+    input_mt_sync(ts->input_dev);
+#endif
+
+    GTP_DEBUG("ID:%d, X:%d, Y:%d, W:%d", id, x, y, w);
+}
+
+/*******************************************************
+Function:
+    Report touch release event
+Input:
+    ts: goodix i2c_client private data
+Output:
+    None.
+*********************************************************/
+static void gtp_touch_up(struct goodix_ts_data* ts, s32 id)
+{
+#if GTP_ICS_SLOT_REPORT
+    input_mt_slot(ts->input_dev, id);
+    input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
+    GTP_DEBUG("Touch id[%2d] release!", id);
+#else
+    input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
+    input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
+    input_mt_sync(ts->input_dev);
+#endif
+}
+
+
+/*******************************************************
+Function:
+    Goodix touchscreen work function
+Input:
+    work: work struct of goodix_workqueue
+Output:
+    None.
+*********************************************************/
+static void goodix_ts_work_func(struct work_struct *work)
+{
+    u8  end_cmd[3] = {GTP_READ_COOR_ADDR >> 8, GTP_READ_COOR_ADDR & 0xFF, 0};
+    u8  point_data[2 + 1 + 8 * GTP_MAX_TOUCH + 1]={GTP_READ_COOR_ADDR >> 8, GTP_READ_COOR_ADDR & 0xFF};
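+    /* layout: 2 address bytes + 1 status byte + 8 bytes per touch point + 1 byte for the
+       touch-key state; only 12 bytes are read first, extra points are fetched when touch_num > 1 */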
+    u8  touch_num = 0;
+    u8  finger = 0;
+    static u16 pre_touch = 0;
+    static u8 pre_key = 0;
+#if GTP_WITH_PEN
+    static u8 pre_pen = 0;
+#endif
+    u8  key_value = 0;
+    u8* coor_data = NULL;
+    s32 input_x = 0;
+    s32 input_y = 0;
+    s32 input_w = 0;
+    s32 id = 0;
+    s32 i  = 0;
+    s32 ret = -1;
+    struct goodix_ts_data *ts = NULL;
+
+#if GTP_SLIDE_WAKEUP
+    u8 doze_buf[3] = {0x81, 0x4B};
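+    /* 0x814B reports the wakeup gesture while dozing: 0xAA / 0xBB for slides,
+       0xC0 bits set for double click (see the checks below) */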
+#endif
+
+    GTP_DEBUG_FUNC();
+    ts = container_of(work, struct goodix_ts_data, work);
+    if (ts->enter_update)
+    {
+        return;
+    }
+#if GTP_SLIDE_WAKEUP
+    if (DOZE_ENABLED == doze_status)
+    {               
+        ret = gtp_i2c_read(i2c_connect_client, doze_buf, 3);
+        GTP_DEBUG("0x814B = 0x%02X", doze_buf[2]);
+        if (ret > 0)
+        {               
+            if (doze_buf[2] == 0xAA)
+            {
+                GTP_INFO("Slide(0xAA) To Light up the screen!");
+                doze_status = DOZE_WAKEUP;
+                input_report_key(ts->input_dev, KEY_POWER, 1);
+                input_sync(ts->input_dev);
+                input_report_key(ts->input_dev, KEY_POWER, 0);
+                input_sync(ts->input_dev);
+                // clear 0x814B
+                doze_buf[2] = 0x00;
+                gtp_i2c_write(i2c_connect_client, doze_buf, 3);
+            }
+            else if (doze_buf[2] == 0xBB)
+            {
+                GTP_INFO("Slide(0xBB) To Light up the screen!");
+                doze_status = DOZE_WAKEUP;
+                input_report_key(ts->input_dev, KEY_POWER, 1);
+                input_sync(ts->input_dev);
+                input_report_key(ts->input_dev, KEY_POWER, 0);
+                input_sync(ts->input_dev);
+                // clear 0x814B
+                doze_buf[2] = 0x00;
+                gtp_i2c_write(i2c_connect_client, doze_buf, 3);
+            }
+            else if (0xC0 == (doze_buf[2] & 0xC0))
+            {
+                GTP_INFO("double click to light up the screen!");
+                doze_status = DOZE_WAKEUP;
+                input_report_key(ts->input_dev, KEY_POWER, 1);
+                input_sync(ts->input_dev);
+                input_report_key(ts->input_dev, KEY_POWER, 0);
+                input_sync(ts->input_dev);
+                // clear 0x814B
+                doze_buf[2] = 0x00;
+                gtp_i2c_write(i2c_connect_client, doze_buf, 3);
+            }
+            else
+            {
+                gtp_enter_doze(ts);
+            }
+        }
+        if (ts->use_irq)
+        {
+            gtp_irq_enable(ts);
+        }
+        return;
+    }
+#endif
+
+    ret = gtp_i2c_read(ts->client, point_data, 12);
+    if (ret < 0)
+    {
+        GTP_ERROR("I2C transfer error. errno:%d\n ", ret);
+        goto exit_work_func;
+    }
+
+    finger = point_data[GTP_ADDR_LENGTH];    
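+    /* status byte: bit 7 = coordinate data ready, low nibble = number of touch points */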
+    if((finger & 0x80) == 0)
+    {
+        goto exit_work_func;
+    }
+
+    touch_num = finger & 0x0f;
+    if (touch_num > GTP_MAX_TOUCH)
+    {
+        goto exit_work_func;
+    }
+
+    if (touch_num > 1)
+    {
+        u8 buf[8 * GTP_MAX_TOUCH] = {(GTP_READ_COOR_ADDR + 10) >> 8, (GTP_READ_COOR_ADDR + 10) & 0xff};
+
+        ret = gtp_i2c_read(ts->client, buf, 2 + 8 * (touch_num - 1)); 
+        memcpy(&point_data[12], &buf[2], 8 * (touch_num - 1));
+    }
+
+#if GTP_HAVE_TOUCH_KEY
+    key_value = point_data[3 + 8 * touch_num];
+    
+    if(key_value || pre_key)
+    {
+        for (i = 0; i < GTP_MAX_KEY_NUM; i++)
+        {
+        #if GTP_DEBUG_ON
+            for (ret = 0; ret < 4; ++ret)
+            {
+                if (key_codes[ret] == touch_key_array[i])
+                {
+                    GTP_DEBUG("Key: %s %s", key_names[ret], (key_value & (0x01 << i)) ? "Down" : "Up");
+                    break;
+                }
+            }
+        #endif
+            input_report_key(ts->input_dev, touch_key_array[i], key_value & (0x01<<i));   
+        }
+        touch_num = 0;
+        pre_touch = 0;
+    }
+#endif
+    pre_key = key_value;
+
+    GTP_DEBUG("pre_touch:%02x, finger:%02x.", pre_touch, finger);
+
+#if GTP_ICS_SLOT_REPORT
+
+#if GTP_WITH_PEN
+    if (pre_pen && (touch_num == 0))
+    {
+        GTP_DEBUG("Pen touch UP(Slot)!");
+        input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
+        input_mt_slot(ts->input_dev, 5);
+        input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
+        pre_pen = 0;
+    }
+#endif
+    if (pre_touch || touch_num)
+    {
+        s32 pos = 0;
+        u16 touch_index = 0;
+
+        coor_data = &point_data[3];
+        
+        if(touch_num)
+        {
+            id = coor_data[pos] & 0x0F;
+        
+        #if GTP_WITH_PEN
+            id = coor_data[pos];
+            if ((id == 128))  
+            {
+                GTP_DEBUG("Pen touch DOWN(Slot)!");
+                input_x  = coor_data[pos + 1] | (coor_data[pos + 2] << 8);
+                input_y  = coor_data[pos + 3] | (coor_data[pos + 4] << 8);
+                input_w  = coor_data[pos + 5] | (coor_data[pos + 6] << 8);
+                
+                input_report_key(ts->input_dev, BTN_TOOL_PEN, 1);
+                input_mt_slot(ts->input_dev, 5);
+                input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, 5);
+                input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
+                input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, input_y);
+                input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w);
+                GTP_DEBUG("Pen/Stylus: (%d, %d)[%d]", input_x, input_y, input_w);
+                pre_pen = 1;
+                pre_touch = 0;
+            }    
+        #endif
+        
+            touch_index |= (0x01<<id);
+        }
+        
+        GTP_DEBUG("id = %d,touch_index = 0x%x, pre_touch = 0x%x\n",id, touch_index,pre_touch);
+        for (i = 0; i < GTP_MAX_TOUCH; i++)
+        {
+        #if GTP_WITH_PEN
+            if (pre_pen == 1)
+            {
+                break;
+            }
+        #endif
+        
+            if (touch_index & (0x01<<i))
+            {
+                input_x  = coor_data[pos + 1] | (coor_data[pos + 2] << 8);
+                input_y  = coor_data[pos + 3] | (coor_data[pos + 4] << 8);
+                input_w  = coor_data[pos + 5] | (coor_data[pos + 6] << 8);
+
+                gtp_touch_down(ts, id, input_x, input_y, input_w);
+                pre_touch |= 0x01 << i;
+                
+                pos += 8;
+                id = coor_data[pos] & 0x0F;
+                touch_index |= (0x01<<id);
+            }
+            else
+            {
+                gtp_touch_up(ts, i);
+                pre_touch &= ~(0x01 << i);
+            }
+        }
+    }
+#else
+    input_report_key(ts->input_dev, BTN_TOUCH, (touch_num || key_value));
+    if (touch_num)
+    {
+        for (i = 0; i < touch_num; i++)
+        {
+            coor_data = &point_data[i * 8 + 3];
+
+            id = coor_data[0];      //  & 0x0F;
+            input_x  = coor_data[1] | (coor_data[2] << 8);
+            input_y  = coor_data[3] | (coor_data[4] << 8);
+            input_w  = coor_data[5] | (coor_data[6] << 8);
+        
+        #if GTP_WITH_PEN
+            if (id == 128)
+            {
+                GTP_DEBUG("Pen touch DOWN!");
+                input_report_key(ts->input_dev, BTN_TOOL_PEN, 1);
+                pre_pen = 1;
+                id = 0;   
+            }
+        #endif
+        
+            gtp_touch_down(ts, id, input_x, input_y, input_w);
+        }
+    }
+    else if (pre_touch)
+    {
+    
+    #if GTP_WITH_PEN
+        if (pre_pen == 1)
+        {
+            GTP_DEBUG("Pen touch UP!");
+            input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
+            pre_pen = 0;
+        }
+    #endif
+    
+        GTP_DEBUG("Touch Release!");
+        gtp_touch_up(ts, 0);
+    }
+
+    pre_touch = touch_num;
+#endif
+
+    input_sync(ts->input_dev);
+
+exit_work_func:
+    if(!ts->gtp_rawdiff_mode)
+    {
+        ret = gtp_i2c_write(ts->client, end_cmd, 3);
+        if (ret < 0)
+        {
+            GTP_INFO("I2C write end_cmd error!");
+        }
+    }
+    if (ts->use_irq)
+    {
+        gtp_irq_enable(ts);
+    }
+}
+
+/*******************************************************
+Function:
+    Timer interrupt service routine for polling mode.
+Input:
+    timer: timer struct pointer
+Output:
+    Timer work mode. 
+        HRTIMER_NORESTART: no restart mode
+*********************************************************/
+static enum hrtimer_restart goodix_ts_timer_handler(struct hrtimer *timer)
+{
+    struct goodix_ts_data *ts = container_of(timer, struct goodix_ts_data, timer);
+
+    GTP_DEBUG_FUNC();
+
+    queue_work(goodix_wq, &ts->work);
+    hrtimer_start(&ts->timer, ktime_set(0, (GTP_POLL_TIME+6)*1000000), HRTIMER_MODE_REL);
+    return HRTIMER_NORESTART;
+}
+
+/*******************************************************
+Function:
+    External interrupt service routine for interrupt mode.
+Input:
+    irq:  interrupt number.
+    dev_id: private data pointer
+Output:
+    Handle Result.
+        IRQ_HANDLED: interrupt handled successfully
+*********************************************************/
+static irqreturn_t goodix_ts_irq_handler(int irq, void *dev_id)
+{
+    struct goodix_ts_data *ts = dev_id;
+
+    GTP_DEBUG_FUNC();
+ 
+    gtp_irq_disable(ts);
+
+    queue_work(goodix_wq, &ts->work);
+    
+    return IRQ_HANDLED;
+}
+/*******************************************************
+Function:
+    Synchronization.
+Input:
+    ms: synchronization time in millisecond.
+Output:
+    None.
+*******************************************************/
+void gtp_int_sync(s32 ms)
+{
+    GTP_GPIO_OUTPUT(GTP_INT_PORT, 0);
+    msleep(ms);
+    GTP_GPIO_AS_INT(GTP_INT_PORT);
+}
+
+/*******************************************************
+Function:
+    Reset chip.
+Input:
+    ms: reset time in millisecond
+Output:
+    None.
+*******************************************************/
+void gtp_reset_guitar(struct i2c_client *client, s32 ms)
+{
+    GTP_DEBUG_FUNC();
+
+    GTP_GPIO_OUTPUT(GTP_RST_PORT, 0);   // begin select I2C slave addr
+    msleep(ms);                         // T2: > 10ms
+    // HIGH: 0x28/0x29, LOW: 0xBA/0xBB
+    GTP_GPIO_OUTPUT(GTP_INT_PORT, client->addr == 0x14);
+
+    msleep(2);                          // T3: > 100us
+    GTP_GPIO_OUTPUT(GTP_RST_PORT, 1);
+    
+    msleep(6);                          // T4: > 5ms
+
+    GTP_GPIO_AS_INPUT(GTP_RST_PORT);    // end select I2C slave addr
+
+    gtp_int_sync(50);                  
+    
+#if GTP_ESD_PROTECT
+    gtp_init_ext_watchdog(client);
+#endif
+}
+
+#if GTP_SLIDE_WAKEUP
+/*******************************************************
+Function:
+    Enter doze mode for sliding wakeup.
+Input:
+    ts: goodix tp private data
+Output:
+    1: succeed, otherwise failed
+*******************************************************/
+static s8 gtp_enter_doze(struct goodix_ts_data *ts)
+{
+    s8 ret = -1;
+    s8 retry = 0;
+    u8 i2c_control_buf[3] = {(u8)(GTP_REG_SLEEP >> 8), (u8)GTP_REG_SLEEP, 8};
+
+    GTP_DEBUG_FUNC();
+
+#if GTP_DBL_CLK_WAKEUP
+    i2c_control_buf[2] = 0x09;
+#endif
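+    /* the doze command byte (8 = slide wakeup, 9 = double-click wakeup) is written
+       to 0x8046 and then to 0x8040 in the retry loop below */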
+
+    gtp_irq_disable(ts);
+    
+    GTP_DEBUG("entering doze mode...");
+    while(retry++ < 5)
+    {
+        i2c_control_buf[0] = 0x80;
+        i2c_control_buf[1] = 0x46;
+        ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
+        if (ret < 0)
+        {
+            GTP_DEBUG("failed to set doze flag into 0x8046, %d", retry);
+            continue;
+        }
+        i2c_control_buf[0] = 0x80;
+        i2c_control_buf[1] = 0x40;
+        ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
+        if (ret > 0)
+        {
+            doze_status = DOZE_ENABLED;
+            GTP_INFO("GTP has been working in doze mode!");
+            gtp_irq_enable(ts);
+            return ret;
+        }
+        msleep(10);
+    }
+    GTP_ERROR("GTP send doze cmd failed.");
+    gtp_irq_enable(ts);
+    return ret;
+}
+#else 
+/*******************************************************
+Function:
+    Enter sleep mode.
+Input:
+    ts: private data.
+Output:
+    Executive outcomes.
+       1: succeed, otherwise failed.
+*******************************************************/
+static s8 gtp_enter_sleep(struct goodix_ts_data * ts)
+{
+    s8 ret = -1;
+    s8 retry = 0;
+    u8 i2c_control_buf[3] = {(u8)(GTP_REG_SLEEP >> 8), (u8)GTP_REG_SLEEP, 5};
+
+    GTP_DEBUG_FUNC();
+    
+    GTP_GPIO_OUTPUT(GTP_INT_PORT, 0);
+    msleep(5);
+    
+    while(retry++ < 5)
+    {
+        ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
+        if (ret > 0)
+        {
+            GTP_INFO("GTP enter sleep!");
+            
+            return ret;
+        }
+        msleep(10);
+    }
+    GTP_ERROR("GTP send sleep cmd failed.");
+    return ret;
+}
+#endif 
+/*******************************************************
+Function:
+    Wakeup from sleep.
+Input:
+    ts: private data.
+Output:
+    Executive outcomes.
+        >0: succeed, otherwise: failed.
+*******************************************************/
+static s8 gtp_wakeup_sleep(struct goodix_ts_data * ts)
+{
+    u8 retry = 0;
+    s8 ret = -1;
+    
+    GTP_DEBUG_FUNC();
+    
+#if GTP_POWER_CTRL_SLEEP
+    while(retry++ < 5)
+    {
+        gtp_reset_guitar(ts->client, 20);
+        
+        ret = gtp_send_cfg(ts->client);
+        if (ret < 0)
+        {
+            GTP_INFO("Wakeup sleep send config failed!");
+            continue;
+        }
+        GTP_INFO("GTP wakeup sleep");
+        return 1;
+    }
+#else
+    while(retry++ < 10)
+    {
+    #if GTP_SLIDE_WAKEUP
+        if (DOZE_WAKEUP != doze_status)       // wakeup not by slide 
+        {
+            gtp_reset_guitar(ts->client, 10);
+        }
+        else              // wakeup by slide 
+        {
+            doze_status = DOZE_DISABLED;
+        }
+    #else
+        if (chip_gt9xxs == 1)
+        {
+           gtp_reset_guitar(ts->client, 10);
+        }
+        else
+        {
+            GTP_GPIO_OUTPUT(GTP_INT_PORT, 1);
+            msleep(5);
+        }
+    #endif
+        ret = gtp_i2c_test(ts->client);
+        if (ret > 0)
+        {
+            GTP_INFO("GTP wakeup sleep.");
+            
+        #if (!GTP_SLIDE_WAKEUP)
+            if (chip_gt9xxs == 0)
+            {
+                gtp_int_sync(25);
+                msleep(20);
+            #if GTP_ESD_PROTECT
+                gtp_init_ext_watchdog(ts->client);
+            #endif
+            }
+        #endif
+            return ret;
+        }
+        gtp_reset_guitar(ts->client, 20);
+    }
+#endif
+
+    GTP_ERROR("GTP wakeup sleep failed.");
+    return ret;
+}
+
+/*******************************************************
+Function:
+    Initialize gtp.
+Input:
+    ts: goodix private data
+Output:
+    Executive outcomes.
+        0: succeed, otherwise: failed
+*******************************************************/
+static s32 gtp_init_panel(struct goodix_ts_data *ts)
+{
+    s32 ret = -1;
+
+#if GTP_DRIVER_SEND_CFG
+    s32 i;
+    u8 check_sum = 0;
+    u8 opr_buf[16];
+    u8 sensor_id = 0;
+
+    u8 cfg_info_group1[] = CTP_CFG_GROUP1;
+    u8 cfg_info_group2[] = CTP_CFG_GROUP2;
+    u8 cfg_info_group3[] = CTP_CFG_GROUP3;
+    u8 cfg_info_group4[] = CTP_CFG_GROUP4;
+    u8 cfg_info_group5[] = CTP_CFG_GROUP5;
+    u8 cfg_info_group6[] = CTP_CFG_GROUP6;
+    u8 *send_cfg_buf[] = {cfg_info_group1, cfg_info_group2, cfg_info_group3,
+                        cfg_info_group4, cfg_info_group5, cfg_info_group6};
+    u8 cfg_info_len[] = { CFG_GROUP_LEN(cfg_info_group1),
+                          CFG_GROUP_LEN(cfg_info_group2),
+                          CFG_GROUP_LEN(cfg_info_group3),
+                          CFG_GROUP_LEN(cfg_info_group4),
+                          CFG_GROUP_LEN(cfg_info_group5),
+                          CFG_GROUP_LEN(cfg_info_group6)};
+
+    GTP_DEBUG("Config Groups\' Lengths: %d, %d, %d, %d, %d, %d", 
+        cfg_info_len[0], cfg_info_len[1], cfg_info_len[2], cfg_info_len[3],
+        cfg_info_len[4], cfg_info_len[5]);
+
+    ret = gtp_i2c_read_dbl_check(ts->client, 0x41E4, opr_buf, 1);
+    if (SUCCESS == ret) 
+    {
+        if (opr_buf[0] != 0xBE)
+        {
+            ts->fw_error = 1;
+            GTP_ERROR("Firmware error, no config sent!");
+            return -1;
+        }
+    }
+
+    if ((!cfg_info_len[1]) && (!cfg_info_len[2]) && 
+        (!cfg_info_len[3]) && (!cfg_info_len[4]) && 
+        (!cfg_info_len[5]))
+    {
+        sensor_id = 0; 
+    }
+    else
+    {
+        ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_SENSOR_ID, &sensor_id, 1);
+        if (SUCCESS == ret)
+        {
+            if (sensor_id >= 0x06)
+            {
+                GTP_ERROR("Invalid sensor_id(0x%02X), No Config Sent!", sensor_id);
+                return -1;
+            }
+        }
+        else
+        {
+            GTP_ERROR("Failed to get sensor_id, No config sent!");
+            return -1;
+        }
+    }
+    GTP_DEBUG("Sensor_ID: %d", sensor_id);
+    
+    ts->gtp_cfg_len = cfg_info_len[sensor_id];
+    
+    if (ts->gtp_cfg_len < GTP_CONFIG_MIN_LENGTH)
+    {
+        GTP_ERROR("Sensor_ID(%d) matches with NULL or INVALID CONFIG GROUP! NO Config Sent! You need to check you header file CFG_GROUP section!", sensor_id);
+        return -1;
+    }
+    
+    ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_CONFIG_DATA, &opr_buf[0], 1);
+    
+    if (ret == SUCCESS)
+    {
+        GTP_DEBUG("CFG_GROUP%d Config Version: %d, 0x%02X; IC Config Version: %d, 0x%02X", sensor_id+1, 
+                    send_cfg_buf[sensor_id][0], send_cfg_buf[sensor_id][0], opr_buf[0], opr_buf[0]);
+        
+        if (opr_buf[0] < 90)    
+        {
+            grp_cfg_version = send_cfg_buf[sensor_id][0];       // backup group config version
+            send_cfg_buf[sensor_id][0] = 0x00;
+            ts->fixed_cfg = 0;
+        }
+        else        // treated as fixed config, not send config
+        {
+            GTP_INFO("Ic fixed config with config version(%d, 0x%02X)", opr_buf[0], opr_buf[0]);
+            ts->fixed_cfg = 1;
+        }
+    }
+    else
+    {
+        GTP_ERROR("Failed to get ic config version!No config sent!");
+        return -1;
+    }
+    
+    memset(&config[GTP_ADDR_LENGTH], 0, GTP_CONFIG_MAX_LENGTH);
+    memcpy(&config[GTP_ADDR_LENGTH], send_cfg_buf[sensor_id], ts->gtp_cfg_len);
+
+#if GTP_CUSTOM_CFG
+    config[RESOLUTION_LOC]     = (u8)GTP_MAX_WIDTH;
+    config[RESOLUTION_LOC + 1] = (u8)(GTP_MAX_WIDTH>>8);
+    config[RESOLUTION_LOC + 2] = (u8)GTP_MAX_HEIGHT;
+    config[RESOLUTION_LOC + 3] = (u8)(GTP_MAX_HEIGHT>>8);
+    
+    if (GTP_INT_TRIGGER == 0)  //RISING
+    {
+        config[TRIGGER_LOC] &= 0xfe; 
+    }
+    else if (GTP_INT_TRIGGER == 1)  //FALLING
+    {
+        config[TRIGGER_LOC] |= 0x01;
+    }
+#endif  // GTP_CUSTOM_CFG
+    
+    check_sum = 0;
+    for (i = GTP_ADDR_LENGTH; i < ts->gtp_cfg_len; i++)
+    {
+        check_sum += config[i];
+    }
+    config[ts->gtp_cfg_len] = (~check_sum) + 1;
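+    /* two's-complement checksum: the bytes summed above plus this byte add up to 0 (mod 256) */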
+    
+#else // DRIVER NOT SEND CONFIG
+    ts->gtp_cfg_len = GTP_CONFIG_MAX_LENGTH;
+    ret = gtp_i2c_read(ts->client, config, ts->gtp_cfg_len + GTP_ADDR_LENGTH);
+    if (ret < 0)
+    {
+        GTP_ERROR("Read Config Failed, Using Default Resolution & INT Trigger!");
+        ts->abs_x_max = GTP_MAX_WIDTH;
+        ts->abs_y_max = GTP_MAX_HEIGHT;
+        ts->int_trigger_type = GTP_INT_TRIGGER;
+    }
+#endif // GTP_DRIVER_SEND_CFG
+
+    GTP_DEBUG_FUNC();
+    if ((ts->abs_x_max == 0) && (ts->abs_y_max == 0))
+    {
+        ts->abs_x_max = (config[RESOLUTION_LOC + 1] << 8) + config[RESOLUTION_LOC];
+        ts->abs_y_max = (config[RESOLUTION_LOC + 3] << 8) + config[RESOLUTION_LOC + 2];
+        ts->int_trigger_type = (config[TRIGGER_LOC]) & 0x03; 
+    }
+    ret = gtp_send_cfg(ts->client);
+    if (ret < 0)
+    {
+        GTP_ERROR("Send config error.");
+    }
+    GTP_DEBUG("X_MAX = %d, Y_MAX = %d, TRIGGER = 0x%02x",
+        ts->abs_x_max,ts->abs_y_max,ts->int_trigger_type);
+
+    msleep(10);
+    return 0;
+}
+
+/*******************************************************
+Function:
+    Read chip version.
+Input:
+    client:  i2c device
+    version: buffer to keep ic firmware version
+Output:
+    read operation return.
+        2: succeed, otherwise: failed
+*******************************************************/
+s32 gtp_read_version(struct i2c_client *client, u16* version)
+{
+    s32 ret = -1;
+    u8 buf[8] = {GTP_REG_VERSION >> 8, GTP_REG_VERSION & 0xff};
+
+    GTP_DEBUG_FUNC();
+
+    ret = gtp_i2c_read(client, buf, sizeof(buf));
+    if (ret < 0)
+    {
+        GTP_ERROR("GTP read version failed");
+        return ret;
+    }
+
+    if (version)
+    {
+        *version = (buf[7] << 8) | buf[6];
+    }
+    
+    if (buf[5] == 0x00)
+    {
+        GTP_INFO("IC Version: %c%c%c_%02x%02x", buf[2], buf[3], buf[4], buf[7], buf[6]);
+    }
+    else
+    {
+        if (buf[5] == 'S' || buf[5] == 's')
+        {
+            chip_gt9xxs = 1;
+        }
+        GTP_INFO("IC Version: %c%c%c%c_%02x%02x", buf[2], buf[3], buf[4], buf[5], buf[7], buf[6]);
+    }
+    return ret;
+}
+
+/*******************************************************
+Function:
+    I2c test Function.
+Input:
+    client:i2c client.
+Output:
+    Executive outcomes.
+        2: succeed, otherwise failed.
+*******************************************************/
+static s8 gtp_i2c_test(struct i2c_client *client)
+{
+    u8 test[3] = {GTP_REG_CONFIG_DATA >> 8, GTP_REG_CONFIG_DATA & 0xff};
+    u8 retry = 0;
+    s8 ret = -1;
+  
+    GTP_DEBUG_FUNC();
+  
+    while(retry++ < 5)
+    {
+        ret = gtp_i2c_read(client, test, 3);
+        if (ret > 0)
+        {
+            return ret;
+        }
+        GTP_ERROR("GTP i2c test failed time %d.",retry);
+        msleep(10);
+    }
+    return ret;
+}
+
+/*******************************************************
+Function:
+    Request gpio(INT & RST) ports.
+Input:
+    ts: private data.
+Output:
+    Executive outcomes.
+        >= 0: succeed, < 0: failed
+*******************************************************/
+static s8 gtp_request_io_port(struct goodix_ts_data *ts)
+{
+    s32 ret = 0;
+
+    ret = GTP_GPIO_REQUEST(GTP_INT_PORT, "GTP_INT_IRQ");
+    if (ret < 0) 
+    {
+        GTP_ERROR("Failed to request GPIO:%d, ERRNO:%d", (s32)GTP_INT_PORT, ret);
+        ret = -ENODEV;
+    }
+    else
+    {
+        GTP_GPIO_AS_INT(GTP_INT_PORT);  
+        ts->client->irq = GTP_INT_IRQ;
+    }
+
+    ret = GTP_GPIO_REQUEST(GTP_RST_PORT, "GTP_RST_PORT");
+    if (ret < 0) 
+    {
+        GTP_ERROR("Failed to request GPIO:%d, ERRNO:%d",(s32)GTP_RST_PORT,ret);
+        ret = -ENODEV;
+    }
+
+    GTP_GPIO_AS_INPUT(GTP_RST_PORT);
+    gtp_reset_guitar(ts->client, 20);
+
+    
+    if(ret < 0)
+    {
+        GTP_GPIO_FREE(GTP_RST_PORT);
+        GTP_GPIO_FREE(GTP_INT_PORT);
+    }
+
+    return ret;
+}
+
+/*******************************************************
+Function:
+    Request interrupt.
+Input:
+    ts: private data.
+Output:
+    Executive outcomes.
+        0: succeed, -1: failed.
+*******************************************************/
+static s8 gtp_request_irq(struct goodix_ts_data *ts)
+{
+    s32 ret = -1;
+    const u8 irq_table[] = GTP_IRQ_TAB;
+
+    GTP_DEBUG("INT trigger type:%x", ts->int_trigger_type);
+
+    ret  = request_irq(ts->client->irq, 
+                       goodix_ts_irq_handler,
+                       irq_table[ts->int_trigger_type],
+                       ts->client->name,
+                       ts);
+    if (ret)
+    {
+        GTP_ERROR("Request IRQ failed!ERRNO:%d.", ret);
+        GTP_GPIO_AS_INPUT(GTP_INT_PORT);
+        GTP_GPIO_FREE(GTP_INT_PORT);
+
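+        /* fall back to hrtimer polling when the interrupt line cannot be used */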
+        hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+        ts->timer.function = goodix_ts_timer_handler;
+        hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+        return -1;
+    }
+    else 
+    {
+        gtp_irq_disable(ts);
+        ts->use_irq = 1;
+        return 0;
+    }
+}
+
+/*******************************************************
+Function:
+    Request input device Function.
+Input:
+    ts:private data.
+Output:
+    Executive outcomes.
+        0: succeed, otherwise: failed.
+*******************************************************/
+static s8 gtp_request_input_dev(struct goodix_ts_data *ts)
+{
+    s8 ret = -1;
+#if GTP_HAVE_TOUCH_KEY
+    u8 index = 0;
+#endif
+  
+    GTP_DEBUG_FUNC();
+  
+    ts->input_dev = input_allocate_device();
+    if (ts->input_dev == NULL)
+    {
+        GTP_ERROR("Failed to allocate input device.");
+        return -ENOMEM;
+    }
+
+    ts->input_dev->evbit[0] = BIT_MASK(EV_SYN) | BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) ;
+#if GTP_ICS_SLOT_REPORT
+    __set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
+    input_mt_init_slots(ts->input_dev, 10);     // in case of "out of memory"
+#else
+    ts->input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+#endif
+
+#if GTP_HAVE_TOUCH_KEY
+    for (index = 0; index < GTP_MAX_KEY_NUM; index++)
+    {
+        input_set_capability(ts->input_dev, EV_KEY, touch_key_array[index]);  
+    }
+#endif
+
+#if GTP_SLIDE_WAKEUP
+    input_set_capability(ts->input_dev, EV_KEY, KEY_POWER);
+#endif 
+
+#if GTP_WITH_PEN
+    // pen support
+    __set_bit(BTN_TOOL_PEN, ts->input_dev->keybit);
+    __set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
+    __set_bit(INPUT_PROP_POINTER, ts->input_dev->propbit);
+#endif
+
+#if GTP_CHANGE_X2Y
+    GTP_SWAP(ts->abs_x_max, ts->abs_y_max);
+#endif
+
+    input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, 0, ts->abs_x_max, 0, 0);
+    input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, 0, ts->abs_y_max, 0, 0);
+    input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
+    input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+    input_set_abs_params(ts->input_dev, ABS_MT_TRACKING_ID, 0, 255, 0, 0);
+
+    sprintf(phys, "input/ts");
+    ts->input_dev->name = goodix_ts_name;
+    ts->input_dev->phys = phys;
+    ts->input_dev->id.bustype = BUS_I2C;
+    ts->input_dev->id.vendor = 0xDEAD;
+    ts->input_dev->id.product = 0xBEEF;
+    ts->input_dev->id.version = 10427;
+    
+    ret = input_register_device(ts->input_dev);
+    if (ret)
+    {
+        GTP_ERROR("Register %s input device failed", ts->input_dev->name);
+        return -ENODEV;
+    }
+    
+#ifdef CONFIG_HAS_EARLYSUSPEND
+    ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+    ts->early_suspend.suspend = goodix_ts_early_suspend;
+    ts->early_suspend.resume = goodix_ts_late_resume;
+    register_early_suspend(&ts->early_suspend);
+#endif
+
+    return 0;
+}
+
+/*******************************************************
+Function:
+    I2c probe.
+Input:
+    client: i2c device struct.
+    id: device id.
+Output:
+    Executive outcomes. 
+        0: succeed.
+*******************************************************/
+static int goodix_ts_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+    s32 ret = -1;
+    struct goodix_ts_data *ts;
+    u16 version_info;
+
+    GTP_DEBUG_FUNC();
+    
+    //do NOT remove these logs
+    GTP_INFO("GTP Driver Version: %s", GTP_DRIVER_VERSION);
+    GTP_INFO("GTP Driver Built@%s, %s", __TIME__, __DATE__);
+    GTP_INFO("GTP I2C Address: 0x%02x", client->addr);
+
+    i2c_connect_client = client;
+    
+    if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) 
+    {
+        GTP_ERROR("I2C check functionality failed.");
+        return -ENODEV;
+    }
+    ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+    if (ts == NULL)
+    {
+        GTP_ERROR("Alloc GFP_KERNEL memory failed.");
+        return -ENOMEM;
+    }
+   
+    memset(ts, 0, sizeof(*ts));
+    INIT_WORK(&ts->work, goodix_ts_work_func);
+    ts->client = client;
+    spin_lock_init(&ts->irq_lock);          // 2.6.39 later
+    // ts->irq_lock = SPIN_LOCK_UNLOCKED;   // 2.6.39 & before
+    i2c_set_clientdata(client, ts);
+    
+    ts->gtp_rawdiff_mode = 0;
+
+    ret = gtp_request_io_port(ts);
+    if (ret < 0)
+    {
+        GTP_ERROR("GTP request IO port failed.");
+        kfree(ts);
+        return ret;
+    }
+
+    ret = gtp_i2c_test(client);
+    if (ret < 0)
+    {
+        GTP_ERROR("I2C communication ERROR!");
+    }
+
+#if GTP_AUTO_UPDATE
+    ret = gup_init_update_proc(ts);
+    if (ret < 0)
+    {
+        GTP_ERROR("Create update thread error.");
+    }
+#endif
+    
+    ret = gtp_init_panel(ts);
+    if (ret < 0)
+    {
+        GTP_ERROR("GTP init panel failed.");
+        ts->abs_x_max = GTP_MAX_WIDTH;
+        ts->abs_y_max = GTP_MAX_HEIGHT;
+        ts->int_trigger_type = GTP_INT_TRIGGER;
+    }
+
+    ret = gtp_request_input_dev(ts);
+    if (ret < 0)
+    {
+        GTP_ERROR("GTP request input dev failed");
+    }
+    
+    ret = gtp_request_irq(ts); 
+    if (ret < 0)
+    {
+        GTP_INFO("GTP works in polling mode.");
+    }
+    else
+    {
+        GTP_INFO("GTP works in interrupt mode.");
+    }
+
+    ret = gtp_read_version(client, &version_info);
+    if (ret < 0)
+    {
+        GTP_ERROR("Read version failed.");
+    }
+    if (ts->use_irq)
+    {
+        gtp_irq_enable(ts);
+    }
+    
+#if GTP_CREATE_WR_NODE
+    init_wr_node(client);
+#endif
+    
+#if GTP_ESD_PROTECT
+    gtp_esd_switch(client, SWITCH_ON);
+#endif
+    return 0;
+}
+
+
+/*******************************************************
+Function:
+    Goodix touchscreen driver release function.
+Input:
+    client: i2c device struct.
+Output:
+    Executive outcomes. 0---succeed.
+*******************************************************/
+static int goodix_ts_remove(struct i2c_client *client)
+{
+    struct goodix_ts_data *ts = i2c_get_clientdata(client);
+    
+    GTP_DEBUG_FUNC();
+    
+#ifdef CONFIG_HAS_EARLYSUSPEND
+    unregister_early_suspend(&ts->early_suspend);
+#endif
+
+#if GTP_CREATE_WR_NODE
+    uninit_wr_node();
+#endif
+
+#if GTP_ESD_PROTECT
+    destroy_workqueue(gtp_esd_check_workqueue);
+#endif
+
+    if (ts) 
+    {
+        if (ts->use_irq)
+        {
+            GTP_GPIO_AS_INPUT(GTP_INT_PORT);
+            GTP_GPIO_FREE(GTP_INT_PORT);
+            free_irq(client->irq, ts);
+        }
+        else
+        {
+            hrtimer_cancel(&ts->timer);
+        }
+    }   
+    
+    GTP_INFO("GTP driver removing...");
+    i2c_set_clientdata(client, NULL);
+    input_unregister_device(ts->input_dev);
+    kfree(ts);
+
+    return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*******************************************************
+Function:
+    Early suspend function.
+Input:
+    h: early_suspend struct.
+Output:
+    None.
+*******************************************************/
+static void goodix_ts_early_suspend(struct early_suspend *h)
+{
+    struct goodix_ts_data *ts;
+    s8 ret = -1;    
+    ts = container_of(h, struct goodix_ts_data, early_suspend);
+    
+    GTP_DEBUG_FUNC();
+
+#if GTP_ESD_PROTECT
+    ts->gtp_is_suspend = 1;
+    gtp_esd_switch(ts->client, SWITCH_OFF);
+#endif
+
+#if GTP_SLIDE_WAKEUP
+    ret = gtp_enter_doze(ts);
+#else
+    if (ts->use_irq)
+    {
+        gtp_irq_disable(ts);
+    }
+    else
+    {
+        hrtimer_cancel(&ts->timer);
+    }
+    ret = gtp_enter_sleep(ts);
+#endif 
+    if (ret < 0)
+    {
+        GTP_ERROR("GTP early suspend failed.");
+    }
+    // to avoid waking up while not sleeping
+    //  delay 48 + 10ms to ensure reliability    
+    msleep(58);   
+}
+
+/*******************************************************
+Function:
+    Late resume function.
+Input:
+    h: early_suspend struct.
+Output:
+    None.
+*******************************************************/
+static void goodix_ts_late_resume(struct early_suspend *h)
+{
+    struct goodix_ts_data *ts;
+    s8 ret = -1;
+    ts = container_of(h, struct goodix_ts_data, early_suspend);
+    
+    GTP_DEBUG_FUNC();
+    
+    ret = gtp_wakeup_sleep(ts);
+
+#if GTP_SLIDE_WAKEUP
+    doze_status = DOZE_DISABLED;
+#endif
+
+    if (ret < 0)
+    {
+        GTP_ERROR("GTP later resume failed.");
+    }
+
+    if (ts->use_irq)
+    {
+        gtp_irq_enable(ts);
+    }
+    else
+    {
+        hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+    }
+
+#if GTP_ESD_PROTECT
+    ts->gtp_is_suspend = 0;
+    gtp_esd_switch(ts->client, SWITCH_ON);
+#endif
+}
+#endif
+
+#if GTP_ESD_PROTECT
+/*******************************************************
+Function:
+    switch on & off esd delayed work
+Input:
+    client:  i2c device
+    on:      SWITCH_ON / SWITCH_OFF
+Output:
+    void
+*********************************************************/
+void gtp_esd_switch(struct i2c_client *client, s32 on)
+{
+    struct goodix_ts_data *ts;
+    
+    ts = i2c_get_clientdata(client);
+    if (SWITCH_ON == on)     // switch on esd 
+    {
+        if (!ts->esd_running)
+        {
+            ts->esd_running = 1;
+            GTP_INFO("Esd started");
+            queue_delayed_work(gtp_esd_check_workqueue, &gtp_esd_check_work, GTP_ESD_CHECK_CIRCLE);
+        }
+    }
+    else    // switch off esd
+    {
+        if (ts->esd_running)
+        {
+            ts->esd_running = 0;
+            GTP_INFO("Esd cancelled");
+            cancel_delayed_work_sync(&gtp_esd_check_work);
+        }
+    }
+}
+
+/*******************************************************
+Function:
+    Initialize external watchdog for esd protect
+Input:
+    client:  i2c device.
+Output:
+    result of i2c write operation. 
+        1: succeed, otherwise: failed
+*********************************************************/
+static s32 gtp_init_ext_watchdog(struct i2c_client *client)
+{
+    u8 opr_buffer[4] = {0x80, 0x40, 0xAA, 0xAA};
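+    /* initialize the external watchdog by writing 0xAA into 0x8040/0x8041,
+       the registers later polled by gtp_esd_check_func() */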
+    
+    struct i2c_msg msg;         // in case of recursively reset by calling gtp_i2c_write
+    s32 ret = -1;
+    s32 retries = 0;
+
+    GTP_DEBUG("Init external watchdog...");
+    GTP_DEBUG_FUNC();
+
+    msg.flags = !I2C_M_RD;
+    msg.addr  = client->addr;
+    msg.len   = 4;
+    msg.buf   = opr_buffer;
+
+    while(retries < 5)
+    {
+        ret = i2c_transfer(client->adapter, &msg, 1);
+        if (ret == 1)
+        {
+            return 1;
+        }
+        retries++;
+    }
+    if (retries >= 5)
+    {
+        GTP_ERROR("init external watchdog failed!");
+    }
+    return 0;
+}
+
+/*******************************************************
+Function:
+    Esd protect function.
+    Added external watchdog by meta, 2013/03/07
+Input:
+    work: delayed work
+Output:
+    None.
+*******************************************************/
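+// Watchdog protocol as implemented below: the host reads 0x8040/0x8041 every
+// GTP_ESD_CHECK_CIRCLE; a healthy IC has cleared 0x8040 (!= 0xAA) and kept
+// 0x8041 == 0xAA, and the host re-feeds it by writing 0xAA back to 0x8040.
+// Anything else is treated as a hung IC and triggers gtp_reset_guitar().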
+static void gtp_esd_check_func(struct work_struct *work)
+{
+    s32 i;
+    s32 ret = -1;
+    struct goodix_ts_data *ts = NULL;
+    u8 test[4] = {0x80, 0x40};
+    
+    GTP_DEBUG_FUNC();
+   
+    ts = i2c_get_clientdata(i2c_connect_client);
+
+    if (ts->gtp_is_suspend)
+    {
+        ts->esd_running = 0;
+        GTP_INFO("Esd terminated!");
+        return;
+    }
+    
+    for (i = 0; i < 3; i++)
+    {
+        ret = gtp_i2c_read(ts->client, test, 4);
+        
+        GTP_DEBUG("0x8040 = 0x%02X, 0x8041 = 0x%02X", test[2], test[3]);
+        if ((ret < 0))
+        {
+            // IIC communication problem
+            continue;
+        }
+        else
+        { 
+            if ((test[2] == 0xAA) || (test[3] != 0xAA))
+            {
+                // IC works abnormally..
+                i = 3;
+                break;  
+            }
+            else 
+            {
+                // IC works normally, Write 0x8040 0xAA, feed the dog
+                test[2] = 0xAA; 
+                gtp_i2c_write(ts->client, test, 3);
+                break;
+            }
+        }
+    }
+    if (i >= 3)
+    {
+        GTP_ERROR("IC Working ABNORMALLY, Resetting Guitar...");
+        gtp_reset_guitar(ts->client, 50);
+    }
+
+    if(!ts->gtp_is_suspend)
+    {
+        queue_delayed_work(gtp_esd_check_workqueue, &gtp_esd_check_work, GTP_ESD_CHECK_CIRCLE);
+    }
+    else
+    {
+        GTP_INFO("Esd terminated!");
+        ts->esd_running = 0;
+    }
+    return;
+}
+#endif
+
+static const struct i2c_device_id goodix_ts_id[] = {
+    { GTP_I2C_NAME, 0 },
+    { }
+};
+
+static struct i2c_driver goodix_ts_driver = {
+    .probe      = goodix_ts_probe,
+    .remove     = goodix_ts_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+    .suspend    = goodix_ts_early_suspend,
+    .resume     = goodix_ts_late_resume,
+#endif
+    .id_table   = goodix_ts_id,
+    .driver = {
+        .name     = GTP_I2C_NAME,
+        .owner    = THIS_MODULE,
+    },
+};
+
+/*******************************************************    
+Function:
+    Driver Install function.
+Input:
+    None.
+Output:
+    Executive Outcomes. 0---succeed.
+********************************************************/
+static int __devinit goodix_ts_init(void)
+{
+    s32 ret;
+
+    GTP_DEBUG_FUNC();   
+    GTP_INFO("GTP driver installing...");
+    goodix_wq = create_singlethread_workqueue("goodix_wq");
+    if (!goodix_wq)
+    {
+        GTP_ERROR("Creat workqueue failed.");
+        return -ENOMEM;
+    }
+#if GTP_ESD_PROTECT
+    INIT_DELAYED_WORK(&gtp_esd_check_work, gtp_esd_check_func);
+    gtp_esd_check_workqueue = create_workqueue("gtp_esd_check");
+#endif
+    ret = i2c_add_driver(&goodix_ts_driver);
+    return ret; 
+}
+
+/*******************************************************    
+Function:
+    Driver uninstall function.
+Input:
+    None.
+Output:
+    None.
+********************************************************/
+static void __exit goodix_ts_exit(void)
+{
+    GTP_DEBUG_FUNC();
+    GTP_INFO("GTP driver exited.");
+    i2c_del_driver(&goodix_ts_driver);
+    if (goodix_wq)
+    {
+        destroy_workqueue(goodix_wq);
+    }
+}
+
+late_initcall(goodix_ts_init);
+module_exit(goodix_ts_exit);
+
+MODULE_DESCRIPTION("GTP Series Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.h b/drivers/input/touchscreen/gt9xx/gt9xx.h
new file mode 100644
index 0000000..e375af5
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.h
@@ -0,0 +1,241 @@
+/* drivers/input/touchscreen/gt9xx.h
+ * 
+ * 2010 - 2013 Goodix Technology.
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * 
+ * This program is distributed in the hope that it will be a reference 
+ * to you, when you are integrating the GOODiX's CTP IC into your system, 
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of 
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
+ * General Public License for more details.
+ * 
+ */
+
+#ifndef _GOODIX_GT9XX_H_
+#define _GOODIX_GT9XX_H_
+
+#include <linux/kernel.h>
+#include <linux/hrtimer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <mach/gpio.h>
+#include <linux/earlysuspend.h>
+
+struct goodix_ts_data {
+    spinlock_t irq_lock;
+    struct i2c_client *client;
+    struct input_dev  *input_dev;
+    struct hrtimer timer;
+    struct work_struct  work;
+    struct early_suspend early_suspend;
+    s32 irq_is_disable;
+    s32 use_irq;
+    u16 abs_x_max;
+    u16 abs_y_max;
+    u8  max_touch_num;
+    u8  int_trigger_type;
+    u8  green_wake_mode;
+    u8  chip_type;
+    u8  enter_update;
+    u8  gtp_is_suspend;
+    u8  gtp_rawdiff_mode;
+    u8  gtp_cfg_len;
+    u8  fixed_cfg;
+    u8  esd_running;
+    u8  fw_error;
+};
+
+extern u16 show_len;
+extern u16 total_len;
+
+//***************************PART1:ON/OFF define*******************************
+#define GTP_CUSTOM_CFG        0
+#define GTP_CHANGE_X2Y        0
+#define GTP_DRIVER_SEND_CFG   1
+#define GTP_HAVE_TOUCH_KEY    0
+#define GTP_POWER_CTRL_SLEEP  0
+#define GTP_ICS_SLOT_REPORT   0
+
+#define GTP_AUTO_UPDATE       1     // auto updated by .bin file as default
+#define GTP_HEADER_FW_UPDATE  0     // auto updated by head_fw_array in gt9xx_firmware.h, function together with GTP_AUTO_UPDATE
+                               
+#define GTP_CREATE_WR_NODE    1
+#define GTP_ESD_PROTECT       0
+#define GTP_WITH_PEN          0
+
+#define GTP_SLIDE_WAKEUP      0
+#define GTP_DBL_CLK_WAKEUP    0     // double-click wakeup, function together with GTP_SLIDE_WAKEUP
+
+#define GTP_DEBUG_ON          1
+#define GTP_DEBUG_ARRAY_ON    0
+#define GTP_DEBUG_FUNC_ON     0
+
+//*************************** PART2:TODO define **********************************
+// STEP_1(REQUIRED): Define Configuration Information Group(s)
+// Sensor_ID Map:
+/* sensor_opt1 sensor_opt2 Sensor_ID
+    GND         GND         0 
+    VDDIO       GND         1 
+    NC          GND         2 
+    GND         NC/300K     3 
+    VDDIO       NC/300K     4 
+    NC          NC/300K     5 
+*/
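+// Illustrative example: a module strapped with sensor_opt1 = VDDIO and
+// sensor_opt2 = GND reads back Sensor_ID 1, so its config bytes would go
+// into CTP_CFG_GROUP2 below.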
+// TODO: define your own default or for Sensor_ID == 0 config here. 
+// The predefined one is just a sample config, which is not suitable for your tp in most cases.
+#define CTP_CFG_GROUP1 {\
+    0x41,0x1C,0x02,0xC0,0x03,0x0A,0x05,0x01,0x01,0x0F,\
+    0x23,0x0F,0x5F,0x41,0x03,0x05,0x00,0x00,0x00,0x00,\
+    0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x91,0x00,0x0A,\
+    0x28,0x00,0xB8,0x0B,0x00,0x00,0x00,0x9A,0x03,0x25,\
+    0x00,0x00,0x00,0x00,0x00,0x03,0x64,0x32,0x00,0x00,\
+    0x00,0x32,0x8C,0x94,0x05,0x01,0x05,0x00,0x00,0x96,\
+    0x0C,0x22,0xD8,0x0E,0x23,0x56,0x11,0x25,0xFF,0x13,\
+    0x28,0xA7,0x15,0x2E,0x00,0x00,0x10,0x30,0x48,0x00,\
+    0x56,0x4A,0x3A,0xFF,0xFF,0x16,0x00,0x00,0x00,0x00,\
+    0x00,0x01,0x1B,0x14,0x0D,0x19,0x00,0x00,0x01,0x00,\
+    0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+    0x00,0x00,0x1A,0x18,0x16,0x14,0x12,0x10,0x0E,0x0C,\
+    0x0A,0x08,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,\
+    0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,\
+    0xFF,0xFF,0x1D,0x1E,0x1F,0x20,0x22,0x24,0x28,0x29,\
+    0x0C,0x0A,0x08,0x00,0x02,0x04,0x05,0x06,0x0E,0xFF,\
+    0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,\
+    0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,\
+    0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,\
+    0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+    0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+    0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+    0x00,0x00,0x00,0x00,0x00,0x00,0x91,0x01\
+    }
+    
+// TODO: define your config for Sensor_ID == 1 here, if needed
+#define CTP_CFG_GROUP2 {\
+    }
+// TODO: define your config for Sensor_ID == 2 here, if needed
+#define CTP_CFG_GROUP3 {\
+    }
+
+// TODO: define your config for Sensor_ID == 3 here, if needed
+#define CTP_CFG_GROUP4 {\
+    }
+
+// TODO: define your config for Sensor_ID == 4 here, if needed
+#define CTP_CFG_GROUP5 {\
+    }
+
+// TODO: define your config for Sensor_ID == 5 here, if needed
+#define CTP_CFG_GROUP6 {\
+    }
+
+// STEP_2(REQUIRED): Customize your I/O ports & I/O operations
+#define GTP_RST_PORT    S5PV210_GPJ3(6)
+#define GTP_INT_PORT    S5PV210_GPH1(3)
+#define GTP_INT_IRQ     gpio_to_irq(GTP_INT_PORT)
+#define GTP_INT_CFG     S3C_GPIO_SFN(0xF)
+
+#define GTP_GPIO_AS_INPUT(pin)          do{\
+                                            gpio_direction_input(pin);\
+                                            s3c_gpio_setpull(pin, S3C_GPIO_PULL_NONE);\
+                                        }while(0)
+#define GTP_GPIO_AS_INT(pin)            do{\
+                                            GTP_GPIO_AS_INPUT(pin);\
+                                            s3c_gpio_cfgpin(pin, GTP_INT_CFG);\
+                                        }while(0)
+#define GTP_GPIO_GET_VALUE(pin)         gpio_get_value(pin)
+#define GTP_GPIO_OUTPUT(pin,level)      gpio_direction_output(pin,level)
+#define GTP_GPIO_REQUEST(pin, label)    gpio_request(pin, label)
+#define GTP_GPIO_FREE(pin)              gpio_free(pin)
+#define GTP_IRQ_TAB                     {IRQ_TYPE_EDGE_RISING, IRQ_TYPE_EDGE_FALLING, IRQ_TYPE_LEVEL_LOW, IRQ_TYPE_LEVEL_HIGH}
+
+// STEP_3(optional): Specify your special config info if needed
+#if GTP_CUSTOM_CFG
+  #define GTP_MAX_HEIGHT   800
+  #define GTP_MAX_WIDTH    480
+  #define GTP_INT_TRIGGER  0            // 0: Rising 1: Falling
+#else
+  #define GTP_MAX_HEIGHT   4096
+  #define GTP_MAX_WIDTH    4096
+  #define GTP_INT_TRIGGER  1
+#endif
+#define GTP_MAX_TOUCH         5
+#define GTP_ESD_CHECK_CIRCLE  2000      // nominal period in ms (passed unconverted to queue_delayed_work, i.e. as jiffies)
+
+// STEP_4(optional): If keys are available and reported as keys, config your key info here                             
+#if GTP_HAVE_TOUCH_KEY
+    #define GTP_KEY_TAB  {KEY_MENU, KEY_HOME, KEY_BACK}
+#endif
+
+//***************************PART3:OTHER define*********************************
+#define GTP_DRIVER_VERSION    "V1.8<2013/06/08>"
+#define GTP_I2C_NAME          "Goodix-TS"
+#define GTP_POLL_TIME         10     // jiffy: ms
+#define GTP_ADDR_LENGTH       2
+#define GTP_CONFIG_MIN_LENGTH 186
+#define GTP_CONFIG_MAX_LENGTH 240
+#define FAIL                  0
+#define SUCCESS               1
+#define SWITCH_OFF            0
+#define SWITCH_ON             1
+
+// Registers define
+#define GTP_READ_COOR_ADDR    0x814E
+#define GTP_REG_SLEEP         0x8040
+#define GTP_REG_SENSOR_ID     0x814A
+#define GTP_REG_CONFIG_DATA   0x8047
+#define GTP_REG_VERSION       0x8140
+
+#define RESOLUTION_LOC        3
+#define TRIGGER_LOC           8
+
+#define CFG_GROUP_LEN(p_cfg_grp)  (sizeof(p_cfg_grp) / sizeof(p_cfg_grp[0]))
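+// e.g. the sample CTP_CFG_GROUP1 above contains 228 bytes, so
+// CFG_GROUP_LEN(cfg_info_group1) evaluates to 228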
+// Log define
+#define GTP_INFO(fmt,arg...)           printk("<<-GTP-INFO->> "fmt"\n",##arg)
+#define GTP_ERROR(fmt,arg...)          printk("<<-GTP-ERROR->> "fmt"\n",##arg)
+#define GTP_DEBUG(fmt,arg...)          do{\
+                                         if(GTP_DEBUG_ON)\
+                                         printk("<<-GTP-DEBUG->> [%d]"fmt"\n",__LINE__, ##arg);\
+                                       }while(0)
+#define GTP_DEBUG_ARRAY(array, num)    do{\
+                                         s32 i;\
+                                         u8* a = array;\
+                                         if(GTP_DEBUG_ARRAY_ON)\
+                                         {\
+                                            printk("<<-GTP-DEBUG-ARRAY->>\n");\
+                                            for (i = 0; i < (num); i++)\
+                                            {\
+                                                printk("%02x   ", (a)[i]);\
+                                                if ((i + 1 ) %10 == 0)\
+                                                {\
+                                                    printk("\n");\
+                                                }\
+                                            }\
+                                            printk("\n");\
+                                        }\
+                                       }while(0)
+#define GTP_DEBUG_FUNC()               do{\
+                                         if(GTP_DEBUG_FUNC_ON)\
+                                         printk("<<-GTP-FUNC->> Func:%s@Line:%d\n",__func__,__LINE__);\
+                                       }while(0)
+#define GTP_SWAP(x, y)                 do{\
+                                         typeof(x) z = x;\
+                                         x = y;\
+                                         y = z;\
+                                       }while (0)
+
+//*****************************End of Part III********************************
+
+#endif /* _GOODIX_GT9XX_H_ */
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx_firmware.h b/drivers/input/touchscreen/gt9xx/gt9xx_firmware.h
new file mode 100644
index 0000000..3998bf0
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/gt9xx_firmware.h
@@ -0,0 +1,6 @@
+// meaningful only when GTP_HEADER_FW_UPDATE & GTP_AUTO_UPDATE are enabled
+// define your own firmware array here
+const unsigned char header_fw_array[] =
+{
+    
+};
\ No newline at end of file
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx_update.c b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
new file mode 100644
index 0000000..f564a6b
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
@@ -0,0 +1,1930 @@
+/* drivers/input/touchscreen/gt9xx_update.c
+ * 
+ * 2010 - 2012 Goodix Technology.
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * 
+ * This program is distributed in the hope that it will be a reference 
+ * to you, when you are integrating the GOODiX's CTP IC into your system, 
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of 
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
+ * General Public License for more details.
+ * 
+ * Latest Version:1.6
+ * Author: andrew@goodix.com
+ * Revision Record: 
+ *      V1.0:
+ *          first release. By Andrew, 2012/08/31
+ *      V1.2:
+ *          add force update,GT9110P pid map. By Andrew, 2012/10/15
+ *      V1.4:
+ *          1. add config auto update function;
+ *          2. modify enter_update_mode;
+ *          3. add update file cal checksum.
+ *                          By Andrew, 2012/12/12
+ *      V1.6: 
+ *          1. replace guitar_client with i2c_connect_client;
+ *          2. support firmware header array update.
+ *                          By Meta, 2013/03/11
+ */
+#include <linux/kthread.h>
+#include "gt9xx.h"
+
+#if GTP_HEADER_FW_UPDATE
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include "gt9xx_firmware.h"
+#endif
+
+#define GUP_REG_HW_INFO             0x4220
+#define GUP_REG_FW_MSG              0x41E4
+#define GUP_REG_PID_VID             0x8140
+
+#define GUP_SEARCH_FILE_TIMES       50
+#define UPDATE_FILE_PATH_2          "/data/_goodix_update_.bin"
+#define UPDATE_FILE_PATH_1          "/sdcard/_goodix_update_.bin"
+
+#define CONFIG_FILE_PATH_1          "/data/_goodix_config_.cfg"     
+#define CONFIG_FILE_PATH_2          "/sdcard/_goodix_config_.cfg"   
+
+#define FW_HEAD_LENGTH               14
+#define FW_SECTION_LENGTH            0x2000
+#define FW_DSP_ISP_LENGTH            0x1000
+#define FW_DSP_LENGTH                0x1000
+#define FW_BOOT_LENGTH               0x800
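+// Firmware image layout implied by the section offsets used in this file:
+//   [14-byte header][4 x 8KB ss51 sections][4KB dsp][2KB boot][4KB dsp_isp]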
+
+#define PACK_SIZE                    256
+#define MAX_FRAME_CHECK_TIME         5
+
+#define _bRW_MISCTL__SRAM_BANK       0x4048
+#define _bRW_MISCTL__MEM_CD_EN       0x4049
+#define _bRW_MISCTL__CACHE_EN        0x404B
+#define _bRW_MISCTL__TMR0_EN         0x40B0
+#define _rRW_MISCTL__SWRST_B0_       0x4180
+#define _bWO_MISCTL__CPU_SWRST_PULSE 0x4184
+#define _rRW_MISCTL__BOOTCTL_B0_     0x4190
+#define _rRW_MISCTL__BOOT_OPT_B0_    0x4218
+#define _rRW_MISCTL__BOOT_CTL_       0x5094
+
+#define FAIL    0
+#define SUCCESS 1
+
+#pragma pack(1)
+typedef struct 
+{
+    u8  hw_info[4];          //hardware info//
+    u8  pid[8];              //product id   //
+    u16 vid;                 //version id   //
+}st_fw_head;
+#pragma pack()
+
+typedef struct
+{
+    u8 force_update;
+    u8 fw_flag;
+    struct file *file; 
+    struct file *cfg_file;
+    st_fw_head  ic_fw_msg;
+    mm_segment_t old_fs;
+}st_update_msg;
+
+st_update_msg update_msg;
+u16 show_len;
+u16 total_len;
+u8 got_file_flag = 0;
+u8 searching_file = 0;
+extern u8 config[GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH];
+extern void gtp_reset_guitar(struct i2c_client *client, s32 ms);
+extern s32  gtp_send_cfg(struct i2c_client *client);
+extern struct i2c_client * i2c_connect_client;
+extern void gtp_irq_enable(struct goodix_ts_data *ts);
+extern void gtp_irq_disable(struct goodix_ts_data *ts);
+extern s32 gtp_i2c_read_dbl_check(struct i2c_client *, u16, u8 *, int);
+#if GTP_ESD_PROTECT
+extern void gtp_esd_switch(struct i2c_client *, s32);
+#endif
+/*******************************************************
+Function:
+    Read data from the i2c slave device.
+Input:
+    client:     i2c device.
+    buf[0~1]:   read start address.
+    buf[2~len-1]:   read data buffer.
+    len:    GTP_ADDR_LENGTH + read bytes count
+Output:
+    numbers of i2c_msgs to transfer: 
+      2: succeed, otherwise: failed
+*********************************************************/
+s32 gup_i2c_read(struct i2c_client *client, u8 *buf, s32 len)
+{
+    struct i2c_msg msgs[2];
+    s32 ret=-1;
+    s32 retries = 0;
+
+    GTP_DEBUG_FUNC();
+
+    msgs[0].flags = !I2C_M_RD;
+    msgs[0].addr  = client->addr;
+    msgs[0].len   = GTP_ADDR_LENGTH;
+    msgs[0].buf   = &buf[0];
+    //msgs[0].scl_rate = 300 * 1000;    // for Rockchip
+
+    msgs[1].flags = I2C_M_RD;
+    msgs[1].addr  = client->addr;
+    msgs[1].len   = len - GTP_ADDR_LENGTH;
+    msgs[1].buf   = &buf[GTP_ADDR_LENGTH];
+    //msgs[1].scl_rate = 300 * 1000;    
+
+    while(retries < 5)
+    {
+        ret = i2c_transfer(client->adapter, msgs, 2);
+        if(ret == 2)break;
+        retries++;
+    }
+
+    return ret;
+}
+
+/*******************************************************
+Function:
+    Write data to the i2c slave device.
+Input:
+    client:     i2c device.
+    buf[0~1]:   write start address.
+    buf[2~len-1]:   data buffer
+    len:    GTP_ADDR_LENGTH + write bytes count
+Output:
+    numbers of i2c_msgs to transfer: 
+        1: succeed, otherwise: failed
+*********************************************************/
+s32 gup_i2c_write(struct i2c_client *client,u8 *buf,s32 len)
+{
+    struct i2c_msg msg;
+    s32 ret=-1;
+    s32 retries = 0;
+
+    GTP_DEBUG_FUNC();
+
+    msg.flags = !I2C_M_RD;
+    msg.addr  = client->addr;
+    msg.len   = len;
+    msg.buf   = buf;
+    //msg.scl_rate = 300 * 1000;    // for Rockchip
+
+    while(retries < 5)
+    {
+        ret = i2c_transfer(client->adapter, &msg, 1);
+        if (ret == 1)break;
+        retries++;
+    }
+
+    return ret;
+}
+
+static s32 gup_init_panel(struct goodix_ts_data *ts)
+{
+    s32 ret = 0;
+    s32 i = 0;
+    u8 check_sum = 0;
+    u8 opr_buf[16];
+    u8 sensor_id = 0;
+
+    u8 cfg_info_group1[] = CTP_CFG_GROUP1;
+    u8 cfg_info_group2[] = CTP_CFG_GROUP2;
+    u8 cfg_info_group3[] = CTP_CFG_GROUP3;
+    u8 cfg_info_group4[] = CTP_CFG_GROUP4;
+    u8 cfg_info_group5[] = CTP_CFG_GROUP5;
+    u8 cfg_info_group6[] = CTP_CFG_GROUP6;
+    u8 *send_cfg_buf[] = {cfg_info_group1, cfg_info_group2, cfg_info_group3,
+                        cfg_info_group4, cfg_info_group5, cfg_info_group6};
+    u8 cfg_info_len[] = { CFG_GROUP_LEN(cfg_info_group1),
+                          CFG_GROUP_LEN(cfg_info_group2),
+                          CFG_GROUP_LEN(cfg_info_group3),
+                          CFG_GROUP_LEN(cfg_info_group4),
+                          CFG_GROUP_LEN(cfg_info_group5),
+                          CFG_GROUP_LEN(cfg_info_group6)};
+                          
+    if ((!cfg_info_len[1]) && (!cfg_info_len[2]) && 
+        (!cfg_info_len[3]) && (!cfg_info_len[4]) && 
+        (!cfg_info_len[5]))
+    {
+        sensor_id = 0; 
+    }
+    else
+    {
+        ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_SENSOR_ID, &sensor_id, 1);
+        if (SUCCESS == ret)
+        {
+            if (sensor_id >= 0x06)
+            {
+                GTP_ERROR("Invalid sensor_id(0x%02X), No Config Sent!", sensor_id);
+                return -1;
+            }
+        }
+        else
+        {
+            GTP_ERROR("Failed to get sensor_id, No config sent!");
+            return -1;
+        }
+    }
+    
+    GTP_DEBUG("Sensor_ID: %d", sensor_id);
+    
+    ts->gtp_cfg_len = cfg_info_len[sensor_id];
+    
+    if (ts->gtp_cfg_len < GTP_CONFIG_MIN_LENGTH)
+    {
+        GTP_ERROR("Sensor_ID(%d) matches with NULL or INVALID CONFIG GROUP! NO Config Sent! You need to check you header file CFG_GROUP section!", sensor_id);
+        return -1;
+    }
+    
+    ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_CONFIG_DATA, &opr_buf[0], 1);
+    
+    if (ret == SUCCESS)
+    {
+        GTP_DEBUG("CFG_GROUP%d Config Version: %d, IC Config Version: %d", sensor_id+1, 
+                    send_cfg_buf[sensor_id][0], opr_buf[0]);
+        
+        send_cfg_buf[sensor_id][0] = opr_buf[0];
+        ts->fixed_cfg = 0;
+        /*
+        if (opr_buf[0] < 90)    
+        {
+            grp_cfg_version = send_cfg_buf[sensor_id][0];       // backup group config version
+            send_cfg_buf[sensor_id][0] = 0x00;
+            ts->fixed_cfg = 0;
+        }
+        else        // treated as fixed config, not send config
+        {
+            GTP_INFO("Ic fixed config with config version(%d)", opr_buf[0]);
+            ts->fixed_cfg = 1;
+        }*/
+    }
+    else
+    {
+        GTP_ERROR("Failed to get ic config version!No config sent!");
+        return -1;
+    }
+    
+    memset(&config[GTP_ADDR_LENGTH], 0, GTP_CONFIG_MAX_LENGTH);
+    memcpy(&config[GTP_ADDR_LENGTH], send_cfg_buf[sensor_id], ts->gtp_cfg_len);
+
+    GTP_DEBUG("X_MAX = %d, Y_MAX = %d, TRIGGER = 0x%02x",
+        ts->abs_x_max, ts->abs_y_max, ts->int_trigger_type);
+
+    config[RESOLUTION_LOC]     = (u8)GTP_MAX_WIDTH;
+    config[RESOLUTION_LOC + 1] = (u8)(GTP_MAX_WIDTH>>8);
+    config[RESOLUTION_LOC + 2] = (u8)GTP_MAX_HEIGHT;
+    config[RESOLUTION_LOC + 3] = (u8)(GTP_MAX_HEIGHT>>8);
+    
+    if (GTP_INT_TRIGGER == 0)  //RISING
+    {
+        config[TRIGGER_LOC] &= 0xfe; 
+    }
+    else if (GTP_INT_TRIGGER == 1)  //FALLING
+    {
+        config[TRIGGER_LOC] |= 0x01;
+    }
+    
+    check_sum = 0;
+    for (i = GTP_ADDR_LENGTH; i < ts->gtp_cfg_len; i++)
+    {
+        check_sum += config[i];
+    }
+    config[ts->gtp_cfg_len] = (~check_sum) + 1;
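+    // checksum byte: two's complement of the running sum, so that the sum of
+    // the config bytes plus this byte is 0 modulo 256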
+
+    GTP_DEBUG_FUNC();
+    ret = gtp_send_cfg(ts->client);
+    if (ret < 0)
+    {
+        GTP_ERROR("Send config error.");
+    }
+
+    msleep(10);
+    return 0;
+}
+
+
+static u8 gup_get_ic_msg(struct i2c_client *client, u16 addr, u8* msg, s32 len)
+{
+    s32 i = 0;
+
+    msg[0] = (addr >> 8) & 0xff;
+    msg[1] = addr & 0xff;
+
+    for (i = 0; i < 5; i++)
+    {
+        if (gup_i2c_read(client, msg, GTP_ADDR_LENGTH + len) > 0)
+        {
+            break;
+        }
+    }
+
+    if (i >= 5)
+    {
+        GTP_ERROR("Read data from 0x%02x%02x failed!", msg[0], msg[1]);
+        return FAIL;
+    }
+
+    return SUCCESS;
+}
+
+static u8 gup_set_ic_msg(struct i2c_client *client, u16 addr, u8 val)
+{
+    s32 i = 0;
+    u8 msg[3];
+
+    msg[0] = (addr >> 8) & 0xff;
+    msg[1] = addr & 0xff;
+    msg[2] = val;
+
+    for (i = 0; i < 5; i++)
+    {
+        if (gup_i2c_write(client, msg, GTP_ADDR_LENGTH + 1) > 0)
+        {
+            break;
+        }
+    }
+
+    if (i >= 5)
+    {
+        GTP_ERROR("Set data to 0x%02x%02x failed!", msg[0], msg[1]);
+        return FAIL;
+    }
+
+    return SUCCESS;
+}
+
+static u8 gup_get_ic_fw_msg(struct i2c_client *client)
+{
+    s32 ret = -1;
+    u8  retry = 0;
+    u8  buf[16];
+    u8  i;
+    
+    // step1:get hardware info
+    ret = gtp_i2c_read_dbl_check(client, GUP_REG_HW_INFO, &buf[GTP_ADDR_LENGTH], 4);
+    if (FAIL == ret)
+    {
+        GTP_ERROR("[get_ic_fw_msg]get hw_info failed,exit");
+        return FAIL;
+    }
+     
+    // buf[2~5]: 00 06 90 00
+    // hw_info: 00 90 06 00
+    for(i=0; i<4; i++)
+    {
+        update_msg.ic_fw_msg.hw_info[i] = buf[GTP_ADDR_LENGTH + 3 - i];
+    } 
+    GTP_DEBUG("IC Hardware info:%02x%02x%02x%02x", update_msg.ic_fw_msg.hw_info[0], update_msg.ic_fw_msg.hw_info[1],
+                                                   update_msg.ic_fw_msg.hw_info[2], update_msg.ic_fw_msg.hw_info[3]);
+    // step2:get firmware message
+    for(retry=0; retry<2; retry++)
+    {
+        ret = gup_get_ic_msg(client, GUP_REG_FW_MSG, buf, 1);
+        if(FAIL == ret)
+        {
+            GTP_ERROR("Read firmware message fail.");
+            return ret;
+        }
+        
+        update_msg.force_update = buf[GTP_ADDR_LENGTH];
+        if((0xBE != update_msg.force_update)&&(!retry))
+        {
+            GTP_INFO("The check sum in ic is error.");
+            GTP_INFO("The IC will be updated by force.");
+            continue;
+        }
+        break;
+    }
+    GTP_DEBUG("IC force update flag:0x%x", update_msg.force_update);
+    
+    // step3:get pid & vid
+    ret = gtp_i2c_read_dbl_check(client, GUP_REG_PID_VID, &buf[GTP_ADDR_LENGTH], 6);
+    if (FAIL == ret)
+    {
+        GTP_ERROR("[get_ic_fw_msg]get pid & vid failed,exit");
+        return FAIL;
+    }
+    
+    memset(update_msg.ic_fw_msg.pid, 0, sizeof(update_msg.ic_fw_msg.pid));
+    memcpy(update_msg.ic_fw_msg.pid, &buf[GTP_ADDR_LENGTH], 4);
+    GTP_DEBUG("IC Product id:%s", update_msg.ic_fw_msg.pid);
+    
+    //GT9XX PID MAPPING
+    /*|-----FLASH-----RAM-----|
+      |------918------918-----|
+      |------968------968-----|
+      |------913------913-----|
+      |------913P-----913P----|
+      |------927------927-----|
+      |------927P-----927P----|
+      |------9110-----9110----|
+      |------9110P----9111----|*/
+    if(update_msg.ic_fw_msg.pid[0] != 0)
+    {
+        if(!memcmp(update_msg.ic_fw_msg.pid, "9111", 4))
+        {
+            GTP_DEBUG("IC Mapping Product id:%s", update_msg.ic_fw_msg.pid);
+            memcpy(update_msg.ic_fw_msg.pid, "9110P", 5);
+        }
+    }
+    
+    update_msg.ic_fw_msg.vid = buf[GTP_ADDR_LENGTH+4] + (buf[GTP_ADDR_LENGTH+5]<<8);
+    GTP_DEBUG("IC version id:%04x", update_msg.ic_fw_msg.vid);
+    
+    return SUCCESS;
+}
+
+s32 gup_enter_update_mode(struct i2c_client *client)
+{
+    s32 ret = -1;
+    s32 retry = 0;
+    u8 rd_buf[3];
+    
+    //step1:RST output low last at least 2ms
+    GTP_GPIO_OUTPUT(GTP_RST_PORT, 0);
+    msleep(2);
+    
+    //step2:select I2C slave addr,INT:0--0xBA;1--0x28.
+    GTP_GPIO_OUTPUT(GTP_INT_PORT, (client->addr == 0x14));
+    msleep(2);
+    
+    //step3:RST output high reset guitar
+    GTP_GPIO_OUTPUT(GTP_RST_PORT, 1);
+    
+    //20121211 modify start
+    msleep(5);
+    while(retry++ < 200)
+    {
+        //step4:Hold ss51 & dsp
+        ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
+        if(ret <= 0)
+        {
+            GTP_DEBUG("Hold ss51 & dsp I2C error,retry:%d", retry);
+            continue;
+        }
+        
+        //step5:Confirm hold
+        ret = gup_get_ic_msg(client, _rRW_MISCTL__SWRST_B0_, rd_buf, 1);
+        if(ret <= 0)
+        {
+            GTP_DEBUG("Hold ss51 & dsp I2C error,retry:%d", retry);
+            continue;
+        }
+        if(0x0C == rd_buf[GTP_ADDR_LENGTH])
+        {
+            GTP_DEBUG("Hold ss51 & dsp confirm SUCCESS");
+            break;
+        }
+        GTP_DEBUG("Hold ss51 & dsp confirm 0x4180 failed,value:%d", rd_buf[GTP_ADDR_LENGTH]);
+    }
+    if(retry >= 200)
+    {
+        GTP_ERROR("Enter update Hold ss51 failed.");
+        return FAIL;
+    }
+    
+    //step6:DSP_CK and DSP_ALU_CK PowerOn
+    ret = gup_set_ic_msg(client, 0x4010, 0x00);
+    
+    //20121211 modify end
+    return ret;
+}
+
+void gup_leave_update_mode(void)
+{
+    GTP_GPIO_AS_INT(GTP_INT_PORT);
+    
+    GTP_DEBUG("[leave_update_mode]reset chip.");
+    gtp_reset_guitar(i2c_connect_client, 20);
+}
+
+// Decide whether the firmware in NVRAM should be reprogrammed.
+// Conditions for a normal update:
+//  1. the hardware info is the same
+//  2. the product id is the same
+//  3. the firmware version in the update file is greater than the one in the IC,
+//     or the checksum in the IC is wrong
+/* Update Conditions: 
+    1. Same hardware info
+    2. Same PID
+    3. File PID > IC PID
+   Force Update Conditions:
+    1. Wrong ic firmware checksum
+    2. INVALID IC PID or VID
+    3. IC PID == 91XX || File PID == 91XX
+*/
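+// Illustrative example (hypothetical values): same hw_info, file PID "968",
+// file VID 0x1041 vs IC VID 0x1040 -> update proceeds; with file VID <= IC VID
+// the update is aborted unless one of the force-update conditions above holds.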
+
+static u8 gup_enter_update_judge(st_fw_head *fw_head)
+{
+    u16 u16_tmp;
+    s32 i = 0;
+    
+    u16_tmp = fw_head->vid;
+    fw_head->vid = (u16)(u16_tmp>>8) + (u16)(u16_tmp<<8);
+
+    GTP_DEBUG("FILE HARDWARE INFO:%02x%02x%02x%02x", fw_head->hw_info[0], fw_head->hw_info[1], fw_head->hw_info[2], fw_head->hw_info[3]);
+    GTP_DEBUG("FILE PID:%s", fw_head->pid);
+    GTP_DEBUG("FILE VID:%04x", fw_head->vid);
+
+    GTP_DEBUG("IC HARDWARE INFO:%02x%02x%02x%02x", update_msg.ic_fw_msg.hw_info[0], update_msg.ic_fw_msg.hw_info[1],
+                                                   update_msg.ic_fw_msg.hw_info[2], update_msg.ic_fw_msg.hw_info[3]);
+    GTP_DEBUG("IC PID:%s", update_msg.ic_fw_msg.pid);
+    GTP_DEBUG("IC VID:%04x", update_msg.ic_fw_msg.vid);
+
+    //First two conditions
+    if ( !memcmp(fw_head->hw_info, update_msg.ic_fw_msg.hw_info, sizeof(update_msg.ic_fw_msg.hw_info)))
+    {
+        GTP_DEBUG("Get the same hardware info.");
+        if( update_msg.force_update != 0xBE )
+        {
+            GTP_INFO("FW chksum error,need enter update.");
+            return SUCCESS;
+        }
+        
+        // 20130523 start
+        if (strlen(update_msg.ic_fw_msg.pid) < 3)
+        {
+            GTP_INFO("Illegal IC pid, need enter update");
+            return SUCCESS;
+        }
+        else
+        {
+            for (i = 0; i < 3; i++)
+            {
+                if ((update_msg.ic_fw_msg.pid[i] < 0x30) || (update_msg.ic_fw_msg.pid[i] > 0x39))
+                {
+                    GTP_INFO("Illegal IC pid, out of bound, need enter update");
+                    return SUCCESS;
+                }
+            }
+        }
+        // 20130523 end
+        
+        
+        if (( !memcmp(fw_head->pid, update_msg.ic_fw_msg.pid, (strlen(fw_head->pid)<3?3:strlen(fw_head->pid))))||
+            (!memcmp(update_msg.ic_fw_msg.pid, "91XX", 4))||
+            (!memcmp(fw_head->pid, "91XX", 4)))
+        {
+            if(!memcmp(fw_head->pid, "91XX", 4))
+            {
+                GTP_DEBUG("Force none same pid update mode.");
+            }
+            else
+            {
+                GTP_DEBUG("Get the same pid.");
+            }
+            //The third condition
+            if (fw_head->vid > update_msg.ic_fw_msg.vid)
+            {
+
+                GTP_INFO("Need enter update.");
+                return SUCCESS;
+            }
+            GTP_ERROR("Don't meet the third condition.");
+            GTP_ERROR("File VID <= Ic VID, update aborted!");
+        }
+        else
+        {
+            GTP_ERROR("File PID != Ic PID, update aborted!");
+        }
+    }
+    else
+    {
+        GTP_ERROR("Different Hardware, update aborted!");
+    }
+    return FAIL;
+}
+
+static u8 ascii2hex(u8 a)
+{
+    s8 value = 0;
+
+    if(a >= '0' && a <= '9')
+    {
+        value = a - '0';
+    }
+    else if(a >= 'A' && a <= 'F')
+    {
+        value = a - 'A' + 0x0A;
+    }
+    else if(a >= 'a' && a <= 'f')
+    {
+        value = a - 'a' + 0x0A;
+    }
+    else
+    {
+        value = 0xff;
+    }
+    
+    return value;
+}
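+// e.g. ascii2hex('4') == 0x04, ascii2hex('b') == 0x0B; any non-hex character
+// yields 0xff, which gup_update_config treats as an illegal config file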
+
+static s8 gup_update_config(struct i2c_client *client)
+{
+    s32 file_len = 0;
+    s32 ret = 0;
+    s32 i = 0;
+    s32 file_cfg_len = 0;
+    s32 chip_cfg_len = 0;
+    s32 count = 0;
+    u8 *buf;
+    u8 *pre_buf;
+    u8 *file_config;
+    //u8 checksum = 0;
+    u8 pid[8];
+    
+    if(NULL == update_msg.cfg_file)
+    {
+        GTP_ERROR("[update_cfg]No need to upgrade config!");
+        return FAIL;
+    }
+    file_len = update_msg.cfg_file->f_op->llseek(update_msg.cfg_file, 0, SEEK_END);
+    
+    ret = gup_get_ic_msg(client, GUP_REG_PID_VID, pid, 6);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[update_cfg]Read product id & version id fail.");
+        return FAIL;
+    }
+    pid[5] = '\0';
+    GTP_DEBUG("update cfg get pid:%s", &pid[GTP_ADDR_LENGTH]);
+    
+    chip_cfg_len = 186;
+    if(!memcmp(&pid[GTP_ADDR_LENGTH], "968", 3) || 
+       !memcmp(&pid[GTP_ADDR_LENGTH], "910", 3) ||
+       !memcmp(&pid[GTP_ADDR_LENGTH], "960", 3))
+    {
+        chip_cfg_len = 228;
+    }
+    GTP_DEBUG("[update_cfg]config file len:%d", file_len);
+    GTP_DEBUG("[update_cfg]need config len:%d",chip_cfg_len);
+    if((file_len+5) < chip_cfg_len*5)
+    {
+        GTP_ERROR("Config length error");
+        return -1;
+    }
+    
+    buf = (u8*)kzalloc(file_len, GFP_KERNEL);
+    pre_buf = (u8*)kzalloc(file_len, GFP_KERNEL);
+    file_config = (u8*)kzalloc(chip_cfg_len + GTP_ADDR_LENGTH, GFP_KERNEL);
+    update_msg.cfg_file->f_op->llseek(update_msg.cfg_file, 0, SEEK_SET);
+    
+    GTP_DEBUG("[update_cfg]Read config from file.");
+    ret = update_msg.cfg_file->f_op->read(update_msg.cfg_file, (char*)pre_buf, file_len, &update_msg.cfg_file->f_pos);
+    if(ret<0)
+    {
+        GTP_ERROR("[update_cfg]Read config file failed.");
+        goto update_cfg_file_failed;
+    }
+    
+    GTP_DEBUG("[update_cfg]Delete illgal charactor.");
+    for(i=0,count=0; i<file_len; i++)
+    {
+        if (pre_buf[i] == ' ' || pre_buf[i] == '\r' || pre_buf[i] == '\n')
+        {
+            continue;
+        }
+        buf[count++] = pre_buf[i];
+    }
+    
+    GTP_DEBUG("[update_cfg]Ascii to hex.");
+    file_config[0] = GTP_REG_CONFIG_DATA >> 8;
+    file_config[1] = GTP_REG_CONFIG_DATA & 0xff;
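+    // assumption implied by the i += 5 stride below: after stripping whitespace,
+    // the .cfg text is a sequence of 5-character "0xNN," tokens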
+    for(i=0,file_cfg_len=GTP_ADDR_LENGTH; i<count; i+=5)
+    {
+        if((buf[i]=='0') && ((buf[i+1]=='x') || (buf[i+1]=='X')))
+        {
+            u8 high,low;
+            high = ascii2hex(buf[i+2]);
+            low = ascii2hex(buf[i+3]);
+            
+            if((high == 0xFF) || (low == 0xFF))
+            {
+                ret = 0;
+                GTP_ERROR("[update_cfg]Illegal config file.");
+                goto update_cfg_file_failed;
+            }
+            file_config[file_cfg_len++] = (high<<4) + low;
+        }
+        else
+        {
+            ret = 0;
+            GTP_ERROR("[update_cfg]Illegal config file.");
+            goto update_cfg_file_failed;
+        }
+    }
+    
+//    //cal checksum
+//    for(i=GTP_ADDR_LENGTH; i<chip_cfg_len; i++)
+//    {
+//        checksum += file_config[i];
+//    }
+//    file_config[chip_cfg_len] = (~checksum) + 1;
+//    file_config[chip_cfg_len+1] = 0x01;
+    
+    GTP_DEBUG("config:");
+    GTP_DEBUG_ARRAY(file_config+2, file_cfg_len);
+    
+    i = 0;
+    while(i++ < 5)
+    {
+        ret = gup_i2c_write(client, file_config, file_cfg_len);
+        if(ret > 0)
+        {
+            GTP_INFO("[update_cfg]Send config SUCCESS.");
+            break;
+        }
+        GTP_ERROR("[update_cfg]Send config i2c error.");
+    }
+    
+update_cfg_file_failed:
+    kfree(pre_buf);
+    kfree(buf);
+    kfree(file_config);
+    return ret;
+}
+
+#if GTP_HEADER_FW_UPDATE
+static u8 gup_check_fs_mounted(char *path_name)
+{
+    struct path root_path;
+    struct path path;
+    int err;
+    err = kern_path("/", LOOKUP_FOLLOW, &root_path);
+
+    if (err)
+    {
+        GTP_DEBUG("\"/\" NOT Mounted: %d", err);
+        return FAIL;
+    }
+    err = kern_path(path_name, LOOKUP_FOLLOW, &path);
+
+    if (err)
+    {
+        GTP_DEBUG("/data/ NOT Mounted: %d", err);
+        return FAIL;
+    }
+
+    return SUCCESS;
+    
+    /*
+    if (path.mnt->mnt_sb == root_path.mnt->mnt_sb)
+    {
+        //-- not mounted
+        return FAIL;
+    }
+    else
+    {
+        return SUCCESS;
+    }*/
+
+}
+#endif
+static u8 gup_check_update_file(struct i2c_client *client, st_fw_head* fw_head, u8* path)
+{
+    s32 ret = 0;
+    s32 i = 0;
+    s32 fw_checksum = 0;
+    u8 buf[FW_HEAD_LENGTH];
+    
+    if (path)
+    {
+        GTP_DEBUG("Update File path:%s, %d", path, strlen(path));
+        update_msg.file = filp_open(path, O_RDONLY, 0);
+
+        if (IS_ERR(update_msg.file))
+        {
+            GTP_ERROR("Open update file(%s) error!", path);
+            return FAIL;
+        }
+    }
+    else
+    {
+#if GTP_HEADER_FW_UPDATE
+        for (i = 0; i < (GUP_SEARCH_FILE_TIMES); i++)
+        {
+            GTP_DEBUG("Waiting for /data mounted [%d]", i);
+
+            if (gup_check_fs_mounted("/data") == SUCCESS)
+            {
+                GTP_DEBUG("/data Mounted!");
+                break;
+            }
+            msleep(3000);
+        }
+        if (i >= (GUP_SEARCH_FILE_TIMES))
+        {
+            GTP_ERROR("Wait for /data mounted timeout!");
+            return FAIL;
+        }
+        
+        // update config
+        update_msg.cfg_file = filp_open(CONFIG_FILE_PATH_1, O_RDONLY, 0);
+        if (IS_ERR(update_msg.cfg_file))
+        {
+            GTP_DEBUG("%s is unavailable", CONFIG_FILE_PATH_1);
+        }
+        else
+        {
+            GTP_INFO("Update Config File: %s", CONFIG_FILE_PATH_1);
+            ret = gup_update_config(client);
+            if(ret <= 0)
+            {
+                GTP_ERROR("Update config failed.");
+            }
+            filp_close(update_msg.cfg_file, NULL);
+        }
+        
+        if (sizeof(header_fw_array) < (FW_HEAD_LENGTH+FW_SECTION_LENGTH*4+FW_DSP_ISP_LENGTH+FW_DSP_LENGTH+FW_BOOT_LENGTH))
+        {
+            GTP_ERROR("INVALID header_fw_array, check your gt9xx_firmware.h file!");
+            return FAIL;           
+        }
+        update_msg.file = filp_open(UPDATE_FILE_PATH_2, O_CREAT | O_RDWR, 0666);
+        if ((IS_ERR(update_msg.file)))
+        {
+            GTP_ERROR("Failed to Create file: %s for fw_header!", UPDATE_FILE_PATH_2);
+            return FAIL;
+        }
+        update_msg.file->f_op->llseek(update_msg.file, 0, SEEK_SET);
+        update_msg.file->f_op->write(update_msg.file, (char *)header_fw_array, sizeof(header_fw_array), &update_msg.file->f_pos);
+        filp_close(update_msg.file, NULL);
+        update_msg.file = filp_open(UPDATE_FILE_PATH_2, O_RDONLY, 0);
+#else
+        u8 fp_len = max(sizeof(UPDATE_FILE_PATH_1), sizeof(UPDATE_FILE_PATH_2));
+        u8 cfp_len = max(sizeof(CONFIG_FILE_PATH_1), sizeof(CONFIG_FILE_PATH_2));
+        u8 *search_update_path = (u8*)kzalloc(fp_len, GFP_KERNEL);
+        u8 *search_cfg_path = (u8*)kzalloc(cfp_len, GFP_KERNEL);
+        // Begin searching for the update files; the config file and firmware file must be in the same path (path 1 or path 2).
+        searching_file = 1;
+        for (i = 0; i < GUP_SEARCH_FILE_TIMES; i++)
+        {
+            if (searching_file == 0)
+            {
+                kfree(search_update_path);
+                kfree(search_cfg_path);
+                GTP_INFO(".bin/.cfg update file search forcely terminated!");
+                return FAIL;
+            }
+            if(i%2)
+            {
+                memcpy(search_update_path, UPDATE_FILE_PATH_1, sizeof(UPDATE_FILE_PATH_1));
+                memcpy(search_cfg_path, CONFIG_FILE_PATH_1, sizeof(CONFIG_FILE_PATH_1));
+            }
+            else
+            {
+                memcpy(search_update_path, UPDATE_FILE_PATH_2, sizeof(UPDATE_FILE_PATH_2));
+                memcpy(search_cfg_path, CONFIG_FILE_PATH_2, sizeof(CONFIG_FILE_PATH_2));
+            }
+            
+            if(!(got_file_flag&0x0F))
+            {
+                update_msg.file = filp_open(search_update_path, O_RDONLY, 0);
+                if(!IS_ERR(update_msg.file))
+                {
+                    GTP_DEBUG("Find the bin file");
+                    got_file_flag |= 0x0F;
+                }
+            }
+            if(!(got_file_flag&0xF0))
+            {
+                update_msg.cfg_file = filp_open(search_cfg_path, O_RDONLY, 0);
+                if(!IS_ERR(update_msg.cfg_file))
+                {
+                    GTP_DEBUG("Find the cfg file");
+                    got_file_flag |= 0xF0;
+                }
+            }
+            
+            if(got_file_flag)
+            {
+                if(got_file_flag == 0xFF)
+                {
+                    break;
+                }
+                else
+                {
+                    i += 4;
+                }
+            }
+            GTP_DEBUG("%3d:Searching %s %s file...", i, (got_file_flag&0x0F)?"":"bin", (got_file_flag&0xF0)?"":"cfg");
+            msleep(3000);
+        }
+        searching_file = 0;
+        kfree(search_update_path);
+        kfree(search_cfg_path);
+        
+        if(!got_file_flag)
+        {
+            GTP_ERROR("Can't find update file.");
+            goto load_failed;
+        }
+        
+        if(got_file_flag&0xF0)
+        {
+            GTP_DEBUG("Got the update config file.");
+            ret = gup_update_config(client);
+            if(ret <= 0)
+            {
+                GTP_ERROR("Update config failed.");
+            }
+            filp_close(update_msg.cfg_file, NULL);
+            msleep(500);                // wait for the config to be stored in flash
+        }
+        if(got_file_flag&0x0F)
+        {
+            GTP_DEBUG("Got the update firmware file.");
+        }
+        else
+        {
+            GTP_ERROR("No need to upgrade firmware.");
+            goto load_failed;
+        }
+#endif
+    }
+    
+    update_msg.old_fs = get_fs();
+    set_fs(KERNEL_DS);
+
+    update_msg.file->f_op->llseek(update_msg.file, 0, SEEK_SET);
+    //update_msg.file->f_pos = 0;
+
+    ret = update_msg.file->f_op->read(update_msg.file, (char*)buf, FW_HEAD_LENGTH, &update_msg.file->f_pos);
+    if (ret < 0)
+    {
+        GTP_ERROR("Read firmware head in update file error.");
+        goto load_failed;
+    }
+    memcpy(fw_head, buf, FW_HEAD_LENGTH);
+    
+    //check firmware legality
+    fw_checksum = 0;
+    for(i=0; i<FW_SECTION_LENGTH*4+FW_DSP_ISP_LENGTH+FW_DSP_LENGTH+FW_BOOT_LENGTH; i+=2)
+    {
+        u16 temp;
+        ret = update_msg.file->f_op->read(update_msg.file, (char*)buf, 2, &update_msg.file->f_pos);
+        if (ret < 0)
+        {
+            GTP_ERROR("Read firmware file error.");
+            goto load_failed;
+        }
+        //GTP_DEBUG("BUF[0]:%x", buf[0]);
+        temp = (buf[0]<<8) + buf[1];
+        fw_checksum += temp;
+    }
+    
+    GTP_DEBUG("firmware checksum:%x", fw_checksum&0xFFFF);
+    if(fw_checksum&0xFFFF)
+    {
+        GTP_ERROR("Illegal firmware file.");
+        goto load_failed;    
+    }
+    
+    return SUCCESS;
+
+load_failed:
+    set_fs(update_msg.old_fs);
+    return FAIL;
+}
+
+#if 0
+static u8 gup_check_update_header(struct i2c_client *client, st_fw_head* fw_head)
+{
+    const u8* pos;
+    int i = 0;
+    u8 mask_num = 0;
+    s32 ret = 0;
+
+    pos = HEADER_UPDATE_DATA;
+      
+    memcpy(fw_head, pos, FW_HEAD_LENGTH);
+    pos += FW_HEAD_LENGTH;
+
+    ret = gup_enter_update_judge(fw_head);
+    if(SUCCESS == ret)
+    {
+        return SUCCESS;
+    }
+    return FAIL;
+}
+#endif
+
+static u8 gup_burn_proc(struct i2c_client *client, u8 *burn_buf, u16 start_addr, u16 total_length)
+{
+    s32 ret = 0;
+    u16 burn_addr = start_addr;
+    u16 frame_length = 0;
+    u16 burn_length = 0;
+    u8  wr_buf[PACK_SIZE + GTP_ADDR_LENGTH];
+    u8  rd_buf[PACK_SIZE + GTP_ADDR_LENGTH];
+    u8  retry = 0;
+    
+    GTP_DEBUG("Begin burn %dk data to addr 0x%x", (total_length/1024), start_addr);
+    while(burn_length < total_length)
+    {
+        GTP_DEBUG("B/T:%04d/%04d", burn_length, total_length);
+        frame_length = ((total_length - burn_length) > PACK_SIZE) ? PACK_SIZE : (total_length - burn_length);
+        wr_buf[0] = (u8)(burn_addr>>8);
+        rd_buf[0] = wr_buf[0];
+        wr_buf[1] = (u8)burn_addr;
+        rd_buf[1] = wr_buf[1];
+        memcpy(&wr_buf[GTP_ADDR_LENGTH], &burn_buf[burn_length], frame_length);
+        
+        for(retry = 0; retry < MAX_FRAME_CHECK_TIME; retry++)
+        {
+            ret = gup_i2c_write(client, wr_buf, GTP_ADDR_LENGTH + frame_length);
+            if(ret <= 0)
+            {
+                GTP_ERROR("Write frame data i2c error.");
+                continue;
+            }
+            ret = gup_i2c_read(client, rd_buf, GTP_ADDR_LENGTH + frame_length);
+            if(ret <= 0)
+            {
+                GTP_ERROR("Read back frame data i2c error.");
+                continue;
+            }
+            
+            if(memcmp(&wr_buf[GTP_ADDR_LENGTH], &rd_buf[GTP_ADDR_LENGTH], frame_length))
+            {
+                GTP_ERROR("Check frame data fail,not equal.");
+                GTP_DEBUG("write array:");
+                GTP_DEBUG_ARRAY(&wr_buf[GTP_ADDR_LENGTH], frame_length);
+                GTP_DEBUG("read array:");
+                GTP_DEBUG_ARRAY(&rd_buf[GTP_ADDR_LENGTH], frame_length);
+                continue;
+            }
+            else
+            {
+                //GTP_DEBUG("Check frame data success.");
+                break;
+            }
+        }
+        if(retry >= MAX_FRAME_CHECK_TIME)
+        {
+            GTP_ERROR("Burn frame data time out,exit.");
+            return FAIL;
+        }
+        burn_length += frame_length;
+        burn_addr += frame_length;
+    }
+    return SUCCESS;
+}
+
+static u8 gup_load_section_file(u8* buf, u16 offset, u16 length)
+{
+    s32 ret = 0;
+    
+    if(update_msg.file == NULL)
+    {
+        GTP_ERROR("cannot find update file,load section file fail.");
+        return FAIL;
+    }
+    update_msg.file->f_pos = FW_HEAD_LENGTH + offset;
+    
+    ret = update_msg.file->f_op->read(update_msg.file, (char*)buf, length, &update_msg.file->f_pos);
+    if(ret < 0)
+    {
+        GTP_ERROR("Read update file fail.");
+        return FAIL;
+    }
+    
+    return SUCCESS;
+}
+
+static u8 gup_recall_check(struct i2c_client *client, u8* chk_src, u16 start_rd_addr, u16 chk_length)
+{
+    u8  rd_buf[PACK_SIZE + GTP_ADDR_LENGTH];
+    s32 ret = 0;
+    u16 recall_addr = start_rd_addr;
+    u16 recall_length = 0;
+    u16 frame_length = 0;
+
+    while(recall_length < chk_length)
+    {
+        frame_length = ((chk_length - recall_length) > PACK_SIZE) ? PACK_SIZE : (chk_length - recall_length);
+        ret = gup_get_ic_msg(client, recall_addr, rd_buf, frame_length);
+        if(ret <= 0)
+        {
+            GTP_ERROR("recall i2c error,exit");
+            return FAIL;
+        }
+        
+        if(memcmp(&rd_buf[GTP_ADDR_LENGTH], &chk_src[recall_length], frame_length))
+        {
+            GTP_ERROR("Recall frame data fail,not equal.");
+            GTP_DEBUG("chk_src array:");
+            GTP_DEBUG_ARRAY(&chk_src[recall_length], frame_length);
+            GTP_DEBUG("recall array:");
+            GTP_DEBUG_ARRAY(&rd_buf[GTP_ADDR_LENGTH], frame_length);
+            return FAIL;
+        }
+        
+        recall_length += frame_length;
+        recall_addr += frame_length;
+    }
+    GTP_DEBUG("Recall check %dk firmware success.", (chk_length/1024));
+    
+    return SUCCESS;
+}
+
+static u8 gup_burn_fw_section(struct i2c_client *client, u8 *fw_section, u16 start_addr, u8 bank_cmd )
+{
+    s32 ret = 0;
+    u8  rd_buf[5];
+  
+    //step1:hold ss51 & dsp
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_section]hold ss51 & dsp fail.");
+        return FAIL;
+    }
+    
+    //step2:set scramble
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_section]set scramble fail.");
+        return FAIL;
+    }
+    
+    //step3:select bank
+    ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, (bank_cmd >> 4)&0x0F);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_section]select bank %d fail.", (bank_cmd >> 4)&0x0F);
+        return FAIL;
+    }
+    
+    //step4:enable accessing code
+    ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x01);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_section]enable accessing code fail.");
+        return FAIL;
+    }
+    
+    //step5:burn 8k fw section
+    ret = gup_burn_proc(client, fw_section, start_addr, FW_SECTION_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_section]burn fw_section fail.");
+        return FAIL;
+    }
+    
+    //step6:hold ss51 & release dsp
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x04);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_section]hold ss51 & release dsp fail.");
+        return FAIL;
+    }
+    //must delay
+    msleep(1);
+    
+    //step7:send burn cmd to move data to flash from sram
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, bank_cmd&0x0f);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_section]send burn cmd fail.");
+        return FAIL;
+    }
+    GTP_DEBUG("[burn_fw_section]Wait for the burn is complete......");
+    do{
+        ret = gup_get_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, rd_buf, 1);
+        if(ret <= 0)
+        {
+            GTP_ERROR("[burn_fw_section]Get burn state fail");
+            return FAIL;
+        }
+        msleep(10);
+        //GTP_DEBUG("[burn_fw_section]Get burn state:%d.", rd_buf[GTP_ADDR_LENGTH]);
+    }while(rd_buf[GTP_ADDR_LENGTH]);
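+    // the loop above polls _rRW_MISCTL__BOOT_CTL_ until it reads back 0,
+    // presumably cleared by the boot/ISP code once the SRAM-to-flash copy
+    // has finished (inferred from this flow, not from a datasheet)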
+
+    //step8:select bank
+    ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, (bank_cmd >> 4)&0x0F);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_section]select bank %d fail.", (bank_cmd >> 4)&0x0F);
+        return FAIL;
+    }
+    
+    //step9:enable accessing code
+    ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x01);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_section]enable accessing code fail.");
+        return FAIL;
+    }
+    
+    //step10:recall 8k fw section
+    ret = gup_recall_check(client, fw_section, start_addr, FW_SECTION_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_section]recall check 8k firmware fail.");
+        return FAIL;
+    }
+    
+    //step11:disable accessing code
+    ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x00);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_section]disable accessing code fail.");
+        return FAIL;
+    }
+    
+    return SUCCESS;
+}
+
+static u8 gup_burn_dsp_isp(struct i2c_client *client)
+{
+    s32 ret = 0;
+    u8* fw_dsp_isp = NULL;
+    u8  retry = 0;
+    
+    GTP_DEBUG("[burn_dsp_isp]Begin burn dsp isp---->>");
+    
+    //step1:alloc memory
+    GTP_DEBUG("[burn_dsp_isp]step1:alloc memory");
+    while(retry++ < 5)
+    {
+        fw_dsp_isp = (u8*)kzalloc(FW_DSP_ISP_LENGTH, GFP_KERNEL);
+        if(fw_dsp_isp == NULL)
+        {
+            continue;
+        }
+        else
+        {
+            GTP_INFO("[burn_dsp_isp]Alloc %dk byte memory success.", (FW_DSP_ISP_LENGTH/1024));
+            break;
+        }
+    }
+    if(retry >= 5)
+    {
+        GTP_ERROR("[burn_dsp_isp]Alloc memory fail,exit.");
+        return FAIL;
+    }
+    
+    //step2:load dsp isp file data
+    GTP_DEBUG("[burn_dsp_isp]step2:load dsp isp file data");
+    ret = gup_load_section_file(fw_dsp_isp, (4*FW_SECTION_LENGTH+FW_DSP_LENGTH+FW_BOOT_LENGTH), FW_DSP_ISP_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_dsp_isp]load firmware dsp_isp fail.");
+        goto exit_burn_dsp_isp;
+    }
+    
+    //step3:disable wdt,clear cache enable
+    GTP_DEBUG("[burn_dsp_isp]step3:disable wdt,clear cache enable");
+    ret = gup_set_ic_msg(client, _bRW_MISCTL__TMR0_EN, 0x00);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_dsp_isp]disable wdt fail.");
+        ret = FAIL;
+        goto exit_burn_dsp_isp;
+    }
+    ret = gup_set_ic_msg(client, _bRW_MISCTL__CACHE_EN, 0x00);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_dsp_isp]clear cache enable fail.");
+        ret = FAIL;
+        goto exit_burn_dsp_isp;
+    }
+    
+    //step4:hold ss51 & dsp
+    GTP_DEBUG("[burn_dsp_isp]step4:hold ss51 & dsp");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_dsp_isp]hold ss51 & dsp fail.");
+        ret = FAIL;
+        goto exit_burn_dsp_isp;
+    }
+    
+    //step5:set boot from sram
+    GTP_DEBUG("[burn_dsp_isp]step5:set boot from sram");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOTCTL_B0_, 0x02);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_dsp_isp]set boot from sram fail.");
+        ret = FAIL;
+        goto exit_burn_dsp_isp;
+    }
+    
+    //step6:software reboot
+    GTP_DEBUG("[burn_dsp_isp]step6:software reboot");
+    ret = gup_set_ic_msg(client, _bWO_MISCTL__CPU_SWRST_PULSE, 0x01);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_dsp_isp]software reboot fail.");
+        ret = FAIL;
+        goto exit_burn_dsp_isp;
+    }
+    
+    //step7:select bank2
+    GTP_DEBUG("[burn_dsp_isp]step7:select bank2");
+    ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x02);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_dsp_isp]select bank2 fail.");
+        ret = FAIL;
+        goto exit_burn_dsp_isp;
+    }
+    
+    //step8:enable accessing code
+    GTP_DEBUG("[burn_dsp_isp]step8:enable accessing code");
+    ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x01);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_dsp_isp]enable accessing code fail.");
+        ret = FAIL;
+        goto exit_burn_dsp_isp;
+    }
+    
+    //step9:burn 4k dsp_isp
+    GTP_DEBUG("[burn_dsp_isp]step9:burn 4k dsp_isp");
+    ret = gup_burn_proc(client, fw_dsp_isp, 0xC000, FW_DSP_ISP_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_dsp_isp]burn dsp_isp fail.");
+        goto exit_burn_dsp_isp;
+    }
+    
+    //step10:set scramble
+    GTP_DEBUG("[burn_dsp_isp]step10:set scramble");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_dsp_isp]set scramble fail.");
+        ret = FAIL;
+        goto exit_burn_dsp_isp;
+    }
+    ret = SUCCESS;
+
+exit_burn_dsp_isp:
+    kfree(fw_dsp_isp);
+    return ret;
+}
+
+static u8 gup_burn_fw_ss51(struct i2c_client *client)
+{
+    u8* fw_ss51 = NULL;
+    u8  retry = 0;
+    s32 ret = 0;
+    
+    GTP_DEBUG("[burn_fw_ss51]Begin burn ss51 firmware---->>");
+    
+    //step1:alloc memory
+    GTP_DEBUG("[burn_fw_ss51]step1:alloc memory");
+    while(retry++ < 5)
+    {
+        fw_ss51 = (u8*)kzalloc(FW_SECTION_LENGTH, GFP_KERNEL);
+        if(fw_ss51 == NULL)
+        {
+            continue;
+        }
+        else
+        {
+            GTP_INFO("[burn_fw_ss51]Alloc %dk byte memory success.", (FW_SECTION_LENGTH/1024));
+            break;
+        }
+    }
+    if(retry >= 5)
+    {
+        GTP_ERROR("[burn_fw_ss51]Alloc memory fail,exit.");
+        return FAIL;
+    }
+    
+    //step2:load ss51 firmware section 1 file data
+    GTP_DEBUG("[burn_fw_ss51]step2:load ss51 firmware section 1 file data");
+    ret = gup_load_section_file(fw_ss51, 0, FW_SECTION_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 1 fail.");
+        goto exit_burn_fw_ss51;
+    }
+    
+    //step3:clear control flag
+    GTP_DEBUG("[burn_fw_ss51]step3:clear control flag");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x00);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_ss51]clear control flag fail.");
+        ret = FAIL;
+        goto exit_burn_fw_ss51;
+    }
+    
+    //step4:burn ss51 firmware section 1
+    GTP_DEBUG("[burn_fw_ss51]step4:burn ss51 firmware section 1");
+    ret = gup_burn_fw_section(client, fw_ss51, 0xC000, 0x01);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 1 fail.");
+        goto exit_burn_fw_ss51;
+    }
+    
+    //step5:load ss51 firmware section 2 file data
+    GTP_DEBUG("[burn_fw_ss51]step5:load ss51 firmware section 2 file data");
+    ret = gup_load_section_file(fw_ss51, FW_SECTION_LENGTH, FW_SECTION_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 2 fail.");
+        goto exit_burn_fw_ss51;
+    }
+    
+    //step6:burn ss51 firmware section 2
+    GTP_DEBUG("[burn_fw_ss51]step6:burn ss51 firmware section 2");
+    ret = gup_burn_fw_section(client, fw_ss51, 0xE000, 0x02);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 2 fail.");
+        goto exit_burn_fw_ss51;
+    }
+    
+    //step7:load ss51 firmware section 3 file data
+    GTP_DEBUG("[burn_fw_ss51]step7:load ss51 firmware section 3 file data");
+    ret = gup_load_section_file(fw_ss51, 2*FW_SECTION_LENGTH, FW_SECTION_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 3 fail.");
+        goto exit_burn_fw_ss51;
+    }
+    
+    //step8:burn ss51 firmware section 3
+    GTP_DEBUG("[burn_fw_ss51]step8:burn ss51 firmware section 3");
+    ret = gup_burn_fw_section(client, fw_ss51, 0xC000, 0x13);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 3 fail.");
+        goto exit_burn_fw_ss51;
+    }
+    
+    //step9:load ss51 firmware section 4 file data
+    GTP_DEBUG("[burn_fw_ss51]step9:load ss51 firmware section 4 file data");
+    ret = gup_load_section_file(fw_ss51, 3*FW_SECTION_LENGTH, FW_SECTION_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 4 fail.");
+        goto exit_burn_fw_ss51;
+    }
+    
+    //step10:burn ss51 firmware section 4
+    GTP_DEBUG("[burn_fw_ss51]step10:burn ss51 firmware section 4");
+    ret = gup_burn_fw_section(client, fw_ss51, 0xE000, 0x14);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 4 fail.");
+        goto exit_burn_fw_ss51;
+    }
+    
+    ret = SUCCESS;
+    
+exit_burn_fw_ss51:
+    kfree(fw_ss51);
+    return ret;
+}
+
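+// Burn the DSP firmware: load it from the image (it follows the four SS51
+// sections), transfer it to SRAM at 0x9000, then command the ISP to move it
+// to flash and verify the result with gup_recall_check().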
+static u8 gup_burn_fw_dsp(struct i2c_client *client)
+{
+    s32 ret = 0;
+    u8* fw_dsp = NULL;
+    u8  retry = 0;
+    u8  rd_buf[5];
+    
+    GTP_DEBUG("[burn_fw_dsp]Begin burn dsp firmware---->>");
+    //step1:alloc memory
+    GTP_DEBUG("[burn_fw_dsp]step1:alloc memory");
+    while(retry++ < 5)
+    {
+        fw_dsp = (u8*)kzalloc(FW_DSP_LENGTH, GFP_KERNEL);
+        if(fw_dsp == NULL)
+        {
+            continue;
+        }
+        else
+        {
+            GTP_INFO("[burn_fw_dsp]Alloc %dk byte memory success.", (FW_SECTION_LENGTH/1024));
+            break;
+        }
+    }
+    if(fw_dsp == NULL)
+    {
+        GTP_ERROR("[burn_fw_dsp]Alloc memory fail,exit.");
+        return FAIL;
+    }
+    
+    //step2:load firmware dsp
+    GTP_DEBUG("[burn_fw_dsp]step2:load firmware dsp");
+    ret = gup_load_section_file(fw_dsp, 4*FW_SECTION_LENGTH, FW_DSP_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_dsp]load firmware dsp fail.");
+        goto exit_burn_fw_dsp;
+    }
+    
+    //step3:select bank3
+    GTP_DEBUG("[burn_fw_dsp]step3:select bank3");
+    ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x03);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_dsp]select bank3 fail.");
+        ret = FAIL;
+        goto exit_burn_fw_dsp;
+    }
+    
+    //step4:hold ss51 & dsp
+    GTP_DEBUG("[burn_fw_dsp]step4:hold ss51 & dsp");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_dsp]hold ss51 & dsp fail.");
+        ret = FAIL;
+        goto exit_burn_fw_dsp;
+    }
+    
+    //step5:set scramble
+    GTP_DEBUG("[burn_fw_dsp]step5:set scramble");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_dsp]set scramble fail.");
+        ret = FAIL;
+        goto exit_burn_fw_dsp;
+    }
+    
+    //step6:release ss51 & dsp
+    GTP_DEBUG("[burn_fw_dsp]step6:release ss51 & dsp");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x04);                 //20121211
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_dsp]release ss51 & dsp fail.");
+        ret = FAIL;
+        goto exit_burn_fw_dsp;
+    }
+    //must delay
+    msleep(1);
+    
+    //step7:burn 4k dsp firmware
+    GTP_DEBUG("[burn_fw_dsp]step7:burn 4k dsp firmware");
+    ret = gup_burn_proc(client, fw_dsp, 0x9000, FW_DSP_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_dsp]burn fw_section fail.");
+        goto exit_burn_fw_dsp;
+    }
+    
+    //step8:send burn cmd to move data to flash from sram
+    GTP_DEBUG("[burn_fw_dsp]step8:send burn cmd to move data to flash from sram");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x05);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_dsp]send burn cmd fail.");
+        goto exit_burn_fw_dsp;
+    }
+    GTP_DEBUG("[burn_fw_dsp]Wait for the burn is complete......");
+    do{
+        ret = gup_get_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, rd_buf, 1);
+        if(ret <= 0)
+        {
+            GTP_ERROR("[burn_fw_dsp]Get burn state fail");
+            goto exit_burn_fw_dsp;
+        }
+        msleep(10);
+        //GTP_DEBUG("[burn_fw_dsp]Get burn state:%d.", rd_buf[GTP_ADDR_LENGTH]);
+    }while(rd_buf[GTP_ADDR_LENGTH]);
+    
+    //step9:recall check 4k dsp firmware
+    GTP_DEBUG("[burn_fw_dsp]step9:recall check 4k dsp firmware");
+    ret = gup_recall_check(client, fw_dsp, 0x9000, FW_DSP_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_dsp]recall check 4k dsp firmware fail.");
+        goto exit_burn_fw_dsp;
+    }
+    
+    ret = SUCCESS;
+    
+exit_burn_fw_dsp:
+    kfree(fw_dsp);
+    return ret;
+}
+
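+// Burn the bootloader: load it from the image (after the SS51 and DSP
+// sections), write it to SRAM at 0x9000, command the ISP to flash it,
+// verify it, then re-enable DSP code download.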
+static u8 gup_burn_fw_boot(struct i2c_client *client)
+{
+    s32 ret = 0;
+    u8* fw_boot = NULL;
+    u8  retry = 0;
+    u8  rd_buf[5];
+    
+    GTP_DEBUG("[burn_fw_boot]Begin burn bootloader firmware---->>");
+    
+    //step1:Alloc memory
+    GTP_DEBUG("[burn_fw_boot]step1:Alloc memory");
+    while(retry++ < 5)
+    {
+        fw_boot = (u8*)kzalloc(FW_BOOT_LENGTH, GFP_KERNEL);
+        if(fw_boot == NULL)
+        {
+            continue;
+        }
+        else
+        {
+            GTP_INFO("[burn_fw_boot]Alloc %dk byte memory success.", (FW_BOOT_LENGTH/1024));
+            break;
+        }
+    }
+    if(fw_boot == NULL)
+    {
+        GTP_ERROR("[burn_fw_boot]Alloc memory fail,exit.");
+        return FAIL;
+    }
+    
+    //step2:load firmware bootloader
+    GTP_DEBUG("[burn_fw_boot]step2:load firmware bootloader");
+    ret = gup_load_section_file(fw_boot, (4*FW_SECTION_LENGTH+FW_DSP_LENGTH), FW_BOOT_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_boot]load firmware dsp fail.");
+        goto exit_burn_fw_boot;
+    }
+    
+    //step3:hold ss51 & dsp
+    GTP_DEBUG("[burn_fw_boot]step3:hold ss51 & dsp");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_boot]hold ss51 & dsp fail.");
+        ret = FAIL;
+        goto exit_burn_fw_boot;
+    }
+    
+    //step4:set scramble
+    GTP_DEBUG("[burn_fw_boot]step4:set scramble");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_boot]set scramble fail.");
+        ret = FAIL;
+        goto exit_burn_fw_boot;
+    }
+    
+    //step5:release ss51 & dsp
+    GTP_DEBUG("[burn_fw_boot]step5:release ss51 & dsp");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x04);                 //20121211
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_boot]release ss51 & dsp fail.");
+        ret = FAIL;
+        goto exit_burn_fw_boot;
+    }
+    //must delay
+    msleep(1);
+    
+    //step6:select bank3
+    GTP_DEBUG("[burn_fw_boot]step6:select bank3");
+    ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x03);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_boot]select bank3 fail.");
+        ret = FAIL;
+        goto exit_burn_fw_boot;
+    }
+    
+    //step7:burn 2k bootloader firmware
+    GTP_DEBUG("[burn_fw_boot]step7:burn 2k bootloader firmware");
+    ret = gup_burn_proc(client, fw_boot, 0x9000, FW_BOOT_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_boot]burn fw_section fail.");
+        goto exit_burn_fw_boot;
+    }
+    
+    //step8:send burn cmd to move data to flash from sram
+    GTP_DEBUG("[burn_fw_boot]step8:send burn cmd to move data to flash from sram");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x06);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_boot]send burn cmd fail.");
+        goto exit_burn_fw_boot;
+    }
+    GTP_DEBUG("[burn_fw_boot]Wait for the burn is complete......");
+    do{
+        ret = gup_get_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, rd_buf, 1);
+        if(ret <= 0)
+        {
+            GTP_ERROR("[burn_fw_boot]Get burn state fail");
+            goto exit_burn_fw_boot;
+        }
+        msleep(10);
+        //GTP_DEBUG("[burn_fw_boot]Get burn state:%d.", rd_buf[GTP_ADDR_LENGTH]);
+    }while(rd_buf[GTP_ADDR_LENGTH]);
+    
+    //step9:recall check 2k bootloader firmware
+    GTP_DEBUG("[burn_fw_boot]step9:recall check 2k bootloader firmware");
+    ret = gup_recall_check(client, fw_boot, 0x9000, FW_BOOT_LENGTH);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[burn_fw_boot]recall check 4k dsp firmware fail.");
+        goto exit_burn_fw_boot;
+    }
+    
+    //step10:enable download DSP code
+    GTP_DEBUG("[burn_fw_boot]step10:enable download DSP code");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x99);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_boot]enable download DSP code fail.");
+        ret = FAIL;
+        goto exit_burn_fw_boot;
+    }
+    
+    //step11:release ss51 & hold dsp
+    GTP_DEBUG("[burn_fw_boot]step11:release ss51 & hold dsp");
+    ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x08);
+    if(ret <= 0)
+    {
+        GTP_ERROR("[burn_fw_boot]release ss51 & hold dsp fail.");
+        ret = FAIL;
+        goto exit_burn_fw_boot;
+    }
+    
+    ret = SUCCESS;
+    
+exit_burn_fw_boot:
+    kfree(fw_boot);
+    return ret;
+}
+
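+// Top-level update flow: check the update file and the IC firmware info,
+// decide whether an update is needed, enter update mode, then burn dsp_isp,
+// ss51, dsp and the bootloader, retrying the whole sequence up to five times.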
+s32 gup_update_proc(void *dir)
+{
+    s32 ret = 0;
+    u8  retry = 0;
+    st_fw_head fw_head;
+    struct goodix_ts_data *ts = NULL;
+    
+    GTP_DEBUG("[update_proc]Begin update ......");
+    
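+    // reset the counters used to report update progress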
+    show_len = 1;
+    total_len = 100;
+    if(dir == NULL)
+    {
+        msleep(3000);                               //wait for the main thread to complete
+    }
+    
+    ts = i2c_get_clientdata(i2c_connect_client);
+    
+    if (searching_file)
+    {
+        searching_file = 0;     // stop searching for a .bin update file
+        GTP_INFO("Exiting .bin update file search...");
+        while ((show_len != 200) && (show_len != 100))     // wait until the auto update has quit completely
+        {
+            msleep(100);
+        }
+    }
+    
+    update_msg.file = NULL;
+    ret = gup_check_update_file(i2c_connect_client, &fw_head, (u8*)dir);     //20121211
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[update_proc]check update file fail.");
+        goto file_fail;
+    }
+    
+    //gtp_reset_guitar(i2c_connect_client, 20);
+    ret = gup_get_ic_fw_msg(i2c_connect_client);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[update_proc]get ic message fail.");
+        goto file_fail;
+    }    
+    
+    ret = gup_enter_update_judge(&fw_head);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[update_proc]Check *.bin file fail.");
+        goto file_fail;
+    }
+    
+    ts->enter_update = 1;        
+    gtp_irq_disable(ts);
+#if GTP_ESD_PROTECT
+    gtp_esd_switch(ts->client, SWITCH_OFF);
+#endif
+    ret = gup_enter_update_mode(i2c_connect_client);
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[update_proc]enter update mode fail.");
+        goto update_fail;
+    }
+    
+    while(retry++ < 5)
+    {
+        show_len = 10;
+        total_len = 100;
+        ret = gup_burn_dsp_isp(i2c_connect_client);
+        if(FAIL == ret)
+        {
+            GTP_ERROR("[update_proc]burn dsp isp fail.");
+            continue;
+        }
+        
+        show_len += 10;
+        ret = gup_burn_fw_ss51(i2c_connect_client);
+        if(FAIL == ret)
+        {
+            GTP_ERROR("[update_proc]burn ss51 firmware fail.");
+            continue;
+        }
+        
+        show_len += 40;
+        ret = gup_burn_fw_dsp(i2c_connect_client);
+        if(FAIL == ret)
+        {
+		/* num_planes must be bounds checked, otherwise the for loop
+		 * below can run unbounded and read past plane_sizes[]
+		 */
+        }
+        
+        show_len += 20;
+        ret = gup_burn_fw_boot(i2c_connect_client);
+        if(FAIL == ret)
+        {
+            GTP_ERROR("[update_proc]burn bootloader firmware fail.");
+            continue;
+        }
+        show_len += 10;
+        GTP_INFO("[update_proc]UPDATE SUCCESS.");
+        break;
+    }
+    if(FAIL == ret)
+    {
+        GTP_ERROR("[update_proc]retry limit reached, UPDATE FAIL.");
+        goto update_fail;
+    }
+    
+    GTP_DEBUG("[update_proc]leave update mode.");
+    gup_leave_update_mode();
+    
+    msleep(100);
+//    GTP_DEBUG("[update_proc]send config.");
+//    ret = gtp_send_cfg(i2c_connect_client);
+//    if(ret < 0)
+//    {
+//        GTP_ERROR("[update_proc]send config fail.");
+//    }
+    if (ts->fw_error)
+    {
+        GTP_INFO("firmware error auto update, resent config!");
+        gup_init_panel(ts);
+    }
+    show_len = 100;
+    total_len = 100;
+    ts->enter_update = 0;
+    gtp_irq_enable(ts);
+    
+#if GTP_ESD_PROTECT
+    gtp_esd_switch(ts->client, SWITCH_ON);
+#endif
+    filp_close(update_msg.file, NULL);
+    return SUCCESS;
+    
+update_fail:
+    ts->enter_update = 0;
+    gtp_irq_enable(ts);
+    
+#if GTP_ESD_PROTECT
+    gtp_esd_switch(ts->client, SWITCH_ON);
+#endif
+
+file_fail:
+    if(update_msg.file && !IS_ERR(update_msg.file))
+    {
+        filp_close(update_msg.file, NULL);
+    }
+    show_len = 200;
+    total_len = 100;
+    return FAIL;
+}
+
+#if GTP_AUTO_UPDATE
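+// Spawn a kernel thread that runs gup_update_proc() so the automatic update
+// happens asynchronously.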
+u8 gup_init_update_proc(struct goodix_ts_data *ts)
+{
+    struct task_struct *thread = NULL;
+
+    GTP_INFO("Ready to run update thread.");
+    thread = kthread_run(gup_update_proc, (void*)NULL, "guitar_update");
+    if (IS_ERR(thread))
+    {
+        GTP_ERROR("Failed to create update thread.\n");
+        return -1;
+    }
+
+    return 0;
+}
+#endif
\ No newline at end of file
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.c b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
index 908d0d7..ba0be2b 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.c
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
@@ -1056,8 +1056,6 @@
 
 	rmi4_pdata->i2c_pull_up = of_property_read_bool(np,
 			"synaptics,i2c-pull-up");
-	rmi4_pdata->power_down_enable = of_property_read_bool(np,
-			"synaptics,power-down");
 	rmi4_pdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
 	rmi4_pdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
 
@@ -2005,7 +2003,7 @@
 
 error_reg_en_vcc_i2c:
 	if (rmi4_data->board->i2c_pull_up)
-		reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0);
+		reg_set_optimum_mode_check(rmi4_data->vdd, 0);
 error_reg_opt_i2c:
 	regulator_disable(rmi4_data->vdd);
 error_reg_en_vdd:
@@ -2594,51 +2592,27 @@
 						bool on)
 {
 	int retval;
-	int load_ua;
 
 	if (on == false)
 		goto regulator_hpm;
 
-	load_ua = rmi4_data->board->power_down_enable ? 0 : RMI4_LPM_LOAD_UA;
-	retval = reg_set_optimum_mode_check(rmi4_data->vdd, load_ua);
+	retval = reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_LPM_LOAD_UA);
 	if (retval < 0) {
 		dev_err(&rmi4_data->i2c_client->dev,
-			"Regulator vdd_ana set_opt failed rc=%d\n",
+			"Regulator vcc_ana set_opt failed rc=%d\n",
 			retval);
 		goto fail_regulator_lpm;
 	}
 
-	if (rmi4_data->board->power_down_enable) {
-		retval = regulator_disable(rmi4_data->vdd);
-		if (retval) {
-			dev_err(&rmi4_data->i2c_client->dev,
-				"Regulator vdd disable failed rc=%d\n",
-				retval);
-			goto fail_regulator_lpm;
-		}
-	}
-
 	if (rmi4_data->board->i2c_pull_up) {
-		load_ua = rmi4_data->board->power_down_enable ?
-			0 : RMI4_I2C_LPM_LOAD_UA;
 		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
-			load_ua);
+			RMI4_I2C_LPM_LOAD_UA);
 		if (retval < 0) {
 			dev_err(&rmi4_data->i2c_client->dev,
-				"Regulator vcc_i2c set_opt failed " \
-				"rc=%d\n", retval);
+				"Regulator vcc_i2c set_opt failed rc=%d\n",
+				retval);
 			goto fail_regulator_lpm;
 		}
-
-		if (rmi4_data->board->power_down_enable) {
-			retval = regulator_disable(rmi4_data->vcc_i2c);
-			if (retval) {
-				dev_err(&rmi4_data->i2c_client->dev,
-					"Regulator vcc_i2c disable failed " \
-					"rc=%d\n", retval);
-				goto fail_regulator_lpm;
-			}
-		}
 	}
 
 	return 0;
@@ -2654,16 +2628,6 @@
 		goto fail_regulator_hpm;
 	}
 
-	if (rmi4_data->board->power_down_enable) {
-		retval = regulator_enable(rmi4_data->vdd);
-		if (retval) {
-			dev_err(&rmi4_data->i2c_client->dev,
-				"Regulator vdd enable failed rc=%d\n",
-				retval);
-			goto fail_regulator_hpm;
-		}
-	}
-
 	if (rmi4_data->board->i2c_pull_up) {
 		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
 			RMI4_I2C_LOAD_UA);
@@ -2673,26 +2637,6 @@
 				retval);
 			goto fail_regulator_hpm;
 		}
-
-		if (rmi4_data->board->power_down_enable) {
-			retval = regulator_enable(rmi4_data->vcc_i2c);
-			if (retval) {
-				dev_err(&rmi4_data->i2c_client->dev,
-					"Regulator vcc_i2c enable failed " \
-					"rc=%d\n", retval);
-				goto fail_regulator_hpm;
-			}
-		}
-	}
-
-	if (rmi4_data->board->power_down_enable) {
-		retval = synaptics_rmi4_reset_device(rmi4_data);
-		if (retval < 0) {
-			dev_err(&rmi4_data->i2c_client->dev,
-				"%s: Failed to issue reset command, rc = %d\n",
-					__func__, retval);
-			return retval;
-		}
 	}
 
 	return 0;
diff --git a/drivers/iommu/msm_iommu-v1.c b/drivers/iommu/msm_iommu-v1.c
index b9c4cae..53c7c30 100644
--- a/drivers/iommu/msm_iommu-v1.c
+++ b/drivers/iommu/msm_iommu-v1.c
@@ -692,7 +692,6 @@
 	if (ret)
 		goto fail;
 
-	ret = __flush_iotlb_va(domain, va);
 fail:
 	mutex_unlock(&msm_iommu_lock);
 	return ret;
@@ -742,7 +741,6 @@
 	if (ret)
 		goto fail;
 
-	__flush_iotlb(domain);
 fail:
 	mutex_unlock(&msm_iommu_lock);
 	return ret;
diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
index 474efdf..78fffb2 100644
--- a/drivers/iommu/msm_iommu_sec.c
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -371,7 +371,7 @@
 	map.info.ctx_id = ctx_drvdata->num;
 	map.info.va = va;
 	map.info.size = len;
-	map.flags = IOMMU_TLBINVAL_FLAG;
+	map.flags = 0;
 	flush_va = &pa;
 	flush_pa = virt_to_phys(&pa);
 
@@ -421,7 +421,7 @@
 	map.info.ctx_id = ctx_drvdata->num;
 	map.info.va = va;
 	map.info.size = len;
-	map.flags = IOMMU_TLBINVAL_FLAG;
+	map.flags = 0;
 
 	if (sg->length == len) {
 		pa = get_phys_addr(sg);
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index 3c2be38..a1453ae 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -43,7 +43,7 @@
 #define WLED_HIGH_POLE_CAP_REG(base)		(base + 0x58)
 #define WLED_CURR_SINK_MASK		0xE0
 #define WLED_CURR_SINK_SHFT		0x05
-#define WLED_SWITCH_FREQ_MASK		0x02
+#define WLED_SWITCH_FREQ_MASK		0x0F
 #define WLED_OVP_VAL_MASK		0x03
 #define WLED_OVP_VAL_BIT_SHFT		0x00
 #define WLED_BOOST_LIMIT_MASK		0x07
@@ -78,7 +78,7 @@
 #define WLED_BOOST_LIM_DEFAULT		0x03
 #define WLED_CP_SEL_DEFAULT		0x00
 #define WLED_CTRL_DLY_DEFAULT		0x00
-#define WLED_SWITCH_FREQ_DEFAULT	0x02
+#define WLED_SWITCH_FREQ_DEFAULT	0x0B
 
 #define FLASH_SAFETY_TIMER(base)	(base + 0x40)
 #define FLASH_MAX_CURR(base)		(base + 0x41)
@@ -239,14 +239,6 @@
 	WLED_OVP_37V,
 };
 
-/* switch frquency */
-enum wled_switch_freq {
-	WLED_800kHz = 0,
-	WLED_960kHz,
-	WLED_1600kHz,
-	WLED_3200kHz,
-};
-
 enum flash_headroom {
 	HEADROOM_250mV = 0,
 	HEADROOM_300mV,
@@ -378,9 +370,11 @@
  *  @second_addr - address of secondary flash to be written
  *  @safety_timer - enable safety timer or watchdog timer
  *  @torch_enable - enable flash LED torch mode
- *  @regulator_get - regulator attached or not
+ *  @flash_reg_get - flash regulator attached or not
  *  @flash_on - flash status, on or off
+ *  @torch_on - torch status, on or off
  *  @flash_boost_reg - boost regulator for flash
+ *  @torch_boost_reg - boost regulator for torch
  */
 struct flash_config_data {
 	u8	current_prgm;
@@ -395,9 +389,11 @@
 	u16	second_addr;
 	bool	safety_timer;
 	bool	torch_enable;
-	bool	regulator_get;
+	bool	flash_reg_get;
 	bool	flash_on;
+	bool	torch_on;
 	struct regulator *flash_boost_reg;
+	struct regulator *torch_boost_reg;
 };
 
 /**
@@ -681,9 +677,100 @@
 	return 0;
 }
 
-static int qpnp_flash_set(struct qpnp_led_data *led)
+static int qpnp_flash_regulator_operate(struct qpnp_led_data *led, bool on)
+{
+	int rc, i;
+	struct qpnp_led_data *led_array;
+	bool regulator_on = false;
+
+	led_array = dev_get_drvdata(&led->spmi_dev->dev);
+	if (!led_array) {
+		dev_err(&led->spmi_dev->dev,
+				"Unable to get LED array\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < led->num_leds; i++)
+		regulator_on |= led_array[i].flash_cfg->flash_on;
+
+	if (!on)
+		goto regulator_turn_off;
+
+	if (!regulator_on && !led->flash_cfg->flash_on) {
+		for (i = 0; i < led->num_leds; i++) {
+			if (led_array[i].flash_cfg->flash_reg_get) {
+				rc = regulator_enable(
+					led_array[i].flash_cfg->\
+					flash_boost_reg);
+				if (rc) {
+					dev_err(&led->spmi_dev->dev,
+						"Regulator enable failed(%d)\n",
+									rc);
+					return rc;
+				}
+				led->flash_cfg->flash_on = true;
+			}
+			break;
+		}
+	}
+
+	return 0;
+
+regulator_turn_off:
+	if (regulator_on && led->flash_cfg->flash_on) {
+		for (i = 0; i < led->num_leds; i++) {
+			if (led_array[i].flash_cfg->flash_reg_get) {
+				rc = regulator_disable(led_array[i].flash_cfg->\
+							flash_boost_reg);
+				if (rc) {
+					dev_err(&led->spmi_dev->dev,
+						"Regulator disable failed(%d)\n",
+									rc);
+					return rc;
+				}
+				led->flash_cfg->flash_on = false;
+			}
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int qpnp_torch_regulator_operate(struct qpnp_led_data *led, bool on)
 {
 	int rc;
+
+	if (!on)
+		goto regulator_turn_off;
+
+	if (!led->flash_cfg->torch_on) {
+		rc = regulator_enable(led->flash_cfg->torch_boost_reg);
+		if (rc) {
+			dev_err(&led->spmi_dev->dev,
+				"Regulator enable failed(%d)\n", rc);
+				return rc;
+		}
+		led->flash_cfg->torch_on = true;
+	}
+	return 0;
+
+regulator_turn_off:
+	if (led->flash_cfg->torch_on) {
+		rc = regulator_disable(led->flash_cfg->torch_boost_reg);
+		if (rc) {
+			dev_err(&led->spmi_dev->dev,
+				"Regulator disable failed(%d)\n", rc);
+			return rc;
+		}
+		led->flash_cfg->torch_on = false;
+	}
+	return 0;
+}
+
+static int qpnp_flash_set(struct qpnp_led_data *led)
+{
+	int rc, error;
 	int val = led->cdev.brightness;
 
 	if (led->flash_cfg->torch_enable)
@@ -701,13 +788,21 @@
 	/* Set led current */
 	if (val > 0) {
 		if (led->flash_cfg->torch_enable) {
+			rc = qpnp_torch_regulator_operate(led, true);
+			if (rc) {
+				dev_err(&led->spmi_dev->dev,
+					"Torch regulator operate failed(%d)\n",
+					rc);
+				return rc;
+			}
+
 			rc = qpnp_led_masked_write(led,
 				FLASH_LED_UNLOCK_SECURE(led->base),
 				FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
 			if (rc) {
 				dev_err(&led->spmi_dev->dev,
 					"Secure reg write failed(%d)\n", rc);
-				return rc;
+				goto error_torch_set;
 			}
 
 			rc = qpnp_led_masked_write(led,
@@ -716,7 +811,7 @@
 			if (rc) {
 				dev_err(&led->spmi_dev->dev,
 					"Torch reg write failed(%d)\n", rc);
-				return rc;
+				goto error_torch_set;
 			}
 
 			rc = qpnp_led_masked_write(led,
@@ -726,7 +821,7 @@
 			if (rc) {
 				dev_err(&led->spmi_dev->dev,
 					"Current reg write failed(%d)\n", rc);
-				return rc;
+				goto error_torch_set;
 			}
 
 			rc = qpnp_led_masked_write(led,
@@ -737,7 +832,7 @@
 				dev_err(&led->spmi_dev->dev,
 					"2nd Current reg write failed(%d)\n",
 					rc);
-				return rc;
+				goto error_torch_set;
 			}
 
 			qpnp_led_masked_write(led, FLASH_MAX_CURR(led->base),
@@ -747,18 +842,26 @@
 				dev_err(&led->spmi_dev->dev,
 					"Max current reg write failed(%d)\n",
 					rc);
-				return rc;
+				goto error_torch_set;
 			}
 
 			rc = qpnp_led_masked_write(led,
 				FLASH_ENABLE_CONTROL(led->base),
-				FLASH_ENABLE_MODULE_MASK, FLASH_ENABLE_MODULE);
+				FLASH_ENABLE_MASK, FLASH_ENABLE_MODULE);
 			if (rc) {
 				dev_err(&led->spmi_dev->dev,
 					"Enable reg write failed(%d)\n", rc);
-				return rc;
+				goto error_torch_set;
 			}
 		} else {
+			rc = qpnp_flash_regulator_operate(led, true);
+			if (rc) {
+				dev_err(&led->spmi_dev->dev,
+					"Flash regulator operate failed(%d)\n",
+					rc);
+				goto error_flash_set;
+			}
+
 			/* Set flash safety timer */
 			rc = qpnp_led_masked_write(led,
 				FLASH_SAFETY_TIMER(led->base),
@@ -768,7 +871,7 @@
 				dev_err(&led->spmi_dev->dev,
 					"Safety timer reg write failed(%d)\n",
 					rc);
-				return rc;
+				goto error_flash_set;
 			}
 
 			/* Set max current */
@@ -779,7 +882,7 @@
 				dev_err(&led->spmi_dev->dev,
 					"Max current reg write failed(%d)\n",
 					rc);
-				return rc;
+				goto error_flash_set;
 			}
 
 			/* Set clamp current */
@@ -791,7 +894,7 @@
 				dev_err(&led->spmi_dev->dev,
 					"Clamp current reg write failed(%d)\n",
 					rc);
-				return rc;
+				goto error_flash_set;
 			}
 
 			/* Write 0x80 to MODULE_ENABLE before writing 0xE0
@@ -804,7 +907,7 @@
 			if (rc) {
 				dev_err(&led->spmi_dev->dev,
 					"Enable reg write failed(%d)\n", rc);
-				return rc;
+				goto error_flash_set;
 			}
 
 			rc = qpnp_led_masked_write(led,
@@ -814,7 +917,7 @@
 			if (rc) {
 				dev_err(&led->spmi_dev->dev,
 					"Current reg write failed(%d)\n", rc);
-				return rc;
+				goto error_flash_set;
 			}
 
 			rc = qpnp_led_masked_write(led,
@@ -825,7 +928,7 @@
 				dev_err(&led->spmi_dev->dev,
 					"2nd Current reg write failed(%d)\n",
 					rc);
-				return rc;
+				goto error_flash_set;
 			}
 
 			rc = qpnp_led_masked_write(led,
@@ -834,7 +937,7 @@
 			if (rc) {
 				dev_err(&led->spmi_dev->dev,
 					"Enable reg write failed(%d)\n", rc);
-				return rc;
+				goto error_flash_set;
 			}
 		}
 
@@ -846,7 +949,10 @@
 				dev_err(&led->spmi_dev->dev,
 					"LED %d strobe reg write failed(%d)\n",
 					led->id, rc);
-				return rc;
+				if (led->flash_cfg->torch_enable)
+					goto error_torch_set;
+				else
+					goto error_flash_set;
 			}
 		} else {
 			rc = qpnp_led_masked_write(led,
@@ -856,10 +962,38 @@
 				dev_err(&led->spmi_dev->dev,
 					"LED %d strobe reg write failed(%d)\n",
 					led->id, rc);
-				return rc;
+				if (led->flash_cfg->torch_enable)
+					goto error_torch_set;
+				else
+					goto error_flash_set;
 			}
 		}
 	} else {
+		rc = qpnp_led_masked_write(led,
+			FLASH_LED_STROBE_CTRL(led->base),
+			FLASH_STROBE_MASK,
+			FLASH_DISABLE_ALL);
+		if (rc) {
+			dev_err(&led->spmi_dev->dev,
+				"LED %d flash write failed(%d)\n", led->id, rc);
+			if (led->flash_cfg->torch_enable)
+				goto error_torch_set;
+			else
+				goto error_flash_set;
+		}
+
+		rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base),
+			FLASH_ENABLE_MASK,
+			FLASH_DISABLE_ALL);
+		if (rc) {
+			dev_err(&led->spmi_dev->dev,
+				"Enable reg write failed(%d)\n", rc);
+			if (led->flash_cfg->torch_enable)
+				goto error_torch_set;
+			else
+				goto error_flash_set;
+		}
+
 		if (led->flash_cfg->torch_enable) {
 			rc = qpnp_led_masked_write(led,
 				FLASH_LED_UNLOCK_SECURE(led->base),
@@ -867,7 +1001,7 @@
 			if (rc) {
 				dev_err(&led->spmi_dev->dev,
 					"Secure reg write failed(%d)\n", rc);
-				return rc;
+				goto error_torch_set;
 			}
 
 			rc = qpnp_led_masked_write(led,
@@ -877,33 +1011,48 @@
 			if (rc) {
 				dev_err(&led->spmi_dev->dev,
 					"Torch reg write failed(%d)\n", rc);
+				goto error_torch_set;
+			}
+
+			rc = qpnp_torch_regulator_operate(led, false);
+			if (rc) {
+				dev_err(&led->spmi_dev->dev,
+					"Torch regulator operate failed(%d)\n",
+					rc);
 				return rc;
 			}
-		}
-
-		rc = qpnp_led_masked_write(led,
-			FLASH_LED_STROBE_CTRL(led->base),
-			FLASH_STROBE_MASK,
-			FLASH_DISABLE_ALL);
-		if (rc) {
-			dev_err(&led->spmi_dev->dev,
-				"LED %d flash write failed(%d)\n", led->id, rc);
-			return rc;
-		}
-
-		rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base),
-			FLASH_ENABLE_MASK,
-			FLASH_DISABLE_ALL);
-		if (rc) {
-			dev_err(&led->spmi_dev->dev,
-				"Enable reg write failed(%d)\n", rc);
-			return rc;
+		} else {
+			rc = qpnp_flash_regulator_operate(led, false);
+			if (rc) {
+				dev_err(&led->spmi_dev->dev,
+					"Flash regulator operate failed(%d)\n",
+					rc);
+				return rc;
+			}
 		}
 	}
 
 	qpnp_dump_regs(led, flash_debug_regs, ARRAY_SIZE(flash_debug_regs));
 
 	return 0;
+
+error_torch_set:
+	error = qpnp_torch_regulator_operate(led, false);
+	if (error) {
+		dev_err(&led->spmi_dev->dev,
+			"Torch regulator operate failed(%d)\n", error);
+		return error;
+	}
+	return rc;
+
+error_flash_set:
+	error = qpnp_flash_regulator_operate(led, false);
+	if (error) {
+		dev_err(&led->spmi_dev->dev,
+			"Flash regulator operate failed(%d)\n", error);
+		return error;
+	}
+	return rc;
 }
 
 static int qpnp_kpdbl_set(struct qpnp_led_data *led)
@@ -1062,35 +1211,7 @@
 static void __qpnp_led_work(struct qpnp_led_data *led,
 				enum led_brightness value)
 {
-	int rc, i;
-	struct qpnp_led_data *led_array;
-
-	if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1) {
-		if (!led->flash_cfg->flash_on && value > 0) {
-			led_array = dev_get_drvdata(&led->spmi_dev->dev);
-			if (!led_array) {
-				dev_err(&led->spmi_dev->dev,
-					"Unable to unable to get array\n");
-				return;
-			}
-
-			for (i = 0; i < led->num_leds; i++) {
-				if (led_array[i].flash_cfg->regulator_get) {
-					rc = regulator_enable(led_array[i].\
-							flash_cfg->\
-							flash_boost_reg);
-					if (rc) {
-						dev_err(&led->spmi_dev->dev,
-							"Regulator enable" \
-							 "failed(%d)\n",
-							rc);
-						return;
-					}
-				}
-			}
-			led->flash_cfg->flash_on = true;
-		}
-	}
+	int rc;
 
 	mutex_lock(&led->lock);
 
@@ -1134,31 +1255,6 @@
 	}
 	mutex_unlock(&led->lock);
 
-	if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1) {
-		if (led->flash_cfg->flash_on && !value) {
-			led_array = dev_get_drvdata(&led->spmi_dev->dev);
-			if (!led_array) {
-				dev_err(&led->spmi_dev->dev,
-						"Unable to get LED array\n");
-				return;
-			}
-
-			for (i = 0; i < led->num_leds; i++) {
-				if (led_array[i].flash_cfg->regulator_get) {
-					rc = regulator_disable(led_array[i]\
-						.flash_cfg->flash_boost_reg);
-					if (rc) {
-						dev_err(&led->spmi_dev->dev,
-							"Unable to disable" \
-							" regulator(%d)\n",
-							rc);
-						return;
-					}
-				}
-			}
-			led->flash_cfg->flash_on = false;
-		}
-	}
 }
 
 static void qpnp_led_work(struct work_struct *work)
@@ -1290,11 +1386,12 @@
 	}
 
 	/* program switch frequency */
-	rc = qpnp_led_masked_write(led, WLED_SWITCHING_FREQ_REG(led->base),
+	rc = qpnp_led_masked_write(led,
+		WLED_SWITCHING_FREQ_REG(led->base),
 		WLED_SWITCH_FREQ_MASK, led->wled_cfg->switch_freq);
 	if (rc) {
 		dev_err(&led->spmi_dev->dev,
-				"WLED switch freq reg write failed(%d)\n", rc);
+			"WLED switch freq reg write failed(%d)\n", rc);
 		return rc;
 	}
 
@@ -1982,6 +2079,18 @@
 		return rc;
 	}
 
+	/* Disable flash LED module */
+	rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base),
+		FLASH_ENABLE_MODULE_MASK, FLASH_DISABLE_ALL);
+	if (rc) {
+		dev_err(&led->spmi_dev->dev,
+			"Enable reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	if (led->flash_cfg->torch_enable)
+		return 0;
+
 	/* Set headroom */
 	rc = qpnp_led_masked_write(led, FLASH_HEADROOM(led->base),
 		FLASH_HEADROOM_MASK, led->flash_cfg->headroom);
@@ -2041,15 +2150,6 @@
 		return rc;
 	}
 
-	/* Disable flash LED module */
-	rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base),
-		FLASH_ENABLE_MODULE_MASK, FLASH_DISABLE_ALL);
-	if (rc) {
-		dev_err(&led->spmi_dev->dev,
-			"Enable reg write failed(%d)\n", rc);
-		return rc;
-	}
-
 	led->flash_cfg->strobe_type = 0;
 
 	/* dump flash registers */
@@ -2378,12 +2478,12 @@
 				rc = PTR_ERR(led->flash_cfg->flash_boost_reg);
 				dev_err(&led->spmi_dev->dev,
 					"Regulator get failed(%d)\n", rc);
-				return rc;
+				goto error_get_flash_reg;
 			}
-			led->flash_cfg->regulator_get = true;
+			led->flash_cfg->flash_reg_get = true;
 			*reg_set = true;
 		} else
-			led->flash_cfg->regulator_get = false;
+			led->flash_cfg->flash_reg_get = false;
 	} else if (led->id == QPNP_ID_FLASH1_LED1) {
 		led->flash_cfg->enable_module = FLASH_ENABLE_ALL;
 		led->flash_cfg->current_addr = FLASH_LED_1_CURR(led->base);
@@ -2397,12 +2497,12 @@
 				rc = PTR_ERR(led->flash_cfg->flash_boost_reg);
 				dev_err(&led->spmi_dev->dev,
 					"Regulator get failed(%d)\n", rc);
-				return rc;
+				goto error_get_flash_reg;
 			}
-			led->flash_cfg->regulator_get = true;
+			led->flash_cfg->flash_reg_get = true;
 			*reg_set = true;
 		} else
-			led->flash_cfg->regulator_get = false;
+			led->flash_cfg->flash_reg_get = false;
 	} else {
 		dev_err(&led->spmi_dev->dev, "Unknown flash LED name given\n");
 		return -EINVAL;
@@ -2411,16 +2511,32 @@
 	led->flash_cfg->torch_enable =
 		of_property_read_bool(node, "qcom,torch-enable");
 
+	if (led->flash_cfg->torch_enable) {
+		led->flash_cfg->torch_boost_reg =
+			regulator_get(&led->spmi_dev->dev, "torch_boost");
+		if (IS_ERR(led->flash_cfg->torch_boost_reg)) {
+			rc = PTR_ERR(led->flash_cfg->torch_boost_reg);
+			dev_err(&led->spmi_dev->dev,
+				"Torch regulator get failed(%d)\n", rc);
+			goto error_get_torch_reg;
+		}
+	}
+
 	rc = of_property_read_u32(node, "qcom,current", &val);
 	if (!rc) {
-		if (led->flash_cfg->torch_enable)
+		if (led->flash_cfg->torch_enable) {
 			led->flash_cfg->current_prgm = (val *
 				TORCH_MAX_LEVEL / led->max_current);
+			return 0;
+		}
 		else
 			led->flash_cfg->current_prgm = (val *
 				FLASH_MAX_LEVEL / led->max_current);
 	} else
-		return -EINVAL;
+		if (led->flash_cfg->torch_enable)
+			goto error_get_torch_reg;
+		else
+			goto error_get_flash_reg;
 
 	rc = of_property_read_u32(node, "qcom,headroom", &val);
 	if (!rc)
@@ -2428,7 +2544,7 @@
 	else if (rc == -EINVAL)
 		led->flash_cfg->headroom = HEADROOM_500mV;
 	else
-		return rc;
+		goto error_get_flash_reg;
 
 	rc = of_property_read_u32(node, "qcom,duration", &val);
 	if (!rc)
@@ -2436,7 +2552,7 @@
 	else if (rc == -EINVAL)
 		led->flash_cfg->duration = FLASH_DURATION_200ms;
 	else
-		return rc;
+		goto error_get_flash_reg;
 
 	rc = of_property_read_u32(node, "qcom,clamp-curr", &val);
 	if (!rc)
@@ -2445,7 +2561,7 @@
 	else if (rc == -EINVAL)
 		led->flash_cfg->clamp_curr = FLASH_CLAMP_200mA;
 	else
-		return rc;
+		goto error_get_flash_reg;
 
 	rc = of_property_read_u32(node, "qcom,startup-dly", &val);
 	if (!rc)
@@ -2453,12 +2569,20 @@
 	else if (rc == -EINVAL)
 		led->flash_cfg->startup_dly = DELAY_128us;
 	else
-		return rc;
+		goto error_get_flash_reg;
 
 	led->flash_cfg->safety_timer =
 		of_property_read_bool(node, "qcom,safety-timer");
 
 	return 0;
+
+error_get_torch_reg:
+	regulator_put(led->flash_cfg->torch_boost_reg);
+
+error_get_flash_reg:
+	regulator_put(led->flash_cfg->flash_boost_reg);
+	return rc;
+
 }
 
 static int __devinit qpnp_get_config_pwm(struct pwm_config_data *pwm_cfg,
@@ -3047,9 +3171,12 @@
 			break;
 		case QPNP_ID_FLASH1_LED0:
 		case QPNP_ID_FLASH1_LED1:
-			if (led_array[i].flash_cfg->regulator_get)
+			if (led_array[i].flash_cfg->flash_reg_get)
 				regulator_put(led_array[i].flash_cfg-> \
 							flash_boost_reg);
+			if (led_array[i].flash_cfg->torch_enable)
+				regulator_put(led_array[i].flash_cfg->\
+							torch_boost_reg);
 			sysfs_remove_group(&led_array[i].cdev.dev->kobj,
 							&led_attr_group);
 			break;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 692a04e..9d606a1 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -128,6 +128,20 @@
 	DMX_IDX_H264_NON_IDR_START
 };
 
+static const struct dvb_dmx_video_patterns h264_non_access_unit_del = {
+	{0x00, 0x00, 0x01, 0x09},
+	{0xFF, 0xFF, 0xFF, 0x1F},
+	4,
+	DMX_IDX_H264_ACCESS_UNIT_DEL
+};
+
+static const struct dvb_dmx_video_patterns h264_non_sei = {
+	{0x00, 0x00, 0x01, 0x06},
+	{0xFF, 0xFF, 0xFF, 0x1F},
+	4,
+	DMX_IDX_H264_SEI
+};
+
 static const struct dvb_dmx_video_patterns vc1_seq_hdr = {
 	{0x00, 0x00, 0x01, 0x0F},
 	{0xFF, 0xFF, 0xFF, 0xFF},
@@ -1791,6 +1805,12 @@
 	case DMX_IDX_H264_NON_IDR_START:
 		return &h264_non_idr;
 
+	case DMX_IDX_H264_ACCESS_UNIT_DEL:
+		return &h264_non_access_unit_del;
+
+	case DMX_IDX_H264_SEI:
+		return &h264_non_sei;
+
 	case DMX_IDX_VC1_SEQ_HEADER:
 		return &vc1_seq_hdr;
 
@@ -1913,6 +1933,20 @@
 	}
 
 	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types & DMX_IDX_H264_ACCESS_UNIT_DEL)) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_H264_ACCESS_UNIT_DEL);
+		feed->pattern_num++;
+	}
+
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types & DMX_IDX_H264_SEI)) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+		feed->pattern_num++;
+	}
+
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
 		(feed->idx_params.types &
 		 (DMX_IDX_VC1_SEQ_HEADER |
 		  DMX_IDX_VC1_FIRST_SEQ_FRAME_START |
diff --git a/drivers/media/platform/msm/camera_v2/camera/camera.c b/drivers/media/platform/msm/camera_v2/camera/camera.c
index d3618c0..0313161 100644
--- a/drivers/media/platform/msm/camera_v2/camera/camera.c
+++ b/drivers/media/platform/msm/camera_v2/camera/camera.c
@@ -328,6 +328,10 @@
 
 		pr_debug("%s: num planes :%c\n", __func__,
 					user_fmt->num_planes);
+		/*num_planes need to bound checked, otherwise for loop
+		can execute forever */
+		if (WARN_ON(user_fmt->num_planes > VIDEO_MAX_PLANES))
+			return -EINVAL;
 		for (i = 0; i < user_fmt->num_planes; i++)
 			pr_debug("%s: plane size[%d]\n", __func__,
 					user_fmt->plane_sizes[i]);
@@ -349,6 +353,7 @@
 
 set_fmt_fail:
 	kzfree(sp->vb2_q.drv_priv);
+	sp->vb2_q.drv_priv = NULL;
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index f1f4c17..8c42ed2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -90,7 +90,8 @@
 	void (*enable_wm) (struct vfe_device *vfe_dev,
 		uint8_t wm_idx, uint8_t enable);
 	void (*cfg_io_format) (struct vfe_device *vfe_dev,
-		struct msm_vfe_axi_stream *stream_info);
+		enum msm_vfe_axi_stream_src stream_src,
+		uint32_t io_format);
 	void (*cfg_framedrop) (struct vfe_device *vfe_dev,
 		struct msm_vfe_axi_stream *stream_info);
 	void (*clear_framedrop) (struct vfe_device *vfe_dev,
@@ -289,6 +290,7 @@
 	enum msm_vfe_inputmux input_mux;
 	uint32_t width;
 	long pixel_clock;
+	uint32_t input_format;
 };
 
 enum msm_wm_ub_cfg_type {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 07a66e6..aac973e 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -144,7 +144,7 @@
 	/* CGC_OVERRIDE */
 	msm_camera_io_w(0x07FFFFFF, vfe_dev->vfe_base + 0xC);
 	/* BUS_CFG */
-	msm_camera_io_w(0x00000001, vfe_dev->vfe_base + 0x3C);
+	msm_camera_io_w(0x00000009, vfe_dev->vfe_base + 0x3C);
 	msm_camera_io_w(0x01000025, vfe_dev->vfe_base + 0x1C);
 	msm_camera_io_w_mb(0x1CFFFFFF, vfe_dev->vfe_base + 0x20);
 	msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
@@ -481,11 +481,11 @@
 }
 
 static void msm_vfe32_cfg_io_format(struct vfe_device *vfe_dev,
-	struct msm_vfe_axi_stream *stream_info)
+	enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
 {
 	int bpp, bpp_reg = 0;
 	uint32_t io_format_reg;
-	bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
+	bpp = msm_isp_get_bit_per_pixel(io_format);
 
 	switch (bpp) {
 	case 8:
@@ -499,7 +499,9 @@
 		break;
 	}
 	io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x6F8);
-	switch (stream_info->stream_src) {
+	switch (stream_src) {
+	case PIX_ENCODER:
+	case PIX_VIEWFINDER:
 	case CAMIF_RAW:
 		io_format_reg &= 0xFFFFCFFF;
 		io_format_reg |= bpp_reg << 12;
@@ -508,8 +510,6 @@
 		io_format_reg &= 0xFFFFFFC8;
 		io_format_reg |= bpp_reg << 4;
 		break;
-	case PIX_ENCODER:
-	case PIX_VIEWFINDER:
 	case RDI_INTF_0:
 	case RDI_INTF_1:
 	case RDI_INTF_2:
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 2db25a6..84b95f1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -688,11 +688,11 @@
 }
 
 static void msm_vfe40_cfg_io_format(struct vfe_device *vfe_dev,
-	struct msm_vfe_axi_stream *stream_info)
+	enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
 {
 	int bpp, bpp_reg = 0;
 	uint32_t io_format_reg;
-	bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
+	bpp = msm_isp_get_bit_per_pixel(io_format);
 
 	switch (bpp) {
 	case 8:
@@ -706,7 +706,9 @@
 		break;
 	}
 	io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x54);
-	switch (stream_info->stream_src) {
+	switch (stream_src) {
+	case PIX_ENCODER:
+	case PIX_VIEWFINDER:
 	case CAMIF_RAW:
 		io_format_reg &= 0xFFFFCFFF;
 		io_format_reg |= bpp_reg << 12;
@@ -715,8 +717,6 @@
 		io_format_reg &= 0xFFFFFFC8;
 		io_format_reg |= bpp_reg << 4;
 		break;
-	case PIX_ENCODER:
-	case PIX_VIEWFINDER:
 	case RDI_INTF_0:
 	case RDI_INTF_1:
 	case RDI_INTF_2:
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index d3138ed..5b7658d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -474,6 +474,7 @@
 int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
 {
 	int rc = 0, i;
+	uint32_t io_format = 0;
 	struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
 	struct msm_vfe_axi_stream *stream_info;
 
@@ -497,10 +498,20 @@
 		stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
 	msm_isp_axi_reserve_wm(&vfe_dev->axi_data, stream_info);
 
-	if (stream_cfg_cmd->stream_src == CAMIF_RAW ||
-		stream_cfg_cmd->stream_src == IDEAL_RAW)
-			vfe_dev->hw_info->vfe_ops.axi_ops.
-				cfg_io_format(vfe_dev, stream_info);
+	if (stream_info->stream_src < RDI_INTF_0) {
+		io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
+		if (stream_info->stream_src == CAMIF_RAW ||
+			stream_info->stream_src == IDEAL_RAW) {
+			if (stream_info->stream_src == CAMIF_RAW &&
+				io_format != stream_info->output_format)
+				pr_warn("%s: Overriding input format\n",
+					__func__);
+
+			io_format = stream_info->output_format;
+		}
+		vfe_dev->hw_info->vfe_ops.axi_ops.cfg_io_format(
+			vfe_dev, stream_info->stream_src, io_format);
+	}
 
 	msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
 
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index d857a14..33f63b3 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -150,6 +150,12 @@
 	stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
 		get_stats_idx(stream_req_cmd->stats_type);
 
+	if ((stats_idx > MSM_ISP_STATS_MAX) ||
+		(stats_idx == -EINVAL)) {
+		pr_err("%s: Stats idx Error\n", __func__);
+		return rc;
+	}
+
 	stream_info = &stats_data->stream_info[stats_idx];
 	if (stream_info->state != STATS_AVALIABLE) {
 		pr_err("%s: Stats already requested\n", __func__);
@@ -188,7 +194,7 @@
 
 int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
 {
-	int rc = 0;
+	int rc = -1;
 	struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
 	struct msm_vfe_stats_stream *stream_info = NULL;
 	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
@@ -202,6 +208,11 @@
 	}
 
 	stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
+	if (stats_idx > MSM_ISP_STATS_MAX) {
+		pr_err("%s: Stats idx Error\n", __func__);
+		return rc;
+	}
+
 	stream_info = &stats_data->stream_info[stats_idx];
 
 	framedrop_period = msm_isp_get_framedrop_period(
@@ -228,9 +239,14 @@
 	struct msm_vfe_stats_stream_release_cmd *stream_release_cmd = arg;
 	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
 	int stats_idx = STATS_IDX(stream_release_cmd->stream_handle);
-	struct msm_vfe_stats_stream *stream_info =
-		&stats_data->stream_info[stats_idx];
+	struct msm_vfe_stats_stream *stream_info = NULL;
 
+	if (stats_idx > MSM_ISP_STATS_MAX) {
+		pr_err("%s: Stats idx Error\n", __func__);
+		return rc;
+	}
+
+	stream_info = &stats_data->stream_info[stats_idx];
 	if (stream_info->state == STATS_AVALIABLE) {
 		pr_err("%s: stream already release\n", __func__);
 		return rc;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 3806213..590b636 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -273,6 +273,9 @@
 		return rc;
 	}
 
+	vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
+		input_cfg->d.pix_cfg.input_format;
+
 	vfe_dev->hw_info->vfe_ops.core_ops.cfg_camif(
 		vfe_dev, &input_cfg->d.pix_cfg);
 	return rc;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 3082e99..2f6e6a7 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -18,6 +18,7 @@
 #include <linux/videodev2.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
+#include <linux/iopoll.h>
 #include <media/msmb_isp.h>
 
 #include "msm_ispif.h"
@@ -38,6 +39,9 @@
 #define ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY  0x01
 #define ISPIF_INTF_CMD_DISABLE_IMMEDIATELY    0x02
 
+#define ISPIF_TIMEOUT_SLEEP_US                1000
+#define ISPIF_TIMEOUT_ALL_US                500000
+
 #undef CDBG
 #ifdef CONFIG_MSMB_CAMERA_DEBUG
 #define CDBG(fmt, args...) pr_debug(fmt, ##args)
@@ -63,6 +67,78 @@
 	{"ispif_ahb_clk", -1},
 };
 
+static struct msm_cam_clk_info ispif_8974_reset_clk_info[] = {
+	{"csi0_src_clk", INIT_RATE},
+	{"csi0_clk", NO_SET_RATE},
+	{"csi0_pix_clk", NO_SET_RATE},
+	{"csi0_rdi_clk", NO_SET_RATE},
+	{"vfe0_clk_src", INIT_RATE},
+	{"camss_vfe_vfe0_clk", NO_SET_RATE},
+	{"camss_csi_vfe0_clk", NO_SET_RATE},
+	{"vfe1_clk_src", INIT_RATE},
+	{"camss_vfe_vfe1_clk", NO_SET_RATE},
+	{"camss_csi_vfe1_clk", NO_SET_RATE},
+};
+
+static int msm_ispif_reset_hw(struct ispif_device *ispif)
+{
+	int rc = 0;
+	long timeout = 0;
+	struct clk *reset_clk[ARRAY_SIZE(ispif_8974_reset_clk_info)];
+
+	if (ispif->csid_version < CSID_VERSION_V30) {
+		/* currently reset is done only for 8974 */
+		return 0;
+	}
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 1);
+	if (rc < 0) {
+		pr_err("%s: cannot enable clock, error = %d",
+			__func__, rc);
+	}
+
+	init_completion(&ispif->reset_complete[VFE0]);
+	if (ispif->hw_num_isps > 1)
+		init_completion(&ispif->reset_complete[VFE1]);
+
+	/* initiate reset of ISPIF */
+	msm_camera_io_w(ISPIF_RST_CMD_MASK,
+				ispif->base + ISPIF_RST_CMD_ADDR);
+	if (ispif->hw_num_isps > 1)
+		msm_camera_io_w(ISPIF_RST_CMD_1_MASK,
+					ispif->base + ISPIF_RST_CMD_1_ADDR);
+
+	timeout = wait_for_completion_interruptible_timeout(
+			&ispif->reset_complete[VFE0], msecs_to_jiffies(500));
+	CDBG("%s: VFE0 done\n", __func__);
+	if (timeout <= 0) {
+		pr_err("%s: VFE0 reset wait timeout\n", __func__);
+		return -ETIMEDOUT;
+	}
+
+	if (ispif->hw_num_isps > 1) {
+		timeout = wait_for_completion_interruptible_timeout(
+				&ispif->reset_complete[VFE1],
+				msecs_to_jiffies(500));
+		CDBG("%s: VFE1 done\n", __func__);
+		if (timeout <= 0) {
+			pr_err("%s: VFE1 reset wait timeout\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+	if (rc < 0) {
+		pr_err("%s: cannot disable clock, error = %d",
+			__func__, rc);
+	}
+	return rc;
+}
+
 static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable)
 {
 	int rc = 0;
@@ -573,6 +649,7 @@
 	uint16_t cid_mask = 0;
 	uint32_t intf_addr;
 	enum msm_ispif_vfe_intf vfe_intf;
+	uint32_t stop_flag = 0;
 
 	BUG_ON(!ispif);
 	BUG_ON(!params);
@@ -625,10 +702,12 @@
 			goto end;
 		}
 
-		/* todo_bug_fix? very bad. use readl_poll_timeout */
-		while ((msm_camera_io_r(ispif->base + intf_addr) & 0xF) != 0xF)
-			CDBG("%s: Wait for %d Idle\n", __func__,
-				params->entries[i].intftype);
+		rc = readl_poll_timeout(ispif->base + intf_addr, stop_flag,
+					(stop_flag & 0xF) == 0xF,
+					ISPIF_TIMEOUT_SLEEP_US,
+					ISPIF_TIMEOUT_ALL_US);
+		if (rc < 0)
+			goto end;
 
 		/* disable CIDs in CID_MASK register */
 		msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
@@ -706,6 +785,9 @@
 	ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
 
 	if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
+		if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ)
+			complete(&ispif->reset_complete[VFE0]);
+
 		if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
 			pr_err("%s: VFE0 pix0 overflow.\n", __func__);
 
@@ -721,6 +803,9 @@
 		ispif_process_irq(ispif, out, VFE0);
 	}
 	if (ispif->vfe_info.num_vfe > 1) {
+		if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ)
+			complete(&ispif->reset_complete[VFE1]);
+
 		if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
 			pr_err("%s: VFE1 pix0 overflow.\n", __func__);
 
@@ -811,7 +896,7 @@
 		pr_err("%s: ahb_clk enable failed", __func__);
 		goto error_ahb;
 	}
-
+	msm_ispif_reset_hw(ispif);
 	rc = msm_ispif_reset(ispif);
 	if (rc == 0) {
 		ispif->ispif_state = ISPIF_POWER_UP;
@@ -1002,9 +1087,18 @@
 		goto error_sd_register;
 	}
 
-	if (pdev->dev.of_node)
+
+	if (pdev->dev.of_node) {
 		of_property_read_u32((&pdev->dev)->of_node,
 		"cell-index", &pdev->id);
+		rc = of_property_read_u32((&pdev->dev)->of_node,
+		"qcom,num-isps", &ispif->hw_num_isps);
+		if (rc)
+			/* backward compatibility */
+			ispif->hw_num_isps = 1;
+		/* not an error condition */
+		rc = 0;
+	}
 
 	ispif->mem = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "ispif");
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
index faa32aa..45e7354 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -58,5 +58,7 @@
 	enum msm_ispif_state_t ispif_state;
 	struct msm_ispif_vfe_info vfe_info;
 	struct clk *ahb_clk;
+	struct completion reset_complete[VFE_MAX];
+	uint32_t hw_num_isps;
 };
 #endif
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index a4eb274..822c0c8 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -1219,6 +1219,14 @@
 		goto ERROR1;
 	}
 
+	if ((new_frame->msg_len == 0) ||
+		(new_frame->msg_len > MSM_CPP_MAX_FRAME_LENGTH)) {
+		pr_err("%s:%d: Invalid frame len:%d\n", __func__,
+			__LINE__, new_frame->msg_len);
+		rc = -EINVAL;
+		goto ERROR1;
+	}
+
 	cpp_frame_msg = kzalloc(sizeof(uint32_t)*new_frame->msg_len,
 		GFP_KERNEL);
 	if (!cpp_frame_msg) {
@@ -1380,7 +1388,10 @@
 		pr_err("ioctl_ptr is null\n");
 		return -EINVAL;
 	}
-
+	if (cpp_dev == NULL) {
+		pr_err("cpp_dev is null\n");
+		return -EINVAL;
+	}
 	mutex_lock(&cpp_dev->mutex);
 	CPP_DBG("E cmd: %d\n", cmd);
 	switch (cmd) {
@@ -1396,8 +1407,16 @@
 
 	case VIDIOC_MSM_CPP_LOAD_FIRMWARE: {
 		if (cpp_dev->is_firmware_loaded == 0) {
-			kfree(cpp_dev->fw_name_bin);
-			cpp_dev->fw_name_bin = NULL;
+			if (cpp_dev->fw_name_bin != NULL) {
+				kfree(cpp_dev->fw_name_bin);
+				cpp_dev->fw_name_bin = NULL;
+			}
+			if ((ioctl_ptr->len == 0) ||
+				(ioctl_ptr->len > MSM_CPP_MAX_FW_NAME_LEN)) {
+				pr_err("invalid ioctl_ptr->len\n");
+				mutex_unlock(&cpp_dev->mutex);
+				return -EINVAL;
+			}
 			cpp_dev->fw_name_bin = kzalloc(ioctl_ptr->len+1,
 				GFP_KERNEL);
 			if (!cpp_dev->fw_name_bin) {
@@ -1406,13 +1425,9 @@
 				mutex_unlock(&cpp_dev->mutex);
 				return -EINVAL;
 			}
-
 			if (ioctl_ptr->ioctl_ptr == NULL) {
 				pr_err("ioctl_ptr->ioctl_ptr=NULL\n");
-				return -EINVAL;
-			}
-			if (ioctl_ptr->len == 0) {
-				pr_err("ioctl_ptr->len is 0\n");
+				mutex_unlock(&cpp_dev->mutex);
 				return -EINVAL;
 			}
 			rc = (copy_from_user(cpp_dev->fw_name_bin,
@@ -1426,11 +1441,6 @@
 				return -EINVAL;
 			}
 			*(cpp_dev->fw_name_bin+ioctl_ptr->len) = '\0';
-			if (cpp_dev == NULL) {
-				pr_err("cpp_dev is null\n");
-				return -EINVAL;
-			}
-
 			disable_irq(cpp_dev->irq->start);
 			cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
 			enable_irq(cpp_dev->irq->start);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c
index c2a5cad..6bd7feb 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c
@@ -140,6 +140,7 @@
 {
 	int i;
 	int rc = 0;
+	long clk_rate;
 	if (enable) {
 		for (i = 0; i < num_clk; i++) {
 			CDBG("%s enable %s\n", __func__,
@@ -158,6 +159,24 @@
 						   clk_info[i].clk_name);
 					goto cam_clk_set_err;
 				}
+			} else if (clk_info[i].clk_rate == INIT_RATE) {
+				clk_rate = clk_get_rate(clk_ptr[i]);
+				if (clk_rate == 0) {
+					clk_rate =
+						  clk_round_rate(clk_ptr[i], 0);
+					if (clk_rate < 0) {
+						pr_err("%s round rate failed\n",
+							  clk_info[i].clk_name);
+						goto cam_clk_set_err;
+					}
+					rc = clk_set_rate(clk_ptr[i],
+								clk_rate);
+					if (rc < 0) {
+						pr_err("%s set rate failed\n",
+							  clk_info[i].clk_name);
+						goto cam_clk_set_err;
+					}
+				}
 			}
 			rc = clk_prepare(clk_ptr[i]);
 			if (rc < 0) {
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h
index 73376e2..2e6f809 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h
@@ -18,6 +18,9 @@
 #include <mach/camera2.h>
 #include <media/msm_cam_sensor.h>
 
+#define NO_SET_RATE -1
+#define INIT_RATE -2
+
 void msm_camera_io_w(u32 data, void __iomem *addr);
 void msm_camera_io_w_mb(u32 data, void __iomem *addr);
 u32 msm_camera_io_r(void __iomem *addr);
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 8de8997..2bc460b 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -198,7 +198,8 @@
 		patterns[1] = dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
 		patterns[2] = dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START);
 		patterns[3] = dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_START);
-		*patterns_num = 4;
+		patterns[4] = dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+		*patterns_num = 5;
 		break;
 
 	case DMX_VIDEO_CODEC_VC1:
diff --git a/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c b/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
index 0908a6e..9ddb9b7 100644
--- a/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
+++ b/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
@@ -134,6 +134,8 @@
 			case DMX_IDX_H264_SPS:
 			case DMX_IDX_MPEG_SEQ_HEADER:
 			case DMX_IDX_VC1_SEQ_HEADER:
+			case DMX_IDX_H264_ACCESS_UNIT_DEL:
+			case DMX_IDX_H264_SEI:
 				DBG("SPS FOUND\n");
 				frame_found = false;
 				break;
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index d92a9c1..02b36f8 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -191,7 +191,7 @@
 			(struct ocmem_buf *) resource_value;
 
 		pkt->resource_type = HFI_RESOURCE_OCMEM;
-		pkt->size += sizeof(struct hfi_resource_ocmem);
+		pkt->size += sizeof(struct hfi_resource_ocmem) - sizeof(u32);
 		hfioc_mem->size = (u32) ocmem->len;
 		hfioc_mem->mem = (u8 *) ocmem->addr;
 		break;
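
A plausible reading of the `- sizeof(u32)` adjustment is that the base resource packet already reserves one 32-bit word where the OCMEM payload begins, so adding the full payload struct would count that word twice. That reading is an assumption about the HFI layout, not something stated in the patch; the standalone sketch below only shows the arithmetic with hypothetical structs.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout only: the base packet ends with a one-word
 * placeholder that the resource payload overlays.  Real HFI structs may
 * differ; this just shows why adding the full payload size would count
 * that word twice. */
struct base_pkt {
	uint32_t size;
	uint32_t packet_type;
	uint32_t rg_resource_data[1];	/* payload starts here */
};

struct ocmem_payload {
	uint32_t len;
	uint32_t addr;
};

int main(void)
{
	size_t naive = sizeof(struct base_pkt) + sizeof(struct ocmem_payload);
	size_t adjusted = naive - sizeof(uint32_t);
	size_t exact = offsetof(struct base_pkt, rg_resource_data)
			+ sizeof(struct ocmem_payload);

	printf("naive=%zu adjusted=%zu exact=%zu\n", naive, adjusted, exact);
	return 0;
}
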
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index a63ee61..653ba46 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -45,6 +45,7 @@
 	case HFI_ERR_SESSION_UNSUPPORTED_PROPERTY:
 	case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
 	case HFI_ERR_SESSION_INSUFFICIENT_RESOURCES:
+	case HFI_ERR_SESSION_UNSUPPORTED_STREAM:
 		vidc_err = VIDC_ERR_NOT_SUPPORTED;
 		break;
 	case HFI_ERR_SYS_MAX_SESSIONS_REACHED:
@@ -786,6 +787,8 @@
 	data_done.input_done.offset = pkt->offset;
 	data_done.input_done.filled_len = pkt->filled_len;
 	data_done.input_done.packet_buffer = pkt->packet_buffer;
+	data_done.input_done.status =
+		hfi_map_err_status((u32) pkt->error_type);
 	callback(SESSION_ETB_DONE, &data_done);
 }
 
@@ -1003,7 +1006,6 @@
 		struct hfi_msg_sys_session_end_done_packet *pkt)
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
-	struct hal_session *sess_close;
 
 	dprintk(VIDC_DBG, "RECEIVED:SESSION_END_DONE");
 
@@ -1021,12 +1023,6 @@
 	cmd_done.status = hfi_map_err_status((u32)pkt->error_type);
 	cmd_done.data = NULL;
 	cmd_done.size = 0;
-	sess_close = (struct hal_session *)pkt->session_id;
-	dprintk(VIDC_INFO, "deleted the session: 0x%x",
-		sess_close->session_id);
-	list_del(&sess_close->list);
-	kfree(sess_close);
-	sess_close = NULL;
 	callback(SESSION_END_DONE, &cmd_done);
 }
 
@@ -1035,7 +1031,6 @@
 	struct hfi_msg_sys_session_abort_done_packet *pkt)
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
-	struct hal_session *sess_close;
 
 	dprintk(VIDC_DBG, "RECEIVED:SESSION_ABORT_DONE");
 
@@ -1053,16 +1048,6 @@
 	cmd_done.data = NULL;
 	cmd_done.size = 0;
 
-	sess_close = (struct hal_session *)pkt->session_id;
-	if (!sess_close) {
-		dprintk(VIDC_ERR, "%s: invalid session pointer\n", __func__);
-		return;
-	}
-	dprintk(VIDC_ERR, "deleted the session: 0x%x",
-		sess_close->session_id);
-	list_del(&sess_close->list);
-	kfree(sess_close);
-	sess_close = NULL;
 	callback(SESSION_ABORT_DONE, &cmd_done);
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 79a492e..5140a03 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -500,8 +500,13 @@
 	struct smem_client *client = clt;
 	struct iommu_set *iommu_group_set = &client->res->iommu_group_set;
 	int i;
+	int j;
 	bool is_secure = (flags & SMEM_SECURE);
 	struct iommu_info *iommu_map;
+	if (!domain_num || !partition_num) {
+		dprintk(VIDC_DBG, "passed null to get domain partition!");
+		return -EINVAL;
+	}
 
 	*domain_num = -1;
 	*partition_num = -1;
@@ -512,14 +517,14 @@
 
 	for (i = 0; i < iommu_group_set->count; i++) {
 		iommu_map = &iommu_group_set->iommu_maps[i];
-		if ((iommu_map->is_secure == is_secure) &&
-			(iommu_map->buffer_type & buffer_type)) {
-			*domain_num = iommu_map->domain;
-			*partition_num = 0;
-			if ((buffer_type & HAL_BUFFER_INTERNAL_CMD_QUEUE) &&
-				(iommu_map->npartitions == 2))
-				*partition_num = 1;
-			break;
+		if (iommu_map->is_secure == is_secure) {
+			for (j = 0; j < iommu_map->npartitions; j++) {
+				if (iommu_map->buffer_type[j] & buffer_type) {
+					*domain_num = iommu_map->domain;
+					*partition_num = j;
+					break;
+				}
+			}
 		}
 	}
 	dprintk(VIDC_DBG, "domain: %d, partition: %d found!\n",
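
The rewritten lookup walks every partition of a map whose security setting matches and returns the first partition whose per-partition buffer-type mask contains the requested type. The same search, reduced to plain arrays with invented names (map, find_partition):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PARTS 4

struct map {
	bool secure;
	int domain;
	int nparts;
	unsigned buffer_type[MAX_PARTS];	/* bitmask per partition */
};

/* First matching-security map, first partition whose mask covers 'type'. */
static int find_partition(const struct map *maps, int nmaps, bool secure,
			  unsigned type, int *domain, int *partition)
{
	int i, j;

	for (i = 0; i < nmaps; i++) {
		if (maps[i].secure != secure)
			continue;
		for (j = 0; j < maps[i].nparts; j++) {
			if (maps[i].buffer_type[j] & type) {
				*domain = maps[i].domain;
				*partition = j;
				return 0;
			}
		}
	}
	return -1;
}

int main(void)
{
	struct map maps[] = {
		{ .secure = false, .domain = 1, .nparts = 2,
		  .buffer_type = { 0x1, 0x6 } },
		{ .secure = true, .domain = 2, .nparts = 1,
		  .buffer_type = { 0xff } },
	};
	int dom = -1, part = -1;

	if (!find_partition(maps, 2, false, 0x4, &dom, &part))
		printf("domain %d, partition %d\n", dom, part);
	return 0;
}
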
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 7897068..cddca74 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -683,6 +683,18 @@
 		.qmenu = perf_level,
 		.step = 0,
 	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB,
+		.name = "Intra Refresh CIR MBS",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = MAX_INTRA_REFRESH_MBS,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+		.cluster = MSM_VENC_CTRL_CLUSTER_INTRA_REFRESH,
+	}
 };
 
 #define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -1641,8 +1653,8 @@
 		air_ref = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF);
 		cir_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS);
 
-		property_id =
-			HAL_PARAM_VENC_INTRA_REFRESH;
+		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
+
 		intra_refresh.air_mbs = ctrl->val;
 		intra_refresh.mode = ir_mode->val;
 		intra_refresh.air_ref = air_ref->val;
@@ -1658,8 +1670,8 @@
 		air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS);
 		cir_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS);
 
-		property_id =
-			HAL_PARAM_VENC_INTRA_REFRESH;
+		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
+
 		intra_refresh.air_ref = ctrl->val;
 		intra_refresh.air_mbs = air_mbs->val;
 		intra_refresh.mode = ir_mode->val;
@@ -1676,8 +1688,7 @@
 		air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS);
 		air_ref = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF);
 
-		property_id =
-			HAL_PARAM_VENC_INTRA_REFRESH;
+		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
 
 		intra_refresh.cir_mbs = ctrl->val;
 		intra_refresh.air_mbs = air_mbs->val;
@@ -1687,6 +1698,22 @@
 		pdata = &intra_refresh;
 		break;
 	}
+	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: {
+		struct v4l2_ctrl *air_mbs, *air_ref;
+
+		air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS);
+		air_ref = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF);
+
+		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
+
+		intra_refresh.cir_mbs = ctrl->val;
+		intra_refresh.air_mbs = air_mbs->val;
+		intra_refresh.air_ref = air_ref->val;
+		intra_refresh.mode = HAL_INTRA_REFRESH_CYCLIC;
+
+		pdata = &intra_refresh;
+		break;
+	}
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
 		property_id =
 			HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 57b98dc..f94b6f1 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -627,6 +627,8 @@
 				if (inst->core)
 					hdev = inst->core->device;
 				if (hdev && inst->session) {
+					dprintk(VIDC_DBG,
+					"cleaning up inst: 0x%p", inst);
 					rc = call_hfi_op(hdev, session_clean,
 						(void *) inst->session);
 					if (rc)
@@ -693,10 +695,24 @@
 {
 	struct msm_vidc_cb_cmd_done *response = data;
 	struct msm_vidc_inst *inst;
+	struct hfi_device *hdev = NULL;
+
 	if (response) {
 		inst = (struct msm_vidc_inst *)response->session_id;
-		signal_session_msg_receipt(cmd, inst);
+		if (!inst || !inst->core || !inst->core->device) {
+			dprintk(VIDC_ERR, "%s invalid params\n", __func__);
+			return;
+		}
+		hdev = inst->core->device;
+		mutex_lock(&inst->lock);
+		if (inst->session) {
+			dprintk(VIDC_DBG, "cleaning up inst: 0x%p", inst);
+			call_hfi_op(hdev, session_clean,
+				(void *) inst->session);
+		}
 		inst->session = NULL;
+		mutex_unlock(&inst->lock);
+		signal_session_msg_receipt(cmd, inst);
 		show_stats(inst);
 	} else {
 		dprintk(VIDC_ERR,
@@ -737,22 +753,45 @@
 	struct msm_vidc_cb_data_done *response = data;
 	struct vb2_buffer *vb;
 	struct msm_vidc_inst *inst;
+	struct vidc_hal_ebd *empty_buf_done;
+
 	if (!response) {
 		dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
 		return;
 	}
 	vb = response->clnt_data;
 	inst = (struct msm_vidc_inst *)response->session_id;
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s Invalid response from vidc_hal\n",
+			__func__);
+		return;
+	}
 	if (vb) {
 		vb->v4l2_planes[0].bytesused = response->input_done.filled_len;
 		vb->v4l2_planes[0].data_offset = response->input_done.offset;
 		if (vb->v4l2_planes[0].data_offset > vb->v4l2_planes[0].length)
-			dprintk(VIDC_ERR, "Error: data_offset overflow\n");
+			dprintk(VIDC_INFO, "data_offset overflow length\n");
 		if (vb->v4l2_planes[0].bytesused > vb->v4l2_planes[0].length)
-			dprintk(VIDC_ERR, "Error: buffer overflow\n");
+			dprintk(VIDC_INFO, "bytesused overflow length\n");
 		if ((u8 *)vb->v4l2_planes[0].m.userptr !=
 			response->input_done.packet_buffer)
-			dprintk(VIDC_ERR, "Error: unexpected buffer address\n");
+			dprintk(VIDC_INFO, "Unexpected buffer address\n");
+		vb->v4l2_buf.flags = 0;
+		empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
+		if (empty_buf_done) {
+			if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) {
+				dprintk(VIDC_INFO,
+					"Failed : Unsupported input stream\n");
+				vb->v4l2_buf.flags |=
+					V4L2_QCOM_BUF_INPUT_UNSUPPORTED;
+			}
+			if (empty_buf_done->status == VIDC_ERR_BITSTREAM_ERR) {
+				dprintk(VIDC_INFO,
+					"Failed : Corrupted input stream\n");
+				vb->v4l2_buf.flags |=
+					V4L2_QCOM_BUF_DATA_CORRUPT;
+			}
+		}
 		mutex_lock(&inst->bufq[OUTPUT_PORT].lock);
 		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 		mutex_unlock(&inst->bufq[OUTPUT_PORT].lock);
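
The ETB-done path now translates the firmware's per-buffer status into flags on the returned buffer instead of only logging it. A compact sketch of that mapping; the status codes and flag bits below are placeholders, not the real VIDC/V4L2 values:

#include <stdio.h>

/* Placeholder status codes and flag bits, not the real VIDC/V4L2 values. */
enum ebd_status { EBD_OK, EBD_NOT_SUPPORTED, EBD_BITSTREAM_ERR };
#define BUF_FLAG_INPUT_UNSUPPORTED	(1u << 0)
#define BUF_FLAG_DATA_CORRUPT		(1u << 1)

static unsigned status_to_flags(enum ebd_status s)
{
	unsigned flags = 0;

	if (s == EBD_NOT_SUPPORTED)
		flags |= BUF_FLAG_INPUT_UNSUPPORTED;
	if (s == EBD_BITSTREAM_ERR)
		flags |= BUF_FLAG_DATA_CORRUPT;
	return flags;
}

int main(void)
{
	printf("ok=%#x unsupported=%#x corrupt=%#x\n",
	       status_to_flags(EBD_OK),
	       status_to_flags(EBD_NOT_SUPPORTED),
	       status_to_flags(EBD_BITSTREAM_ERR));
	return 0;
}
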
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index cb08da7..394ecdc5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -386,65 +386,56 @@
 {
 	int rc = 0;
 	struct platform_device *pdev = res->pdev;
-	struct device_node *ctx_node;
+	struct device_node *domains_parent_node = NULL;
+	struct device_node *domains_child_node = NULL;
 	struct iommu_set *iommu_group_set = &res->iommu_group_set;
-	int array_size;
-	int i;
+	int domain_idx = 0;
 	struct iommu_info *iommu_map;
-	u32 *buffer_types = NULL;
+	int array_size = 0;
 
-	if (!of_get_property(pdev->dev.of_node, "qcom,iommu-groups",
-				&array_size)) {
-		dprintk(VIDC_DBG, "iommu_groups property not present\n");
-		iommu_group_set->count = 0;
+	domains_parent_node = of_find_node_by_name(pdev->dev.of_node,
+				"qcom,vidc-iommu-domains");
+	if (!domains_parent_node) {
+		dprintk(VIDC_DBG, "Node qcom,vidc-iommu-domains not found.\n");
 		return 0;
 	}
 
-	iommu_group_set->count = array_size / sizeof(u32);
+	iommu_group_set->count = 0;
+	for_each_child_of_node(domains_parent_node, domains_child_node) {
+		iommu_group_set->count++;
+	}
+
 	if (iommu_group_set->count == 0) {
-		dprintk(VIDC_ERR, "No group present in iommu_groups\n");
+		dprintk(VIDC_ERR, "No group present in iommu_domains\n");
 		rc = -ENOENT;
 		goto err_no_of_node;
 	}
-
 	iommu_group_set->iommu_maps = kzalloc(iommu_group_set->count *
 			sizeof(*(iommu_group_set->iommu_maps)), GFP_KERNEL);
+
 	if (!iommu_group_set->iommu_maps) {
-		dprintk(VIDC_ERR, "%s Failed to alloc iommu_maps\n",
-			__func__);
+		dprintk(VIDC_ERR, "Cannot allocate iommu_maps\n");
 		rc = -ENOMEM;
 		goto err_no_of_node;
 	}
 
-	buffer_types = kzalloc(iommu_group_set->count * sizeof(*buffer_types),
-				GFP_KERNEL);
-	if (!buffer_types) {
-		dprintk(VIDC_ERR,
-			"%s Failed to alloc iommu group buffer types\n",
-			__func__);
-		rc = -ENOMEM;
-		goto err_load_groups;
-	}
+	/* set up each context bank */
+	for_each_child_of_node(domains_parent_node, domains_child_node) {
+		struct device_node *ctx_node = of_parse_phandle(
+						domains_child_node,
+						"qcom,vidc-domain-phandle",
+						0);
+		if (domain_idx >= iommu_group_set->count)
+			break;
 
-	rc = of_property_read_u32_array(pdev->dev.of_node,
-			"qcom,iommu-group-buffer-types", buffer_types,
-			iommu_group_set->count);
-	if (rc) {
-		dprintk(VIDC_ERR,
-		    "%s Failed to read iommu group buffer types\n", __func__);
-		goto err_load_groups;
-	}
-
-	for (i = 0; i < iommu_group_set->count; i++) {
-		iommu_map = &iommu_group_set->iommu_maps[i];
-		ctx_node = of_parse_phandle(pdev->dev.of_node,
-				"qcom,iommu-groups", i);
+		iommu_map = &iommu_group_set->iommu_maps[domain_idx];
 		if (!ctx_node) {
-			dprintk(VIDC_ERR, "Unable to parse phandle : %u\n", i);
+			dprintk(VIDC_ERR, "Unable to parse phandle\n");
 			rc = -EBADHANDLE;
 			goto err_load_groups;
 		}
 
+		/* domain info from domains.dtsi */
 		rc = of_property_read_string(ctx_node, "label",
 				&(iommu_map->name));
 		if (rc) {
@@ -452,6 +443,11 @@
 			goto err_load_groups;
 		}
 
+		dprintk(VIDC_DBG,
+				"domain %d has name %s\n",
+				domain_idx,
+				iommu_map->name);
+
 		if (!of_get_property(ctx_node, "qcom,virtual-addr-pool",
 				&array_size)) {
 			dprintk(VIDC_ERR,
@@ -463,25 +459,48 @@
 
 		iommu_map->npartitions = array_size / sizeof(u32) / 2;
 
+		dprintk(VIDC_DBG,
+				"%d partitions in domain %d",
+				iommu_map->npartitions,
+				domain_idx);
+
 		rc = of_property_read_u32_array(ctx_node,
 				"qcom,virtual-addr-pool",
 				(u32 *)iommu_map->addr_range,
 				iommu_map->npartitions * 2);
 		if (rc) {
 			dprintk(VIDC_ERR,
-				"Could not read addr pool for group : %s\n",
-				iommu_map->name);
+				"Could not read addr pool for group : %s (%d)\n",
+				iommu_map->name,
+				rc);
 			goto err_load_groups;
 		}
 
-		iommu_map->buffer_type = buffer_types[i];
 		iommu_map->is_secure =
 			of_property_read_bool(ctx_node,	"qcom,secure-domain");
+
+		dprintk(VIDC_DBG,
+				"domain %s : secure = %d",
+				iommu_map->name,
+				iommu_map->is_secure);
+
+		/* setup partitions and buffer type per partition */
+		rc = of_property_read_u32_array(domains_child_node,
+				"qcom,vidc-partition-buffer-types",
+				iommu_map->buffer_type,
+				iommu_map->npartitions);
+
+		if (rc) {
+			dprintk(VIDC_ERR,
+					"cannot load partition buffer type information (%d)",
+					rc);
+			rc = -ENOENT;
+			goto err_load_groups;
+		}
+		domain_idx++;
 	}
-	kfree(buffer_types);
-	return 0;
+	return rc;
 err_load_groups:
-	kfree(buffer_types);
 	msm_vidc_free_iommu_groups(res);
 err_no_of_node:
 	return rc;
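
The new parser makes two passes over the qcom,vidc-iommu-domains children: count them, allocate one iommu_info per child, then fill each entry, with a single error path that frees everything. A userspace imitation of that two-pass shape, where a linked list stands in for the device-tree children and all names are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct child { const char *label; struct child *next; };
struct entry { const char *name; };

static int parse(struct child *head, struct entry **out, int *out_count)
{
	struct child *c;
	struct entry *entries;
	int count = 0, i = 0;

	for (c = head; c; c = c->next)			/* pass 1: count */
		count++;
	if (!count)
		return -1;

	entries = calloc(count, sizeof(*entries));
	if (!entries)
		return -1;

	for (c = head; c && i < count; c = c->next)	/* pass 2: fill */
		entries[i++].name = c->label;

	*out = entries;
	*out_count = count;
	return 0;
}

int main(void)
{
	struct child ns = { "venus_ns", NULL };
	struct child cp = { "venus_cp", &ns };
	struct entry *e = NULL;
	int n = 0;

	if (!parse(&cp, &e, &n))
		for (int i = 0; i < n; i++)
			printf("domain %d: %s\n", i, e[i].name);
	free(e);
	return 0;
}
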
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 43af909..8176ea5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -16,6 +16,7 @@
 
 #include <linux/platform_device.h>
 #include <media/msm_vidc.h>
+#define MAX_BUFFER_TYPES 32
 
 struct load_freq_table {
 	u32 load;
@@ -39,11 +40,11 @@
 
 struct iommu_info {
 	const char *name;
-	u32 buffer_type;
+	u32 buffer_type[MAX_BUFFER_TYPES];
 	struct iommu_group *group;
 	int domain;
 	bool is_secure;
-	struct addr_range addr_range[2];
+	struct addr_range addr_range[MAX_BUFFER_TYPES];
 	int npartitions;
 };
 
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 47b88db..5416210 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -2804,6 +2804,9 @@
 				iommu_map->addr_range[0].start;
 			memprot.cp_nonpixel_size =
 				iommu_map->addr_range[0].size;
+		} else if (strcmp(iommu_map->name, "venus_cp") == 0) {
+			memprot.cp_nonpixel_start =
+				iommu_map->addr_range[1].start;
 		}
 	}
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 5c22552..010f15d 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -936,6 +936,7 @@
 	u32 timestamp_hi;
 	u32 timestamp_lo;
 	u32 flags;
+	u32 status;
 	u32 mark_target;
 	u32 mark_data;
 	u32 stats;
diff --git a/drivers/media/platform/msm/wfd/vsg-subdev.c b/drivers/media/platform/msm/wfd/vsg-subdev.c
index 6ffaffa..0f2fbbb 100644
--- a/drivers/media/platform/msm/wfd/vsg-subdev.c
+++ b/drivers/media/platform/msm/wfd/vsg-subdev.c
@@ -424,7 +424,8 @@
 			struct timespec diff = timespec_sub(buf_info->time,
 					context->last_buffer->time);
 			struct timespec temp = ns_to_timespec(
-						context->frame_interval);
+					context->frame_interval -
+					context->frame_interval_variance);
 
 			if (timespec_compare(&diff, &temp) >= 0)
 				push = true;
@@ -633,6 +634,61 @@
 	return 0;
 }
 
+static long vsg_set_frame_interval_variance(struct v4l2_subdev *sd, void *arg)
+{
+	struct vsg_context *context = NULL;
+	int64_t variance;
+
+	if (!arg || !sd) {
+		WFD_MSG_ERR("ERROR, invalid arguments into %s\n", __func__);
+		return -EINVAL;
+	}
+
+	context = (struct vsg_context *)sd->dev_priv;
+	variance = *(int64_t *)arg;
+
+	if (variance < 0 || variance > 100) {
+		WFD_MSG_ERR("ERROR, invalid variance %lld%% into %s\n",
+				variance, __func__);
+		return -EINVAL;
+	} else if (context->mode == VSG_MODE_CFR) {
+		WFD_MSG_ERR("Setting FPS variance not supported in CFR mode\n");
+		return -ENOTSUPP;
+	}
+
+	mutex_lock(&context->mutex);
+
+	/* Convert from percentage to a value in nano seconds */
+	variance *= context->frame_interval;
+	do_div(variance, 100);
+
+	context->frame_interval_variance = variance;
+	mutex_unlock(&context->mutex);
+
+	return 0;
+}
+
+static long vsg_get_frame_interval_variance(struct v4l2_subdev *sd, void *arg)
+{
+	struct vsg_context *context = NULL;
+	int64_t variance;
+
+	if (!arg || !sd) {
+		WFD_MSG_ERR("ERROR, invalid arguments into %s\n", __func__);
+		return -EINVAL;
+	}
+
+	context = (struct vsg_context *)sd->dev_priv;
+
+	mutex_lock(&context->mutex);
+	variance = context->frame_interval_variance * 100;
+	do_div(variance, context->frame_interval);
+	*(int64_t *)arg = variance;
+	mutex_unlock(&context->mutex);
+
+	return 0;
+}
+
 static long vsg_set_mode(struct v4l2_subdev *sd, void *arg)
 {
 	struct vsg_context *context = NULL;
@@ -702,6 +758,12 @@
 	case VSG_SET_FRAME_INTERVAL:
 		rc = vsg_set_frame_interval(sd, arg);
 		break;
+	case VSG_SET_FRAME_INTERVAL_VARIANCE:
+		rc = vsg_set_frame_interval_variance(sd, arg);
+		break;
+	case VSG_GET_FRAME_INTERVAL_VARIANCE:
+		rc = vsg_get_frame_interval_variance(sd, arg);
+		break;
 	case VSG_GET_MAX_FRAME_INTERVAL:
 		rc = vsg_get_max_frame_interval(sd, arg);
 		break;
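
vsg_set_frame_interval_variance() stores the variance as an absolute nanosecond value (percent * frame_interval / 100), and the getter converts it back to a percentage; both use do_div() because the values are 64-bit. A quick standalone check of that conversion using ordinary 64-bit division (the 25 fps interval is just an example value):

#include <inttypes.h>
#include <stdio.h>

static int64_t pct_to_ns(int64_t pct, int64_t interval_ns)
{
	return pct * interval_ns / 100;		/* do_div() in the kernel */
}

static int64_t ns_to_pct(int64_t variance_ns, int64_t interval_ns)
{
	return variance_ns * 100 / interval_ns;
}

int main(void)
{
	int64_t interval = 40000000;		/* 25 fps expressed in ns */
	int64_t var_ns = pct_to_ns(15, interval);

	printf("15%% of %" PRId64 " ns = %" PRId64 " ns, back to %" PRId64 "%%\n",
	       interval, var_ns, ns_to_pct(var_ns, interval));
	return 0;
}
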
diff --git a/drivers/media/platform/msm/wfd/vsg-subdev.h b/drivers/media/platform/msm/wfd/vsg-subdev.h
index f5e4f5d..3347e5b 100644
--- a/drivers/media/platform/msm/wfd/vsg-subdev.h
+++ b/drivers/media/platform/msm/wfd/vsg-subdev.h
@@ -59,7 +59,7 @@
 	struct vsg_buf_info	free_queue, busy_queue;
 	struct vsg_msg_ops vmops;
 	/* All time related values below in nanosecs */
-	int64_t frame_interval, max_frame_interval;
+	int64_t frame_interval, max_frame_interval, frame_interval_variance;
 	struct workqueue_struct *work_queue;
 	struct hrtimer threshold_timer;
 	struct mutex mutex;
@@ -90,9 +90,11 @@
 /* Time related arguments for frame interval ioctls are always in nanosecs*/
 #define VSG_SET_FRAME_INTERVAL _IOW(VSG_MAGIC_IOCTL, 9, int64_t *)
 #define VSG_GET_FRAME_INTERVAL _IOR(VSG_MAGIC_IOCTL, 10, int64_t *)
-#define VSG_SET_MAX_FRAME_INTERVAL _IOW(VSG_MAGIC_IOCTL, 11, int64_t *)
-#define VSG_GET_MAX_FRAME_INTERVAL _IOR(VSG_MAGIC_IOCTL, 12, int64_t *)
-#define VSG_SET_MODE _IOW(VSG_MAGIC_IOCTL, 13, enum vsg_modes *)
+#define VSG_SET_FRAME_INTERVAL_VARIANCE _IOW(VSG_MAGIC_IOCTL, 11, int64_t *)
+#define VSG_GET_FRAME_INTERVAL_VARIANCE _IOR(VSG_MAGIC_IOCTL, 12, int64_t *)
+#define VSG_SET_MAX_FRAME_INTERVAL _IOW(VSG_MAGIC_IOCTL, 13, int64_t *)
+#define VSG_GET_MAX_FRAME_INTERVAL _IOR(VSG_MAGIC_IOCTL, 14, int64_t *)
+#define VSG_SET_MODE _IOW(VSG_MAGIC_IOCTL, 15, enum vsg_modes *)
 
 extern int vsg_init(struct v4l2_subdev *sd, u32 val);
 extern long vsg_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
diff --git a/drivers/media/platform/msm/wfd/wfd-ioctl.c b/drivers/media/platform/msm/wfd/wfd-ioctl.c
index 6554947..58e008d 100644
--- a/drivers/media/platform/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/platform/msm/wfd/wfd-ioctl.c
@@ -1124,7 +1124,9 @@
 	struct wfd_device *wfd_dev = video_drvdata(filp);
 	struct wfd_inst *inst = file_to_inst(filp);
 	struct v4l2_qcom_frameskip frameskip;
-	int64_t frame_interval, max_frame_interval;
+	int64_t frame_interval = 0,
+		max_frame_interval = 0,
+		frame_interval_variance = 0;
 	void *extendedmode = NULL;
 	enum vsg_modes vsg_mode = VSG_MODE_VFR;
 	enum venc_framerate_modes venc_mode = VENC_MODE_VFR;
@@ -1177,6 +1179,7 @@
 			goto set_parm_fail;
 
 		max_frame_interval = (int64_t)frameskip.maxframeinterval;
+		frame_interval_variance = frameskip.fpsvariance;
 		vsg_mode = VSG_MODE_VFR;
 		venc_mode = VENC_MODE_VFR;
 
@@ -1206,6 +1209,16 @@
 		goto set_parm_fail;
 	}
 
+	if (frame_interval_variance) {
+		rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
+				ioctl, VSG_SET_FRAME_INTERVAL_VARIANCE,
+				&frame_interval_variance);
+		if (rc) {
+			WFD_MSG_ERR("Setting FR variance for VSG failed\n");
+			goto set_parm_fail;
+		}
+	}
+
 set_parm_fail:
 	return rc;
 }
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index 7934234..b9eb8f9 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -1635,8 +1635,9 @@
 
 	if (status)
 		return;
-	if (radio->mode != FM_CALIB)
+	if ((radio->mode != FM_CALIB) && (radio->mode != FM_OFF))
 		iris_q_event(radio, IRIS_EVT_RADIO_DISABLED);
+	radio->mode = FM_OFF;
 
 	radio_hci_req_complete(hdev, status);
 }
@@ -2694,7 +2695,7 @@
 			radio->fm_hdev);
 	if (retval < 0)
 		FMDERR("Disable Failed after calibration %d", retval);
-	radio->mode = FM_OFF;
+	radio->mode = FM_TURNING_OFF;
 	return retval;
 }
 static int iris_vidioc_g_ctrl(struct file *file, void *priv,
@@ -3240,7 +3241,7 @@
 						   " %d\n", retval);
 					return retval;
 				}
-				radio->mode = FM_OFF;
+				radio->mode = FM_TURNING_OFF;
 				break;
 			case FM_TRANS:
 				retval = hci_cmd(HCI_FM_DISABLE_TRANS_CMD,
@@ -3251,7 +3252,7 @@
 						" %d\n", retval);
 					return retval;
 				}
-				radio->mode = FM_OFF;
+				radio->mode = FM_TURNING_OFF;
 				break;
 			default:
 				retval = -EINVAL;
@@ -4042,16 +4043,18 @@
 	if (radio->mode == FM_OFF)
 		return 0;
 
-	if (radio->mode == FM_RECV)
+	if (radio->mode == FM_RECV) {
+		radio->mode = FM_OFF;
 		retval = hci_cmd(HCI_FM_DISABLE_RECV_CMD,
 						radio->fm_hdev);
-	else if (radio->mode == FM_TRANS)
+	} else if (radio->mode == FM_TRANS) {
+		radio->mode = FM_OFF;
 		retval = hci_cmd(HCI_FM_DISABLE_TRANS_CMD,
 					radio->fm_hdev);
+	}
 	if (retval < 0)
 		FMDERR("Err on disable FM %d\n", retval);
 
-	radio->mode = FM_OFF;
 	return retval;
 }
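
The radio-iris changes add an FM_TURNING_OFF state, and the release path marks the mode FM_OFF before issuing the disable, so the disable-complete handler only queues IRIS_EVT_RADIO_DISABLED when a transition still needs it. A toy model of that guard, with the asynchronous firmware reply collapsed into a direct call (the states and flow here are simplified assumptions, not the driver itself):

#include <stdio.h>

enum fm_mode { FM_OFF, FM_RECV, FM_TRANS, FM_CALIB, FM_TURNING_OFF };

static enum fm_mode mode = FM_RECV;
static int events_queued;

/* Mirrors the disable-complete handler: only queue the "disabled" event
 * if the radio was not calibrating and not already accounted as off. */
static void disable_complete(void)
{
	if (mode != FM_CALIB && mode != FM_OFF)
		events_queued++;
	mode = FM_OFF;
}

/* release() marks FM_OFF up front; the ioctl path goes through TURNING_OFF. */
static void request_disable(int from_release_path)
{
	mode = from_release_path ? FM_OFF : FM_TURNING_OFF;
	disable_complete();	/* pretend the firmware replied immediately */
}

int main(void)
{
	request_disable(0);	/* ioctl disable: one event queued */
	mode = FM_RECV;
	request_disable(1);	/* release path: event suppressed */
	printf("events queued: %d\n", events_queued);
	return 0;
}
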
 
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 3d0abce..1eebe61 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -470,7 +470,7 @@
 	found = wcd9xxx_check_codec_type(wcd9xxx, &version);
 	if (!found) {
 		ret = -ENODEV;
-		goto err_irq;
+		goto err;
 	} else {
 		wcd9xxx->codec_type = found;
 		wcd9xxx->version = version;
diff --git a/drivers/misc/qpnp-misc.c b/drivers/misc/qpnp-misc.c
index 608be81..6ad4816 100644
--- a/drivers/misc/qpnp-misc.c
+++ b/drivers/misc/qpnp-misc.c
@@ -21,7 +21,8 @@
 
 #define QPNP_MISC_DEV_NAME "qcom,qpnp-misc"
 
-#define REVID_REVISION2	0x1
+#define REG_DIG_MAJOR_REV	0x01
+#define REG_SUBTYPE		0x05
 
 static DEFINE_MUTEX(qpnp_misc_dev_list_mutex);
 static LIST_HEAD(qpnp_misc_dev_list);
@@ -45,6 +46,11 @@
 	struct spmi_device		*spmi;
 };
 
+struct qpnp_misc_version {
+	u8				subtype;
+	u8				dig_major_rev;
+};
+
 static struct of_device_id qpnp_misc_match_table[] = {
 	{ .compatible = QPNP_MISC_DEV_NAME },
 	{}
@@ -63,17 +69,28 @@
 	return val;
 }
 
-#define REV2_IRQ_AVAILABLE_VERSION	2
+static struct qpnp_misc_version irq_support_version[] = {
+	{0x01, 0x02}, /* PM8941 */
+	{0x07, 0x00}, /* PM8226 */
+	{0x09, 0x00}, /* PMA8084 */
+};
+
 static bool __misc_irqs_available(struct qpnp_misc_dev *dev)
 {
-	u8 rev2;
+	int i;
+	u8 subtype, dig_major_rev;
 
-	rev2 = qpnp_read_byte(dev->spmi,
-		dev->resource->start + REVID_REVISION2);
-	pr_debug("rev2 0x%x\n", rev2);
+	subtype = qpnp_read_byte(dev->spmi, dev->resource->start + REG_SUBTYPE);
+	pr_debug("subtype = 0x%02X\n", subtype);
 
-	if (rev2 >= REV2_IRQ_AVAILABLE_VERSION)
-		return 1;
+	dig_major_rev = qpnp_read_byte(dev->spmi,
+		dev->resource->start + REG_DIG_MAJOR_REV);
+	pr_debug("dig_major rev = 0x%02X\n", dig_major_rev);
+
+	for (i = 0; i < ARRAY_SIZE(irq_support_version); i++)
+		if (subtype == irq_support_version[i].subtype
+		    && dig_major_rev >= irq_support_version[i].dig_major_rev)
+			return 1;
 
 	return 0;
 }
@@ -84,6 +101,11 @@
 	struct qpnp_misc_dev *mdev = NULL;
 	struct qpnp_misc_dev *mdev_found = NULL;
 
+	if (IS_ERR_OR_NULL(consumer_dev)) {
+		pr_err("Invalid consumer device pointer\n");
+		return -EINVAL;
+	}
+
 	misc_node = of_parse_phandle(consumer_dev->of_node, "qcom,misc-ref", 0);
 	if (!misc_node) {
 		pr_debug("Could not find qcom,misc-ref property in %s\n",
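
__misc_irqs_available() now matches on peripheral subtype plus a minimum digital major revision instead of a single revision threshold. The same table-driven check as a standalone sketch; the table entries mirror the patch, while the values probed in main() are made up:

#include <stdbool.h>
#include <stdio.h>

struct misc_version { unsigned char subtype, dig_major_rev; };

static const struct misc_version irq_support[] = {
	{ 0x01, 0x02 },	/* PM8941 */
	{ 0x07, 0x00 },	/* PM8226 */
	{ 0x09, 0x00 },	/* PMA8084 */
};

static bool irqs_available(unsigned char subtype, unsigned char dig_major_rev)
{
	for (unsigned i = 0; i < sizeof(irq_support) / sizeof(irq_support[0]); i++)
		if (subtype == irq_support[i].subtype &&
		    dig_major_rev >= irq_support[i].dig_major_rev)
			return true;
	return false;
}

int main(void)
{
	printf("PM8941 v1: %d\n", irqs_available(0x01, 0x01));	/* too old */
	printf("PM8941 v2: %d\n", irqs_available(0x01, 0x02));
	printf("PM8226:    %d\n", irqs_available(0x07, 0x00));
	return 0;
}
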
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index bd838fc..0987b6b 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -890,8 +890,8 @@
 	data->type = QSEECOM_SECURE_SERVICE;
 
 	switch (req.cmd_id) {
-	case QSEE_RPMB_PROVISION_KEY_COMMAND:
-	case QSEE_RPMB_ERASE_COMMAND:
+	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
+	case QSEOS_RPMB_ERASE_COMMAND:
 		if (__qseecom_process_rpmb_svc_cmd(data, &req,
 				&send_svc_ireq))
 			return -EINVAL;
@@ -2647,6 +2647,11 @@
 	struct qseecom_dev_handle *data = file->private_data;
 	void __user *argp = (void __user *) arg;
 
+	if (!data) {
+		pr_err("Invalid/uninitialized device handle\n");
+		return -EINVAL;
+	}
+
 	if (data->abort) {
 		pr_err("Aborting qseecom driver\n");
 		return -ENODEV;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index a990f43..b140510 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -416,7 +416,7 @@
 		if (ret)
 			pr_err("%s: %s: failed setting runtime active: ret: %d\n",
 			       mmc_hostname(card->host), __func__, ret);
-		else
+		else if (!mmc_card_sdio(card))
 			pm_runtime_enable(&card->dev);
 	}
 
@@ -424,7 +424,7 @@
 	if (ret)
 		return ret;
 
-	if (mmc_use_core_runtime_pm(card->host)) {
+	if (mmc_use_core_runtime_pm(card->host) && !mmc_card_sdio(card)) {
 		card->rpm_attrib.show = show_rpm_delay;
 		card->rpm_attrib.store = store_rpm_delay;
 		sysfs_attr_init(&card->rpm_attrib.attr);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 3495f4d..96f8f2e 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2472,6 +2472,7 @@
 	msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
 	msm_host->mmc->caps2 |= MMC_CAP2_STOP_REQUEST;
 	msm_host->mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
+	msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
 
 	if (msm_host->pdata->nonremovable)
 		msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
diff --git a/drivers/net/ethernet/msm/msm_rmnet_bam.c b/drivers/net/ethernet/msm/msm_rmnet_bam.c
index 83f486c..44f86dd 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_bam.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_bam.c
@@ -55,15 +55,16 @@
 #define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
 
 /* Configure device instances */
-#define RMNET_DEVICE_COUNT (8)
+#define RMNET_DEVICE_COUNT  9
 
 /* allow larger frames */
 #define RMNET_DATA_LEN 2000
 
 #define DEVICE_ID_INVALID   -1
 
-#define DEVICE_INACTIVE      0
+#define DEVICE_INACTIVE      2
 #define DEVICE_ACTIVE        1
+#define DEVICE_UNINITIALIZED 0
 
 #define HEADROOM_FOR_BAM   8 /* for mux header */
 #define HEADROOM_FOR_QOS    8
@@ -85,6 +86,7 @@
 	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
 	uint8_t device_up;
 	uint8_t in_reset;
+	struct platform_driver *bam_pdev;
 };
 
 #ifdef CONFIG_MSM_RMNET_DEBUG
@@ -393,7 +395,7 @@
 
 	DBG0("[%s] __rmnet_open()\n", dev->name);
 
-	if (!p->device_up) {
+	if (p->device_up == DEVICE_UNINITIALIZED) {
 		r = msm_bam_dmux_open(p->ch_id, dev, bam_notify);
 
 		if (r < 0) {
@@ -401,6 +403,14 @@
 					__func__, p->ch_id, r);
 			return -ENODEV;
 		}
+
+		r = platform_driver_register(p->bam_pdev);
+		if (r) {
+			pr_err("%s: bam pdev registration failed n=%d rc=%d\n",
+					__func__, p->ch_id, r);
+			msm_bam_dmux_close(p->ch_id);
+			return r;
+		}
 	}
 
 	p->device_up = DEVICE_ACTIVE;
@@ -427,7 +437,7 @@
 	struct rmnet_private *p = netdev_priv(dev);
 	int rc = 0;
 
-	if (p->device_up) {
+	if (p->device_up == DEVICE_ACTIVE) {
 		/* do not close rmnet port once up,  this causes
 		   remote side to hang if tried to open again */
 		p->device_up = DEVICE_INACTIVE;
@@ -711,6 +721,11 @@
 			break;
 	}
 
+	if (i >= RMNET_DEVICE_COUNT) {
+		pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
+		return -ENODEV;
+	}
+
 	p = netdev_priv(netdevs[i]);
 	if (p->in_reset) {
 		p->in_reset = 0;
@@ -766,7 +781,7 @@
 
 	if (i >= RMNET_REV_DEVICE_COUNT) {
 		pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
-		return 0;
+		return -ENODEV;
 	}
 
 	p = netdev_priv(netdevs_rev[i]);
@@ -871,8 +886,13 @@
 #endif
 
 	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+		const char *dev_name = "rmnet%d";
+
+		if (n == BAM_DMUX_USB_RMNET_0)
+			dev_name = "rmnet_usb%d";
+
 		dev = alloc_netdev(sizeof(struct rmnet_private),
-				   "rmnet%d", rmnet_setup);
+				   dev_name, rmnet_setup);
 
 		if (!dev) {
 			pr_err("%s: no memory for netdev %d\n", __func__, n);
@@ -887,6 +907,7 @@
 		p->ch_id = n;
 		p->waiting_for_ul_skb = NULL;
 		p->in_reset = 0;
+		p->device_up = DEVICE_UNINITIALIZED;
 		spin_lock_init(&p->lock);
 		spin_lock_init(&p->tx_queue_lock);
 #ifdef CONFIG_MSM_RMNET_DEBUG
@@ -898,6 +919,7 @@
 		if (ret) {
 			pr_err("%s: unable to register netdev"
 				   " %d rc=%d\n", __func__, n, ret);
+			netdevs[n] = NULL;
 			free_netdev(dev);
 			return ret;
 		}
@@ -921,18 +943,16 @@
 		bam_rmnet_drivers[n].probe = bam_rmnet_probe;
 		bam_rmnet_drivers[n].remove = bam_rmnet_remove;
 		tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
-		if (tempname == NULL)
-			return -ENOMEM;
+		if (tempname == NULL) {
+			netdevs[n] = NULL;
+			ret = -ENOMEM;
+			goto error;
+		}
 		scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
 									n);
 		bam_rmnet_drivers[n].driver.name = tempname;
 		bam_rmnet_drivers[n].driver.owner = THIS_MODULE;
-		ret = platform_driver_register(&bam_rmnet_drivers[n]);
-		if (ret) {
-			pr_err("%s: registration failed n=%d rc=%d\n",
-					__func__, n, ret);
-			return ret;
-		}
+		p->bam_pdev = &bam_rmnet_drivers[n];
 	}
 	/*Support for new rmnet ports */
 	for (n = 0; n < RMNET_REV_DEVICE_COUNT; n++) {
@@ -953,6 +973,7 @@
 		p->ch_id = n+BAM_DMUX_DATA_REV_RMNET_0;
 		p->waiting_for_ul_skb = NULL;
 		p->in_reset = 0;
+		p->device_up = DEVICE_UNINITIALIZED;
 		spin_lock_init(&p->lock);
 		spin_lock_init(&p->tx_queue_lock);
 
@@ -960,6 +981,7 @@
 		if (ret) {
 			pr_err("%s: unable to register rev netdev %d rc=%d\n",
 							__func__, n, ret);
+			netdevs_rev[n] = NULL;
 			free_netdev(dev);
 			return ret;
 		}
@@ -968,20 +990,23 @@
 		bam_rmnet_rev_drivers[n].probe = bam_rmnet_rev_probe;
 		bam_rmnet_rev_drivers[n].remove = bam_rmnet_rev_remove;
 		tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
-		if (tempname == NULL)
-			return -ENOMEM;
+		if (tempname == NULL) {
+			netdevs_rev[n] = NULL;
+			ret = -ENOMEM;
+			goto error;
+		}
 		scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
 					(n+BAM_DMUX_DATA_REV_RMNET_0));
 		bam_rmnet_rev_drivers[n].driver.name = tempname;
 		bam_rmnet_rev_drivers[n].driver.owner = THIS_MODULE;
-		ret = platform_driver_register(&bam_rmnet_rev_drivers[n]);
-		if (ret) {
-			pr_err("%s: new rev driver registration failed n=%d rc=%d\n",
-					__func__, n, ret);
-			return ret;
-		}
+		p->bam_pdev = &bam_rmnet_rev_drivers[n];
 	}
 	return 0;
+
+error:
+	unregister_netdev(dev);
+	free_netdev(dev);
+	return ret;
 }
 
 module_init(rmnet_init);
diff --git a/drivers/nfc/nfc-nci.c b/drivers/nfc/nfc-nci.c
index e832716..87c7c30 100644
--- a/drivers/nfc/nfc-nci.c
+++ b/drivers/nfc/nfc-nci.c
@@ -49,6 +49,7 @@
 #define MAX_QCA_REG		(116)
 
 static int nfc_i2c_write(struct i2c_client *client, u8 *buf, int len);
+static int nfcc_initialise(struct i2c_client *client, unsigned short curr_addr);
 
 struct qca199x_dev {
 	wait_queue_head_t read_wq;
@@ -167,6 +168,8 @@
 		count = MAX_BUFFER_SIZE;
 
 	mutex_lock(&qca199x_dev->read_mutex);
+	memset(tmp, 0, sizeof(tmp));
+	memset(len, 0, sizeof(len));
 	dmode = device_mode.handle_flavour;
 	/* FTM-RAW-I2C RD/WR MODE - Special Case */
 	if ((dmode == UNSOLICITED_FTM_RAW_MODE) ||
@@ -199,20 +202,25 @@
 	/* NORMAL NCI Behaviour */
 	/* Read the header */
 	ret = i2c_master_recv(qca199x_dev->client, len, PAYLOAD_HEADER_LENGTH);
-	if (ret != PAYLOAD_HEADER_LENGTH)
+	if (ret != PAYLOAD_HEADER_LENGTH) {
+		total = 0;
 		goto err;
+	}
 	length = len[PAYLOAD_HEADER_LENGTH - 1];
-
+	if (length == 0)
+		total = 0;
 	/** make sure full packet fits in the buffer **/
 	if ((length > 0) && ((length + PAYLOAD_HEADER_LENGTH) <= count)) {
 		/* Read the packet */
 		ret = i2c_master_recv(qca199x_dev->client, tmp, (length +
 			PAYLOAD_HEADER_LENGTH));
-		if (ret < 0)
+		if (ret < 0) {
+			total = 0;
 			goto err;
+		}
 		total = (length + PAYLOAD_HEADER_LENGTH);
 	}
-	mutex_unlock(&qca199x_dev->read_mutex);
+
 	if (total > 0) {
 		if ((total > count) || copy_to_user(buf, tmp, total)) {
 			dev_err(&qca199x_dev->client->dev,
@@ -221,6 +229,7 @@
 			total = -EFAULT;
 		}
 	}
+	mutex_unlock(&qca199x_dev->read_mutex);
 err:
 	if (ret < 0)
 		mutex_unlock(&qca199x_dev->read_mutex);
@@ -375,6 +384,7 @@
 			goto err_req;
 		}
 		gpio_set_value(qca199x_dev->dis_gpio, 0);
+		msleep(20);
 	} else if (arg == 1) {
 		gpio_set_value(qca199x_dev->dis_gpio, 0);
 		r = gpio_direction_output(qca199x_dev->dis_gpio, 1);
@@ -385,12 +395,20 @@
 			goto err_req;
 		}
 		gpio_set_value(qca199x_dev->dis_gpio, 1);
+		usleep(1000);
 	} else if (arg == 2) {
-		msleep(20);
+		r = nfcc_initialise(qca199x_dev->client, 0xE);
+		if (r) {
+			dev_err(&qca199x_dev->client->dev,
+					"nfc-nci: nfcc initialise request failed\n");
+			goto err_req;
+		}
 	} else if (arg == 3) {
 		msleep(20);
 	} else if (arg == 4) {
+		mutex_lock(&qca199x_dev->read_mutex);
 		nfcc_wake(NFCC_WAKE, filp);
+		mutex_unlock(&qca199x_dev->read_mutex);
 	} else if (arg == 5) {
 		nfcc_wake(NFCC_SLEEP, filp);
 	} else {
@@ -568,7 +586,7 @@
 	return r;
 }
 
-int nfcc_initialise(struct i2c_client *client, unsigned short curr_addr)
+static int nfcc_initialise(struct i2c_client *client, unsigned short curr_addr)
 {
 	int r = 0;
 	unsigned char raw_1p8_CONTROL_011[]	= {0x11, XTAL_CLOCK};
@@ -764,9 +782,9 @@
 		dev_err(&client->dev, "dis gpio not provided\n");
 		goto err_irq;
 	}
-	gpio_set_value(qca199x_dev->dis_gpio, 1);
+	gpio_set_value(platform_data->dis_gpio, 1);/* HPD */
 	msleep(20);
-	gpio_set_value(qca199x_dev->dis_gpio, 0);
+	gpio_set_value(platform_data->dis_gpio, 0);/* ULPM */
 
 	nfc_clk  = clk_get(&client->dev, "ref_clk");
 
diff --git a/drivers/of/of_batterydata.c b/drivers/of/of_batterydata.c
index c2585a7..977a1e0 100644
--- a/drivers/of/of_batterydata.c
+++ b/drivers/of/of_batterydata.c
@@ -157,6 +157,8 @@
 
 #define OF_PROP_READ(property, qpnp_dt_property, node, rc, optional)	\
 do {									\
+	if (rc)								\
+		break;							\
 	rc = of_property_read_u32(node, "qcom," qpnp_dt_property,	\
 					&property);			\
 									\
@@ -166,7 +168,6 @@
 	} else if (rc) {						\
 		pr_err("Error reading " #qpnp_dt_property		\
 				" property rc = %d\n", rc);		\
-		return rc;						\
 	}								\
 } while (0)
 
@@ -234,6 +235,8 @@
 	node = batterydata_container_node;
 	OF_PROP_READ(rpull_up_kohm, "rpull-up-kohm", node, rc, false);
 	OF_PROP_READ(vadc_vdd_uv, "vref-batt-therm", node, rc, false);
+	if (rc)
+		return rc;
 
 	batt_id_kohm = of_batterydata_convert_battery_id_kohm(batt_id_uv,
 					rpull_up_kohm, vadc_vdd_uv);
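
With the reworked OF_PROP_READ, a failed read latches rc and every later invocation breaks out immediately, so the caller tests rc once after the whole group of reads. A userspace imitation of that macro pattern; read_u32() and the property names are stubs standing in for of_property_read_u32() and the real bindings:

#include <stdio.h>
#include <string.h>

/* Stub for of_property_read_u32(): returns 0 on success, -1 if missing. */
static int read_u32(const char *name, int *out)
{
	if (strcmp(name, "rpull-up-kohm") == 0) { *out = 100; return 0; }
	if (strcmp(name, "vref-batt-therm") == 0) { *out = 1800000; return 0; }
	return -1;
}

/* Skip further reads once rc is already set; report the error once. */
#define PROP_READ(var, name, rc)				\
do {								\
	if (rc)							\
		break;						\
	rc = read_u32(name, &(var));				\
	if (rc)							\
		fprintf(stderr, "missing property %s\n", name);	\
} while (0)

int main(void)
{
	int rpull = 0, vref = 0, bogus = 0, rc = 0;

	PROP_READ(rpull, "rpull-up-kohm", rc);
	PROP_READ(bogus, "does-not-exist", rc);	/* sets rc */
	PROP_READ(vref, "vref-batt-therm", rc);	/* skipped because rc != 0 */

	printf("rc=%d rpull=%d vref=%d\n", rc, rpull, vref);
	return rc ? 1 : 0;
}
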
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index efb78e5..e6493b1 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -1,6 +1,10 @@
 #
 # Makefile for the MSM specific device drivers.
 #
+
+ccflags-y += -Idrivers/misc/
+
+
 obj-$(CONFIG_MSM_SSBI) += ssbi.o
 obj-$(CONFIG_USB_BAM) += usb_bam.o
 obj-$(CONFIG_IPA) += ipa/
diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c
index 897dc2a..9471188 100644
--- a/drivers/platform/msm/qpnp-revid.c
+++ b/drivers/platform/msm/qpnp-revid.c
@@ -30,7 +30,10 @@
 	"PM8841",
 	"PM8019",
 	"PM8226",
-	"PM8110"
+	"PM8110",
+	"PMA8084",
+	"PMI8962",
+	"PMD9635",
 };
 
 static struct of_device_id qpnp_revid_match_table[] = {
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 6ccfd29..4f10cf8 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -81,6 +81,7 @@
 
 u32 d_type;
 bool enhd_pipe;
+bool imem;
 
 static void sps_device_de_init(void);
 
@@ -2430,13 +2431,16 @@
 
 	resource = platform_get_resource(pdev, IORESOURCE_MEM, 2);
 	if (resource) {
+		imem = true;
 		sps->pipemem_phys_base = resource->start;
 		sps->pipemem_size = resource_size(resource);
 		SPS_DBG("sps:pipemem.base=0x%x,size=0x%x.",
 			sps->pipemem_phys_base,
 			sps->pipemem_size);
-	} else
+	} else {
+		imem = false;
 		SPS_DBG("sps:No pipe memory on this target.\n");
+	}
 
 	resource  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (resource) {
diff --git a/drivers/platform/msm/sps/sps_bam.c b/drivers/platform/msm/sps/sps_bam.c
index d972e7b..bd4328a 100644
--- a/drivers/platform/msm/sps/sps_bam.c
+++ b/drivers/platform/msm/sps/sps_bam.c
@@ -445,6 +445,10 @@
 	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE)) {
 		/* No, so just mark it disabled */
 		dev->state &= ~BAM_STATE_ENABLED;
+		if ((dev->state & BAM_STATE_IRQ) && (dev->props.irq > 0)) {
+			free_irq(dev->props.irq, dev);
+			dev->state &= ~BAM_STATE_IRQ;
+		}
 		return 0;
 	}
 
diff --git a/drivers/platform/msm/sps/sps_mem.c b/drivers/platform/msm/sps/sps_mem.c
index faa1618..34a2001 100644
--- a/drivers/platform/msm/sps/sps_mem.c
+++ b/drivers/platform/msm/sps/sps_mem.c
@@ -109,7 +109,7 @@
 	/* 2^8=128. The desc-fifo and data-fifo minimal allocation. */
 	int min_alloc_order = 8;
 
-	if ((d_type == 0) || (d_type == 2)) {
+	if ((d_type == 0) || (d_type == 2) || imem) {
 		iomem_phys = pipemem_phys_base;
 		iomem_size = pipemem_size;
 
@@ -136,7 +136,7 @@
 		return -ENOMEM;
 	}
 
-	if ((d_type == 0) || (d_type == 2)) {
+	if ((d_type == 0) || (d_type == 2) || imem) {
 		res = gen_pool_add(pool, (u32) iomem_virt, iomem_size, nid);
 		if (res)
 			return res;
diff --git a/drivers/platform/msm/sps/spsi.h b/drivers/platform/msm/sps/spsi.h
index f65fef7..353ece6 100644
--- a/drivers/platform/msm/sps/spsi.h
+++ b/drivers/platform/msm/sps/spsi.h
@@ -50,6 +50,7 @@
 
 extern u32 d_type;
 extern bool enhd_pipe;
+extern bool imem;
 
 #ifdef CONFIG_DEBUG_FS
 extern u8 debugfs_record_enabled;
diff --git a/drivers/platform/msm/ssm.c b/drivers/platform/msm/ssm.c
index 3afb954..1544b14 100644
--- a/drivers/platform/msm/ssm.c
+++ b/drivers/platform/msm/ssm.c
@@ -23,7 +23,6 @@
 #include <linux/mutex.h>
 #include <linux/ion.h>
 #include <linux/types.h>
-#include <linux/firmware.h>
 #include <linux/elf.h>
 #include <linux/platform_device.h>
 #include <linux/msm_ion.h>
@@ -31,45 +30,33 @@
 #include <mach/scm.h>
 #include <mach/msm_smd.h>
 
+#include "qseecom_kernel.h"
 #include "ssm.h"
 
 /* Macros */
 #define SSM_DEV_NAME			"ssm"
 #define MPSS_SUBSYS			0
 #define SSM_INFO_CMD_ID			1
-#define QSEOS_CHECK_VERSION_CMD		0x00001803
-
 #define MAX_APP_NAME_SIZE		32
-#define SSM_MSG_LEN			(104  + 4) /* bytes + pad */
+#define SSM_MSG_LEN			200
 #define SSM_MSG_FIELD_LEN		11
-#define SSM_HEADER_LEN			(SSM_MSG_FIELD_LEN * 4)
-#define ATOM_MSG_LEN			(SSM_HEADER_LEN + SSM_MSG_LEN)
-#define FIRMWARE_NAME			"ssmapp"
-#define TZAPP_NAME			"SsmApp"
-#define CHANNEL_NAME			"SSM_RTR"
+#define ATOM_MSG_LEN			(SSM_MSG_FIELD_LEN + SSM_MSG_LEN + 40)
 
-#define ALIGN_BUFFER(size)		((size + 4095) & ~4095)
+#define TZAPP_NAME			"SsmApp"
+#define CHANNEL_NAME			"SSM_RTR_MODEM_APPS"
 
 /* SSM driver structure.*/
 struct ssm_driver {
-	int32_t app_id;
 	int32_t app_status;
 	int32_t update_status;
-	int32_t atom_replay;
-	int32_t mtoa_replay;
-	uint32_t buff_len;
 	unsigned char *channel_name;
 	unsigned char *smd_buffer;
-	struct ion_client *ssm_ion_client;
-	struct ion_handle *ssm_ion_handle;
-	struct tzapp_get_mode_info_rsp *resp;
 	struct device *dev;
 	smd_channel_t *ch;
-	ion_phys_addr_t buff_phys;
-	void *buff_virt;
-	dev_t ssm_device_no;
 	struct work_struct ipc_work;
 	struct mutex mutex;
+	struct qseecom_handle *qseecom_handle;
+	struct tzapp_get_mode_info_rsp *resp;
 	bool key_status;
 	bool ready;
 };
@@ -87,32 +74,45 @@
 }
 
 /*
+ * Setup CMD/RSP pointers.
+ */
+static void setup_cmd_rsp_buffers(struct qseecom_handle *handle, void **cmd,
+		int *cmd_len, void **resp, int *resp_len)
+{
+	*cmd = handle->sbuf;
+	if (*cmd_len & QSEECOM_ALIGN_MASK)
+		*cmd_len = QSEECOM_ALIGN(*cmd_len);
+
+	*resp = handle->sbuf + *cmd_len;
+	if (*resp_len & QSEECOM_ALIGN_MASK)
+		*resp_len = QSEECOM_ALIGN(*resp_len);
+}
+
+/*
  * Send packet to modem over SMD channel.
  */
 static int update_modem(enum ssm_ipc_req ipc_req, struct ssm_driver *ssm,
 		int length, char *data)
 {
-	unsigned int packet_len = SSM_HEADER_LEN + length + 1;
-	int rc = 0;
+	unsigned int packet_len = length + SSM_MSG_FIELD_LEN;
+	int rc = 0, count;
 
-	ssm->atom_replay += 1;
-	snprintf(ssm->smd_buffer, SSM_HEADER_LEN + 1, "%10u|%10u|%10u|%10u|"
-			, packet_len, ssm->atom_replay, ipc_req, length);
-	memcpy(ssm->smd_buffer + SSM_HEADER_LEN, data, length);
-
-	ssm->smd_buffer[packet_len - 1] = '|';
+	snprintf(ssm->smd_buffer, SSM_MSG_FIELD_LEN + 1, "%10u|", ipc_req);
+	memcpy(ssm->smd_buffer + SSM_MSG_FIELD_LEN, data, length);
 
 	if (smd_write_avail(ssm->ch) < packet_len) {
 		dev_err(ssm->dev, "Not enough space dropping request\n");
 		rc = -ENOSPC;
+		goto out;
 	}
 
-	rc = smd_write(ssm->ch, ssm->smd_buffer, packet_len);
-	if (rc < packet_len) {
+	count = smd_write(ssm->ch, ssm->smd_buffer, packet_len);
+	if (count < packet_len) {
 		dev_err(ssm->dev, "smd_write failed for %d\n", ipc_req);
 		rc = -EIO;
 	}
 
+out:
 	return rc;
 }
 
@@ -120,144 +120,44 @@
  * Header Format
  * Each member of header is of 10 byte (ASCII).
  * Each entry is separated by '|' delimiter.
- * |<-10 bytes->|<-10 bytes->|<-10 bytes->|<-10 bytes->|<-10 bytes->|
- * |-----------------------------------------------------------------
- * | length     | replay no. | request    | msg_len    | message    |
- * |-----------------------------------------------------------------
+ * |<-10 bytes->|<-10 bytes->|
+ * |-------------------------|
+ * | IPC code   | error code |
+ * |-------------------------|
  *
  */
-static int  decode_header(char *buffer, int length,
-		struct ssm_common_msg *pkt)
+static int decode_packet(char *buffer, struct ssm_common_msg *pkt)
 {
 	int rc;
 
-	rc =  getint(buffer, &pkt->pktlen);
-	if (rc < 0)
-		return -EINVAL;
-
-	buffer += SSM_MSG_FIELD_LEN;
-	rc =  getint(buffer, &pkt->replaynum);
-	if (rc < 0)
-		return -EINVAL;
-
-	buffer += SSM_MSG_FIELD_LEN;
 	rc =  getint(buffer, (unsigned long *)&pkt->ipc_req);
 	if (rc < 0)
 		return -EINVAL;
 
 	buffer += SSM_MSG_FIELD_LEN;
-	rc =  getint(buffer, &pkt->msg_len);
-	if ((rc < 0) || (pkt->msg_len > SSM_MSG_LEN))
+	rc =  getint(buffer, (unsigned long *)&pkt->err_code);
+	if (rc < 0)
 		return -EINVAL;
 
-	pkt->msg = buffer + SSM_MSG_FIELD_LEN;
-
-	dev_dbg(ssm_drv->dev, "len %lu rep %lu req %d msg_len %lu\n",
-			pkt->pktlen, pkt->replaynum, pkt->ipc_req,
-			pkt->msg_len);
+	dev_dbg(ssm_drv->dev, "req %d error code %d\n",
+			pkt->ipc_req, pkt->err_code);
 	return 0;
 }
 
-/*
- * Decode address for storing the decryption key.
- * Only for Key Exchange
- * Message Format
- * |Length@Address|
- */
-static int decode_message(char *msg, unsigned int len, unsigned long *length,
-		unsigned long *address)
+static void process_message(struct ssm_common_msg pkt, struct ssm_driver *ssm)
 {
-	int i = 0, rc = 0;
-	char *buff;
 
-	buff = kzalloc(len, GFP_KERNEL);
-	if (!buff)
-		return -ENOMEM;
-	while (i < len) {
-		if (msg[i] == '@')
-			break;
-		i++;
-	}
-	if ((i < len) && (msg[i] == '@')) {
-		memcpy(buff, msg, i);
-		buff[i] = '\0';
-		rc = kstrtoul(skip_spaces(buff), 10, length);
-		if (rc || (length <= 0)) {
-			rc = -EINVAL;
-			goto exit;
-		}
-		memcpy(buff, &msg[i + 1], len - (i + 1));
-		buff[len - i] = '\0';
-		rc = kstrtoul(skip_spaces(buff), 10, address);
-	} else
-		rc = -EINVAL;
-
-exit:
-	kfree(buff);
-	return rc;
-}
-
-static void process_message(int cmd, char *msg, int len,
-		struct ssm_driver *ssm)
-{
-	int rc;
-	unsigned long key_len = 0, key_add = 0, val;
-	struct ssm_keyexchg_req req;
-
-	switch (cmd) {
-	case SSM_MTOA_KEY_EXCHANGE:
-		if (len < 3) {
-			dev_err(ssm->dev, "Invalid message\n");
-			break;
-		}
-
-		if (ssm->key_status) {
-			dev_err(ssm->dev, "Key exchange already done\n");
-			break;
-		}
-
-		rc = decode_message(msg, len, &key_len, &key_add);
-		if (rc) {
-			rc = update_modem(SSM_ATOM_KEY_STATUS, ssm,
-					1, "1");
-			break;
-		}
-
-		/*
-		 * We are doing key-exchange part here as it is very
-		 * specific for this case. For all other tz
-		 * communication we have generic function.
-		 */
-		req.ssid = MPSS_SUBSYS;
-		req.address = (void *)key_add;
-		req.length = key_len;
-		req.status = (uint32_t *)ssm->buff_phys;
-
-		*(unsigned int *)ssm->buff_virt = -1;
-		rc = scm_call(KEY_EXCHANGE, 0x1, &req,
-				sizeof(struct ssm_keyexchg_req), NULL, 0);
-		if (rc) {
-			dev_err(ssm->dev, "Call for key exchg failed %d", rc);
-			rc = update_modem(SSM_ATOM_KEY_STATUS, ssm,
-								1, "1");
-		} else {
-			/* Success encode packet and update modem */
-			rc = update_modem(SSM_ATOM_KEY_STATUS, ssm,
-					1, "0");
-			ssm->key_status = true;
-		}
-		break;
+	switch (pkt.ipc_req) {
 
 	case SSM_MTOA_MODE_UPDATE_STATUS:
-		msg[len] = '\0';
-		rc = kstrtoul(skip_spaces(msg), 10, &val);
-		if (val) {
+		if (pkt.err_code) {
 			dev_err(ssm->dev, "Modem mode update failed\n");
 			ssm->update_status = FAILED;
 		} else
 			ssm->update_status = SUCCESS;
 
-		dev_dbg(ssm->dev, "Modem mode update status %lu\n", val);
+		dev_dbg(ssm->dev, "Modem mode update status %d\n",
+								pkt.err_code);
 		break;
 
 	default:
@@ -279,7 +179,7 @@
 
 	mutex_lock(&ssm->mutex);
 	sz = smd_cur_packet_size(ssm->ch);
-	if ((sz <= 0) || (sz > ATOM_MSG_LEN)) {
+	if ((sz < SSM_MSG_FIELD_LEN) || (sz > ATOM_MSG_LEN)) {
 		dev_dbg(ssm_drv->dev, "Garbled message size\n");
 		goto unlock;
 	}
@@ -289,35 +189,18 @@
 		goto unlock;
 	}
 
-	if (sz < SSM_HEADER_LEN) {
-		dev_err(ssm_drv->dev, "Invalid packet\n");
-		goto unlock;
-	}
-
 	if (smd_read(ssm->ch, ssm->smd_buffer, sz) != sz) {
 		dev_err(ssm_drv->dev, "Incomplete data\n");
 		goto unlock;
 	}
 
-	rc = decode_header(ssm->smd_buffer, sz, &pkt);
+	rc = decode_packet(ssm->smd_buffer, &pkt);
 	if (rc < 0) {
 		dev_err(ssm_drv->dev, "Corrupted header\n");
 		goto unlock;
 	}
 
-	/* Check validity of message */
-	if (ssm->mtoa_replay >= (int)pkt.replaynum) {
-		dev_err(ssm_drv->dev, "Replay attack...\n");
-		goto unlock;
-	}
-
-	if (pkt.msg[pkt.msg_len] != '|') {
-		dev_err(ssm_drv->dev, "Garbled message\n");
-		goto unlock;
-	}
-
-	ssm->mtoa_replay = pkt.replaynum;
-	process_message(pkt.ipc_req, pkt.msg, pkt.msg_len, ssm);
+	process_message(pkt, ssm);
 
 unlock:
 	mutex_unlock(&ssm->mutex);
@@ -335,8 +218,7 @@
 	switch (event) {
 	case SMD_EVENT_OPEN:
 	case SMD_EVENT_CLOSE:
-		dev_info(ssm->dev, "Port %s\n",
-			(event == SMD_EVENT_OPEN) ? "opened" : "closed");
+		dev_dbg(ssm->dev, "SMD port status changed\n");
 		break;
 	case SMD_EVENT_DATA:
 		if (smd_read_avail(ssm->ch) > 0)
@@ -346,280 +228,22 @@
 }
 
 /*
- * Communication interface between ssm driver and TZ.
- */
-static int tz_scm_call(struct ssm_driver *ssm, void *tz_req, int tz_req_len,
-			void **tz_resp, int tz_resp_len)
-{
-	int rc;
-	struct common_req req;
-	struct common_resp resp;
-
-	memcpy((void *)ssm->buff_virt, tz_req, tz_req_len);
-
-	req.cmd_id = CLIENT_SEND_DATA_COMMAND;
-	req.app_id = ssm->app_id;
-	req.req_ptr = (void *)ssm->buff_phys;
-	req.req_len = tz_req_len;
-	req.resp_ptr = (void *)(ssm->buff_phys + tz_req_len);
-	req.resp_len = tz_resp_len;
-
-	rc = scm_call(SCM_SVC_TZSCHEDULER, 1, (const void *) &req,
-			sizeof(req), (void *)&resp, sizeof(resp));
-	if (rc) {
-		dev_err(ssm->dev, "SCM call failed for data command\n");
-		return rc;
-	}
-
-	if (resp.result != RESULT_SUCCESS) {
-		dev_err(ssm->dev, "Data command response failure %d\n",
-				resp.result);
-		return -EINVAL;
-	}
-
-	*tz_resp = (void *)(ssm->buff_virt + tz_req_len);
-
-	return rc;
-}
-
-/*
  * Load SSM application in TZ and start application:
- * 1. Check if SSM application is already loaded.
- * 2. Load SSM application firmware.
- * 3. Start SSM application in TZ.
  */
 static int ssm_load_app(struct ssm_driver *ssm)
 {
-	unsigned char name[MAX_APP_NAME_SIZE], *pos;
-	int rc, i, fw_count;
-	uint32_t buff_len, size = 0, ion_len;
-	struct check_app_req app_req;
-	struct scm_resp app_resp;
-	struct load_app app_img_info;
-	const struct firmware **fw, *fw_mdt;
-	const struct elf32_hdr *ehdr;
-	const struct elf32_phdr *phdr;
-	struct ion_handle *ion_handle;
-	ion_phys_addr_t buff_phys;
-	void *buff_virt;
+	int rc;
 
-	/* Check if TZ app already loaded */
-	app_req.cmd_id = APP_LOOKUP_COMMAND;
-	memcpy(app_req.app_name, TZAPP_NAME, MAX_APP_NAME_SIZE);
-
-	rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &app_req,
-				sizeof(struct check_app_req),
-				&app_resp, sizeof(app_resp));
-	if (rc) {
-		dev_err(ssm->dev, "SCM call failed for LOOKUP COMMAND\n");
-		return -EINVAL;
-	}
-
-	if (app_resp.result == RESULT_FAILURE)
-		ssm->app_id = 0;
-	else
-		ssm->app_id = app_resp.data;
-
-	if (ssm->app_id) {
-		rc = 0;
-		dev_info(ssm->dev, "TZAPP already loaded...\n");
-		goto out;
-	}
-
-	/* APP not loaded get the firmware */
-	/* Get .mdt first */
-	rc =  request_firmware(&fw_mdt, FIRMWARE_NAME".mdt", ssm->dev);
-	if (rc) {
-		dev_err(ssm->dev, "Unable to get mdt file %s\n",
-						FIRMWARE_NAME".mdt");
-		rc = -EIO;
-		goto out;
-	}
-
-	if (fw_mdt->size < sizeof(*ehdr)) {
-		dev_err(ssm->dev, "Not big enough to be an elf header\n");
-		rc = -EIO;
-		goto release_mdt;
-	}
-
-	ehdr = (struct elf32_hdr *)fw_mdt->data;
-	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
-		dev_err(ssm->dev, "Not an elf header\n");
-		rc = -EIO;
-		goto release_mdt;
-	}
-
-	if (ehdr->e_phnum == 0) {
-		dev_err(ssm->dev, "No loadable segments\n");
-		rc = -EIO;
-		goto release_mdt;
-	}
-
-	phdr = (const struct elf32_phdr *)(fw_mdt->data +
-					sizeof(struct elf32_hdr));
-
-	fw = kzalloc((sizeof(struct firmware *) * ehdr->e_phnum), GFP_KERNEL);
-	if (!fw) {
-		rc = -ENOMEM;
-		goto release_mdt;
-	}
-
-	/* Valid .mdt now we need to load other parts .b0* */
-	for (fw_count = 0; fw_count < ehdr->e_phnum ; fw_count++) {
-		snprintf(name, MAX_APP_NAME_SIZE, FIRMWARE_NAME".b%02d",
-								fw_count);
-		rc = request_firmware(&fw[fw_count], name, ssm->dev);
-		if (rc < 0) {
-			rc = -EIO;
-			dev_err(ssm->dev, "Unable to get blob file\n");
-			goto release_blob;
-		}
-
-		if (fw[fw_count]->size != phdr->p_filesz) {
-			dev_err(ssm->dev, "Blob size %u doesn't match %u\n",
-					fw[fw_count]->size, phdr->p_filesz);
-			rc = -EIO;
-			goto release_blob;
-		}
-
-		phdr++;
-		size += fw[fw_count]->size;
-	}
-
-	/* Ion allocation for loading tzapp */
-	/* ION buffer size 4k aligned */
-	ion_len = ALIGN_BUFFER(size);
-	ion_handle = ion_alloc(ssm_drv->ssm_ion_client,
-			ion_len, SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
-	if (IS_ERR_OR_NULL(ion_handle)) {
-		rc = PTR_ERR(ion_handle);
-		dev_err(ssm->dev, "Unable to get ion handle\n");
-		goto release_blob;
-	}
-
-	rc = ion_phys(ssm_drv->ssm_ion_client, ion_handle,
-			&buff_phys, &buff_len);
+	/* Load the APP */
+	rc = qseecom_start_app(&ssm->qseecom_handle, TZAPP_NAME, SZ_4K);
 	if (rc < 0) {
-		dev_err(ssm->dev, "Unable to get ion physical address\n");
-		goto ion_free;
+		dev_err(ssm->dev, "Unable to load SSM app\n");
+		ssm->app_status = FAILED;
+		return -EIO;
 	}
 
-	if (buff_len < size) {
-		rc = -ENOMEM;
-		goto ion_free;
-	}
-
-	buff_virt = ion_map_kernel(ssm_drv->ssm_ion_client,
-				ion_handle);
-	if (IS_ERR_OR_NULL((void *)buff_virt)) {
-		rc = PTR_ERR((void *)buff_virt);
-		dev_err(ssm->dev, "Unable to get ion virtual address\n");
-		goto ion_free;
-	}
-
-	/* Copy firmware to ION memory */
-	memcpy((unsigned char *)buff_virt, fw_mdt->data, fw_mdt->size);
-	pos = (unsigned char *)buff_virt + fw_mdt->size;
-	for (i = 0; i < ehdr->e_phnum; i++) {
-		memcpy(pos, fw[i]->data, fw[i]->size);
-		pos += fw[i]->size;
-	}
-
-	/* Loading app */
-	app_img_info.cmd_id = APP_START_COMMAND;
-	app_img_info.mdt_len = fw_mdt->size;
-	app_img_info.img_len = size;
-	app_img_info.phy_addr = buff_phys;
-
-	/* SCM call to load the TZ APP */
-	rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &app_img_info,
-		sizeof(struct load_app), &app_resp, sizeof(app_resp));
-	if (rc) {
-		rc = -EIO;
-		dev_err(ssm->dev, "SCM call to load APP failed\n");
-		goto ion_unmap;
-	}
-
-	if (app_resp.result == RESULT_FAILURE) {
-		rc = -EIO;
-		dev_err(ssm->dev, "SCM command to load TzAPP failed\n");
-		goto ion_unmap;
-	}
-
-	ssm->app_id = app_resp.data;
 	ssm->app_status = SUCCESS;
-
-ion_unmap:
-	ion_unmap_kernel(ssm_drv->ssm_ion_client, ion_handle);
-ion_free:
-	ion_free(ssm_drv->ssm_ion_client, ion_handle);
-release_blob:
-	while (--fw_count >= 0)
-		release_firmware(fw[fw_count]);
-	kfree(fw);
-release_mdt:
-	release_firmware(fw_mdt);
-out:
-	return rc;
-}
-
-/*
- * Allocate buffer for transactions.
- */
-static int ssm_setup_ion(struct ssm_driver *ssm)
-{
-	int rc = 0;
-	unsigned int size;
-
-	size = ALIGN_BUFFER(ATOM_MSG_LEN);
-
-	/* ION client for communicating with TZ */
-	ssm->ssm_ion_client = msm_ion_client_create(UINT_MAX,
-							"ssm-kernel");
-	if (IS_ERR_OR_NULL(ssm->ssm_ion_client)) {
-		rc = PTR_ERR(ssm->ssm_ion_client);
-		dev_err(ssm->dev, "Ion client not created\n");
-		return rc;
-	}
-
-	/* Setup a small ION buffer for tz communication */
-	ssm->ssm_ion_handle = ion_alloc(ssm->ssm_ion_client,
-				size, SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
-	if (IS_ERR_OR_NULL(ssm->ssm_ion_handle)) {
-		rc = PTR_ERR(ssm->ssm_ion_handle);
-		dev_err(ssm->dev, "Unable to get ion handle\n");
-		goto out;
-	}
-
-	rc = ion_phys(ssm->ssm_ion_client, ssm->ssm_ion_handle,
-			&ssm->buff_phys, &ssm->buff_len);
-	if (rc < 0) {
-		dev_err(ssm->dev,
-			"Unable to get ion buffer physical address\n");
-		goto ion_free;
-	}
-
-	if (ssm->buff_len < size) {
-		rc = -ENOMEM;
-		goto ion_free;
-	}
-
-	ssm->buff_virt = ion_map_kernel(ssm->ssm_ion_client,
-				ssm->ssm_ion_handle);
-	if (IS_ERR_OR_NULL((void *)ssm->buff_virt)) {
-		rc = PTR_ERR((void *)ssm->buff_virt);
-		dev_err(ssm->dev,
-			"Unable to get ion buffer virtual address\n");
-		goto ion_free;
-	}
-
-	return rc;
-
-ion_free:
-	ion_free(ssm->ssm_ion_client, ssm->ssm_ion_handle);
-out:
-	ion_client_destroy(ssm_drv->ssm_ion_client);
-	return rc;
+	return 0;
 }
 
 static struct ssm_platform_data *populate_ssm_pdata(struct device *dev)
@@ -649,8 +273,6 @@
 static int __devinit ssm_probe(struct platform_device *pdev)
 {
 	int rc;
-	uint32_t system_call_id;
-	char legacy = '\0';
 	struct ssm_platform_data *pdata;
 	struct ssm_driver *drv;
 
@@ -671,10 +293,16 @@
 		return -ENOMEM;
 	}
 
+	/* Allocate response buffer */
+	drv->resp = devm_kzalloc(&pdev->dev,
+			sizeof(struct tzapp_get_mode_info_rsp),
+			GFP_KERNEL);
+	if (!drv->resp) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
 	/* Initialize the driver structure */
-	drv->atom_replay = -1;
-	drv->mtoa_replay = -1;
-	drv->app_id = -1;
 	drv->app_status = RETRY;
 	drv->ready = false;
 	drv->update_status = FAILED;
@@ -691,40 +319,6 @@
 		goto exit;
 	}
 
-	/* Allocate response buffer */
-	drv->resp = devm_kzalloc(&pdev->dev,
-				sizeof(struct tzapp_get_mode_info_rsp),
-				GFP_KERNEL);
-	if (!drv->resp) {
-		rc = -ENOMEM;
-		goto exit;
-	}
-
-
-	/* Check for TZ version */
-	system_call_id = QSEOS_CHECK_VERSION_CMD;
-	rc = scm_call(SCM_SVC_INFO, SSM_INFO_CMD_ID, &system_call_id,
-			sizeof(system_call_id), &legacy, sizeof(legacy));
-	if (rc) {
-		dev_err(&pdev->dev, "Get version failed %d\n", rc);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	/* This driver only support 1.4 TZ and QSEOS */
-	if (!legacy) {
-		dev_err(&pdev->dev,
-				"Driver doesn't support legacy version\n");
-		rc = -EINVAL;
-		goto exit;
-
-	}
-
-	/* Setup the ion buffer for transaction */
-	rc = ssm_setup_ion(drv);
-	if (rc < 0)
-		goto exit;
-
 	drv->dev = &pdev->dev;
 	ssm_drv = drv;
 	platform_set_drvdata(pdev, ssm_drv);
@@ -741,10 +335,6 @@
 
 static int __devexit ssm_remove(struct platform_device *pdev)
 {
-	int rc;
-
-	struct scm_shutdown_req req;
-	struct scm_resp resp;
 
 	if (!ssm_drv)
 		return 0;
@@ -758,20 +348,11 @@
 	smd_close(ssm_drv->ch);
 	flush_work_sync(&ssm_drv->ipc_work);
 
-	/* ION clean up*/
-	ion_unmap_kernel(ssm_drv->ssm_ion_client, ssm_drv->ssm_ion_handle);
-	ion_free(ssm_drv->ssm_ion_client, ssm_drv->ssm_ion_handle);
-	ion_client_destroy(ssm_drv->ssm_ion_client);
-
 	/* Shutdown tzapp */
-	req.app_id = ssm_drv->app_id;
-	req.cmd_id = APP_SHUTDOWN_COMMAND;
-	rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
-			&resp, sizeof(resp));
-	if (rc)
-		dev_err(&pdev->dev, "TZ_app Unload failed\n");
+	dev_dbg(&pdev->dev, "Shutting down TZapp\n");
+	qseecom_shutdown_app(&ssm_drv->qseecom_handle);
 
-	return rc;
+	return 0;
 }
 
 static struct of_device_id ssm_match_table[] = {
@@ -795,17 +376,15 @@
 /*
  * Interface for external OEM driver.
  * This interface supports following functionalities:
- * 1. Get TZAPP ID.
- * 2. Set default mode.
- * 3. Set mode (encrypted mode and it's length is passed as parameter).
- * 4. Set mode from TZ.
- * 5. Get status of mode update.
+ * 1. Set mode (the encrypted mode and its length are passed as parameters).
+ * 2. Set mode from TZ (read the encrypted mode from TZ).
+ * 3. Get status of mode update.
  *
  */
 int ssm_oem_driver_intf(int cmd, char *mode, int len)
 {
 	int rc, req_len, resp_len;
-	struct tzapp_get_mode_info_req get_mode_req;
+	struct tzapp_get_mode_info_req *get_mode_req;
 	struct tzapp_get_mode_info_rsp *get_mode_resp;
 
 	/* If ssm_drv is NULL, probe failed */
@@ -819,7 +398,6 @@
 		rc = ssm_load_app(ssm_drv);
 		if (rc) {
 			rc = -ENODEV;
-			ssm_drv->app_status = FAILED;
 			goto unlock;
 		}
 	} else if (ssm_drv->app_status == FAILED) {
@@ -851,23 +429,43 @@
 	case SSM_READY:
 		break;
 
-	case SSM_GET_APP_ID:
-		rc = ssm_drv->app_id;
-		break;
-
 	case SSM_MODE_INFO_READY:
 		ssm_drv->update_status = RETRY;
 		/* Fill command structure */
 		req_len = sizeof(struct tzapp_get_mode_info_req);
 		resp_len = sizeof(struct tzapp_get_mode_info_rsp);
-		get_mode_req.tzapp_ssm_cmd = GET_ENC_MODE;
-		rc = tz_scm_call(ssm_drv, (void *)&get_mode_req,
-				req_len, (void **)&get_mode_resp, resp_len);
+		setup_cmd_rsp_buffers(ssm_drv->qseecom_handle,
+				(void **)&get_mode_req, &req_len,
+				(void **)&get_mode_resp, &resp_len);
+		get_mode_req->tzapp_ssm_cmd = GET_ENC_MODE;
+
+		rc = qseecom_set_bandwidth(ssm_drv->qseecom_handle, 1);
 		if (rc) {
 			ssm_drv->update_status = FAILED;
+			dev_err(ssm_drv->dev, "set bandwidth failed\n");
+			rc = -EIO;
+			break;
+		}
+		rc = qseecom_send_command(ssm_drv->qseecom_handle,
+				(void *)get_mode_req, req_len,
+				(void *)get_mode_resp, resp_len);
+		if (rc || get_mode_resp->status) {
+			ssm_drv->update_status = FAILED;
 			break;
 		}
+		rc = qseecom_set_bandwidth(ssm_drv->qseecom_handle, 0);
+		if (rc) {
+			ssm_drv->update_status = FAILED;
+			dev_err(ssm_drv->dev, "clear bandwidth failed\n");
+			rc = -EIO;
+			break;
+		}
 
+		if (get_mode_resp->enc_mode_len > ENC_MODE_MAX_SIZE) {
+			ssm_drv->update_status = FAILED;
+			rc = -EINVAL;
+			break;
+		}
 		/* Send mode_info to modem */
 		rc = update_modem(SSM_ATOM_MODE_UPDATE, ssm_drv,
 				get_mode_resp->enc_mode_len,
@@ -899,19 +497,6 @@
 		rc = ssm_drv->update_status;
 		break;
 
-	case SSM_SET_DEFAULT_MODE:
-		/* Modem does not send response for this */
-		ssm_drv->update_status = RETRY;
-		rc = update_modem(SSM_ATOM_SET_DEFAULT_MODE, ssm_drv,
-				1, "0");
-		if (rc)
-			ssm_drv->update_status = FAILED;
-		else
-			/* For default mode we don't get any resp
-			 * from modem.
-			 */
-			ssm_drv->update_status = SUCCESS;
-		break;
 	default:
 		rc = -EINVAL;
 		dev_err(ssm_drv->dev, "Invalid command\n");
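For clarity, the reworked SSM_MODE_INFO_READY path above follows the usual qseecom request/response round-trip: carve the request and response out of the shared qseecom buffer, vote for bus bandwidth, send the command, drop the vote, then sanity-check the response before handing the mode blob to the modem. A minimal sketch of that pattern (setup_cmd_rsp_buffers() is the driver-local helper used above, the qseecom_* calls are the standard QSEECOM kernel API; error handling is trimmed):

	struct tzapp_get_mode_info_req *req;
	struct tzapp_get_mode_info_rsp *rsp;
	int req_len = sizeof(*req), rsp_len = sizeof(*rsp);
	int rc;

	/* req/rsp point into the qseecom shared buffer; lengths get aligned */
	setup_cmd_rsp_buffers(ssm_drv->qseecom_handle,
			(void **)&req, &req_len, (void **)&rsp, &rsp_len);
	req->tzapp_ssm_cmd = GET_ENC_MODE;

	qseecom_set_bandwidth(ssm_drv->qseecom_handle, 1);	/* vote high */
	rc = qseecom_send_command(ssm_drv->qseecom_handle,
			req, req_len, rsp, rsp_len);
	qseecom_set_bandwidth(ssm_drv->qseecom_handle, 0);	/* drop vote */

	if (rc || rsp->status || rsp->enc_mode_len > ENC_MODE_MAX_SIZE)
		return -EIO;	/* caller marks update_status = FAILED */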
diff --git a/drivers/platform/msm/ssm.h b/drivers/platform/msm/ssm.h
index 97add11..dcda1387 100644
--- a/drivers/platform/msm/ssm.h
+++ b/drivers/platform/msm/ssm.h
@@ -14,8 +14,7 @@
 #define __SSM_H_
 
 #define MAX_APP_NAME_SIZE	32
-#define MODE_INFO_MAX_SIZE	4
-#define ENC_MODE_MAX_SIZE	(100 + MODE_INFO_MAX_SIZE)
+#define ENC_MODE_MAX_SIZE	200
 
 /* tzapp response.*/
 enum tz_response {
@@ -30,35 +29,20 @@
 	KEY_EXCHANGE = 11,
 };
 
-/* Command list for QSEOS.*/
-enum qceos_cmd_id {
-	APP_START_COMMAND = 0x01,
-	APP_SHUTDOWN_COMMAND,
-	APP_LOOKUP_COMMAND,
-	CLIENT_SEND_DATA_COMMAND = 0x6,
-	QSEOS_CMD_MAX = 0xEFFFFFFF,
-};
-
 /* MODEM/SSM command list.*/
 enum ssm_ipc_req {
-	SSM_MTOA_KEY_EXCHANGE = 0x0000AAAA,
-	SSM_ATOM_KEY_STATUS,
+	SSM_IPC_MIN = 0x0000AAAB,
 	SSM_ATOM_MODE_UPDATE,
-	SSM_MTOA_MODE_UPDATE_STATUS,
-	SSM_MTOA_PREV_INVALID,
-	SSM_ATOM_PREV_INVALID,
-	SSM_ATOM_SET_DEFAULT_MODE,
+	SSM_MTOA_MODE_UPDATE_STATUS = SSM_IPC_MIN + 4,
 	SSM_INVALID_REQ,
 };
 
 /* OEM reuest commands list.*/
 enum oem_req {
 	SSM_READY,
-	SSM_GET_APP_ID,
 	SSM_MODE_INFO_READY,
 	SSM_SET_MODE,
 	SSM_GET_MODE_STATUS,
-	SSM_SET_DEFAULT_MODE,
 	SSM_INVALID,
 };
 
@@ -69,33 +53,6 @@
 	FAILED = -1,
 };
 
-__packed struct load_app {
-	uint32_t cmd_id;
-	uint32_t mdt_len;
-	uint32_t img_len;
-	uint32_t phy_addr;
-	char     app_name[MAX_APP_NAME_SIZE];
-};
-
-/* Stop tzapp reuest.*/
-__packed struct scm_shutdown_req {
-	uint32_t cmd_id;
-	uint32_t app_id;
-};
-
-/* Common tzos response.*/
-__packed struct scm_resp {
-	uint32_t result;
-	enum tz_response resp_type;
-	unsigned int data;
-};
-
-/* tzos request.*/
-__packed struct check_app_req {
-	uint32_t cmd_id;
-	char     app_name[MAX_APP_NAME_SIZE];
-};
-
 /* tzapp encode mode reuest.*/
 __packed struct tzapp_mode_enc_req {
 	uint32_t tzapp_ssm_cmd;
@@ -123,38 +80,11 @@
 	long status;
 };
 
-/* tzos key exchange request.*/
-__packed struct ssm_keyexchg_req {
-	uint32_t ssid;
-	void *address;
-	uint32_t length;
-	uint32_t *status;
-};
-
-/* tzos common request.*/
-__packed struct common_req {
-	uint32_t cmd_id;
-	uint32_t app_id;
-	void *req_ptr;
-	uint32_t req_len;
-	void *resp_ptr;
-	uint32_t resp_len;
-};
-
-/* tzos common response.*/
-__packed struct common_resp {
-	uint32_t result;
-	uint32_t type;
-	uint32_t data;
-};
-
 /* Modem/SSM packet format.*/
 struct ssm_common_msg {
-	unsigned long pktlen;
-	unsigned long replaynum;
 	enum ssm_ipc_req ipc_req;
-	unsigned long msg_len;
-	char *msg;
+	int err_code;
+
 };
 
 #endif
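With the explicit base above, the remaining IPC requests resolve to the following numeric values (plain arithmetic from the enum definition):

	SSM_IPC_MIN                 = 0x0000AAAB
	SSM_ATOM_MODE_UPDATE        = 0x0000AAAC	/* SSM_IPC_MIN + 1 */
	SSM_MTOA_MODE_UPDATE_STATUS = 0x0000AAAF	/* SSM_IPC_MIN + 4 */
	SSM_INVALID_REQ             = 0x0000AAB0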
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 7c73a82..408681c 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -1069,10 +1069,11 @@
 		info.connect_complete = 1;
 	spin_unlock(&usb_bam_ipa_handshake_info_lock);
 
-	if (info.cur_cons_state[HSUSB_BAM] == IPA_RM_RESOURCE_GRANTED) {
-		pr_debug("%s: Notify CONS_GRANTED\n", __func__);
+	if (info.cur_cons_state[cur_bam] == IPA_RM_RESOURCE_GRANTED) {
+		pr_debug("%s: Notify %s_CONS_GRANTED\n", __func__,
+			bam_enable_strings[cur_bam]);
 		ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
-				 ipa_rm_resource_cons[HSUSB_BAM]);
+				 ipa_rm_resource_cons[cur_bam]);
 	}
 }
 
@@ -1505,7 +1506,7 @@
 
 	ctx.pipes_enabled_per_bam[cur_bam] += 1;
 	spin_unlock(&usb_bam_lock);
-	if (ipa_params->dir == PEER_PERIPHERAL_TO_USB && cur_bam == HSUSB_BAM)
+	if (ipa_params->dir == PEER_PERIPHERAL_TO_USB)
 		notify_usb_connected(cur_bam);
 
 	if (cur_bam == HSUSB_BAM)
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index b518f1f..c246036 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -1970,6 +1970,11 @@
 		pr_debug("new delta ocv = %d\n", delta_ocv_uv);
 	}
 
+	if (wake_lock_active(&chip->low_voltage_wake_lock)) {
+		pr_debug("Low Voltage, apply only ibat limited corrections\n");
+		goto skip_limiting_corrections;
+	}
+
 	if (chip->last_ocv_uv > 3800000)
 		correction_limit_uv = the_chip->high_ocv_correction_limit_uv;
 	else
@@ -1986,6 +1991,7 @@
 		pr_debug("new delta ocv = %d\n", delta_ocv_uv);
 	}
 
+skip_limiting_corrections:
 	chip->last_ocv_uv -= delta_ocv_uv;
 
 	if (chip->last_ocv_uv >= chip->max_voltage_uv)
@@ -2278,7 +2284,6 @@
 	int new_calculated_soc;
 	static int firsttime = 1;
 
-	calib_hkadc_check(chip, batt_temp);
 	calculate_soc_params(chip, raw, batt_temp, chargecycles,
 						&fcc_uah,
 						&unusable_charge_uah,
@@ -2426,6 +2431,7 @@
 	get_batt_temp(chip, &batt_temp);
 
 	mutex_lock(&chip->last_ocv_uv_mutex);
+	calib_hkadc_check(chip, batt_temp);
 	read_soc_params_raw(chip, &raw, batt_temp);
 
 	soc = calculate_state_of_charge(chip, &raw,
@@ -2762,6 +2768,7 @@
 	get_batt_temp(the_chip, &batt_temp);
 
 	mutex_lock(&the_chip->last_ocv_uv_mutex);
+	calib_hkadc_check(the_chip, batt_temp);
 	read_soc_params_raw(the_chip, &raw, batt_temp);
 	mutex_unlock(&the_chip->last_ocv_uv_mutex);
 
@@ -2907,6 +2914,7 @@
 
 	mutex_lock(&the_chip->last_ocv_uv_mutex);
 
+	calib_hkadc_check(the_chip, batt_temp);
 	read_soc_params_raw(the_chip, &raw, batt_temp);
 
 	calculate_cc_uah(the_chip, raw.cc, &bms_end_cc_uah);
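Net effect of the pm8921-bms hunks above: the housekeeping-ADC calibration check is no longer buried inside calculate_state_of_charge(); each caller now runs it under last_ocv_uv_mutex immediately before sampling the raw SoC parameters, presumably so the calibration and the raw read happen atomically with respect to other OCV updates. The resulting call pattern at each site is simply:

	mutex_lock(&chip->last_ocv_uv_mutex);
	calib_hkadc_check(chip, batt_temp);	/* re-run HKADC calibration if needed */
	read_soc_params_raw(chip, &raw, batt_temp);
	/* ... use raw ... */
	mutex_unlock(&chip->last_ocv_uv_mutex);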
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index d5b2cc6..f3f59e6 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -298,6 +298,7 @@
 	bool				disable_aicl;
 	int				usb_type;
 	bool				disable_chg_rmvl_wrkarnd;
+	struct msm_xo_voter		*voter;
 };
 
 /* user space parameter to limit usb current */
@@ -4034,6 +4035,7 @@
 	int err;
 	u8 temp;
 
+	msm_xo_mode_vote(chip->voter, MSM_XO_MODE_ON);
 	temp  = 0xD1;
 	err = pm_chg_write(chip, CHG_TEST, temp);
 	if (err) {
@@ -4092,6 +4094,8 @@
 		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
 		return;
 	}
+
+	msm_xo_mode_vote(chip->voter, MSM_XO_MODE_OFF);
 }
 
 static void pm8921_chg_set_hw_clk_switching(struct pm8921_chg_chip *chip)
@@ -4099,6 +4103,7 @@
 	int err;
 	u8 temp;
 
+	msm_xo_mode_vote(chip->voter, MSM_XO_MODE_ON);
 	temp  = 0xD1;
 	err = pm_chg_write(chip, CHG_TEST, temp);
 	if (err) {
@@ -4112,6 +4117,7 @@
 		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
 		return;
 	}
+	msm_xo_mode_vote(chip->voter, MSM_XO_MODE_OFF);
 }
 
 #define VREF_BATT_THERM_FORCE_ON	BIT(7)
@@ -4800,6 +4806,7 @@
 	chip->ibatmax_max_adj_ma = find_ibat_max_adj_ma(
 					chip->max_bat_chg_current);
 
+	chip->voter = msm_xo_get(MSM_XO_TCXO_D0, "pm8921_charger");
 	rc = pm8921_chg_hw_init(chip);
 	if (rc) {
 		pr_err("couldn't init hardware rc=%d\n", rc);
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index aac0fa2..73c7e62 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -47,6 +47,7 @@
 #define BMS1_S1_DELAY_CTL		0x5A
 /* OCV interrupt threshold */
 #define BMS1_OCV_THR0			0x50
+#define BMS1_S2_SAMP_AVG_CTL		0x61
 /* SW CC interrupt threshold */
 #define BMS1_SW_CC_THR0			0xA0
 /* OCV for r registers */
@@ -71,6 +72,7 @@
 #define CHARGE_CYCLE_STORAGE_LSB	0xBE /* LSB=0xBE, MSB=0xBF */
 
 /* IADC Channel Select */
+#define IADC1_BMS_REVISION2		0x01
 #define IADC1_BMS_ADC_CH_SEL_CTL	0x48
 #define IADC1_BMS_ADC_INT_RSNSN_CTL	0x49
 #define IADC1_BMS_FAST_AVG_EN		0x5B
@@ -152,6 +154,7 @@
 
 	int				battery_present;
 	int				battery_status;
+	bool				batfet_closed;
 	bool				new_battery;
 	bool				done_charging;
 	bool				last_soc_invalid;
@@ -176,6 +179,7 @@
 
 	struct delayed_work		calculate_soc_delayed_work;
 	struct work_struct		recalc_work;
+	struct work_struct		batfet_open_work;
 
 	struct mutex			bms_output_lock;
 	struct mutex			last_ocv_uv_mutex;
@@ -271,6 +275,9 @@
 	bool				battery_removed;
 	struct bms_irq			sw_cc_thr_irq;
 	struct bms_irq			ocv_thr_irq;
+	struct qpnp_vadc_chip		*vadc_dev;
+	struct qpnp_iadc_chip		*iadc_dev;
+	struct qpnp_adc_tm_chip		*adc_tm_dev;
 };
 
 static struct of_device_id qpnp_bms_match_table[] = {
@@ -476,7 +483,7 @@
 	pr_debug("%u raw converted into %lld uv\n", reading, uv);
 	uv = adjust_vbatt_reading(chip, uv);
 	pr_debug("adjusted into %lld uv\n", uv);
-	rc = qpnp_vbat_sns_comp_result(&uv);
+	rc = qpnp_vbat_sns_comp_result(chip->vadc_dev, &uv);
 	if (rc)
 		pr_debug("could not compensate vbatt\n");
 	pr_debug("compensated into %lld uv\n", uv);
@@ -510,13 +517,13 @@
 	return result_uv;
 }
 
-static s64 cc_reverse_adjust_for_gain(s64 uv)
+static s64 cc_reverse_adjust_for_gain(struct qpnp_bms_chip *chip, s64 uv)
 {
 	struct qpnp_iadc_calib calibration;
 	int gain;
 	s64 result_uv;
 
-	qpnp_iadc_get_gain_and_offset(&calibration);
+	qpnp_iadc_get_gain_and_offset(chip->iadc_dev, &calibration);
 	gain = (int)calibration.gain_raw - (int)calibration.offset_raw;
 
 	pr_debug("reverse adjusting_uv = %lld\n", uv);
@@ -539,7 +546,7 @@
 {
 	struct qpnp_iadc_calib calibration;
 
-	qpnp_iadc_get_gain_and_offset(&calibration);
+	qpnp_iadc_get_gain_and_offset(chip->iadc_dev, &calibration);
 	return cc_adjust_for_gain(cc_reading_to_uv(reading),
 			calibration.gain_raw - calibration.offset_raw);
 }
@@ -582,7 +589,7 @@
 	temp_current = div_s64((vsense_uv * 1000000LL),
 				(int)chip->r_sense_uohm);
 
-	rc = qpnp_iadc_comp_result(&temp_current);
+	rc = qpnp_iadc_comp_result(chip->iadc_dev, &temp_current);
 	if (rc)
 		pr_debug("error compensation failed: %d\n", rc);
 
@@ -591,12 +598,12 @@
 	return 0;
 }
 
-static int get_battery_voltage(int *result_uv)
+static int get_battery_voltage(struct qpnp_bms_chip *chip, int *result_uv)
 {
 	int rc;
 	struct qpnp_vadc_result adc_result;
 
-	rc = qpnp_vadc_read(VBAT_SNS, &adc_result);
+	rc = qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &adc_result);
 	if (rc) {
 		pr_err("error reading adc channel = %d, rc = %d\n",
 					VBAT_SNS, rc);
@@ -650,14 +657,14 @@
 	int rc, raw_0625, raw_1250;
 	struct qpnp_vadc_result result;
 
-	rc = qpnp_vadc_read(REF_625MV, &result);
+	rc = qpnp_vadc_read(chip->vadc_dev, REF_625MV, &result);
 	if (rc) {
 		pr_debug("vadc read failed with rc = %d\n", rc);
 		return rc;
 	}
 	raw_0625 = result.adc_code;
 
-	rc = qpnp_vadc_read(REF_125V, &result);
+	rc = qpnp_vadc_read(chip->vadc_dev, REF_125V, &result);
 	if (rc) {
 		pr_debug("vadc read failed with rc = %d\n", rc);
 		return rc;
@@ -745,6 +752,11 @@
 	return get_battery_status(chip) == POWER_SUPPLY_STATUS_CHARGING;
 }
 
+static bool is_battery_full(struct qpnp_bms_chip *chip)
+{
+	return get_battery_status(chip) == POWER_SUPPLY_STATUS_FULL;
+}
+
 static bool is_battery_present(struct qpnp_bms_chip *chip)
 {
 	union power_supply_propval ret = {0,};
@@ -763,9 +775,22 @@
 	return false;
 }
 
-static bool is_battery_full(struct qpnp_bms_chip *chip)
+static bool is_batfet_closed(struct qpnp_bms_chip *chip)
 {
-	return get_battery_status(chip) == POWER_SUPPLY_STATUS_FULL;
+	union power_supply_propval ret = {0,};
+
+	if (chip->batt_psy == NULL)
+		chip->batt_psy = power_supply_get_by_name("battery");
+	if (chip->batt_psy) {
+		/* if battery has been registered, use the online property */
+		chip->batt_psy->get_property(chip->batt_psy,
+					POWER_SUPPLY_PROP_ONLINE, &ret);
+		return !!ret.intval;
+	}
+
+	/* Default to true if the battery power supply is not registered. */
+	pr_debug("battery power supply is not registered\n");
+	return true;
 }
 
 static int get_simultaneous_batt_v_and_i(struct qpnp_bms_chip *chip,
@@ -784,14 +809,15 @@
 			pr_err("bms current read failed with rc: %d\n", rc);
 			return rc;
 		}
-		rc = qpnp_vadc_read(VBAT_SNS, &v_result);
+		rc = qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &v_result);
 		if (rc) {
 			pr_err("vadc read failed with rc: %d\n", rc);
 			return rc;
 		}
 		*vbat_uv = (int)v_result.physical;
 	} else {
-		rc = qpnp_iadc_vadc_sync_read(iadc_channel, &i_result,
+		rc = qpnp_iadc_vadc_sync_read(chip->iadc_dev,
+					iadc_channel, &i_result,
 					VBAT_SNS, &v_result);
 		if (rc) {
 			pr_err("adc sync read failed with rc: %d\n", rc);
@@ -1026,13 +1052,13 @@
 
 	software_counter = cc_type == SHDW_CC ?
 			&chip->software_shdw_cc_uah : &chip->software_cc_uah;
-	rc = qpnp_vadc_read(DIE_TEMP, &result);
+	rc = qpnp_vadc_read(chip->vadc_dev, DIE_TEMP, &result);
 	if (rc) {
 		pr_err("could not read pmic die temperature: %d\n", rc);
 		return *software_counter;
 	}
 
-	qpnp_iadc_get_gain_and_offset(&calibration);
+	qpnp_iadc_get_gain_and_offset(chip->iadc_dev, &calibration);
 	pr_debug("%scc = %lld, die_temp = %lld\n",
 			cc_type == SHDW_CC ? "shdw_" : "",
 			cc, result.physical);
@@ -1042,7 +1068,7 @@
 					- calibration.offset_raw);
 	cc_pvh = cc_uv_to_pvh(cc_voltage_uv);
 	cc_uah = div_s64(cc_pvh, chip->r_sense_uohm);
-	rc = qpnp_iadc_comp_result(&cc_uah);
+	rc = qpnp_iadc_comp_result(chip->iadc_dev, &cc_uah);
 	if (rc)
 		pr_debug("error compensation failed: %d\n", rc);
 	if (clear_cc == RESET) {
@@ -1407,7 +1433,7 @@
 	int rc;
 	struct qpnp_vadc_result result;
 
-	rc = qpnp_vadc_read(LR_MUX1_BATT_THERM, &result);
+	rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &result);
 	if (rc) {
 		pr_err("Unable to read battery temperature\n");
 		return rc;
@@ -1667,7 +1693,7 @@
 	int rc;
 	bool charging, charging_since_last_report;
 
-	rc = qpnp_vadc_read(LR_MUX1_BATT_THERM, &result);
+	rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &result);
 
 	if (rc) {
 		pr_err("error reading adc channel = %d, rc = %d\n",
@@ -2038,7 +2064,7 @@
 {
 	int rc, vbat_uv;
 
-	rc = get_battery_voltage(&vbat_uv);
+	rc = get_battery_voltage(chip, &vbat_uv);
 	if (rc < 0) {
 		pr_err("adc vbat failed err = %d\n", rc);
 		return soc;
@@ -2109,9 +2135,9 @@
 		target_cc_uah = CC_STEP_INCREMENT_UAH;
 	}
 	iadc_comp_factor = 100000;
-	qpnp_iadc_comp_result(&iadc_comp_factor);
+	qpnp_iadc_comp_result(chip->iadc_dev, &iadc_comp_factor);
 	target_cc_uah = div64_s64(target_cc_uah * 100000, iadc_comp_factor);
-	target_cc_uah = cc_reverse_adjust_for_gain(target_cc_uah);
+	target_cc_uah = cc_reverse_adjust_for_gain(chip, target_cc_uah);
 	cc_raw_64 = convert_cc_uah_to_raw(chip, target_cc_uah);
 	cc_raw = convert_s64_to_s36(cc_raw_64);
 
@@ -2297,7 +2323,7 @@
 	int voltage_range_uv, voltage_remaining_uv, voltage_based_soc;
 	int rc, vbat_uv;
 
-	rc = get_battery_voltage(&vbat_uv);
+	rc = get_battery_voltage(chip, &vbat_uv);
 	if (rc < 0) {
 		pr_err("adc vbat failed err = %d\n", rc);
 		return rc;
@@ -2328,13 +2354,18 @@
 
 	bms_stay_awake(&chip->soc_wake_source);
 	mutex_lock(&chip->vbat_monitor_mutex);
-	qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
+	if (chip->vbat_monitor_params.state_request !=
+			ADC_TM_HIGH_LOW_THR_DISABLE)
+		qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+					&chip->vbat_monitor_params);
 	mutex_unlock(&chip->vbat_monitor_mutex);
 	if (chip->use_voltage_soc) {
 		soc = calculate_soc_from_voltage(chip);
 	} else {
-		qpnp_iadc_calibrate_for_trim(true);
-		rc = qpnp_vadc_read(LR_MUX1_BATT_THERM, &result);
+		if (!chip->batfet_closed)
+			qpnp_iadc_calibrate_for_trim(chip->iadc_dev, true);
+		rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM,
+								&result);
 		if (rc) {
 			pr_err("error reading vadc LR_MUX1_BATT_THERM = %d, rc = %d\n",
 						LR_MUX1_BATT_THERM, rc);
@@ -2428,7 +2459,8 @@
 				chip->vbat_monitor_params.low_thr,
 				chip->vbat_monitor_params.high_thr);
 	}
-	qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
+	qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+					&chip->vbat_monitor_params);
 	mutex_unlock(&chip->vbat_monitor_mutex);
 }
 
@@ -2476,7 +2508,8 @@
 				chip->vbat_monitor_params.low_thr,
 				chip->vbat_monitor_params.high_thr);
 	}
-	qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
+	qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+					&chip->vbat_monitor_params);
 	mutex_unlock(&chip->vbat_monitor_mutex);
 }
 
@@ -2487,10 +2520,10 @@
 	struct qpnp_vadc_result result;
 	int rc;
 
-	rc = qpnp_vadc_read(VBAT_SNS, &result);
+	rc = qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &result);
 	pr_debug("vbat = %lld, raw = 0x%x\n", result.physical, result.adc_code);
 
-	get_battery_voltage(&vbat_uv);
+	get_battery_voltage(chip, &vbat_uv);
 	pr_debug("vbat is at %d, state is at %d\n", vbat_uv, state);
 
 	if (state == ADC_TM_LOW_STATE) {
@@ -2500,7 +2533,7 @@
 			configure_vbat_monitor_low(chip);
 		} else {
 			pr_debug("faulty btm trigger, discarding\n");
-			qpnp_adc_tm_channel_measure(
+			qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
 					&chip->vbat_monitor_params);
 		}
 	} else if (state == ADC_TM_HIGH_STATE) {
@@ -2510,7 +2543,7 @@
 			configure_vbat_monitor_high(chip);
 		} else {
 			pr_debug("faulty btm trigger, discarding\n");
-			qpnp_adc_tm_channel_measure(
+			qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
 					&chip->vbat_monitor_params);
 		}
 	} else {
@@ -2525,9 +2558,11 @@
 	int rc;
 
 	chip->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_DISABLE;
-	rc = qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
+
+	rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+						&chip->vbat_monitor_params);
 	if (rc) {
-		pr_err("tm measure failed: %d\n", rc);
+		pr_err("tm disable failed: %d\n", rc);
 		return rc;
 	}
 	if (wake_lock_active(&chip->low_voltage_wake_lock)) {
@@ -2545,17 +2580,6 @@
 {
 	int rc;
 
-	rc = qpnp_adc_tm_is_ready();
-	if (rc) {
-		pr_info("adc tm is not ready yet: %d, defer probe\n", rc);
-		return -EPROBE_DEFER;
-	}
-
-	if (!is_battery_present(chip)) {
-		pr_debug("no battery inserted, do not setup vbat monitoring\n");
-		return 0;
-	}
-
 	chip->vbat_monitor_params.low_thr = chip->low_voltage_threshold;
 	chip->vbat_monitor_params.high_thr = chip->max_voltage_uv
 							- VBATT_ERROR_MARGIN;
@@ -2567,11 +2591,20 @@
 	pr_debug("set low thr to %d and high to %d\n",
 			chip->vbat_monitor_params.low_thr,
 			chip->vbat_monitor_params.high_thr);
-	rc = qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
-	if (rc) {
-		pr_err("tm setup failed: %d\n", rc);
+
+	if (!is_battery_present(chip)) {
+		pr_debug("no battery inserted, do not enable vbat monitoring\n");
+		chip->vbat_monitor_params.state_request =
+			ADC_TM_HIGH_LOW_THR_DISABLE;
+	} else {
+		rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+						&chip->vbat_monitor_params);
+		if (rc) {
+			pr_err("tm setup failed: %d\n", rc);
 		return rc;
+		}
 	}
+
 	pr_debug("setup complete\n");
 	return 0;
 }
@@ -2865,7 +2898,7 @@
 	struct qpnp_vadc_result result;
 	int fcc_uah, new_fcc_uah, delta_cc_uah, delta_soc;
 
-	rc = qpnp_vadc_read(LR_MUX1_BATT_THERM, &result);
+	rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &result);
 	if (rc) {
 		pr_err("Unable to read batt_temp\n");
 		return;
@@ -2904,9 +2937,55 @@
 	}
 }
 
+#define MAX_CAL_TRIES	200
+#define MIN_CAL_UA	3000
+static void batfet_open_work(struct work_struct *work)
+{
+	int i;
+	int rc;
+	int result_ua;
+	u8 orig_delay, sample_delay;
+	struct qpnp_bms_chip *chip = container_of(work,
+				struct qpnp_bms_chip,
+				batfet_open_work);
+
+	rc = qpnp_read_wrapper(chip, &orig_delay,
+			chip->base + BMS1_S1_DELAY_CTL, 1);
+
+	sample_delay = 0x0;
+	rc = qpnp_write_wrapper(chip, &sample_delay,
+			chip->base + BMS1_S1_DELAY_CTL, 1);
+
+	/*
+	 * In certain PMICs there is a coupling issue which causes
+	 * bad calibration values that result in a huge battery current
+	 * even when the BATFET is open. Do continuous calibrations until
+	 * we hit reasonable cal values that result in a low battery current.
+	 */
+
+	for (i = 0; (!chip->batfet_closed) && i < MAX_CAL_TRIES; i++) {
+		rc = qpnp_iadc_calibrate_for_trim(chip->iadc_dev, false);
+		/*
+	 * Wait 20 ms after calibration and before reading battery
+		 * current. The BMS h/w uses calibration values in the
+		 * next sampling of vsense.
+		 */
+		msleep(20);
+		rc |= get_battery_current(chip, &result_ua);
+		if (rc == 0 && abs(result_ua) <= MIN_CAL_UA) {
+			pr_debug("good cal at %d attempt\n", i);
+			break;
+		}
+	}
+	pr_debug("batfet_closed = %d i = %d result_ua = %d\n",
+			chip->batfet_closed, i, result_ua);
+
+	rc = qpnp_write_wrapper(chip, &orig_delay,
+			chip->base + BMS1_S1_DELAY_CTL, 1);
+}
+
 static void charging_began(struct qpnp_bms_chip *chip)
 {
-
 	mutex_lock(&chip->last_soc_mutex);
 	chip->charge_start_tm_sec = 0;
 	chip->catch_up_time_sec = 0;
@@ -2997,6 +3076,27 @@
 }
 
 #define CALIB_WRKARND_DIG_MAJOR_MAX		0x03
+static void batfet_status_check(struct qpnp_bms_chip *chip)
+{
+	bool batfet_closed;
+
+	if (chip->iadc_bms_revision2 > CALIB_WRKARND_DIG_MAJOR_MAX)
+		return;
+
+	batfet_closed = is_batfet_closed(chip);
+	if (chip->batfet_closed != batfet_closed) {
+		chip->batfet_closed = batfet_closed;
+		if (batfet_closed == false) {
+			/* batfet opened */
+			schedule_work(&chip->batfet_open_work);
+			qpnp_iadc_skip_calibration(chip->iadc_dev);
+		} else {
+			/* batfet closed */
+			qpnp_iadc_calibrate_for_trim(chip->iadc_dev, true);
+			qpnp_iadc_resume_calibration(chip->iadc_dev);
+		}
+	}
+}
 
 static void battery_insertion_check(struct qpnp_bms_chip *chip)
 {
@@ -3032,6 +3132,7 @@
 								bms_psy);
 
 	battery_insertion_check(chip);
+	batfet_status_check(chip);
 	battery_status_check(chip);
 }
 
@@ -3196,7 +3297,7 @@
 	int rc;
 	struct qpnp_vadc_result result;
 
-	rc = qpnp_vadc_read(LR_MUX2_BAT_ID, &result);
+	rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX2_BAT_ID, &result);
 	if (rc) {
 		pr_err("error reading batt id channel = %d, rc = %d\n",
 					LR_MUX2_BAT_ID, rc);
@@ -3287,15 +3388,48 @@
 	return 0;
 }
 
+static int bms_get_adc(struct qpnp_bms_chip *chip,
+					struct spmi_device *spmi)
+{
+	int rc = 0;
+
+	chip->vadc_dev = qpnp_get_vadc(&spmi->dev, "bms");
+	if (IS_ERR(chip->vadc_dev)) {
+		rc = PTR_ERR(chip->vadc_dev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("vadc property missing, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->iadc_dev = qpnp_get_iadc(&spmi->dev, "bms");
+	if (IS_ERR(chip->iadc_dev)) {
+		rc = PTR_ERR(chip->iadc_dev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("iadc property missing, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->adc_tm_dev = qpnp_get_adc_tm(&spmi->dev, "bms");
+	if (IS_ERR(chip->adc_tm_dev)) {
+		rc = PTR_ERR(chip->adc_tm_dev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("adc-tm not ready, defer probe\n");
+		return rc;
+	}
+
+	return 0;
+}
+
 #define SPMI_PROP_READ(chip_prop, qpnp_spmi_property, retval)		\
 do {									\
+	if (retval)							\
+		break;							\
 	retval = of_property_read_u32(chip->spmi->dev.of_node,		\
 				"qcom," qpnp_spmi_property,		\
 					&chip->chip_prop);		\
 	if (retval) {							\
 		pr_err("Error reading " #qpnp_spmi_property		\
 						" property %d\n", rc);	\
-		return -EINVAL;						\
 	}								\
 } while (0)
 
@@ -3307,7 +3441,7 @@
 
 static inline int bms_read_properties(struct qpnp_bms_chip *chip)
 {
-	int rc;
+	int rc = 0;
 
 	SPMI_PROP_READ(r_sense_uohm, "r-sense-uohm", rc);
 	SPMI_PROP_READ(v_cutoff_uv, "v-cutoff-uv", rc);
@@ -3324,17 +3458,6 @@
 	SPMI_PROP_READ(low_soc_calculate_soc_ms,
 			"low-soc-calculate-soc-ms", rc);
 	SPMI_PROP_READ(calculate_soc_ms, "calculate-soc-ms", rc);
-	chip->use_external_rsense = of_property_read_bool(
-			chip->spmi->dev.of_node,
-			"qcom,use-external-rsense");
-	chip->ignore_shutdown_soc = of_property_read_bool(
-			chip->spmi->dev.of_node,
-			"qcom,ignore-shutdown-soc");
-	chip->use_voltage_soc = of_property_read_bool(chip->spmi->dev.of_node,
-			"qcom,use-voltage-soc");
-	chip->use_ocv_thresholds = of_property_read_bool(
-			chip->spmi->dev.of_node,
-			"qcom,use-ocv-thresholds");
 	SPMI_PROP_READ(high_ocv_correction_limit_uv,
 			"high-ocv-correction-limit-uv", rc);
 	SPMI_PROP_READ(low_ocv_correction_limit_uv,
@@ -3348,6 +3471,18 @@
 	SPMI_PROP_READ(low_voltage_threshold, "low-voltage-threshold", rc);
 	SPMI_PROP_READ(temperature_margin, "tm-temp-margin", rc);
 
+	chip->use_external_rsense = of_property_read_bool(
+			chip->spmi->dev.of_node,
+			"qcom,use-external-rsense");
+	chip->ignore_shutdown_soc = of_property_read_bool(
+			chip->spmi->dev.of_node,
+			"qcom,ignore-shutdown-soc");
+	chip->use_voltage_soc = of_property_read_bool(chip->spmi->dev.of_node,
+			"qcom,use-voltage-soc");
+	chip->use_ocv_thresholds = of_property_read_bool(
+			chip->spmi->dev.of_node,
+			"qcom,use-ocv-thresholds");
+
 	if (chip->adjust_soc_low_threshold >= 45)
 		chip->adjust_soc_low_threshold = 45;
 
@@ -3371,6 +3506,11 @@
 			chip->min_fcc_learning_samples);
 	}
 
+	if (rc) {
+		pr_err("Missing required properties.\n");
+		return rc;
+	}
+
 	pr_debug("dts data: r_sense_uohm:%d, v_cutoff_uv:%d, max_v:%d\n",
 			chip->r_sense_uohm, chip->v_cutoff_uv,
 			chip->max_voltage_uv);
@@ -3578,7 +3718,7 @@
 			chip->software_shdw_cc_uah = 0;
 		}
 
-		rc = qpnp_iadc_get_rsense(&rds_rsense_nohm);
+		rc = qpnp_iadc_get_rsense(chip->iadc_dev, &rds_rsense_nohm);
 		if (rc) {
 			pr_err("Unable to read RDS resistance value from IADC; rc = %d\n",
 								rc);
@@ -3624,7 +3764,7 @@
 	struct qpnp_vadc_result result;
 	int rc;
 
-	rc = qpnp_vadc_read(DIE_TEMP, &result);
+	rc = qpnp_vadc_read(chip->vadc_dev, DIE_TEMP, &result);
 
 	pr_debug("low = %lld, high = %lld\n",
 			result.physical - chip->temperature_margin,
@@ -3635,7 +3775,8 @@
 						- chip->temperature_margin;
 	chip->die_temp_monitor_params.state_request =
 						ADC_TM_HIGH_LOW_THR_ENABLE;
-	return qpnp_adc_tm_channel_measure(&chip->die_temp_monitor_params);
+	return qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+					&chip->die_temp_monitor_params);
 }
 
 static void btm_notify_die_temp(enum qpnp_tm_state state, void *ctx)
@@ -3644,7 +3785,7 @@
 	struct qpnp_vadc_result result;
 	int rc;
 
-	rc = qpnp_vadc_read(DIE_TEMP, &result);
+	rc = qpnp_vadc_read(chip->vadc_dev, DIE_TEMP, &result);
 
 	if (state == ADC_TM_LOW_STATE)
 		pr_debug("low state triggered\n");
@@ -3658,17 +3799,14 @@
 
 static int setup_die_temp_monitoring(struct qpnp_bms_chip *chip)
 {
-	int rc = qpnp_adc_tm_is_ready();
-	if (rc) {
-		pr_info("adc tm is not ready yet: %d, defer probe\n", rc);
-		return -EPROBE_DEFER;
-	}
+	int rc;
+
 	chip->die_temp_monitor_params.channel = DIE_TEMP;
 	chip->die_temp_monitor_params.btm_ctx = (void *)chip;
 	chip->die_temp_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S;
 	chip->die_temp_monitor_params.threshold_notification =
 						&btm_notify_die_temp;
-	refresh_die_temp_monitor(chip);
+	rc = refresh_die_temp_monitor(chip);
 	if (rc) {
 		pr_err("tm setup failed: %d\n", rc);
 		return rc;
@@ -3690,19 +3828,9 @@
 		return -ENOMEM;
 	}
 
-	rc = qpnp_vadc_is_ready();
-	if (rc) {
-		pr_info("vadc not ready: %d, deferring probe\n", rc);
-		rc = -EPROBE_DEFER;
+	rc = bms_get_adc(chip, spmi);
+	if (rc < 0)
 		goto error_read;
-	}
-
-	rc = qpnp_iadc_is_ready();
-	if (rc) {
-		pr_info("iadc not ready: %d, deferring probe\n", rc);
-		rc = -EPROBE_DEFER;
-		goto error_read;
-	}
 
 	mutex_init(&chip->bms_output_lock);
 	mutex_init(&chip->last_ocv_uv_mutex);
@@ -3791,6 +3919,7 @@
 	INIT_DELAYED_WORK(&chip->calculate_soc_delayed_work,
 			calculate_soc_work);
 	INIT_WORK(&chip->recalc_work, recalculate_work);
+	INIT_WORK(&chip->batfet_open_work, batfet_open_work);
 
 	read_shutdown_soc_and_iavg(chip);
 
@@ -3828,6 +3957,7 @@
 	}
 
 	battery_insertion_check(chip);
+	batfet_status_check(chip);
 	battery_status_check(chip);
 
 	calculate_soc_work(&(chip->calculate_soc_delayed_work.work));
@@ -3852,7 +3982,7 @@
 
 	chip->bms_psy_registered = true;
 	vbatt = 0;
-	rc = get_battery_voltage(&vbatt);
+	rc = get_battery_voltage(chip, &vbatt);
 	if (rc) {
 		pr_err("error reading vbat_sns adc channel = %d, rc = %d\n",
 						VBAT_SNS, rc);
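The common thread through the qpnp-bms changes is the switch from the global qpnp_vadc_read()/qpnp_iadc_*()/qpnp_adc_tm_*() entry points to per-chip device handles looked up once at probe, which both supports multiple ADC instances and gives clean -EPROBE_DEFER ordering. The consumer-side pattern, reduced to its essentials:

	struct qpnp_vadc_result result;
	int rc;

	chip->vadc_dev = qpnp_get_vadc(&spmi->dev, "bms");
	if (IS_ERR(chip->vadc_dev))
		return PTR_ERR(chip->vadc_dev);	/* often -EPROBE_DEFER */

	rc = qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &result);
	if (!rc)
		pr_debug("vbat = %lld uV\n", result.physical);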
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index 88e00ba..7f77b75 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -100,6 +100,9 @@
 #define BOOST_ENABLE_CONTROL			0x46
 #define COMP_OVR1				0xEA
 #define BAT_IF_BTC_CTRL				0x49
+#define USB_OCP_THR				0x52
+#define USB_OCP_CLR				0x53
+#define BAT_IF_TEMP_STATUS			0x09
 
 #define REG_OFFSET_PERP_SUBTYPE			0x05
 
@@ -142,6 +145,11 @@
 #define BAT_THM_EN			BIT(1)
 #define BAT_ID_EN			BIT(0)
 #define BOOST_PWR_EN			BIT(7)
+#define OCP_CLR_BIT			BIT(7)
+#define OCP_THR_MASK			0x03
+#define OCP_THR_900_MA			0x02
+#define OCP_THR_500_MA			0x01
+#define OCP_THR_200_MA			0x00
 
 /* Interrupt definitions */
 /* smbb_chg_interrupts */
@@ -267,6 +275,7 @@
 	u16				misc_base;
 	u16				freq_base;
 	struct qpnp_chg_irq		usbin_valid;
+	struct qpnp_chg_irq		usb_ocp;
 	struct qpnp_chg_irq		dcin_valid;
 	struct qpnp_chg_irq		chg_gone;
 	struct qpnp_chg_irq		chg_fastchg;
@@ -275,6 +284,7 @@
 	struct qpnp_chg_irq		chg_vbatdet_lo;
 	struct qpnp_chg_irq		batt_pres;
 	struct qpnp_chg_irq		vchg_loop;
+	struct qpnp_chg_irq		batt_temp_ok;
 	bool				bat_is_cool;
 	bool				bat_is_warm;
 	bool				chg_done;
@@ -305,8 +315,8 @@
 	unsigned int			maxinput_dc_ma;
 	unsigned int			hot_batt_p;
 	unsigned int			cold_batt_p;
-	unsigned int			warm_bat_decidegc;
-	unsigned int			cool_bat_decidegc;
+	int				warm_bat_decidegc;
+	int				cool_bat_decidegc;
 	unsigned int			safe_current;
 	unsigned int			revision;
 	unsigned int			type;
@@ -326,6 +336,8 @@
 	struct wake_lock		eoc_wake_lock;
 	struct qpnp_chg_regulator	otg_vreg;
 	struct qpnp_chg_regulator	boost_vreg;
+	struct qpnp_vadc_chip		*vadc_dev;
+	struct qpnp_adc_tm_chip		*adc_tm_dev;
 };
 
 
@@ -503,6 +515,23 @@
 }
 
 static int
+qpnp_chg_is_batt_temp_ok(struct qpnp_chg_chip *chip)
+{
+	u8 batt_rt_sts;
+	int rc;
+
+	rc = qpnp_chg_read(chip, &batt_rt_sts,
+				 INT_RT_STS(chip->bat_if_base), 1);
+	if (rc) {
+		pr_err("spmi read failed: addr=%03X, rc=%d\n",
+				INT_RT_STS(chip->bat_if_base), rc);
+		return rc;
+	}
+
+	return (batt_rt_sts & BAT_TEMP_OK_IRQ) ? 1 : 0;
+}
+
+static int
 qpnp_chg_is_batt_present(struct qpnp_chg_chip *chip)
 {
 	u8 batt_pres_rt_sts;
@@ -519,6 +548,23 @@
 	return (batt_pres_rt_sts & BATT_PRES_IRQ) ? 1 : 0;
 }
 
+static int
+qpnp_chg_is_batfet_closed(struct qpnp_chg_chip *chip)
+{
+	u8 batfet_closed_rt_sts;
+	int rc;
+
+	rc = qpnp_chg_read(chip, &batfet_closed_rt_sts,
+				 INT_RT_STS(chip->bat_if_base), 1);
+	if (rc) {
+		pr_err("spmi read failed: addr=%03X, rc=%d\n",
+				INT_RT_STS(chip->bat_if_base), rc);
+		return rc;
+	}
+
+	return (batfet_closed_rt_sts & BAT_FET_ON_IRQ) ? 1 : 0;
+}
+
 #define USB_VALID_BIT	BIT(7)
 static int
 qpnp_chg_is_usb_chg_plugged_in(struct qpnp_chg_chip *chip)
@@ -882,7 +928,7 @@
 	struct qpnp_chg_chip *chip = container_of(work,
 				struct qpnp_chg_chip, adc_measure_work);
 
-	if (qpnp_adc_tm_channel_measure(&chip->adc_param))
+	if (qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->adc_param))
 		pr_err("request ADC error\n");
 }
 
@@ -946,6 +992,32 @@
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t
+qpnp_chg_usb_usb_ocp_irq_handler(int irq, void *_chip)
+{
+	struct qpnp_chg_chip *chip = _chip;
+	int rc;
+
+	pr_debug("usb-ocp triggered\n");
+
+	rc = qpnp_chg_masked_write(chip,
+			chip->usb_chgpth_base + USB_OCP_CLR,
+			OCP_CLR_BIT,
+			OCP_CLR_BIT, 1);
+	if (rc)
+		pr_err("Failed to clear OCP bit rc = %d\n", rc);
+
+	/* force usb ovp fet off */
+	rc = qpnp_chg_masked_write(chip,
+			chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
+			USB_OTG_EN_BIT,
+			USB_OTG_EN_BIT, 1);
+	if (rc)
+		pr_err("Failed to turn off usb ovp rc = %d\n", rc);
+
+	return IRQ_HANDLED;
+}
+
 #define ENUM_T_STOP_BIT		BIT(0)
 static irqreturn_t
 qpnp_chg_usb_usbin_valid_irq_handler(int irq, void *_chip)
@@ -980,6 +1052,19 @@
 }
 
 static irqreturn_t
+qpnp_chg_bat_if_batt_temp_irq_handler(int irq, void *_chip)
+{
+	struct qpnp_chg_chip *chip = _chip;
+	int batt_temp_good;
+
+	batt_temp_good = qpnp_chg_is_batt_temp_ok(chip);
+	pr_debug("batt-temp triggered: %d\n", batt_temp_good);
+
+	power_supply_changed(&chip->batt_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
 qpnp_chg_bat_if_batt_pres_irq_handler(int irq, void *_chip)
 {
 	struct qpnp_chg_chip *chip = _chip;
@@ -1217,6 +1302,7 @@
 	POWER_SUPPLY_PROP_CHARGE_TYPE,
 	POWER_SUPPLY_PROP_HEALTH,
 	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
 	POWER_SUPPLY_PROP_TECHNOLOGY,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
 	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
@@ -1281,7 +1367,7 @@
 		pr_err("vbat reading not supported for 1.0 rc=%d\n", rc);
 		return 0;
 	} else {
-		rc = qpnp_vadc_read(VBAT_SNS, &results);
+		rc = qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &results);
 		if (rc) {
 			pr_err("Unable to read vbat rc=%d\n", rc);
 			return 0;
@@ -1357,7 +1443,7 @@
 get_prop_batt_status(struct qpnp_chg_chip *chip)
 {
 	int rc;
-	u8 chgr_sts;
+	u8 chgr_sts, bat_if_sts;
 
 	if ((qpnp_chg_is_usb_chg_plugged_in(chip) ||
 		qpnp_chg_is_dc_chg_plugged_in(chip)) && chip->chg_done) {
@@ -1370,9 +1456,15 @@
 		return POWER_SUPPLY_CHARGE_TYPE_NONE;
 	}
 
-	if (chgr_sts & TRKL_CHG_ON_IRQ)
+	rc = qpnp_chg_read(chip, &bat_if_sts, INT_RT_STS(chip->bat_if_base), 1);
+	if (rc) {
+		pr_err("failed to read bat_if sts %d\n", rc);
+		return POWER_SUPPLY_CHARGE_TYPE_NONE;
+	}
+
+	if (chgr_sts & TRKL_CHG_ON_IRQ && bat_if_sts & BAT_FET_ON_IRQ)
 		return POWER_SUPPLY_STATUS_CHARGING;
-	if (chgr_sts & FAST_CHG_ON_IRQ)
+	if (chgr_sts & FAST_CHG_ON_IRQ && bat_if_sts & BAT_FET_ON_IRQ)
 		return POWER_SUPPLY_STATUS_CHARGING;
 
 	return POWER_SUPPLY_STATUS_DISCHARGING;
@@ -1474,7 +1566,7 @@
 	if (chip->use_default_batt_values || !get_prop_batt_present(chip))
 		return DEFAULT_TEMP;
 
-	rc = qpnp_vadc_read(LR_MUX1_BATT_THERM, &results);
+	rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &results);
 	if (rc) {
 		pr_debug("Unable to read batt temperature rc=%d\n", rc);
 		return 0;
@@ -1512,6 +1604,11 @@
 	return (buck_sts & VCHG_LOOP_IRQ) ? 1 : 0;
 }
 
+static int get_prop_online(struct qpnp_chg_chip *chip)
+{
+	return qpnp_chg_is_batfet_closed(chip);
+}
+
 static void
 qpnp_batt_external_power_changed(struct power_supply *psy)
 {
@@ -1621,6 +1718,9 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
 		val->intval = qpnp_chg_vinmin_get(chip) * 1000;
 		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = get_prop_online(chip);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -1676,8 +1776,7 @@
 		return -EINVAL;
 	}
 
-	temp = (safe_current - QPNP_CHG_IBATSAFE_MIN_MA)
-				/ QPNP_CHG_I_STEP_MA;
+	temp = safe_current / QPNP_CHG_I_STEP_MA;
 	return qpnp_chg_masked_write(chip,
 			chip->chgr_base + CHGR_IBAT_SAFE,
 			QPNP_CHG_I_MASK, temp, 1);
@@ -2289,8 +2388,10 @@
 		qpnp_chg_set_appropriate_vbatdet(chip);
 	}
 
-	if (qpnp_adc_tm_channel_measure(&chip->adc_param))
+	if (qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->adc_param))
 		pr_err("request ADC error\n");
+
+	power_supply_changed(&chip->batt_psy);
 }
 
 static int
@@ -2494,6 +2595,25 @@
 			}
 
 			enable_irq_wake(chip->batt_pres.irq);
+
+			chip->batt_temp_ok.irq = spmi_get_irq_byname(spmi,
+						spmi_resource, "bat-temp-ok");
+			if (chip->batt_temp_ok.irq < 0) {
+				pr_err("Unable to get bat-temp-ok irq\n");
+				return rc;
+			}
+			rc = devm_request_irq(chip->dev, chip->batt_temp_ok.irq,
+				qpnp_chg_bat_if_batt_temp_irq_handler,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				"bat-temp-ok", chip);
+			if (rc < 0) {
+				pr_err("Can't request %d bat-temp-ok irq: %d\n",
+						chip->batt_temp_ok.irq, rc);
+				return rc;
+			}
+
+			enable_irq_wake(chip->batt_temp_ok.irq);
+
 			break;
 		case SMBB_BUCK_SUBTYPE:
 		case SMBBP_BUCK_SUBTYPE:
@@ -2553,6 +2673,27 @@
 				return rc;
 			}
 
+			if ((subtype == SMBBP_USB_CHGPTH_SUBTYPE) ||
+				(subtype == SMBCL_USB_CHGPTH_SUBTYPE)) {
+				chip->usb_ocp.irq = spmi_get_irq_byname(spmi,
+						spmi_resource, "usb-ocp");
+				if (chip->usb_ocp.irq < 0) {
+					pr_err("Unable to get usbin irq\n");
+					return rc;
+				}
+				rc = devm_request_irq(chip->dev,
+					chip->usb_ocp.irq,
+					qpnp_chg_usb_usb_ocp_irq_handler,
+					IRQF_TRIGGER_RISING, "usb-ocp", chip);
+				if (rc < 0) {
+					pr_err("Can't request %d usb-ocp: %d\n",
+							chip->usb_ocp.irq, rc);
+					return rc;
+				}
+
+				enable_irq_wake(chip->usb_ocp.irq);
+			}
+
 			enable_irq_wake(chip->usbin_valid.irq);
 			enable_irq_wake(chip->chg_gone.irq);
 			break;
@@ -2593,7 +2734,7 @@
 			"qcom,battery-data");
 	if (node) {
 		memset(&batt_data, 0, sizeof(struct bms_battery_data));
-		rc = qpnp_vadc_read(LR_MUX2_BAT_ID, &result);
+		rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX2_BAT_ID, &result);
 		if (rc) {
 			pr_err("error reading batt id channel = %d, rc = %d\n",
 						LR_MUX2_BAT_ID, rc);
@@ -2607,8 +2748,11 @@
 			return rc;
 		}
 
-		if (batt_data.max_voltage_uv >= 0)
+		if (batt_data.max_voltage_uv >= 0) {
 			chip->max_voltage_mv = batt_data.max_voltage_uv / 1000;
+			chip->safe_voltage_mv = chip->max_voltage_mv
+				+ MAX_DELTA_VDD_MAX_MV;
+		}
 		if (batt_data.iterm_ua >= 0)
 			chip->term_current = batt_data.iterm_ua / 1000;
 	}
@@ -2804,6 +2948,16 @@
 			0xFF,
 			0x80, 1);
 
+		if ((subtype == SMBBP_USB_CHGPTH_SUBTYPE) ||
+			(subtype == SMBCL_USB_CHGPTH_SUBTYPE)) {
+			rc = qpnp_chg_masked_write(chip,
+				chip->usb_chgpth_base + USB_OCP_THR,
+				OCP_THR_MASK,
+				OCP_THR_900_MA, 1);
+			if (rc)
+				pr_err("Failed to configure OCP rc = %d\n", rc);
+		}
+
 		break;
 	case SMBB_DC_CHGPTH_SUBTYPE:
 		break;
@@ -2921,6 +3075,7 @@
 	if (rc) {
 		/* Select BAT_THM as default BPD scheme */
 		chip->bpd_detection = BPD_TYPE_BAT_THM;
+		rc = 0;
 	} else {
 		chip->bpd_detection = get_bpd(bpd);
 		if (chip->bpd_detection < 0) {
@@ -2931,9 +3086,11 @@
 
 	/* Look up JEITA compliance parameters if cool and warm temp provided */
 	if (chip->cool_bat_decidegc && chip->warm_bat_decidegc) {
-		rc = qpnp_adc_tm_is_ready();
-		if (rc) {
-			pr_err("tm not ready %d\n", rc);
+		chip->adc_tm_dev = qpnp_get_adc_tm(chip->dev, "chg");
+		if (IS_ERR(chip->adc_tm_dev)) {
+			rc = PTR_ERR(chip->adc_tm_dev);
+			if (rc != -EPROBE_DEFER)
+				pr_err("adc-tm not ready, defer probe\n");
 			return rc;
 		}
 
@@ -3053,14 +3210,18 @@
 
 		if (subtype == SMBB_BAT_IF_SUBTYPE ||
 			subtype == SMBBP_BAT_IF_SUBTYPE ||
-			subtype == SMBCL_BAT_IF_SUBTYPE){
-			rc = qpnp_vadc_is_ready();
-			if (rc)
+			subtype == SMBCL_BAT_IF_SUBTYPE) {
+			chip->vadc_dev = qpnp_get_vadc(chip->dev, "chg");
+			if (IS_ERR(chip->vadc_dev)) {
+				rc = PTR_ERR(chip->vadc_dev);
+				if (rc != -EPROBE_DEFER)
+					pr_err("vadc property missing\n");
 				goto fail_chg_enable;
+			}
 
 			rc = qpnp_chg_load_battery_data(chip);
 			if (rc)
 				goto fail_chg_enable;
 		}
 	}
 
@@ -3276,7 +3437,8 @@
 		chip->adc_param.channel = LR_MUX1_BATT_THERM;
 
 		if (get_prop_batt_present(chip)) {
-			rc = qpnp_adc_tm_channel_measure(&chip->adc_param);
+			rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+							&chip->adc_param);
 			if (rc) {
 				pr_err("request ADC error %d\n", rc);
 				goto fail_chg_enable;
@@ -3336,7 +3498,8 @@
 	struct qpnp_chg_chip *chip = dev_get_drvdata(&spmi->dev);
 	if (chip->cool_bat_decidegc && chip->warm_bat_decidegc
 						&& chip->batt_present) {
-		qpnp_adc_tm_disable_chan_meas(&chip->adc_param);
+		qpnp_adc_tm_disable_chan_meas(chip->adc_tm_dev,
+							&chip->adc_param);
 	}
 	cancel_work_sync(&chip->adc_measure_work);
 	cancel_delayed_work_sync(&chip->eoc_work);
@@ -3384,66 +3547,6 @@
 	return rc;
 }
 
-static int
-qpnp_chg_ops_set(const char *val, const struct kernel_param *kp)
-{
-	return -EINVAL;
-}
-
-#define MAX_LEN_VADC	10
-static int
-qpnp_chg_usb_in_get(char *val, const struct kernel_param *kp)
-{
-	int rc;
-	struct qpnp_vadc_result results;
-
-	rc = qpnp_vadc_is_ready();
-	if (rc)
-		return rc;
-
-	rc = qpnp_vadc_read(USBIN, &results);
-	if (rc) {
-		pr_err("Unable to read vchg rc=%d\n", rc);
-		return 0;
-	}
-	rc = snprintf(val, MAX_LEN_VADC, "%lld\n", results.physical);
-
-	return rc;
-}
-
-static int
-qpnp_chg_vchg_get(char *val, const struct kernel_param *kp)
-{
-	int rc;
-	struct qpnp_vadc_result results;
-
-	rc = qpnp_vadc_is_ready();
-	if (rc)
-		return rc;
-
-	rc = qpnp_vadc_read(VCHG_SNS, &results);
-	if (rc) {
-		pr_err("Unable to read vchg rc=%d\n", rc);
-		return 0;
-	}
-	rc = snprintf(val, MAX_LEN_VADC, "%lld\n", results.physical);
-
-	return rc;
-}
-
-static struct kernel_param_ops usb_in_uv_param_ops = {
-	.set = qpnp_chg_ops_set,
-	.get = qpnp_chg_usb_in_get,
-};
-
-static struct kernel_param_ops vchg_uv_param_ops = {
-	.set = qpnp_chg_ops_set,
-	.get = qpnp_chg_vchg_get,
-};
-
-module_param_cb(usb_in_uv, &usb_in_uv_param_ops, NULL, 0644);
-module_param_cb(vchg_uv, &vchg_uv_param_ops, NULL, 0644);
-
 static const struct dev_pm_ops qpnp_chg_pm_ops = {
 	.resume		= qpnp_chg_resume,
 	.suspend	= qpnp_chg_suspend,
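One behavioural change worth noting in the charger hunks: get_prop_batt_status() now reports CHARGING only when a trickle/fast-charge interrupt is asserted and the BATFET is closed, so an open BATFET no longer shows up as charging. Condensed, the new decision is:

	if ((chgr_sts & (TRKL_CHG_ON_IRQ | FAST_CHG_ON_IRQ)) &&
	    (bat_if_sts & BAT_FET_ON_IRQ))
		return POWER_SUPPLY_STATUS_CHARGING;

	return POWER_SUPPLY_STATUS_DISCHARGING;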
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 9a4ea63..e3998a5 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -34,7 +34,7 @@
 
 config SCSI_UFSHCD
 	tristate "Universal Flash Storage Controller Driver Core"
-	depends on SCSI
+	depends on SCSI && SCSI_DMA
 	---help---
 	This selects the support for UFS devices in Linux, say Y and make
 	  sure that you know the name of your UFS host adapter (the card
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 03319ac..5634caf 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -132,12 +132,6 @@
 		goto out_iounmap;
 	}
 
-	err = dma_set_coherent_mask(dev, dev->coherent_dma_mask);
-	if (err) {
-		dev_err(&pdev->dev, "set dma mask failed\n");
-		goto out_iounmap;
-	}
-
 	err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start);
 	if (err) {
 		dev_err(&pdev->dev, "Intialization failed\n");
@@ -184,12 +178,12 @@
 		mem_size = resource_size(mem_res);
 		release_mem_region(mem_res->start, mem_size);
 	}
-	platform_set_drvdata(pdev, NULL);
 	return 0;
 }
 
 static const struct of_device_id ufs_of_match[] = {
 	{ .compatible = "jedec,ufs-1.1"},
+	{},
 };
 
 static const struct dev_pm_ops ufshcd_dev_pm_ops = {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2230f14..df02ff1 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1349,7 +1349,7 @@
 		goto fatal_eh;
 
 	if (hba->errors & UIC_ERROR) {
-		reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+		reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
 		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
 			goto fatal_eh;
 	}
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 863339b..25b4b5e 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -2719,7 +2719,7 @@
 			pdata->use_bam = false;
 		}
 
-		if (pdata->bam_producer_pipe_index) {
+		if (!pdata->bam_producer_pipe_index) {
 			dev_warn(&pdev->dev,
 			"missing qcom,bam-producer-pipe-index entry in device-tree\n");
 			pdata->use_bam = false;
@@ -2919,15 +2919,6 @@
 		goto err_probe_reqmem;
 	}
 
-	if (pdata && pdata->ver_reg_exists) {
-		enum msm_spi_qup_version ver =
-					msm_spi_get_qup_hw_ver(&pdev->dev, dd);
-		if (dd->qup_ver != ver)
-			dev_warn(&pdev->dev,
-			"%s: HW version different then initially assumed by probe",
-			__func__);
-	}
-
 	if (pdata && pdata->rsl_id) {
 		struct remote_mutex_id rmid;
 		rmid.r_spinlock_id = pdata->rsl_id;
@@ -2986,6 +2977,16 @@
 	}
 
 	pclk_enabled = 1;
+
+	if (pdata && pdata->ver_reg_exists) {
+		enum msm_spi_qup_version ver =
+					msm_spi_get_qup_hw_ver(&pdev->dev, dd);
+		if (dd->qup_ver != ver)
+			dev_warn(&pdev->dev,
+			"%s: HW version different then initially assumed by probe",
+			__func__);
+	}
+
 	/* GSBI dose not exists on B-family MSM-chips */
 	if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
 		rc = msm_spi_configure_gsbi(dd, pdev);
@@ -3200,6 +3201,13 @@
 		if (!dd)
 			goto suspend_exit;
 		msm_spi_pm_suspend_runtime(device);
+
+		/*
+		 * set the device's runtime PM status to 'suspended'
+		 */
+		pm_runtime_disable(device);
+		pm_runtime_set_suspended(device);
+		pm_runtime_enable(device);
 	}
 suspend_exit:
 	return 0;
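The block added to the spi_qsd system-suspend path re-synchronises the runtime-PM bookkeeping with the hardware state: after the controller is forced down via msm_spi_pm_suspend_runtime(), the device's runtime status is set to 'suspended' so a later runtime-PM resume actually powers the hardware back up instead of being treated as a no-op. The idiom uses only standard runtime-PM calls:

	pm_runtime_disable(dev);	/* quiesce runtime PM transitions */
	pm_runtime_set_suspended(dev);	/* record the real (powered-down) state */
	pm_runtime_enable(dev);		/* re-arm runtime PM for resume */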
diff --git a/drivers/spmi/qpnp-int.c b/drivers/spmi/qpnp-int.c
index eedb1e5..03e9021 100644
--- a/drivers/spmi/qpnp-int.c
+++ b/drivers/spmi/qpnp-int.c
@@ -167,21 +167,20 @@
 		pr_err_ratelimited("%s: decode failed on hwirq %lu\n",
 							__func__, d->hwirq);
 		return rc;
-	} else {
-		if (irq_d->priv_d == QPNPINT_INVALID_DATA) {
-			rc = chip_d->cb->register_priv_data(chip_d->spmi_ctrl,
-						&q_spec, &irq_d->priv_d);
-			if (rc) {
-				pr_err_ratelimited(
-					"%s: decode failed on hwirq %lu\n",
-					__func__, d->hwirq);
-				return rc;
-			}
-
-		}
-		arb_op(chip_d->spmi_ctrl, &q_spec, irq_d->priv_d);
 	}
 
+	if (irq_d->priv_d == QPNPINT_INVALID_DATA) {
+		rc = chip_d->cb->register_priv_data(chip_d->spmi_ctrl,
+					&q_spec, &irq_d->priv_d);
+		if (rc) {
+			pr_err_ratelimited(
+				"%s: decode failed on hwirq %lu rc = %d\n",
+				__func__, d->hwirq, rc);
+			return rc;
+		}
+	}
+	arb_op(chip_d->spmi_ctrl, &q_spec, irq_d->priv_d);
+
 	return 0;
 }
 
@@ -191,6 +190,7 @@
 	struct q_chip_data *chip_d = irq_d->chip_d;
 	struct q_perip_data *per_d = irq_d->per_d;
 	int rc;
+	uint8_t prev_int_en = per_d->int_en;
 
 	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
 
@@ -201,10 +201,16 @@
 		return;
 	}
 
-	qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
-
 	per_d->int_en &= ~irq_d->mask_shift;
 
+	if (prev_int_en && !(per_d->int_en)) {
+		/*
+		 * No interrupts on this peripheral are enabled;
+		 * ask the arbiter to ignore this peripheral.
+		 */
+		qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
+	}
+
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 					(u8 *)&irq_d->mask_shift, 1);
 	if (rc) {
@@ -221,6 +227,7 @@
 	struct q_chip_data *chip_d = irq_d->chip_d;
 	struct q_perip_data *per_d = irq_d->per_d;
 	int rc;
+	uint8_t prev_int_en = per_d->int_en;
 
 	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
 
@@ -231,10 +238,16 @@
 		return;
 	}
 
-	qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
-
 	per_d->int_en &= ~irq_d->mask_shift;
 
+	if (prev_int_en && !(per_d->int_en)) {
+		/*
+		 * No interrupts on this peripheral are enabled;
+		 * ask the arbiter to ignore this peripheral.
+		 */
+		qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
+	}
+
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 							&irq_d->mask_shift, 1);
 	if (rc) {
@@ -256,6 +269,7 @@
 	struct q_chip_data *chip_d = irq_d->chip_d;
 	struct q_perip_data *per_d = irq_d->per_d;
 	int rc;
+	uint8_t prev_int_en = per_d->int_en;
 
 	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
 
@@ -266,9 +280,15 @@
 		return;
 	}
 
-	qpnpint_arbiter_op(d, irq_d, chip_d->cb->unmask);
-
 	per_d->int_en |= irq_d->mask_shift;
+	if (!prev_int_en && per_d->int_en) {
+		/*
+		 * No interrupt on this peripheral was enabled prior to
+		 * this call; ask the arbiter to enable interrupts for
+		 * this peripheral.
+		 */
+		qpnpint_arbiter_op(d, irq_d, chip_d->cb->unmask);
+	}
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET,
 					&irq_d->mask_shift, 1);
 	if (rc) {
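The qpnp-int changes above stop telling the PMIC arbiter to mask or unmask the whole peripheral on every per-IRQ operation. per_d->int_en is treated as a bitmap of the enabled interrupts within the peripheral, and the arbiter is touched only on the non-zero -> zero transition when masking (the unmask path is symmetric, acting on the zero -> non-zero transition). Schematically, the mask side is:

	u8 prev = per_d->int_en;

	per_d->int_en &= ~irq_d->mask_shift;
	if (prev && !per_d->int_en)
		/* last enabled IRQ just went away: drop the whole peripheral */
		qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);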
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 05a4806..f85a576 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -101,6 +101,9 @@
 #define PMIC_ARB_APID_MASK		0xFF
 #define PMIC_ARB_PPID_MASK		0xFFF
 
+/* interrupt enable bit */
+#define SPMI_PIC_ACC_ENABLE_BIT		BIT(0)
+
 /**
  * base - base address of the PMIC Arbiter core registers.
  * intr - base address of the SPMI interrupt control registers
@@ -434,8 +437,10 @@
 
 	spin_lock_irqsave(&pmic_arb->lock, flags);
 	status = readl_relaxed(pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
-	if (!status) {
-		writel_relaxed(0x1, pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
+	if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
+		status = status | SPMI_PIC_ACC_ENABLE_BIT;
+		writel_relaxed(status,
+				pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
 		/* Interrupt needs to be enabled before returning to caller */
 		wmb();
 	}
@@ -467,8 +472,11 @@
 
 	spin_lock_irqsave(&pmic_arb->lock, flags);
 	status = readl_relaxed(pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
-	if (status) {
-		writel_relaxed(0x0, pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
+	if (status & SPMI_PIC_ACC_ENABLE_BIT) {
+		/* clear the enable bit and write */
+		status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
+		writel_relaxed(status,
+				pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
 		/* Interrupt needs to be disabled before returning to caller */
 		wmb();
 	}
@@ -480,7 +488,7 @@
 periph_interrupt(struct spmi_pmic_arb_dev *pmic_arb, u8 apid)
 {
 	u16 ppid = get_peripheral_id(pmic_arb, apid);
-	void __iomem *base = pmic_arb->intr;
+	void __iomem *intr = pmic_arb->intr;
 	u8 sid = (ppid >> 8) & 0x0F;
 	u8 pid = ppid & 0xFF;
 	u32 status;
@@ -491,11 +499,20 @@
 		/* return IRQ_NONE; */
 	}
 
+	status = readl_relaxed(intr + SPMI_PIC_ACC_ENABLE(apid));
+	if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
+		/*
+		 * All interrupts from this peripheral are disabled;
+		 * don't bother calling the qpnpint handler.
+		 */
+		return IRQ_HANDLED;
+	}
+
 	/* Read the peripheral specific interrupt bits */
-	status = readl_relaxed(base + SPMI_PIC_IRQ_STATUS(apid));
+	status = readl_relaxed(intr + SPMI_PIC_IRQ_STATUS(apid));
 
 	/* Clear the peripheral interrupts */
-	writel_relaxed(status, base + SPMI_PIC_IRQ_CLEAR(apid));
+	writel_relaxed(status, intr + SPMI_PIC_IRQ_CLEAR(apid));
 	/* Interrupt needs to be cleared/acknowledged before exiting ISR */
 	mb();
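The arbiter-side counterpart replaces the blind 0x1/0x0 writes to SPMI_PIC_ACC_ENABLE(apid) with a read-modify-write of bit 0, which preserves any other bits the register may carry and lets periph_interrupt() skip peripherals whose accumulator interrupt is disabled. The enable path boils down to:

	u32 status = readl_relaxed(pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));

	if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
		writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT,
			       pmic_arb->intr + SPMI_PIC_ACC_ENABLE(apid));
		wmb();	/* enable must hit the hardware before returning */
	}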
 
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index fc21fbb..348ed3e 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -363,21 +363,27 @@
 static inline int
 spmi_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid)
 {
-	BUG_ON(!ctrl || !ctrl->cmd);
+	if (!ctrl || !ctrl->cmd || ctrl->dev.type != &spmi_ctrl_type)
+		return -EINVAL;
+
 	return ctrl->cmd(ctrl, opcode, sid);
 }
 
 static inline int spmi_read_cmd(struct spmi_controller *ctrl,
 				u8 opcode, u8 sid, u16 addr, u8 bc, u8 *buf)
 {
-	BUG_ON(!ctrl || !ctrl->read_cmd);
+	if (!ctrl || !ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type)
+		return -EINVAL;
+
 	return ctrl->read_cmd(ctrl, opcode, sid, addr, bc, buf);
 }
 
 static inline int spmi_write_cmd(struct spmi_controller *ctrl,
 				u8 opcode, u8 sid, u16 addr, u8 bc, u8 *buf)
 {
-	BUG_ON(!ctrl || !ctrl->write_cmd);
+	if (!ctrl || !ctrl->write_cmd || ctrl->dev.type != &spmi_ctrl_type)
+		return -EINVAL;
+
 	return ctrl->write_cmd(ctrl, opcode, sid, addr, bc, buf);
 }
 
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index 974cadc..8c3b291 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -37,7 +37,8 @@
 #define MAX_RAILS 5
 
 static struct msm_thermal_data msm_thermal_info;
-static uint32_t limited_max_freq = MSM_CPUFREQ_NO_LIMIT;
+static uint32_t limited_max_freq = UINT_MAX;
+static uint32_t limited_min_freq;
 static struct delayed_work check_temp_work;
 static bool core_control_enabled;
 static uint32_t cpus_offlined;
@@ -139,6 +140,25 @@
 
 #define PSM_REG_MODE_FROM_ATTRIBS(attr) \
 	(container_of(attr, struct psm_rail, mode_attr));
+
+static int  msm_thermal_cpufreq_callback(struct notifier_block *nfb,
+		unsigned long event, void *data)
+{
+	struct cpufreq_policy *policy = data;
+
+	switch (event) {
+	case CPUFREQ_INCOMPATIBLE:
+		cpufreq_verify_within_limits(policy, limited_min_freq,
+				limited_max_freq);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block msm_thermal_cpufreq_notifier = {
+	.notifier_call = msm_thermal_cpufreq_callback,
+};
+
 /* If freq table exists, then we can send freq request */
 static int check_freq_table(void)
 {
@@ -159,37 +179,26 @@
 {
 	int cpu = 0;
 	int ret = 0;
-	struct cpufreq_policy *policy = NULL;
 
 	if (!freq_table_get) {
 		ret = check_freq_table();
 		if (ret) {
-			pr_err("%s:Fail to get freq table\n", __func__);
+			pr_err("%s:Fail to get freq table\n", KBUILD_MODNAME);
 			return ret;
 		}
 	}
 	/* If min is larger than allowed max */
-	if (min != MSM_CPUFREQ_NO_LIMIT &&
-			min > table[limit_idx_high].frequency)
-		min = table[limit_idx_high].frequency;
+	min = min(min, table[limit_idx_high].frequency);
 
-	for_each_possible_cpu(cpu) {
-		ret = msm_cpufreq_set_freq_limits(cpu, min, limited_max_freq);
-		if (ret) {
-			pr_err("%s:Fail to set limits for cpu%d\n",
-					__func__, cpu);
-			return ret;
-		}
+	limited_min_freq = min;
 
-		if (cpu_online(cpu)) {
-			policy = cpufreq_cpu_get(cpu);
-			if (!policy)
-				continue;
-			cpufreq_driver_target(policy, policy->cur,
-					CPUFREQ_RELATION_L);
-			cpufreq_cpu_put(policy);
-		}
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		if (cpufreq_update_policy(cpu))
+			pr_info("%s: Unable to update policy for cpu:%d\n",
+				KBUILD_MODNAME, cpu);
 	}
+	put_online_cpus();
 
 	return ret;
 }
@@ -584,26 +593,19 @@
 {
 	int ret = 0;
 
-	ret = msm_cpufreq_set_freq_limits(cpu, MSM_CPUFREQ_NO_LIMIT, max_freq);
-	if (ret)
-		return ret;
-
-	limited_max_freq = max_freq;
-	if (max_freq != MSM_CPUFREQ_NO_LIMIT)
+	if (max_freq != UINT_MAX)
 		pr_info("%s: Limiting cpu%d max frequency to %d\n",
 				KBUILD_MODNAME, cpu, max_freq);
 	else
 		pr_info("%s: Max frequency reset for cpu%d\n",
 				KBUILD_MODNAME, cpu);
-
-	if (cpu_online(cpu)) {
-		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-		if (!policy)
-			return ret;
-		ret = cpufreq_driver_target(policy, policy->cur,
-				CPUFREQ_RELATION_H);
-		cpufreq_cpu_put(policy);
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		if (cpufreq_update_policy(cpu))
+			pr_info("%s: Unable to update policy for cpu:%d\n",
+				KBUILD_MODNAME, cpu);
 	}
+	put_online_cpus();
 
 	return ret;
 }
@@ -764,7 +766,6 @@
 
 static void __ref do_freq_control(long temp)
 {
-	int ret = 0;
 	int cpu = 0;
 	uint32_t max_freq = limited_max_freq;
 
@@ -784,7 +785,7 @@
 		limit_idx += msm_thermal_info.freq_step;
 		if (limit_idx >= limit_idx_high) {
 			limit_idx = limit_idx_high;
-			max_freq = MSM_CPUFREQ_NO_LIMIT;
+			max_freq = UINT_MAX;
 		} else
 			max_freq = table[limit_idx].frequency;
 	}
@@ -792,17 +793,13 @@
 	if (max_freq == limited_max_freq)
 		return;
 
+	limited_max_freq = max_freq;
 	/* Update new limits */
 	for_each_possible_cpu(cpu) {
 		if (!(msm_thermal_info.freq_control_mask & BIT(cpu)))
 			continue;
-		ret = update_cpu_max_freq(cpu, max_freq);
-		if (ret)
-			pr_debug(
-			"%s: Unable to limit cpu%d max freq to %d\n",
-					KBUILD_MODNAME, cpu, max_freq);
+		update_cpu_max_freq(cpu, max_freq);
 	}
-
 }
 
 static void __ref check_temp(struct work_struct *work)
@@ -909,12 +906,17 @@
 	cancel_delayed_work(&check_temp_work);
 	flush_scheduled_work();
 
-	if (limited_max_freq == MSM_CPUFREQ_NO_LIMIT)
+	if (limited_max_freq == UINT_MAX)
 		return;
 
-	for_each_possible_cpu(cpu) {
-		update_cpu_max_freq(cpu, MSM_CPUFREQ_NO_LIMIT);
+	limited_max_freq = UINT_MAX;
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		if (cpufreq_update_policy(cpu))
+			pr_info("%s: Unable to update policy for cpu:%d\n",
+				KBUILD_MODNAME, cpu);
 	}
+	put_online_cpus();
 }
 
 static int __ref set_enabled(const char *val, const struct kernel_param *kp)
@@ -926,7 +928,7 @@
 		disable_msm_thermal();
 	else
 		pr_info("%s: no action for enabled = %d\n",
-				KBUILD_MODNAME, enabled);
+			KBUILD_MODNAME, enabled);
 
 	pr_info("%s: enabled = %d\n", KBUILD_MODNAME, enabled);
 
@@ -1190,6 +1192,11 @@
 		return -EINVAL;
 
 	enabled = 1;
+	ret = cpufreq_register_notifier(&msm_thermal_cpufreq_notifier,
+			CPUFREQ_POLICY_NOTIFIER);
+	if (ret)
+		pr_err("%s: cannot register cpufreq notifier\n",
+			KBUILD_MODNAME);
 	if (num_possible_cpus() > 1)
 		core_control_enabled = 1;
 	INIT_DELAYED_WORK(&check_temp_work, check_temp);
@@ -1505,7 +1512,7 @@
 		key = "qcom,freq-req";
 		rails[i].freq_req = of_property_read_bool(child_node, key);
 		if (rails[i].freq_req)
-			rails[i].min_level = MSM_CPUFREQ_NO_LIMIT;
+			rails[i].min_level = 0;
 		else {
 			key = "qcom,min-level";
 			ret = of_property_read_u32(child_node, key,
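The frequency-mitigation rework above drops the private msm_cpufreq_set_freq_limits() hook in favour of the generic cpufreq policy notifier: limited_min_freq/limited_max_freq are clamped into every policy via cpufreq_verify_within_limits() on CPUFREQ_INCOMPATIBLE, and the driver only has to nudge each online CPU with cpufreq_update_policy(). A hedged sketch of how a new cap would be applied under that scheme; the function name is illustrative, the real paths are update_cpu_max_freq() and do_freq_control() above:

static void example_apply_thermal_cap(uint32_t new_max)
{
	int cpu;

	/* UINT_MAX now stands in for "no limit" */
	limited_max_freq = new_max;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		/* re-evaluating the policy re-runs the notifier clamp */
		if (cpufreq_update_policy(cpu))
			pr_info("example: policy update failed for cpu%d\n", cpu);
	}
	put_online_cpus();
}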
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 3b0ef8c..5b78cdd 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -171,6 +171,7 @@
 
 struct qpnp_adc_tm_sensor {
 	struct thermal_zone_device	*tz_dev;
+	struct qpnp_adc_tm_chip		*chip;
 	enum thermal_device_mode	mode;
 	uint32_t			sensor_num;
 	enum qpnp_adc_meas_timer_select	timer_select;
@@ -187,14 +188,18 @@
 	uint32_t			scale_type;
 };
 
-struct qpnp_adc_tm_drv {
+struct qpnp_adc_tm_chip {
 	struct qpnp_adc_drv		*adc;
+	struct list_head		list;
 	bool				adc_tm_initialized;
 	int				max_channels_available;
+	struct qpnp_vadc_chip		*vadc_dev;
+	struct work_struct		trigger_high_thr_work;
+	struct work_struct		trigger_low_thr_work;
 	struct qpnp_adc_tm_sensor	sensor[0];
 };
 
-struct qpnp_adc_tm_drv	*qpnp_adc_tm;
+LIST_HEAD(qpnp_adc_tm_device_list);
 
 struct qpnp_adc_tm_trip_reg_type {
 	uint16_t low_thr_lsb_addr;
@@ -257,67 +262,79 @@
 	[SCALE_RPMIC_THERM] = {qpnp_adc_scale_millidegc_pmic_voltage_thr},
 };
 
-static int32_t qpnp_adc_tm_read_reg(int16_t reg, u8 *data)
+static int32_t qpnp_adc_tm_read_reg(struct qpnp_adc_tm_chip *chip,
+						int16_t reg, u8 *data)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	int rc = 0;
 
-	rc = spmi_ext_register_readl(adc_tm->adc->spmi->ctrl,
-		adc_tm->adc->slave, (adc_tm->adc->offset + reg), data, 1);
+	rc = spmi_ext_register_readl(chip->adc->spmi->ctrl,
+		chip->adc->slave, (chip->adc->offset + reg), data, 1);
 	if (rc < 0)
 		pr_err("adc-tm read reg %d failed with %d\n", reg, rc);
 
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_write_reg(int16_t reg, u8 data)
+static int32_t qpnp_adc_tm_write_reg(struct qpnp_adc_tm_chip *chip,
+							int16_t reg, u8 data)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	int rc = 0;
 	u8 *buf;
 
 	buf = &data;
 
-	rc = spmi_ext_register_writel(adc_tm->adc->spmi->ctrl,
-		adc_tm->adc->slave, (adc_tm->adc->offset + reg), buf, 1);
+	rc = spmi_ext_register_writel(chip->adc->spmi->ctrl,
+		chip->adc->slave, (chip->adc->offset + reg), buf, 1);
 	if (rc < 0)
 		pr_err("adc-tm write reg %d failed with %d\n", reg, rc);
 
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_enable(void)
+static int32_t qpnp_adc_tm_enable(struct qpnp_adc_tm_chip *chip)
 {
 	int rc = 0;
 	u8 data = 0;
 
 	data = QPNP_ADC_TM_EN;
-	rc = qpnp_adc_tm_write_reg(QPNP_EN_CTL1, data);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data);
 	if (rc < 0)
 		pr_err("adc-tm enable failed\n");
 
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_disable(void)
+static int32_t qpnp_adc_tm_disable(struct qpnp_adc_tm_chip *chip)
 {
 	u8 data = 0;
 	int rc = 0;
 
-	rc = qpnp_adc_tm_write_reg(QPNP_EN_CTL1, data);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data);
 	if (rc < 0)
 		pr_err("adc-tm disable failed\n");
 
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_enable_if_channel_meas(void)
+static int qpnp_adc_tm_is_valid(struct qpnp_adc_tm_chip *chip)
+{
+	struct qpnp_adc_tm_chip *adc_tm_chip = NULL;
+
+	list_for_each_entry(adc_tm_chip, &qpnp_adc_tm_device_list, list)
+		if (chip == adc_tm_chip)
+			return 0;
+
+	return -EINVAL;
+}
+
+static int32_t qpnp_adc_tm_enable_if_channel_meas(
+					struct qpnp_adc_tm_chip *chip)
 {
 	u8 adc_tm_meas_en = 0;
 	int rc = 0;
 
 	/* Check if a measurement request is still required */
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_MULTI_MEAS_EN,
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
 							&adc_tm_meas_en);
 	if (rc) {
 		pr_err("adc-tm-tm read status high failed with %d\n", rc);
@@ -326,10 +343,11 @@
 
 	/* Enable only if there are pending measurement requests */
 	if (adc_tm_meas_en) {
-		qpnp_adc_tm_enable();
+		qpnp_adc_tm_enable(chip);
 
 		/* Request conversion */
-		rc = qpnp_adc_tm_write_reg(QPNP_CONV_REQ, QPNP_CONV_REQ_SET);
+		rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ,
+							QPNP_CONV_REQ_SET);
 		if (rc < 0) {
 			pr_err("adc-tm request conversion failed\n");
 			return rc;
@@ -339,13 +357,13 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_req_sts_check(void)
+static int32_t qpnp_adc_tm_req_sts_check(struct qpnp_adc_tm_chip *chip)
 {
 	u8 status1;
 	int rc, count = 0;
 
 	/* The VADC_TM bank needs to be disabled for new conversion request */
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_STATUS1, &status1);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1);
 	if (rc) {
 		pr_err("adc-tm read status1 failed\n");
 		return rc;
@@ -353,7 +371,7 @@
 
 	/* Disable the bank if a conversion is occurring */
 	while ((status1 & QPNP_STATUS1_REQ_STS) && (count < 5)) {
-		rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_STATUS1, &status1);
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1);
 		if (rc < 0)
 			pr_err("adc-tm disable failed\n");
 		/* Wait time is based on the optimum sampling rate
@@ -366,18 +384,19 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_check_revision(uint32_t btm_chan_num)
+static int32_t qpnp_adc_tm_check_revision(struct qpnp_adc_tm_chip *chip,
+							uint32_t btm_chan_num)
 {
 	u8 rev, perph_subtype;
 	int rc = 0;
 
-	rc = qpnp_adc_tm_read_reg(QPNP_REVISION3, &rev);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_REVISION3, &rev);
 	if (rc) {
 		pr_err("adc-tm revision read failed\n");
 		return rc;
 	}
 
-	rc = qpnp_adc_tm_read_reg(QPNP_PERPH_SUBTYPE, &perph_subtype);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_PERPH_SUBTYPE, &perph_subtype);
 	if (rc) {
 		pr_err("adc-tm perph_subtype read failed\n");
 		return rc;
@@ -393,21 +412,23 @@
 
 	return rc;
 }
-static int32_t qpnp_adc_tm_mode_select(u8 mode_ctl)
+static int32_t qpnp_adc_tm_mode_select(struct qpnp_adc_tm_chip *chip,
+								u8 mode_ctl)
 {
 	int rc;
 
 	mode_ctl |= (QPNP_ADC_TRIM_EN | QPNP_AMUX_TRIM_EN);
 
 	/* VADC_BTM current sets mode to recurring measurements */
-	rc = qpnp_adc_tm_write_reg(QPNP_MODE_CTL, mode_ctl);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_MODE_CTL, mode_ctl);
 	if (rc < 0)
 		pr_err("adc-tm write mode selection err\n");
 
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_timer_interval_select(uint32_t btm_chan,
+static int32_t qpnp_adc_tm_timer_interval_select(
+		struct qpnp_adc_tm_chip *chip, uint32_t btm_chan,
 		struct qpnp_vadc_chan_properties *chan_prop)
 {
 	int rc;
@@ -416,7 +437,8 @@
 	/* Configure kernel clients to timer1 */
 	switch (chan_prop->timer_select) {
 	case ADC_MEAS_TIMER_SELECT1:
-		rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_MEAS_INTERVAL_CTL,
+		rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL,
 				chan_prop->meas_interval1);
 		if (rc < 0) {
 			pr_err("timer1 configure failed\n");
@@ -425,7 +447,8 @@
 	break;
 	case ADC_MEAS_TIMER_SELECT2:
 		/* Thermal channels uses timer2, default to 1 second */
-		rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+		rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
 				&meas_interval_timer2);
 		if (rc < 0) {
 			pr_err("timer2 configure read failed\n");
@@ -434,7 +457,8 @@
 		meas_interval_timer2 |=
 			(chan_prop->meas_interval2 <<
 			QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT);
-		rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
 			meas_interval_timer2);
 		if (rc < 0) {
 			pr_err("timer2 configure failed\n");
@@ -442,7 +466,8 @@
 		}
 	break;
 	case ADC_MEAS_TIMER_SELECT3:
-		rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+		rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
 				&meas_interval_timer2);
 		if (rc < 0) {
 			pr_err("timer3 read failed\n");
@@ -450,7 +475,8 @@
 		}
 		chan_prop->meas_interval2 = ADC_MEAS3_INTERVAL_1S;
 		meas_interval_timer2 |= chan_prop->meas_interval2;
-		rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
 			meas_interval_timer2);
 		if (rc < 0) {
 			pr_err("timer3 configure failed\n");
@@ -468,13 +494,13 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_reg_update(uint16_t addr,
-		u8 mask, bool state)
+static int32_t qpnp_adc_tm_reg_update(struct qpnp_adc_tm_chip *chip,
+		uint16_t addr, u8 mask, bool state)
 {
 	u8 reg_value = 0;
 	int rc = 0;
 
-	rc = qpnp_adc_tm_read_reg(addr, &reg_value);
+	rc = qpnp_adc_tm_read_reg(chip, addr, &reg_value);
 	if (rc < 0) {
 		pr_err("read failed for addr:0x%x\n", addr);
 		return rc;
@@ -486,7 +512,7 @@
 
 	pr_debug("state:%d, reg:0x%x with bits:0x%x and mask:0x%x\n",
 					state, addr, reg_value, ~mask);
-	rc = qpnp_adc_tm_write_reg(addr, reg_value);
+	rc = qpnp_adc_tm_write_reg(chip, addr, reg_value);
 	if (rc < 0) {
 		pr_err("write failed for addr:%x\n", addr);
 		return rc;
@@ -495,12 +521,13 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_thr_update(uint32_t btm_chan,
+static int32_t qpnp_adc_tm_thr_update(struct qpnp_adc_tm_chip *chip,
+			uint32_t btm_chan,
 			struct qpnp_vadc_chan_properties *chan_prop)
 {
 	int rc = 0;
 
-	rc = qpnp_adc_tm_write_reg(
+	rc = qpnp_adc_tm_write_reg(chip,
 			adc_tm_data[btm_chan].low_thr_lsb_addr,
 			QPNP_ADC_TM_THR_LSB_MASK(chan_prop->low_thr));
 	if (rc < 0) {
@@ -508,7 +535,7 @@
 		return rc;
 	}
 
-	rc = qpnp_adc_tm_write_reg(
+	rc = qpnp_adc_tm_write_reg(chip,
 		adc_tm_data[btm_chan].low_thr_msb_addr,
 		QPNP_ADC_TM_THR_MSB_MASK(chan_prop->low_thr));
 	if (rc < 0) {
@@ -516,7 +543,7 @@
 		return rc;
 	}
 
-	rc = qpnp_adc_tm_write_reg(
+	rc = qpnp_adc_tm_write_reg(chip,
 		adc_tm_data[btm_chan].high_thr_lsb_addr,
 		QPNP_ADC_TM_THR_LSB_MASK(chan_prop->high_thr));
 	if (rc < 0) {
@@ -524,7 +551,7 @@
 		return rc;
 	}
 
-	rc = qpnp_adc_tm_write_reg(
+	rc = qpnp_adc_tm_write_reg(chip,
 		adc_tm_data[btm_chan].high_thr_msb_addr,
 		QPNP_ADC_TM_THR_MSB_MASK(chan_prop->high_thr));
 	if (rc < 0)
@@ -536,17 +563,17 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_channel_configure(uint32_t btm_chan,
+static int32_t qpnp_adc_tm_channel_configure(struct qpnp_adc_tm_chip *chip,
+			uint32_t btm_chan,
 			struct qpnp_vadc_chan_properties *chan_prop,
 			uint32_t amux_channel)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	int rc = 0, i = 0, chan_idx = 0;
 	bool chan_found = false;
 	u8 sensor_mask = 0;
 
-	while (i < adc_tm->max_channels_available) {
-		if (adc_tm->sensor[i].btm_channel_num == btm_chan) {
+	while (i < chip->max_channels_available) {
+		if (chip->sensor[i].btm_channel_num == btm_chan) {
 			chan_idx = i;
 			chan_found = true;
 			i++;
@@ -560,9 +587,9 @@
 	}
 
 	sensor_mask = 1 << chan_idx;
-	if (!adc_tm->sensor[chan_idx].thermal_node) {
+	if (!chip->sensor[chan_idx].thermal_node) {
 		/* Update low and high notification thresholds */
-		rc = qpnp_adc_tm_thr_update(btm_chan,
+		rc = qpnp_adc_tm_thr_update(chip, btm_chan,
 				chan_prop);
 		if (rc < 0) {
 			pr_err("setting chan:%d threshold failed\n", btm_chan);
@@ -576,7 +603,7 @@
 			pr_debug("low sensor mask:%x with state:%d\n",
 					sensor_mask, chan_prop->state_request);
 			/* Enable low threshold's interrupt */
-			rc = qpnp_adc_tm_reg_update(
+			rc = qpnp_adc_tm_reg_update(chip,
 				QPNP_ADC_TM_LOW_THR_INT_EN, sensor_mask, true);
 			if (rc < 0) {
 				pr_err("low thr enable err:%d\n", btm_chan);
@@ -590,7 +617,7 @@
 					ADC_TM_HIGH_LOW_THR_ENABLE)) {
 			/* Enable high threshold's interrupt */
 			pr_debug("high sensor mask:%x\n", sensor_mask);
-			rc = qpnp_adc_tm_reg_update(
+			rc = qpnp_adc_tm_reg_update(chip,
 				QPNP_ADC_TM_HIGH_THR_INT_EN, sensor_mask, true);
 			if (rc < 0) {
 				pr_err("high thr enable err:%d\n", btm_chan);
@@ -600,7 +627,7 @@
 	}
 
 	/* Enable corresponding BTM channel measurement */
-	rc = qpnp_adc_tm_reg_update(
+	rc = qpnp_adc_tm_reg_update(chip,
 		QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, true);
 	if (rc < 0) {
 		pr_err("multi measurement en failed\n");
@@ -610,7 +637,7 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_configure(
+static int32_t qpnp_adc_tm_configure(struct qpnp_adc_tm_chip *chip,
 			struct qpnp_adc_amux_properties *chan_prop)
 {
 	u8 decimation = 0, op_cntrl = 0;
@@ -618,19 +645,19 @@
 	uint32_t btm_chan = 0;
 
 	/* Disable bank */
-	rc = qpnp_adc_tm_disable();
+	rc = qpnp_adc_tm_disable(chip);
 	if (rc)
 		return rc;
 
 	/* Check if a conversion is in progress */
-	rc = qpnp_adc_tm_req_sts_check();
+	rc = qpnp_adc_tm_req_sts_check(chip);
 	if (rc < 0) {
 		pr_err("adc-tm req_sts check failed\n");
 		return rc;
 	}
 
 	/* Set measurement in recurring mode */
-	rc = qpnp_adc_tm_mode_select(chan_prop->mode_sel);
+	rc = qpnp_adc_tm_mode_select(chip, chan_prop->mode_sel);
 	if (rc < 0) {
 		pr_err("adc-tm mode select failed\n");
 		return rc;
@@ -638,7 +665,7 @@
 
 	/* Configure AMUX channel select for the corresponding BTM channel*/
 	btm_chan = chan_prop->chan_prop->tm_channel_select;
-	rc = qpnp_adc_tm_write_reg(btm_chan, chan_prop->amux_channel);
+	rc = qpnp_adc_tm_write_reg(chip, btm_chan, chan_prop->amux_channel);
 	if (rc < 0) {
 		pr_err("adc-tm channel selection err\n");
 		return rc;
@@ -647,14 +674,14 @@
 	/* Digital parameter setup */
 	decimation |= chan_prop->decimation <<
 				QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT;
-	rc = qpnp_adc_tm_write_reg(QPNP_ADC_DIG_PARAM, decimation);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_DIG_PARAM, decimation);
 	if (rc < 0) {
 		pr_err("adc-tm digital parameter setup err\n");
 		return rc;
 	}
 
 	/* Hardware settling time */
-	rc = qpnp_adc_tm_write_reg(QPNP_HW_SETTLE_DELAY,
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_HW_SETTLE_DELAY,
 					chan_prop->hw_settle_time);
 	if (rc < 0) {
 		pr_err("adc-tm hw settling time setup err\n");
@@ -662,7 +689,7 @@
 	}
 
 	/* Fast averaging setup */
-	rc = qpnp_adc_tm_write_reg(QPNP_FAST_AVG_CTL,
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_CTL,
 					chan_prop->fast_avg_setup);
 	if (rc < 0) {
 		pr_err("adc-tm fast-avg setup err\n");
@@ -670,7 +697,7 @@
 	}
 
 	/* Measurement interval setup */
-	rc = qpnp_adc_tm_timer_interval_select(btm_chan,
+	rc = qpnp_adc_tm_timer_interval_select(chip, btm_chan,
 						chan_prop->chan_prop);
 	if (rc < 0) {
 		pr_err("adc-tm timer select failed\n");
@@ -678,17 +705,18 @@
 	}
 
 	/* Channel configuration setup */
-	rc = qpnp_adc_tm_channel_configure(btm_chan, chan_prop->chan_prop,
-					chan_prop->amux_channel);
+	rc = qpnp_adc_tm_channel_configure(chip, btm_chan,
+			chan_prop->chan_prop, chan_prop->amux_channel);
 	if (rc < 0) {
 		pr_err("adc-tm channel configure failed\n");
 		return rc;
 	}
 
 	/* Recurring interval measurement enable */
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_MEAS_INTERVAL_OP_CTL, &op_cntrl);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
+								&op_cntrl);
 	op_cntrl |= QPNP_ADC_MEAS_INTERVAL_OP;
-	rc = qpnp_adc_tm_reg_update(QPNP_ADC_MEAS_INTERVAL_OP_CTL,
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
 			op_cntrl, true);
 	if (rc < 0) {
 		pr_err("adc-tm meas interval op configure failed\n");
@@ -696,12 +724,12 @@
 	}
 
 	/* Enable bank */
-	rc = qpnp_adc_tm_enable();
+	rc = qpnp_adc_tm_enable(chip);
 	if (rc)
 		return rc;
 
 	/* Request conversion */
-	rc = qpnp_adc_tm_write_reg(QPNP_CONV_REQ, QPNP_CONV_REQ_SET);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ, QPNP_CONV_REQ_SET);
 	if (rc < 0) {
 		pr_err("adc-tm request conversion failed\n");
 		return rc;
@@ -713,13 +741,13 @@
 static int qpnp_adc_tm_get_mode(struct thermal_zone_device *thermal,
 			      enum thermal_device_mode *mode)
 {
-	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
 
-	if (!adc_tm_sensor || qpnp_adc_tm_check_revision(
-			adc_tm_sensor->btm_channel_num) || !mode)
+	if ((IS_ERR(adc_tm)) || qpnp_adc_tm_check_revision(
+			adc_tm->chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
-	*mode = adc_tm_sensor->mode;
+	*mode = adc_tm->mode;
 
 	return 0;
 }
@@ -728,35 +756,38 @@
 			      enum thermal_device_mode mode)
 {
 	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
-	struct qpnp_adc_tm_drv *adc_drv = qpnp_adc_tm;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 	int rc = 0, channel;
 	u8 sensor_mask = 0;
 
-	if (!adc_tm || qpnp_adc_tm_check_revision(adc_tm->btm_channel_num))
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
 	if (mode == THERMAL_DEVICE_ENABLED) {
-		adc_drv->adc->amux_prop->amux_channel =
+		chip->adc->amux_prop->amux_channel =
 					adc_tm->vadc_channel_num;
 		channel = adc_tm->sensor_num;
-		adc_drv->adc->amux_prop->decimation =
-			adc_drv->adc->adc_channels[channel].adc_decimation;
-		adc_drv->adc->amux_prop->hw_settle_time =
-			adc_drv->adc->adc_channels[channel].hw_settle_time;
-		adc_drv->adc->amux_prop->fast_avg_setup =
-			adc_drv->adc->adc_channels[channel].fast_avg_setup;
-		adc_drv->adc->amux_prop->mode_sel =
+		chip->adc->amux_prop->decimation =
+			chip->adc->adc_channels[channel].adc_decimation;
+		chip->adc->amux_prop->hw_settle_time =
+			chip->adc->adc_channels[channel].hw_settle_time;
+		chip->adc->amux_prop->fast_avg_setup =
+			chip->adc->adc_channels[channel].fast_avg_setup;
+		chip->adc->amux_prop->mode_sel =
 			ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
-		adc_drv->adc->amux_prop->chan_prop->timer_select =
+		chip->adc->amux_prop->chan_prop->timer_select =
 					ADC_MEAS_TIMER_SELECT1;
-		adc_drv->adc->amux_prop->chan_prop->meas_interval1 =
+		chip->adc->amux_prop->chan_prop->meas_interval1 =
 						ADC_MEAS1_INTERVAL_1S;
-		adc_drv->adc->amux_prop->chan_prop->low_thr = adc_tm->low_thr;
-		adc_drv->adc->amux_prop->chan_prop->high_thr = adc_tm->high_thr;
-		adc_drv->adc->amux_prop->chan_prop->tm_channel_select =
+		chip->adc->amux_prop->chan_prop->low_thr = adc_tm->low_thr;
+		chip->adc->amux_prop->chan_prop->high_thr = adc_tm->high_thr;
+		chip->adc->amux_prop->chan_prop->tm_channel_select =
 			adc_tm->btm_channel_num;
 
-		rc = qpnp_adc_tm_configure(adc_drv->adc->amux_prop);
+		rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
 		if (rc) {
 			pr_err("adc-tm tm configure failed with %d\n", rc);
 			return -EINVAL;
@@ -764,27 +795,27 @@
 	} else if (mode == THERMAL_DEVICE_DISABLED) {
 		sensor_mask = 1 << adc_tm->sensor_num;
 		/* Disable bank */
-		rc = qpnp_adc_tm_disable();
+		rc = qpnp_adc_tm_disable(chip);
 		if (rc < 0) {
 			pr_err("adc-tm disable failed\n");
 			return rc;
 		}
 
 		/* Check if a conversion is in progress */
-		rc = qpnp_adc_tm_req_sts_check();
+		rc = qpnp_adc_tm_req_sts_check(chip);
 		if (rc < 0) {
 			pr_err("adc-tm req_sts check failed\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_MULTI_MEAS_EN,
-			sensor_mask, false);
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, false);
 		if (rc < 0) {
 			pr_err("multi measurement update failed\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_enable_if_channel_meas();
+		rc = qpnp_adc_tm_enable_if_channel_meas(chip);
 		if (rc < 0) {
 			pr_err("re-enabling measurement failed\n");
 			return rc;
@@ -800,9 +831,12 @@
 				   int trip, enum thermal_trip_type *type)
 {
 	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 
-	if (!adc_tm || qpnp_adc_tm_check_revision(adc_tm->btm_channel_num)
-						|| !type || type < 0)
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
 	switch (trip) {
@@ -823,15 +857,17 @@
 				   int trip, unsigned long *temp)
 {
 	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
+	struct qpnp_adc_tm_chip *chip = adc_tm_sensor->chip;
 	int64_t result = 0;
 	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
 	unsigned int reg, rc = 0, btm_channel_num;
 	uint16_t reg_low_thr_lsb, reg_low_thr_msb;
 	uint16_t reg_high_thr_lsb, reg_high_thr_msb;
 
-	if (!adc_tm || qpnp_adc_tm_check_revision(
-					adc_tm_sensor->btm_channel_num))
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm_sensor->btm_channel_num))
 		return -EINVAL;
 
 	btm_channel_num = adc_tm_sensor->btm_channel_num;
@@ -842,13 +878,15 @@
 
 	switch (trip) {
 	case ADC_TM_TRIP_HIGH_WARM:
-		rc = qpnp_adc_tm_read_reg(reg_low_thr_lsb, &trip_warm_thr0);
+		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_lsb,
+							&trip_warm_thr0);
 		if (rc) {
 			pr_err("adc-tm low_thr_lsb err\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_read_reg(reg_low_thr_msb, &trip_warm_thr1);
+		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_msb,
+							&trip_warm_thr1);
 		if (rc) {
 			pr_err("adc-tm low_thr_msb err\n");
 			return rc;
@@ -856,13 +894,15 @@
 	reg = (trip_warm_thr1 << 8) | trip_warm_thr0;
 	break;
 	case ADC_TM_TRIP_LOW_COOL:
-		rc = qpnp_adc_tm_read_reg(reg_high_thr_lsb, &trip_cool_thr0);
+		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_lsb,
+							&trip_cool_thr0);
 		if (rc) {
 			pr_err("adc-tm_tm high_thr_lsb err\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_read_reg(reg_high_thr_msb, &trip_cool_thr1);
+		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_msb,
+							&trip_cool_thr1);
 		if (rc) {
 			pr_err("adc-tm_tm high_thr_lsb err\n");
 			return rc;
@@ -873,7 +913,8 @@
 		return -EINVAL;
 	}
 
-	rc = qpnp_adc_tm_scale_voltage_therm_pu2(reg, &result);
+	rc = qpnp_adc_tm_scale_voltage_therm_pu2(chip->vadc_dev, reg,
+								&result);
 	if (rc < 0) {
 		pr_err("Failed to lookup the therm thresholds\n");
 		return rc;
@@ -887,19 +928,21 @@
 static int qpnp_adc_tm_set_trip_temp(struct thermal_zone_device *thermal,
 				   int trip, long temp)
 {
-	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 	struct qpnp_adc_tm_config tm_config;
 	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
 	uint16_t reg_low_thr_lsb, reg_low_thr_msb;
 	uint16_t reg_high_thr_lsb, reg_high_thr_msb;
 	int rc = 0, btm_channel_num;
 
-	if (!adc_tm || qpnp_adc_tm_check_revision(
-				adc_tm_sensor->btm_channel_num))
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
-	tm_config.channel = adc_tm_sensor->vadc_channel_num;
+	tm_config.channel = adc_tm->vadc_channel_num;
 	switch (trip) {
 	case ADC_TM_TRIP_HIGH_WARM:
 		tm_config.high_thr_temp = temp;
@@ -913,7 +956,7 @@
 
 	pr_debug("requested a high - %d and low - %d with trip - %d\n",
 			tm_config.high_thr_temp, tm_config.low_thr_temp, trip);
-	rc = qpnp_adc_tm_scale_therm_voltage_pu2(&tm_config);
+	rc = qpnp_adc_tm_scale_therm_voltage_pu2(chip->vadc_dev, &tm_config);
 	if (rc < 0) {
 		pr_err("Failed to lookup the adc-tm thresholds\n");
 		return rc;
@@ -924,7 +967,7 @@
 	trip_cool_thr0 = ((tm_config.high_thr_voltage << 24) >> 24);
 	trip_cool_thr1 = ((tm_config.high_thr_voltage << 16) >> 24);
 
-	btm_channel_num = adc_tm_sensor->btm_channel_num;
+	btm_channel_num = adc_tm->btm_channel_num;
 	reg_low_thr_lsb = adc_tm_data[btm_channel_num].low_thr_lsb_addr;
 	reg_low_thr_msb = adc_tm_data[btm_channel_num].low_thr_msb_addr;
 	reg_high_thr_lsb = adc_tm_data[btm_channel_num].high_thr_lsb_addr;
@@ -932,32 +975,36 @@
 
 	switch (trip) {
 	case ADC_TM_TRIP_HIGH_WARM:
-		rc = qpnp_adc_tm_write_reg(reg_low_thr_lsb, trip_cool_thr0);
+		rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_lsb,
+							trip_cool_thr0);
 		if (rc) {
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_write_reg(reg_low_thr_msb, trip_cool_thr1);
+		rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_msb,
+							trip_cool_thr1);
 		if (rc) {
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
-	adc_tm_sensor->low_thr = tm_config.high_thr_voltage;
+	adc_tm->low_thr = tm_config.high_thr_voltage;
 	break;
 	case ADC_TM_TRIP_LOW_COOL:
-		rc = qpnp_adc_tm_write_reg(reg_high_thr_lsb, trip_warm_thr0);
+		rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_lsb,
+							trip_warm_thr0);
 		if (rc) {
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
 
-		rc = qpnp_adc_tm_write_reg(reg_high_thr_msb, trip_warm_thr1);
+		rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_msb,
+							trip_warm_thr1);
 		if (rc) {
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
-	adc_tm_sensor->high_thr = tm_config.low_thr_voltage;
+	adc_tm->high_thr = tm_config.low_thr_voltage;
 	break;
 	default:
 		return -EINVAL;
@@ -1032,11 +1079,15 @@
 			int trip, enum thermal_trip_activation_mode mode)
 {
 	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 	int rc = 0, sensor_mask = 0;
 	u8 thr_int_en = 0;
 	bool state = false;
 
-	if (!adc_tm || qpnp_adc_tm_check_revision(adc_tm->btm_channel_num))
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
 	if (mode == THERMAL_TRIP_ACTIVATION_ENABLED)
@@ -1051,7 +1102,7 @@
 		/* low_thr (lower voltage) for higher temp */
 		thr_int_en = adc_tm_data[adc_tm->btm_channel_num].
 							low_thr_int_chan_en;
-		rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_LOW_THR_INT_EN,
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
 				sensor_mask, state);
 		if (rc)
 			pr_err("channel:%x failed\n", adc_tm->btm_channel_num);
@@ -1060,7 +1111,7 @@
 		/* high_thr (higher voltage) for cooler temp */
 		thr_int_en = adc_tm_data[adc_tm->btm_channel_num].
 							high_thr_int_chan_en;
-		rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_HIGH_THR_INT_EN,
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
 				sensor_mask, state);
 		if (rc)
 			pr_err("channel:%x failed\n", adc_tm->btm_channel_num);
@@ -1072,32 +1123,31 @@
 	return rc;
 }
 
-static int qpnp_adc_tm_read_status(void)
+static int qpnp_adc_tm_read_status(struct qpnp_adc_tm_chip *chip)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	u8 status_low = 0, status_high = 0, qpnp_adc_tm_meas_en = 0;
 	u8 adc_tm_low_enable = 0, adc_tm_high_enable = 0;
 	u8 sensor_mask = 0;
 	int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0, btm_chan_num;
 
-	if (!adc_tm || !adc_tm->adc_tm_initialized)
+	if (qpnp_adc_tm_is_valid(chip))
 		return -ENODEV;
 
-	mutex_lock(&adc_tm->adc->adc_lock);
+	mutex_lock(&chip->adc->adc_lock);
 
-	rc = qpnp_adc_tm_req_sts_check();
+	rc = qpnp_adc_tm_req_sts_check(chip);
 	if (rc) {
 		pr_err("adc-tm-tm req sts check failed with %d\n", rc);
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_STATUS_LOW, &status_low);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_LOW, &status_low);
 	if (rc) {
 		pr_err("adc-tm-tm read status low failed with %d\n", rc);
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_STATUS_HIGH, &status_high);
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_HIGH, &status_high);
 	if (rc) {
 		pr_err("adc-tm-tm read status high failed with %d\n", rc);
 		goto fail;
@@ -1105,7 +1155,7 @@
 
 	/* Check which interrupt threshold is lower and measure against the
 	 * enabled channel */
-	rc = qpnp_adc_tm_read_reg(QPNP_ADC_TM_MULTI_MEAS_EN,
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
 							&qpnp_adc_tm_meas_en);
 	if (rc) {
 		pr_err("adc-tm-tm read status high failed with %d\n", rc);
@@ -1117,37 +1167,37 @@
 
 	if (adc_tm_high_enable) {
 		sensor_notify_num = adc_tm_high_enable;
-		while (i < adc_tm->max_channels_available) {
+		while (i < chip->max_channels_available) {
 			if ((sensor_notify_num & 0x1) == 1)
 				sensor_num = i;
 			sensor_notify_num >>= 1;
 			i++;
 		}
 
-		btm_chan_num = adc_tm->sensor[sensor_num].btm_channel_num;
+		btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
 		pr_debug("high:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
 			sensor_num, adc_tm_high_enable, adc_tm_low_enable,
 			qpnp_adc_tm_meas_en);
-		if (!adc_tm->sensor[sensor_num].thermal_node) {
+		if (!chip->sensor[sensor_num].thermal_node) {
 			/* For non thermal registered clients
 				such as usb_id, vbatt, pmic_therm */
 			sensor_mask = 1 << sensor_num;
 			pr_debug("non thermal node - mask:%x\n", sensor_mask);
-			rc = qpnp_adc_tm_reg_update(
+			rc = qpnp_adc_tm_reg_update(chip,
 				QPNP_ADC_TM_HIGH_THR_INT_EN,
 				sensor_mask, false);
 			if (rc < 0) {
 				pr_err("high threshold int read failed\n");
 				goto fail;
 			}
-			adc_tm->sensor[sensor_num].high_thr_notify = true;
+			chip->sensor[sensor_num].high_thr_notify = true;
 		} else {
 			/* Uses the thermal sysfs registered device to disable
 				the corresponding high voltage threshold which
 				 is triggered by low temp */
 			pr_debug("thermal node with mask:%x\n", sensor_mask);
 			rc = qpnp_adc_tm_activate_trip_type(
-				adc_tm->sensor[sensor_num].tz_dev,
+				chip->sensor[sensor_num].tz_dev,
 				ADC_TM_TRIP_LOW_COOL,
 				THERMAL_TRIP_ACTIVATION_DISABLED);
 			if (rc < 0) {
@@ -1160,37 +1210,37 @@
 	if (adc_tm_low_enable) {
 		sensor_notify_num = adc_tm_low_enable;
 		i = 0;
-		while (i < adc_tm->max_channels_available) {
+		while (i < chip->max_channels_available) {
 			if ((sensor_notify_num & 0x1) == 1)
 				sensor_num = i;
 			sensor_notify_num >>= 1;
 			i++;
 		}
 
-		btm_chan_num = adc_tm->sensor[sensor_num].btm_channel_num;
+		btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
 		pr_debug("low:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
 			sensor_num, adc_tm_high_enable, adc_tm_low_enable,
 			qpnp_adc_tm_meas_en);
-		if (!adc_tm->sensor[sensor_num].thermal_node) {
+		if (!chip->sensor[sensor_num].thermal_node) {
 			/* For non thermal registered clients
 				such as usb_id, vbatt, pmic_therm */
 			pr_debug("non thermal node - mask:%x\n", sensor_mask);
 			sensor_mask = 1 << sensor_num;
-			rc = qpnp_adc_tm_reg_update(
+			rc = qpnp_adc_tm_reg_update(chip,
 				QPNP_ADC_TM_LOW_THR_INT_EN,
 				sensor_mask, false);
 			if (rc < 0) {
 				pr_err("low threshold int read failed\n");
 				goto fail;
 			}
-			adc_tm->sensor[sensor_num].low_thr_notify = true;
+			chip->sensor[sensor_num].low_thr_notify = true;
 		} else {
 			/* Uses the thermal sysfs registered device to disable
 				the corresponding low voltage threshold which
 				 is triggered by high temp */
 			pr_debug("thermal node with mask:%x\n", sensor_mask);
 			rc = qpnp_adc_tm_activate_trip_type(
-				adc_tm->sensor[sensor_num].tz_dev,
+				chip->sensor[sensor_num].tz_dev,
 				ADC_TM_TRIP_HIGH_WARM,
 				THERMAL_TRIP_ACTIVATION_DISABLED);
 			if (rc < 0) {
@@ -1201,14 +1251,14 @@
 	}
 
 	if (adc_tm_high_enable || adc_tm_low_enable) {
-		rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_MULTI_MEAS_EN,
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
 			sensor_mask, false);
 		if (rc < 0) {
 			pr_err("multi meas disable for channel failed\n");
 			goto fail;
 		}
 
-		rc = qpnp_adc_tm_enable_if_channel_meas();
+		rc = qpnp_adc_tm_enable_if_channel_meas(chip);
 		if (rc < 0) {
 			pr_err("re-enabling measurement failed\n");
 			return rc;
@@ -1218,62 +1268,67 @@
 								sensor_mask);
 
 fail:
-	mutex_unlock(&adc_tm->adc->adc_lock);
+	mutex_unlock(&chip->adc->adc_lock);
 
 	if (adc_tm_high_enable || adc_tm_low_enable)
-		schedule_work(&adc_tm->sensor[sensor_num].work);
+		schedule_work(&chip->sensor[sensor_num].work);
 
 	return rc;
 }
 
 static void qpnp_adc_tm_high_thr_work(struct work_struct *work)
 {
+	struct qpnp_adc_tm_chip *chip = container_of(work,
+			struct qpnp_adc_tm_chip, trigger_high_thr_work);
 	int rc;
 
-	rc = qpnp_adc_tm_read_status();
+	rc = qpnp_adc_tm_read_status(chip);
 	if (rc < 0)
 		pr_err("adc-tm high thr work failed\n");
 
 	return;
 }
-DECLARE_WORK(trigger_completion_adc_tm_high_thr_work,
-					qpnp_adc_tm_high_thr_work);
 
 static irqreturn_t qpnp_adc_tm_high_thr_isr(int irq, void *data)
 {
-	qpnp_adc_tm_disable();
+	struct qpnp_adc_tm_chip *chip = data;
 
-	schedule_work(&trigger_completion_adc_tm_high_thr_work);
+	qpnp_adc_tm_disable(chip);
+
+	schedule_work(&chip->trigger_high_thr_work);
 
 	return IRQ_HANDLED;
 }
 
 static void qpnp_adc_tm_low_thr_work(struct work_struct *work)
 {
+	struct qpnp_adc_tm_chip *chip = container_of(work,
+			struct qpnp_adc_tm_chip, trigger_low_thr_work);
 	int rc;
 
-	rc = qpnp_adc_tm_read_status();
+	rc = qpnp_adc_tm_read_status(chip);
 	if (rc < 0)
 		pr_err("adc-tm low thr work failed\n");
 
 	return;
 }
-DECLARE_WORK(trigger_completion_adc_tm_low_thr_work, qpnp_adc_tm_low_thr_work);
 
 static irqreturn_t qpnp_adc_tm_low_thr_isr(int irq, void *data)
 {
-	qpnp_adc_tm_disable();
+	struct qpnp_adc_tm_chip *chip = data;
 
-	schedule_work(&trigger_completion_adc_tm_low_thr_work);
+	qpnp_adc_tm_disable(chip);
+
+	schedule_work(&chip->trigger_low_thr_work);
 
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t qpnp_adc_tm_isr(int irq, void *dev_id)
+static irqreturn_t qpnp_adc_tm_isr(int irq, void *data)
 {
-	struct qpnp_adc_tm_drv *adc_tm = dev_id;
+	struct qpnp_adc_tm_chip *chip = data;
 
-	complete(&adc_tm->adc->adc_rslt_completion);
+	complete(&chip->adc->adc_rslt_completion);
 
 	return IRQ_HANDLED;
 }
@@ -1282,10 +1337,12 @@
 			     unsigned long *temp)
 {
 	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm_sensor->chip;
 	struct qpnp_vadc_result result;
 	int rc = 0;
 
-	rc = qpnp_vadc_read(adc_tm_sensor->vadc_channel_num, &result);
+	rc = qpnp_vadc_read(chip->vadc_dev,
+				adc_tm_sensor->vadc_channel_num, &result);
 	if (rc)
 		return rc;
 
@@ -1304,14 +1361,14 @@
 	.set_trip_temp = qpnp_adc_tm_set_trip_temp,
 };
 
-int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_btm_param *param)
+int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	uint32_t channel, dt_index = 0, scale_type = 0;
 	int rc = 0, i = 0;
 	bool chan_found = false;
 
-	if (!adc_tm || !adc_tm->adc_tm_initialized)
+	if (qpnp_adc_tm_is_valid(chip))
 		return -ENODEV;
 
 	if (param->threshold_notification == NULL) {
@@ -1319,11 +1376,11 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&adc_tm->adc->adc_lock);
+	mutex_lock(&chip->adc->adc_lock);
 
 	channel = param->channel;
-	while (i < adc_tm->max_channels_available) {
-		if (adc_tm->adc->adc_channels[i].channel_num ==
+	while (i < chip->max_channels_available) {
+		if (chip->adc->adc_channels[i].channel_num ==
 							channel) {
 			dt_index = i;
 			chan_found = true;
@@ -1338,12 +1395,12 @@
 		goto fail_unlock;
 	}
 
-	rc = qpnp_adc_tm_check_revision(
-			adc_tm->sensor[dt_index].btm_channel_num);
+	rc = qpnp_adc_tm_check_revision(chip,
+			chip->sensor[dt_index].btm_channel_num);
 	if (rc < 0)
 		goto fail_unlock;
 
-	scale_type = adc_tm->adc->adc_channels[dt_index].adc_scale_fn;
+	scale_type = chip->adc->adc_channels[dt_index].adc_scale_fn;
 	if (scale_type >= SCALE_RSCALE_NONE) {
 		rc = -EBADF;
 		goto fail_unlock;
@@ -1351,155 +1408,158 @@
 
 	pr_debug("channel:%d, scale_type:%d, dt_idx:%d",
 					channel, scale_type, dt_index);
-	adc_tm->adc->amux_prop->amux_channel = channel;
-	adc_tm->adc->amux_prop->decimation =
-			adc_tm->adc->adc_channels[dt_index].adc_decimation;
-	adc_tm->adc->amux_prop->hw_settle_time =
-			adc_tm->adc->adc_channels[dt_index].hw_settle_time;
-	adc_tm->adc->amux_prop->fast_avg_setup =
-			adc_tm->adc->adc_channels[dt_index].fast_avg_setup;
-	adc_tm->adc->amux_prop->mode_sel =
+	chip->adc->amux_prop->amux_channel = channel;
+	chip->adc->amux_prop->decimation =
+			chip->adc->adc_channels[dt_index].adc_decimation;
+	chip->adc->amux_prop->hw_settle_time =
+			chip->adc->adc_channels[dt_index].hw_settle_time;
+	chip->adc->amux_prop->fast_avg_setup =
+			chip->adc->adc_channels[dt_index].fast_avg_setup;
+	chip->adc->amux_prop->mode_sel =
 		ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
-	adc_tm->adc->amux_prop->chan_prop->meas_interval1 =
+	chip->adc->amux_prop->chan_prop->meas_interval1 =
 						ADC_MEAS1_INTERVAL_1S;
-	adc_tm_rscale_fn[scale_type].chan(param,
-			&adc_tm->adc->amux_prop->chan_prop->low_thr,
-			&adc_tm->adc->amux_prop->chan_prop->high_thr);
-	adc_tm->adc->amux_prop->chan_prop->tm_channel_select =
-				adc_tm->sensor[dt_index].btm_channel_num;
-	adc_tm->adc->amux_prop->chan_prop->timer_select =
+	adc_tm_rscale_fn[scale_type].chan(chip->vadc_dev, param,
+			&chip->adc->amux_prop->chan_prop->low_thr,
+			&chip->adc->amux_prop->chan_prop->high_thr);
+	chip->adc->amux_prop->chan_prop->tm_channel_select =
+				chip->sensor[dt_index].btm_channel_num;
+	chip->adc->amux_prop->chan_prop->timer_select =
 					ADC_MEAS_TIMER_SELECT1;
-	adc_tm->adc->amux_prop->chan_prop->state_request =
+	chip->adc->amux_prop->chan_prop->state_request =
 					param->state_request;
-	rc = qpnp_adc_tm_configure(adc_tm->adc->amux_prop);
+	rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
 	if (rc) {
 		pr_err("adc-tm configure failed with %d\n", rc);
 		goto fail_unlock;
 	}
 
-	adc_tm->sensor[dt_index].btm_param = param;
-	adc_tm->sensor[dt_index].scale_type = scale_type;
+	chip->sensor[dt_index].btm_param = param;
+	chip->sensor[dt_index].scale_type = scale_type;
 
 fail_unlock:
-	mutex_unlock(&adc_tm->adc->adc_lock);
+	mutex_unlock(&chip->adc->adc_lock);
 
 	return rc;
 }
 EXPORT_SYMBOL(qpnp_adc_tm_channel_measure);
 
-int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_btm_param *param)
+int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
 	uint32_t channel, dt_index = 0, btm_chan_num;
 	u8 sensor_mask = 0;
 	int rc = 0;
 
-	if (!adc_tm || !adc_tm->adc_tm_initialized)
+	if (qpnp_adc_tm_is_valid(chip))
 		return -ENODEV;
 
-	mutex_lock(&adc_tm->adc->adc_lock);
+	mutex_lock(&chip->adc->adc_lock);
 
 	/* Disable bank */
-	rc = qpnp_adc_tm_disable();
+	rc = qpnp_adc_tm_disable(chip);
 	if (rc < 0) {
 		pr_err("adc-tm disable failed\n");
 		goto fail;
 	}
 
 	/* Check if a conversion is in progress */
-	rc = qpnp_adc_tm_req_sts_check();
+	rc = qpnp_adc_tm_req_sts_check(chip);
 	if (rc < 0) {
 		pr_err("adc-tm req_sts check failed\n");
 		goto fail;
 	}
 
 	channel = param->channel;
-	while ((adc_tm->adc->adc_channels[dt_index].channel_num
-		!= channel) && (dt_index < adc_tm->max_channels_available))
+	while ((chip->adc->adc_channels[dt_index].channel_num
+		!= channel) && (dt_index < chip->max_channels_available))
 		dt_index++;
 
-	if (dt_index >= adc_tm->max_channels_available) {
+	if (dt_index >= chip->max_channels_available) {
 		pr_err("not a valid ADC_TMN channel\n");
 		rc = -EINVAL;
 		goto fail;
 	}
 
-	btm_chan_num = adc_tm->sensor[dt_index].btm_channel_num;
-	sensor_mask = 1 << adc_tm->sensor[dt_index].sensor_num;
+	btm_chan_num = chip->sensor[dt_index].btm_channel_num;
+	sensor_mask = 1 << chip->sensor[dt_index].sensor_num;
 
-	rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_LOW_THR_INT_EN,
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
 		sensor_mask, false);
 	if (rc < 0) {
 		pr_err("low threshold int write failed\n");
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_HIGH_THR_INT_EN,
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
 		sensor_mask, false);
 	if (rc < 0) {
 		pr_err("high threshold int enable failed\n");
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_reg_update(QPNP_ADC_TM_MULTI_MEAS_EN,
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
 		sensor_mask, false);
 	if (rc < 0) {
 		pr_err("multi measurement en failed\n");
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_enable_if_channel_meas();
+	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
 	if (rc < 0)
 		pr_err("re-enabling measurement failed\n");
 
 fail:
-	mutex_unlock(&adc_tm->adc->adc_lock);
+	mutex_unlock(&chip->adc->adc_lock);
 
 	return rc;
 }
 EXPORT_SYMBOL(qpnp_adc_tm_disable_chan_meas);
 
-int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_btm_param *param)
+int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
+				struct qpnp_adc_tm_btm_param *param)
 {
 	param->channel = LR_MUX10_PU2_AMUX_USB_ID_LV;
-	return qpnp_adc_tm_channel_measure(param);
+	return qpnp_adc_tm_channel_measure(chip, param);
 }
 EXPORT_SYMBOL(qpnp_adc_tm_usbid_configure);
 
-int32_t qpnp_adc_tm_usbid_end(void)
+int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip)
 {
 	struct qpnp_adc_tm_btm_param param;
 
-	return qpnp_adc_tm_disable_chan_meas(&param);
+	return qpnp_adc_tm_disable_chan_meas(chip, &param);
 }
 EXPORT_SYMBOL(qpnp_adc_tm_usbid_end);
 
-int32_t qpnp_adc_tm_is_ready(void)
+struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name)
 {
-	struct qpnp_adc_tm_drv *adc_tm = qpnp_adc_tm;
+	struct qpnp_adc_tm_chip *chip;
+	struct device_node *node = NULL;
+	char prop_name[QPNP_MAX_PROP_NAME_LEN];
 
-	if (!adc_tm || !adc_tm->adc_tm_initialized)
-		return -EPROBE_DEFER;
-	else
-		return 0;
+	snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-adc_tm", name);
+
+	node = of_parse_phandle(dev->of_node, prop_name, 0);
+	if (node == NULL)
+		return ERR_PTR(-ENODEV);
+
+	list_for_each_entry(chip, &qpnp_adc_tm_device_list, list)
+		if (chip->adc->spmi->dev.of_node == node)
+			return chip;
+
+	return ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL(qpnp_adc_tm_is_ready);
+EXPORT_SYMBOL(qpnp_get_adc_tm);
 
 static int __devinit qpnp_adc_tm_probe(struct spmi_device *spmi)
 {
 	struct device_node *node = spmi->dev.of_node, *child;
-	struct qpnp_adc_tm_drv *adc_tm;
+	struct qpnp_adc_tm_chip *chip;
 	struct qpnp_adc_drv *adc_qpnp;
-	int32_t count_adc_channel_list = 0, rc, sen_idx = 0;
+	int32_t count_adc_channel_list = 0, rc, sen_idx = 0, i = 0;
 	u8 thr_init = 0;
-
-	if (!node)
-		return -EINVAL;
-
-	if (qpnp_adc_tm) {
-		pr_err("adc-tm already in use\n");
-		return -EBUSY;
-	}
+	bool thermal_node = false;
 
 	for_each_child_of_node(node, child)
 		count_adc_channel_list++;
@@ -1509,16 +1569,15 @@
 		return -EINVAL;
 	}
 
-	adc_tm = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_tm_drv) +
+	chip = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_tm_chip) +
 			(count_adc_channel_list *
 			sizeof(struct qpnp_adc_tm_sensor)),
 				GFP_KERNEL);
-	if (!adc_tm) {
+	if (!chip) {
 		dev_err(&spmi->dev, "Unable to allocate memory\n");
 		return -ENOMEM;
 	}
 
-	qpnp_adc_tm = adc_tm;
 	adc_qpnp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_drv),
 			GFP_KERNEL);
 	if (!adc_qpnp) {
@@ -1527,67 +1586,74 @@
 		goto fail;
 	}
 
-	adc_tm->adc = adc_qpnp;
+	chip->adc = adc_qpnp;
 
-	rc = qpnp_adc_get_devicetree_data(spmi, adc_tm->adc);
+	rc = qpnp_adc_get_devicetree_data(spmi, chip->adc);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to read device tree\n");
 		goto fail;
 	}
-	mutex_init(&adc_tm->adc->adc_lock);
+	mutex_init(&chip->adc->adc_lock);
 
 	/* Register the ADC peripheral interrupt */
-	adc_tm->adc->adc_high_thr_irq = spmi_get_irq_byname(spmi,
+	chip->adc->adc_high_thr_irq = spmi_get_irq_byname(spmi,
 						NULL, "high-thr-en-set");
-	if (adc_tm->adc->adc_high_thr_irq < 0) {
+	if (chip->adc->adc_high_thr_irq < 0) {
 		pr_err("Invalid irq\n");
 		rc = -ENXIO;
 		goto fail;
 	}
 
-	adc_tm->adc->adc_low_thr_irq = spmi_get_irq_byname(spmi,
+	chip->adc->adc_low_thr_irq = spmi_get_irq_byname(spmi,
 						NULL, "low-thr-en-set");
-	if (adc_tm->adc->adc_low_thr_irq < 0) {
+	if (chip->adc->adc_low_thr_irq < 0) {
 		pr_err("Invalid irq\n");
 		rc = -ENXIO;
 		goto fail;
 	}
 
-	rc = devm_request_irq(&spmi->dev, adc_tm->adc->adc_irq_eoc,
+	chip->vadc_dev = qpnp_get_vadc(&spmi->dev, "adc_tm");
+	if (IS_ERR(chip->vadc_dev)) {
+		rc = PTR_ERR(chip->vadc_dev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("vadc property missing, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rc = devm_request_irq(&spmi->dev, chip->adc->adc_irq_eoc,
 				qpnp_adc_tm_isr, IRQF_TRIGGER_RISING,
-				"qpnp_adc_tm_interrupt", adc_tm);
+				"qpnp_adc_tm_interrupt", chip);
 	if (rc) {
 		dev_err(&spmi->dev,
 			"failed to request adc irq with error %d\n", rc);
 		goto fail;
 	} else {
-		enable_irq_wake(adc_tm->adc->adc_irq_eoc);
+		enable_irq_wake(chip->adc->adc_irq_eoc);
 	}
 
-	rc = devm_request_irq(&spmi->dev, adc_tm->adc->adc_high_thr_irq,
+	rc = devm_request_irq(&spmi->dev, chip->adc->adc_high_thr_irq,
 				qpnp_adc_tm_high_thr_isr,
-		IRQF_TRIGGER_RISING, "qpnp_adc_tm_high_interrupt", adc_tm);
+		IRQF_TRIGGER_RISING, "qpnp_adc_tm_high_interrupt", chip);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to request adc irq\n");
 		goto fail;
 	} else {
-		enable_irq_wake(adc_tm->adc->adc_high_thr_irq);
+		enable_irq_wake(chip->adc->adc_high_thr_irq);
 	}
 
-	rc = devm_request_irq(&spmi->dev, adc_tm->adc->adc_low_thr_irq,
+	rc = devm_request_irq(&spmi->dev, chip->adc->adc_low_thr_irq,
 				qpnp_adc_tm_low_thr_isr,
-		IRQF_TRIGGER_RISING, "qpnp_adc_tm_low_interrupt", adc_tm);
+		IRQF_TRIGGER_RISING, "qpnp_adc_tm_low_interrupt", chip);
 	if (rc) {
 		dev_err(&spmi->dev, "failed to request adc irq\n");
 		goto fail;
 	} else {
-		enable_irq_wake(adc_tm->adc->adc_low_thr_irq);
+		enable_irq_wake(chip->adc->adc_low_thr_irq);
 	}
 
 	for_each_child_of_node(node, child) {
 		char name[25];
 		int btm_channel_num;
-		bool thermal_node = false;
 
 		rc = of_property_read_u32(child,
 				"qcom,btm-channel-number", &btm_channel_num);
@@ -1595,80 +1661,95 @@
 			pr_err("Invalid btm channel number\n");
 			goto fail;
 		}
-		adc_tm->sensor[sen_idx].btm_channel_num = btm_channel_num;
-		adc_tm->sensor[sen_idx].vadc_channel_num =
-				adc_tm->adc->adc_channels[sen_idx].channel_num;
-		adc_tm->sensor[sen_idx].sensor_num = sen_idx;
+		chip->sensor[sen_idx].btm_channel_num = btm_channel_num;
+		chip->sensor[sen_idx].vadc_channel_num =
+				chip->adc->adc_channels[sen_idx].channel_num;
+		chip->sensor[sen_idx].sensor_num = sen_idx;
+		chip->sensor[sen_idx].chip = chip;
 		pr_debug("btm_chan:%x, vadc_chan:%x\n", btm_channel_num,
-			adc_tm->adc->adc_channels[sen_idx].channel_num);
+			chip->adc->adc_channels[sen_idx].channel_num);
 		thermal_node = of_property_read_bool(child,
 					"qcom,thermal-node");
 		if (thermal_node) {
 			/* Register with the thermal zone */
 			pr_debug("thermal node%x\n", btm_channel_num);
-			adc_tm->sensor[sen_idx].mode = THERMAL_DEVICE_DISABLED;
-			adc_tm->sensor[sen_idx].thermal_node = true;
+			chip->sensor[sen_idx].mode = THERMAL_DEVICE_DISABLED;
+			chip->sensor[sen_idx].thermal_node = true;
 			snprintf(name, sizeof(name),
-				adc_tm->adc->adc_channels[sen_idx].name);
-			adc_tm->sensor[sen_idx].meas_interval =
+				chip->adc->adc_channels[sen_idx].name);
+			chip->sensor[sen_idx].meas_interval =
 				QPNP_ADC_TM_MEAS_INTERVAL;
-			adc_tm->sensor[sen_idx].low_thr =
+			chip->sensor[sen_idx].low_thr =
 						QPNP_ADC_TM_M0_LOW_THR;
-			adc_tm->sensor[sen_idx].high_thr =
+			chip->sensor[sen_idx].high_thr =
 						QPNP_ADC_TM_M0_HIGH_THR;
-			adc_tm->sensor[sen_idx].tz_dev =
+			chip->sensor[sen_idx].tz_dev =
 				thermal_zone_device_register(name,
 				ADC_TM_TRIP_NUM,
-				&adc_tm->sensor[sen_idx],
+				&chip->sensor[sen_idx],
 				&qpnp_adc_tm_thermal_ops, 0, 0, 0, 0);
-			if (IS_ERR(adc_tm->sensor[sen_idx].tz_dev))
+			if (IS_ERR(chip->sensor[sen_idx].tz_dev))
 				pr_err("thermal device register failed.\n");
 		}
-		INIT_WORK(&adc_tm->sensor[sen_idx].work, notify_adc_tm_fn);
+		INIT_WORK(&chip->sensor[sen_idx].work, notify_adc_tm_fn);
 		sen_idx++;
 	}
-	adc_tm->max_channels_available = count_adc_channel_list;
-	dev_set_drvdata(&spmi->dev, adc_tm);
-	rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_HIGH_THR_INT_EN, thr_init);
+	chip->max_channels_available = count_adc_channel_list;
+	INIT_WORK(&chip->trigger_high_thr_work, qpnp_adc_tm_high_thr_work);
+	INIT_WORK(&chip->trigger_low_thr_work, qpnp_adc_tm_low_thr_work);
+	dev_set_drvdata(&spmi->dev, chip);
+	list_add(&chip->list, &qpnp_adc_tm_device_list);
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+								thr_init);
 	if (rc < 0) {
 		pr_err("high thr init failed\n");
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_LOW_THR_INT_EN, thr_init);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+								thr_init);
 	if (rc < 0) {
 		pr_err("low thr init failed\n");
 		goto fail;
 	}
 
-	rc = qpnp_adc_tm_write_reg(QPNP_ADC_TM_MULTI_MEAS_EN, thr_init);
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+								thr_init);
 	if (rc < 0) {
 		pr_err("multi meas en failed\n");
 		goto fail;
 	}
 
-	adc_tm->adc_tm_initialized = true;
-
 	pr_debug("OK\n");
 	return 0;
 fail:
-	qpnp_adc_tm = NULL;
+	for_each_child_of_node(node, child) {
+		thermal_node = of_property_read_bool(child,
+					"qcom,thermal-node");
+		if (thermal_node)
+			thermal_zone_device_unregister(chip->sensor[i].tz_dev);
+		i++;
+	}
+	dev_set_drvdata(&spmi->dev, NULL);
 	return rc;
 }
 
 static int __devexit qpnp_adc_tm_remove(struct spmi_device *spmi)
 {
-	struct qpnp_adc_tm_drv *adc_tm = dev_get_drvdata(&spmi->dev);
-	struct device_node *node = spmi->dev.of_node;
-	struct device_node *child;
+	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&spmi->dev);
+	struct device_node *node = spmi->dev.of_node, *child;
+	bool thermal_node = false;
 	int i = 0;
 
 	for_each_child_of_node(node, child) {
-		thermal_zone_device_unregister(adc_tm->sensor[i].tz_dev);
+		thermal_node = of_property_read_bool(child,
+					"qcom,thermal-node");
+		if (thermal_node)
+			thermal_zone_device_unregister(chip->sensor[i].tz_dev);
 		i++;
 	}
 
-	adc_tm->adc_tm_initialized = false;
 	dev_set_drvdata(&spmi->dev, NULL);
 
 	return 0;
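The driver now supports multiple ADC_TM peripherals: each probed chip is added to qpnp_adc_tm_device_list, and clients obtain a handle through qpnp_get_adc_tm(), which resolves a "qcom,<name>-adc_tm" phandle in the client's own device-tree node. A hypothetical client sketch -- the "usb" consumer name is an assumption; dwc3-msm further down is a real caller:

static struct qpnp_adc_tm_chip *example_adc_tm;

static int example_get_adc_tm(struct device *dev)
{
	/* looks up the "qcom,usb-adc_tm" property in dev->of_node */
	example_adc_tm = qpnp_get_adc_tm(dev, "usb");
	if (IS_ERR(example_adc_tm)) {
		int rc = PTR_ERR(example_adc_tm);

		example_adc_tm = NULL;
		return rc;	/* -EPROBE_DEFER until the ADC_TM chip probes */
	}

	/*
	 * The handle is now valid for the per-chip APIs, e.g.
	 * qpnp_adc_tm_channel_measure(example_adc_tm, &param).
	 */
	return 0;
}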
diff --git a/drivers/thermal/qpnp-temp-alarm.c b/drivers/thermal/qpnp-temp-alarm.c
index 499d67e..36b84c8 100644
--- a/drivers/thermal/qpnp-temp-alarm.c
+++ b/drivers/thermal/qpnp-temp-alarm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -93,6 +93,7 @@
 	enum qpnp_vadc_channels		adc_channel;
 	u16				base_addr;
 	bool				allow_software_override;
+	struct qpnp_vadc_chip		*vadc_dev;
 };
 
 /* Delay between TEMP_STAT IRQ going high and status value changing in ms. */
@@ -160,7 +161,7 @@
 	struct qpnp_vadc_result adc_result;
 	int rc;
 
-	rc = qpnp_vadc_read(chip->adc_channel, &adc_result);
+	rc = qpnp_vadc_read(chip->vadc_dev, chip->adc_channel, &adc_result);
 	if (!rc)
 		chip->temperature = adc_result.physical;
 	else
@@ -543,9 +544,12 @@
 				__func__, chip->adc_channel);
 		} else {
 			chip->adc_type = QPNP_TM_ADC_QPNP_ADC;
-			rc = qpnp_vadc_is_ready();
-			if (rc) {
-				/* Probe retry, do not print an error message */
+			chip->vadc_dev = qpnp_get_vadc(&spmi->dev,
+							"temp_alarm");
+			if (IS_ERR(chip->vadc_dev)) {
+				rc = PTR_ERR(chip->vadc_dev);
+				if (rc != -EPROBE_DEFER)
+					pr_err("vadc property missing\n");
 				goto err_cancel_work;
 			}
 		}
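qpnp-temp-alarm follows the same conversion: instead of polling qpnp_vadc_is_ready(), it resolves a per-chip VADC handle at probe time and passes it to qpnp_vadc_read(). A hedged consumer sketch for any driver making the same move; the "example" name is an assumption and presumably maps to a "qcom,example-vadc" phandle, and channel selection is left to the caller:

static int example_vadc_read(struct device *dev,
			     enum qpnp_vadc_channels chan, int64_t *out)
{
	struct qpnp_vadc_chip *vadc;
	struct qpnp_vadc_result res;
	int rc;

	vadc = qpnp_get_vadc(dev, "example");
	if (IS_ERR(vadc))
		return PTR_ERR(vadc);	/* includes -EPROBE_DEFER */

	rc = qpnp_vadc_read(vadc, chan, &res);
	if (!rc)
		*out = res.physical;	/* scaled physical value */

	return rc;
}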
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index c89f6d8..d3ea3be 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -204,6 +204,7 @@
 	int			pmic_id_irq;
 	struct work_struct	id_work;
 	struct qpnp_adc_tm_btm_param	adc_param;
+	struct qpnp_adc_tm_chip *adc_tm_dev;
 	struct delayed_work	init_adc_work;
 	bool			id_adc_detect;
 	u8			dcd_retries;
@@ -1709,7 +1710,8 @@
 	}
 
 	dcp = ((mdwc->charger.chg_type == DWC3_DCP_CHARGER) ||
-	      (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER));
+	      (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER) ||
+	      (mdwc->charger.chg_type == DWC3_FLOATED_CHARGER));
 	host_bus_suspend = mdwc->host_mode == 1;
 
 	if (!dcp && !host_bus_suspend)
@@ -1841,7 +1843,8 @@
 	}
 
 	dcp = ((mdwc->charger.chg_type == DWC3_DCP_CHARGER) ||
-	      (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER));
+	      (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER) ||
+	      (mdwc->charger.chg_type == DWC3_FLOATED_CHARGER));
 	host_bus_suspend = mdwc->host_mode == 1;
 
 	if (mdwc->lpm_flags & MDWC3_TCXO_SHUTDOWN) {
@@ -2354,7 +2357,7 @@
 	dwc3_id_work(&mdwc->id_work);
 
 	/* re-arm ADC interrupt */
-	qpnp_adc_tm_usbid_configure(&mdwc->adc_param);
+	qpnp_adc_tm_usbid_configure(mdwc->adc_tm_dev, &mdwc->adc_param);
 }
 
 static void dwc3_init_adc_work(struct work_struct *w)
@@ -2363,10 +2366,14 @@
 							init_adc_work.work);
 	int ret;
 
-	ret = qpnp_adc_tm_is_ready();
-	if (ret == -EPROBE_DEFER) {
-		queue_delayed_work(system_nrt_wq, to_delayed_work(w),
+	mdwc->adc_tm_dev = qpnp_get_adc_tm(mdwc->dev, "dwc_usb3-adc_tm");
+	if (IS_ERR(mdwc->adc_tm_dev)) {
+		if (PTR_ERR(mdwc->adc_tm_dev) == -EPROBE_DEFER)
+			queue_delayed_work(system_nrt_wq, to_delayed_work(w),
 					msecs_to_jiffies(100));
+		else
+			mdwc->adc_tm_dev = NULL;
+
 		return;
 	}
 
@@ -2377,7 +2384,7 @@
 	mdwc->adc_param.btm_ctx = mdwc;
 	mdwc->adc_param.threshold_notification = dwc3_adc_notification;
 
-	ret = qpnp_adc_tm_usbid_configure(&mdwc->adc_param);
+	ret = qpnp_adc_tm_usbid_configure(mdwc->adc_tm_dev, &mdwc->adc_param);
 	if (ret) {
 		dev_err(mdwc->dev, "%s: request ADC error %d\n", __func__, ret);
 		return;
@@ -2412,7 +2419,7 @@
 			dwc3_init_adc_work(&mdwc->init_adc_work.work);
 		return size;
 	} else if (!strnicmp(buf, "disable", 7)) {
-		qpnp_adc_tm_usbid_end();
+		qpnp_adc_tm_usbid_end(mdwc->adc_tm_dev);
 		mdwc->id_adc_detect = false;
 		return size;
 	}
@@ -3042,7 +3049,7 @@
 	}
 
 	if (mdwc->id_adc_detect)
-		qpnp_adc_tm_usbid_end();
+		qpnp_adc_tm_usbid_end(mdwc->adc_tm_dev);
 	if (dwc3_debugfs_root)
 		debugfs_remove_recursive(dwc3_debugfs_root);
 	if (mdwc->otg_xceiv) {
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 0d4d580..d0d9d34 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -727,7 +727,9 @@
 					work = 1;
 					break;
 				case DWC3_FLOATED_CHARGER:
-					dotg->charger_retry_count++;
+					if (dotg->charger_retry_count <
+							max_chgr_retry_count)
+						dotg->charger_retry_count++;
 					/*
 					 * In case of floating charger, if
 					 * retry count equal to max retry count
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
index d1b911a..bccc504 100644
--- a/drivers/usb/gadget/f_diag.c
+++ b/drivers/usb/gadget/f_diag.c
@@ -626,6 +626,7 @@
 		struct usb_function *f)
 {
 	struct diag_context *ctxt = func_to_diag(f);
+	unsigned long flags;
 
 	if (gadget_is_superspeed(c->cdev->gadget))
 		usb_free_descriptors(f->ss_descriptors);
@@ -643,7 +644,9 @@
 		ctxt->ch->priv_usb = NULL;
 	list_del(&ctxt->list_item);
 	/* Free any pending USB requests from last session */
+	spin_lock_irqsave(&ctxt->lock, flags);
 	free_reqs(ctxt);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
 	kfree(ctxt);
 }
 
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index a775459..b0b2f56 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -127,6 +127,8 @@
 
 	struct work_struct	connect_w;
 	struct work_struct	disconnect_w;
+	struct work_struct	suspend_w;
+	struct work_struct	resume_w;
 };
 
 static struct bam_portmaster {
@@ -542,7 +544,9 @@
 	struct bam_ch_info *d = &port->data_ch;
 	int status;
 
+	spin_lock(&port->port_lock_ul);
 	if (!port->port_usb) {
+		spin_unlock(&port->port_lock_ul);
 		pr_err("%s: port->port_usb is NULL", __func__);
 		return;
 	}
@@ -551,6 +555,7 @@
 	status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
 	if (status)
 		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
+	spin_unlock(&port->port_lock_ul);
 }
 
 static void gbam_start_endless_tx(struct gbam_port *port)
@@ -558,7 +563,9 @@
 	struct bam_ch_info *d = &port->data_ch;
 	int status;
 
+	spin_lock(&port->port_lock_dl);
 	if (!port->port_usb) {
+		spin_unlock(&port->port_lock_dl);
 		pr_err("%s: port->port_usb is NULL", __func__);
 		return;
 	}
@@ -567,6 +574,8 @@
 	status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
 	if (status)
 		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
+	spin_unlock(&port->port_lock_dl);
+
 }
 
 static void gbam_stop_endless_rx(struct gbam_port *port)
@@ -574,7 +583,9 @@
 	struct bam_ch_info *d = &port->data_ch;
 	int status;
 
+	spin_lock(&port->port_lock_ul);
 	if (!port->port_usb) {
+		spin_unlock(&port->port_lock_ul);
 		pr_err("%s: port->port_usb is NULL", __func__);
 		return;
 	}
@@ -583,14 +594,17 @@
 	status = usb_ep_dequeue(port->port_usb->out, d->rx_req);
 	if (status)
 		pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
-
+	spin_unlock(&port->port_lock_ul);
 }
+
 static void gbam_stop_endless_tx(struct gbam_port *port)
 {
 	struct bam_ch_info *d = &port->data_ch;
 	int status;
 
+	spin_lock(&port->port_lock_dl);
 	if (!port->port_usb) {
+		spin_unlock(&port->port_lock_dl);
 		pr_err("%s: port->port_usb is NULL", __func__);
 		return;
 	}
@@ -599,6 +613,7 @@
 	status = usb_ep_dequeue(port->port_usb->in, d->tx_req);
 	if (status)
 		pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
+	spin_unlock(&port->port_lock_dl);
 }
 
 static void gbam_start(void *param, enum usb_bam_pipe_dir dir)
@@ -893,6 +908,46 @@
 	pr_debug("%s: done\n", __func__);
 }
 
+static int gbam_wake_cb(void *param)
+{
+	struct gbam_port	*port = (struct gbam_port *)param;
+	struct bam_ch_info *d;
+	struct f_rmnet		*dev;
+
+	dev = port_to_rmnet(port->gr);
+	d = &port->data_ch;
+
+	pr_debug("%s: woken up by peer\n", __func__);
+
+	return usb_gadget_wakeup(dev->cdev->gadget);
+}
+
+static void gbam2bam_suspend_work(struct work_struct *w)
+{
+	struct gbam_port *port = container_of(w, struct gbam_port, suspend_w);
+	struct bam_ch_info *d = &port->data_ch;
+
+	pr_debug("%s: suspend work started\n", __func__);
+
+	usb_bam_register_wake_cb(d->dst_connection_idx, gbam_wake_cb, port);
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		usb_bam_register_start_stop_cbs(gbam_start, gbam_stop, port);
+		usb_bam_suspend(&d->ipa_params);
+	}
+}
+
+static void gbam2bam_resume_work(struct work_struct *w)
+{
+	struct gbam_port *port = container_of(w, struct gbam_port, resume_w);
+	struct bam_ch_info *d = &port->data_ch;
+
+	pr_debug("%s: resume work started\n", __func__);
+
+	usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+		usb_bam_resume(&d->ipa_params);
+}
+
 static int gbam_peer_reset_cb(void *param)
 {
 	struct gbam_port	*port = (struct gbam_port *)param;
@@ -1122,6 +1177,8 @@
 
 	INIT_WORK(&port->connect_w, gbam2bam_connect_work);
 	INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
+	INIT_WORK(&port->suspend_w, gbam2bam_suspend_work);
+	INIT_WORK(&port->resume_w, gbam2bam_resume_work);
 
 	/* data ch */
 	d = &port->data_ch;
@@ -1446,20 +1503,6 @@
 	return ret;
 }
 
-static int gbam_wake_cb(void *param)
-{
-	struct gbam_port	*port = (struct gbam_port *)param;
-	struct bam_ch_info *d;
-	struct f_rmnet		*dev;
-
-	dev = port_to_rmnet(port->gr);
-	d = &port->data_ch;
-
-	pr_debug("%s: woken up by peer\n", __func__);
-
-	return usb_gadget_wakeup(dev->cdev->gadget);
-}
-
 void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
 {
 	struct gbam_port	*port;
@@ -1474,11 +1517,7 @@
 
 	pr_debug("%s: suspended port %d\n", __func__, port_num);
 
-	usb_bam_register_wake_cb(d->dst_connection_idx, gbam_wake_cb, port);
-	if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
-		usb_bam_register_start_stop_cbs(gbam_start, gbam_stop, port);
-		usb_bam_suspend(&d->ipa_params);
-	}
+	queue_work(gbam_wq, &port->suspend_w);
 }
 
 void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
@@ -1495,7 +1534,5 @@
 
 	pr_debug("%s: resumed port %d\n", __func__, port_num);
 
-	usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
-	if (trans == USB_GADGET_XPORT_BAM2BAM_IPA)
-		usb_bam_resume(&d->ipa_params);
+	queue_work(gbam_wq, &port->resume_w);
 }
diff --git a/drivers/usb/gadget/u_bam_data.c b/drivers/usb/gadget/u_bam_data.c
index 577a4fe..b315605 100644
--- a/drivers/usb/gadget/u_bam_data.c
+++ b/drivers/usb/gadget/u_bam_data.c
@@ -62,10 +62,15 @@
 
 	struct work_struct		connect_w;
 	struct work_struct		disconnect_w;
+	struct work_struct		suspend_w;
+	struct work_struct		resume_w;
 };
 
 struct bam_data_port *bam2bam_data_ports[BAM2BAM_DATA_N_PORTS];
 
+static void bam2bam_data_suspend_work(struct work_struct *w);
+static void bam2bam_data_resume_work(struct work_struct *w);
+
 /*------------data_path----------------------------*/
 
 static void bam_data_endless_rx_complete(struct usb_ep *ep,
@@ -351,6 +356,8 @@
 
 	INIT_WORK(&port->connect_w, bam2bam_data_connect_work);
 	INIT_WORK(&port->disconnect_w, bam2bam_data_disconnect_work);
+	INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+	INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
 
 	/* data ch */
 	d = &port->data_ch;
@@ -578,12 +585,8 @@
 	d = &port->data_ch;
 
 	pr_debug("%s: suspended port %d\n", __func__, port_num);
-	usb_bam_register_wake_cb(d->dst_connection_idx, bam_data_wake_cb, port);
-	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
-		usb_bam_register_start_stop_cbs(bam_data_start, bam_data_stop,
-									port);
-		usb_bam_suspend(&d->ipa_params);
-	}
+
+	queue_work(bam_data_wq, &port->suspend_w);
 }
 
 void bam_data_resume(u8 port_num)
@@ -596,6 +599,34 @@
 	d = &port->data_ch;
 
 	pr_debug("%s: resumed port %d\n", __func__, port_num);
+
+	queue_work(bam_data_wq, &port->resume_w);
+}
+
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+	struct bam_data_port *port =
+			container_of(w, struct bam_data_port, suspend_w);
+	struct bam_data_ch_info *d = &port->data_ch;
+
+	pr_debug("%s: suspend work started\n", __func__);
+
+	usb_bam_register_wake_cb(d->dst_connection_idx, bam_data_wake_cb, port);
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		usb_bam_register_start_stop_cbs(bam_data_start, bam_data_stop,
+									port);
+		usb_bam_suspend(&d->ipa_params);
+	}
+}
+
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+	struct bam_data_port *port =
+			container_of(w, struct bam_data_port, resume_w);
+	struct bam_data_ch_info *d = &port->data_ch;
+
+	pr_debug("%s: resume work started\n", __func__);
+
 	usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
 	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA)
 		usb_bam_resume(&d->ipa_params);
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 253658e..d102c22 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -1122,10 +1122,16 @@
 	u32 cmd;
 	unsigned long flags;
 	int retries = 0, ret, cnt = RESET_SIGNAL_TIME_USEC;
+	s32 next_latency = 0;
 
-	if (pdata && pdata->swfi_latency)
-		pm_qos_update_request(&mehci->pm_qos_req_dma,
-			pdata->swfi_latency + 1);
+	if (pdata && pdata->swfi_latency) {
+		next_latency = pdata->swfi_latency + 1;
+		pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
+		if (pdata->standalone_latency)
+			next_latency = pdata->standalone_latency + 1;
+		else
+			next_latency = PM_QOS_DEFAULT_VALUE;
+	}
 
 	mehci->bus_reset = 1;
 
@@ -1196,9 +1202,8 @@
 	pr_debug("reset completed\n");
 fail:
 	mehci->bus_reset = 0;
-	if (pdata && pdata->swfi_latency)
-		pm_qos_update_request(&mehci->pm_qos_req_dma,
-			PM_QOS_DEFAULT_VALUE);
+	if (next_latency)
+		pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
 }
 
 static int ehci_hsic_bus_suspend(struct usb_hcd *hcd)
@@ -1229,19 +1234,27 @@
 	int			retry_cnt = 0;
 	int			tight_resume = 0;
 	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
+	s32 next_latency = 0;
 
 	dbg_log_event(NULL, "Resume RH", 0);
 
+	if (pdata && pdata->swfi_latency) {
+		next_latency = pdata->swfi_latency + 1;
+		pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
+		if (pdata->standalone_latency)
+			next_latency = pdata->standalone_latency + 1;
+		else
+			next_latency = PM_QOS_DEFAULT_VALUE;
+	}
+
 	/* keep delay between bus states */
 	if (time_before(jiffies, ehci->next_statechange))
 		usleep_range(5000, 5000);
 
 	spin_lock_irq(&ehci->lock);
 	if (!HCD_HW_ACCESSIBLE(hcd)) {
-		spin_unlock_irq(&ehci->lock);
 		mehci->resume_status = -ESHUTDOWN;
-		complete(&mehci->rt_completion);
-		return 0;
+		goto exit;
 	}
 
 	if (unlikely(ehci->debug)) {
@@ -1313,13 +1326,7 @@
 				&mehci->timer->gptimer1_ctrl);
 
 			spin_unlock_irq(&ehci->lock);
-			if (pdata && pdata->swfi_latency)
-				pm_qos_update_request(&mehci->pm_qos_req_dma,
-					pdata->swfi_latency + 1);
 			wait_for_completion(&mehci->gpt0_completion);
-			if (pdata && pdata->standalone_latency)
-				pm_qos_update_request(&mehci->pm_qos_req_dma,
-					pdata->standalone_latency + 1);
 			spin_lock_irq(&ehci->lock);
 		} else {
 			dbg_log_event(NULL, "FPR: Tightloop", 0);
@@ -1357,9 +1364,11 @@
 
 	dbg_log_event(NULL, "FPR: RT-Done", 0);
 	mehci->resume_status = 1;
+exit:
 	spin_unlock_irq(&ehci->lock);
-
 	complete(&mehci->rt_completion);
+	if (next_latency)
+		pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
 
 	return 0;
 }
@@ -1872,6 +1881,8 @@
 
 	pdata->phy_sof_workaround = of_property_read_bool(node,
 					"qcom,phy-sof-workaround");
+	pdata->phy_susp_sof_workaround = of_property_read_bool(node,
+					"qcom,phy-susp-sof-workaround");
 	pdata->ignore_cal_pad_config = of_property_read_bool(node,
 					"hsic,ignore-cal-pad-config");
 	of_property_read_u32(node, "hsic,strobe-pad-offset",
@@ -1978,9 +1989,13 @@
 	spin_lock_init(&mehci->wakeup_lock);
 
 	if (pdata->phy_sof_workaround) {
+		/* Enable ALL workarounds related to PHY SOF bugs */
 		mehci->ehci.susp_sof_bug = 1;
 		mehci->ehci.reset_sof_bug = 1;
 		mehci->ehci.resume_sof_bug = 1;
+	} else if (pdata->phy_susp_sof_workaround) {
+		/* Only the SUSP SOF hardware bug is present; the others are not */
+		mehci->ehci.susp_sof_bug = 1;
 	}
 
 	if (pdata->reset_delay)
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index de7fc02..9139447 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -863,7 +863,8 @@
 	struct msm_otg_platform_data *pdata = motg->pdata;
 
 	/* Check if target allows min_vote to be same as no_vote */
-	if (vote >= pdata->bus_scale_table->num_usecases)
+	if (pdata->bus_scale_table &&
+	    vote >= pdata->bus_scale_table->num_usecases)
 		vote = USB_NO_PERF_VOTE;
 
 	if (motg->bus_perf_client) {
@@ -4291,11 +4292,27 @@
 		goto put_core_clk;
 	}
 
+	/*
+	 * On a few platforms the USB PHY is fed with the sleep clk,
+	 * so don't fail the probe if this clock is missing.
+	 */
+	motg->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
+	if (IS_ERR(motg->sleep_clk)) {
+		dev_dbg(&pdev->dev, "failed to get sleep_clk\n");
+	} else {
+		ret = clk_prepare_enable(motg->sleep_clk);
+		if (ret) {
+			dev_err(&pdev->dev, "%s failed to vote sleep_clk %d\n",
+							__func__, ret);
+			goto put_pclk;
+		}
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		dev_err(&pdev->dev, "failed to get platform resource mem\n");
 		ret = -ENODEV;
-		goto put_pclk;
+		goto disable_sleep_clk;
 	}
 
 	motg->io_res = res;
@@ -4303,7 +4320,7 @@
 	if (!motg->regs) {
 		dev_err(&pdev->dev, "ioremap failed\n");
 		ret = -ENOMEM;
-		goto put_pclk;
+		goto disable_sleep_clk;
 	}
 	dev_info(&pdev->dev, "OTG regs = %p\n", motg->regs);
 
@@ -4602,6 +4619,9 @@
 		msm_xo_put(motg->xo_handle);
 free_regs:
 	iounmap(motg->regs);
+disable_sleep_clk:
+	if (!IS_ERR(motg->sleep_clk))
+		clk_disable_unprepare(motg->sleep_clk);
 put_pclk:
 	clk_put(motg->pclk);
 put_core_clk:
@@ -4696,6 +4716,10 @@
 	} else {
 		msm_xo_put(motg->xo_handle);
 	}
+
+	if (!IS_ERR(motg->sleep_clk))
+		clk_disable_unprepare(motg->sleep_clk);
+
 	msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
 	msm_hsusb_ldo_init(motg, 0);
 	regulator_disable(hsusb_vdd);
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 3b0cd20..a575d6d 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -477,7 +477,6 @@
 		}
 		ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_INIT;
 	}
-	mdss_dsi_op_mode_config(mipi->mode, pdata);
 
 	if (pdata->panel_info.type == MIPI_CMD_PANEL) {
 		if (mipi->vsync_enable && mipi->hw_vsync_mode
@@ -555,9 +554,9 @@
 
 	mdss_dsi_sw_reset(pdata);
 	mdss_dsi_host_init(mipi, pdata);
+	mdss_dsi_op_mode_config(mipi->mode, pdata);
 
 	if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE) {
-		mdss_dsi_op_mode_config(DSI_CMD_MODE, pdata);
 		ret = mdss_dsi_unblank(pdata);
 		if (ret) {
 			pr_err("%s: unblank failed\n", __func__);
@@ -586,6 +585,8 @@
 	switch (event) {
 	case MDSS_EVENT_UNBLANK:
 		rc = mdss_dsi_on(pdata);
+		mdss_dsi_op_mode_config(pdata->panel_info.mipi.mode,
+							pdata);
 		if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE)
 			rc = mdss_dsi_unblank(pdata);
 		break;
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
index 2603648..a8c34f3 100644
--- a/drivers/video/msm/mdss/mdss_dsi.h
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -394,9 +394,7 @@
 void mdss_dsi_cmd_mdp_start(struct mdss_dsi_ctrl_pdata *ctrl);
 void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata);
 void mdss_dsi_ack_err_status(unsigned char *dsi_base);
-void mdss_dsi_clk_enable(struct mdss_dsi_ctrl_pdata *ctrl);
-void mdss_dsi_clk_disable(struct mdss_dsi_ctrl_pdata *ctrl);
-void mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable);
+int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable);
 void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl,
 				int enable);
 void mdss_dsi_controller_cfg(int enable,
@@ -412,8 +410,6 @@
 int mdss_dsi_clk_init(struct platform_device *pdev,
 		      struct mdss_dsi_ctrl_pdata *ctrl_pdata);
 void mdss_dsi_clk_deinit(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
-void mdss_dsi_prepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
-void mdss_dsi_unprepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
 int mdss_dsi_enable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
 void mdss_dsi_disable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
 void mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index 5f5084d..e48d0d2 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -80,34 +80,6 @@
 	mdss_dsi_buf_alloc(&ctrl->rx_buf, SZ_4K);
 }
 
-/*
- * acquire ctrl->mutex first
- */
-void mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
-{
-	mutex_lock(&ctrl->mutex);
-	if (enable) {
-		if (ctrl->clk_cnt == 0) {
-			mdss_dsi_enable_bus_clocks(ctrl);
-			mdss_dsi_prepare_clocks(ctrl);
-			mdss_dsi_clk_enable(ctrl);
-		}
-		ctrl->clk_cnt++;
-	} else {
-		if (ctrl->clk_cnt) {
-			ctrl->clk_cnt--;
-			if (ctrl->clk_cnt == 0) {
-				mdss_dsi_clk_disable(ctrl);
-				mdss_dsi_unprepare_clocks(ctrl);
-				mdss_dsi_disable_bus_clocks(ctrl);
-			}
-		}
-	}
-	pr_debug("%s: ctrl ndx=%d enabled=%d clk_cnt=%d\n",
-			__func__, ctrl->ndx, enable, ctrl->clk_cnt);
-	mutex_unlock(&ctrl->mutex);
-}
-
 void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
 {
 	if (enable == 0) {
@@ -1185,11 +1157,21 @@
 		dchdr = &cm->dchdr;
 		mdss_dsi_buf_reserve(tp, len);
 		len = mdss_dsi_cmd_dma_add(tp, cm);
+		if (!len) {
+			pr_err("%s: failed to call cmd_dma_add\n", __func__);
+			return -EINVAL;
+		}
 		tot += len;
 		if (dchdr->last) {
 			tp->data = tp->start; /* begin of buf */
 			mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
-			mdss_dsi_cmd_dma_tx(ctrl, tp);
+			len = mdss_dsi_cmd_dma_tx(ctrl, tp);
+			if (IS_ERR_VALUE(len)) {
+				mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
+				pr_err("%s: failed to call cmd_dma_tx for cmd = 0x%x\n",
+					__func__,  cmds->payload[0]);
+				return -EINVAL;
+			}
 			if (dchdr->wait)
 				usleep(dchdr->wait * 1000);
 
@@ -1209,7 +1191,7 @@
 		struct dsi_cmd_desc *cmds, int cnt)
 {
 	u32 dsi_ctrl, data;
-	int video_mode;
+	int video_mode, ret = 0;
 	u32 left_dsi_ctrl = 0;
 	bool left_ctrl_restore = false;
 
@@ -1249,7 +1231,12 @@
 		MIPI_OUTP((ctrl->ctrl_base) + 0x0004, data);
 	}
 
-	mdss_dsi_cmds2buf_tx(ctrl, cmds, cnt);
+	ret = mdss_dsi_cmds2buf_tx(ctrl, cmds, cnt);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("%s: failed to call\n",
+			__func__);
+		cnt = -EINVAL;
+	}
 
 	if (left_ctrl_restore)
 		MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
@@ -1285,7 +1272,7 @@
 int mdss_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
 			struct dsi_cmd_desc *cmds, int rlen, u32 rx_flags)
 {
-	int cnt, len, diff, pkt_size;
+	int cnt, len, diff, pkt_size, ret = 0;
 	struct dsi_buf *tp, *rp;
 	int no_max_pkt_size;
 	char cmd;
@@ -1362,19 +1349,44 @@
 		/* packet size need to be set at every read */
 		pkt_size = len;
 		max_pktsize[0] = pkt_size;
-		mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
 		mdss_dsi_buf_init(tp);
-		mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
-		mdss_dsi_cmd_dma_tx(ctrl, tp);
+		ret = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
+		if (!ret) {
+			pr_err("%s: failed to call\n",
+				__func__);
+			rp->len = 0;
+			goto end;
+		}
+		mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
+		ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
+		if (IS_ERR_VALUE(ret)) {
+			mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
+			pr_err("%s: failed to call\n",
+				__func__);
+			rp->len = 0;
+			goto end;
+		}
 		pr_debug("%s: Max packet size sent\n", __func__);
 	}
+	mdss_dsi_buf_init(tp);
+	ret = mdss_dsi_cmd_dma_add(tp, cmds);
+	if (!ret) {
+		pr_err("%s: failed to call cmd_dma_add for cmd = 0x%x\n",
+			__func__,  cmds->payload[0]);
+		rp->len = 0;
+		goto end;
+	}
 
 	mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
-	mdss_dsi_buf_init(tp);
-	mdss_dsi_cmd_dma_add(tp, cmds);
-
 	/* transmit read command to client */
-	mdss_dsi_cmd_dma_tx(ctrl, tp);
+	ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
+	if (IS_ERR_VALUE(ret)) {
+		mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
+		pr_err("%s: failed to call\n",
+			__func__);
+		rp->len = 0;
+		goto end;
+	}
 	/*
 	 * once cmd_dma_done interrupt received,
 	 * return data from client is ready and stored
@@ -1406,7 +1418,7 @@
 	switch (cmd) {
 	case DTYPE_ACK_ERR_RESP:
 		pr_debug("%s: rx ACK_ERR_PACLAGE\n", __func__);
-		break;
+		rp->len = 0;
 	case DTYPE_GEN_READ1_RESP:
 	case DTYPE_DCS_READ1_RESP:
 		mdss_dsi_short_read1_resp(rp);
@@ -1422,10 +1434,10 @@
 		rp->len -= diff; /* align bytes */
 		break;
 	default:
-		pr_debug("%s: Unknown cmd received\n", __func__);
-		break;
+		pr_warning("%s:Invalid response cmd\n", __func__);
+		rp->len = 0;
 	}
-
+end:
 	if (left_ctrl_restore)
 		MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
 					left_dsi_ctrl); /*restore */
@@ -1441,7 +1453,7 @@
 static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
 					struct dsi_buf *tp)
 {
-	int len;
+	int len, ret = 0;
 	int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
 	char *bp;
 	unsigned long size, addr;
@@ -1486,16 +1498,18 @@
 	MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);	/* trigger */
 	wmb();
 
-	if (!wait_for_completion_timeout(&ctrl->dma_comp,
-				msecs_to_jiffies(DMA_TX_TIMEOUT))) {
-		pr_err("%s: dma timeout error\n", __func__);
-	}
+	ret = wait_for_completion_timeout(&ctrl->dma_comp,
+				msecs_to_jiffies(DMA_TX_TIMEOUT));
+	if (ret == 0)
+		ret = -ETIMEDOUT;
+	else
+		ret = tp->len;
 
 	if (is_mdss_iommu_attached())
 		msm_iommu_unmap_contig_buffer(addr,
 			mdss_get_iommu_domain(domain), 0, size);
 
-	return tp->len;
+	return ret;
 }
 
 static int mdss_dsi_cmd_dma_rx(struct mdss_dsi_ctrl_pdata *ctrl,
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index ac87cbd..94fced0 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -274,8 +274,8 @@
 {
 	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
 
-	if (mfd->ref_cnt > 1)
-		mfd->ref_cnt = 1;
+	for (; mfd->ref_cnt > 1; mfd->ref_cnt--)
+		pm_runtime_put(mfd->fbi->dev);
 
 	mdss_fb_release(mfd->fbi, 0);
 }
@@ -1096,7 +1096,8 @@
 					   mfd->op_enable);
 		if (result) {
 			pm_runtime_put(info->dev);
-			pr_err("mdss_fb_open: can't turn on display!\n");
+			pr_err("can't turn on fb%d! rc=%d\n", mfd->index,
+				result);
 			return result;
 		}
 	}
@@ -1122,8 +1123,7 @@
 		ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info,
 				       mfd->op_enable);
 		if (ret) {
-			pr_err("can't turn off display attached to fb%d!\n",
-				mfd->index);
+			pr_err("can't turn off fb%d! rc=%d\n", mfd->index, ret);
 			return ret;
 		}
 	}
diff --git a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
index bcd5f28..367c918 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/stat.h>
 
 #include "mdss_hdmi_hdcp.h"
 
@@ -1171,6 +1172,38 @@
 	return rc;
 } /* hdmi_hdcp_isr */
 
+static ssize_t hdmi_hdcp_sysfs_rda_status(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_hdcp_ctrl *hdcp_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP);
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(hdcp_ctrl->init_data.mutex);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", hdcp_ctrl->hdcp_state);
+	DEV_DBG("%s: '%d'\n", __func__, hdcp_ctrl->hdcp_state);
+	mutex_unlock(hdcp_ctrl->init_data.mutex);
+
+	return ret;
+} /* hdmi_hdcp_sysfs_rda_status */
+
+static DEVICE_ATTR(status, S_IRUGO, hdmi_hdcp_sysfs_rda_status, NULL);
+
+static struct attribute *hdmi_hdcp_fs_attrs[] = {
+	&dev_attr_status.attr,
+	NULL,
+};
+
+static struct attribute_group hdmi_hdcp_fs_attr_group = {
+	.name = "hdcp",
+	.attrs = hdmi_hdcp_fs_attrs,
+};
+
 void hdmi_hdcp_deinit(void *input)
 {
 	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
@@ -1180,6 +1213,9 @@
 		return;
 	}
 
+	sysfs_remove_group(hdcp_ctrl->init_data.sysfs_kobj,
+				&hdmi_hdcp_fs_attr_group);
+
 	kfree(hdcp_ctrl);
 } /* hdmi_hdcp_deinit */
 
@@ -1203,6 +1239,12 @@
 
 	hdcp_ctrl->init_data = *init_data;
 
+	if (sysfs_create_group(init_data->sysfs_kobj,
+				&hdmi_hdcp_fs_attr_group)) {
+		DEV_ERR("%s: hdcp sysfs group creation failed\n", __func__);
+		goto error;
+	}
+
 	INIT_DELAYED_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
 	INIT_WORK(&hdcp_ctrl->hdcp_int_work, hdmi_hdcp_int_work);
 
diff --git a/drivers/video/msm/mdss/mdss_hdmi_hdcp.h b/drivers/video/msm/mdss/mdss_hdmi_hdcp.h
index d35b2a9..2f0c3b9 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_hdcp.h
+++ b/drivers/video/msm/mdss/mdss_hdmi_hdcp.h
@@ -26,6 +26,7 @@
 	struct dss_io_data *core_io;
 	struct dss_io_data *qfprom_io;
 	struct mutex *mutex;
+	struct kobject *sysfs_kobj;
 	struct workqueue_struct *workq;
 	void *cb_data;
 	void (*notify_status)(void *cb_data, enum hdmi_hdcp_state status);
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index 1fef395..ad134a0 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -193,12 +193,18 @@
 		{20480, 247500} } },
 };
 
-static bool is_cea_format(int mode)
+static bool hdmi_tx_is_cea_format(int mode)
 {
-	if ((mode > 0) && (mode < HDMI_EVFRMT_END))
-		return true;
+	bool cea_fmt;
+
+	if ((mode > 0) && (mode <= HDMI_EVFRMT_END))
+		cea_fmt = true;
 	else
-		return false;
+		cea_fmt = false;
+
+	DEV_DBG("%s: %s\n", __func__, cea_fmt ? "Yes" : "No");
+
+	return cea_fmt;
 }
 
 const char *hdmi_tx_pm_name(enum hdmi_tx_power_module_type module)
@@ -740,6 +746,7 @@
 		hdcp_init_data.qfprom_io =
 			&hdmi_ctrl->pdata.io[HDMI_TX_QFPROM_IO];
 		hdcp_init_data.mutex = &hdmi_ctrl->mutex;
+		hdcp_init_data.sysfs_kobj = hdmi_ctrl->kobj;
 		hdcp_init_data.ddc_ctrl = &hdmi_ctrl->ddc_ctrl;
 		hdcp_init_data.workq = hdmi_ctrl->workq;
 		hdcp_init_data.notify_status = hdmi_tx_hdcp_cb;
@@ -2210,7 +2217,7 @@
 	}
 
 	if (!hdmi_tx_is_dvi_mode(hdmi_ctrl) &&
-	    is_cea_format(hdmi_ctrl->video_resolution)) {
+	    hdmi_tx_is_cea_format(hdmi_ctrl->video_resolution)) {
 		rc = hdmi_tx_audio_setup(hdmi_ctrl);
 		if (rc) {
 			DEV_ERR("%s: hdmi_msm_audio_setup failed. rc=%d\n",
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 6f9c98e..0a41ef8 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -133,7 +133,6 @@
 struct mdss_hw *mdss_irq_handlers[MDSS_MAX_HW_BLK];
 
 static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on);
-static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata);
 static int mdss_mdp_parse_dt(struct platform_device *pdev);
 static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev);
 static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev);
@@ -911,18 +910,6 @@
 	}
 }
 
-static void mdss_mdp_shutdown(struct platform_device *pdev)
-{
-	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
-
-	if (!mdata)
-		return;
-
-	pr_debug("display shutdown\n");
-
-	mdss_mdp_suspend_sub(mdata);
-}
-
 static int mdss_mdp_probe(struct platform_device *pdev)
 {
 	struct resource *res;
@@ -1795,7 +1782,7 @@
 	.remove = mdss_mdp_remove,
 	.suspend = mdss_mdp_suspend,
 	.resume = mdss_mdp_resume,
-	.shutdown = mdss_mdp_shutdown,
+	.shutdown = NULL,
 	.driver = {
 		/*
 		 * Driver name must match the device name added in
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index b5a5383..d1595b3 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -24,7 +24,10 @@
 #define MDSS_MDP_BUS_FACTOR_SHIFT 10
 /* 1.5 bus fudge factor */
 #define MDSS_MDP_BUS_FUDGE_FACTOR_IB(val) (((val) / 2) * 3)
+#define MDSS_MDP_BUS_FUDGE_FACTOR_HIGH_IB(val) (val << 1)
 #define MDSS_MDP_BUS_FUDGE_FACTOR_AB(val) (val << 1)
+#define MDSS_MDP_BUS_FLOOR_BW (3200000000ULL >> MDSS_MDP_BUS_FACTOR_SHIFT)
+
 /* 1.25 clock fudge factor */
 #define MDSS_MDP_CLK_FUDGE_FACTOR(val) (((val) * 5) / 4)
 
@@ -58,6 +61,74 @@
 		pinfo->clk_rate;
 }
 
+static u32 __mdss_mdp_ctrl_perf_ovrd_helper(struct mdss_mdp_mixer *mixer,
+		u32 *npipe)
+{
+	struct mdss_panel_info *pinfo;
+	struct mdss_mdp_pipe *pipe;
+	u32 mnum, ovrd = 0;
+
+	if (!mixer || !mixer->ctl->panel_data)
+		return 0;
+
+	pinfo = &mixer->ctl->panel_data->panel_info;
+	for (mnum = 0; mnum < MDSS_MDP_MAX_STAGE; mnum++) {
+		pipe = mixer->stage_pipe[mnum];
+		if (pipe && pinfo) {
+			*npipe = *npipe + 1;
+			if ((pipe->src.w >= pipe->src.h) &&
+					(pipe->src.w >= pinfo->xres))
+				ovrd = 1;
+		}
+	}
+
+	return ovrd;
+}
+
+/**
+ * __mdss_mdp_ctrl_perf_ovrd() - Determines if a performance override is needed
+ * @mdata:	Struct containing references to all MDP5 hardware structures
+ *		and status info such as interrupts, target caps, etc.
+ * @ab_quota:	Arbitrated bandwidth quota
+ * @ib_quota:	Instantaneous bandwidth quota
+ *
+ * Function calculates the minimum required MDP and BIMC clocks to avoid MDP
+ * underflow during portrait video playback. The calculations are based on the
+ * way MDP fetches (bandwidth requirement) and processes data through
+ * MDP pipeline (MDP clock requirement) based on frame size and scaling
+ * requirements.
+ */
+static void __mdss_mdp_ctrl_perf_ovrd(struct mdss_data_type *mdata,
+	u64 *ab_quota, u64 *ib_quota)
+{
+	struct mdss_mdp_ctl *ctl;
+	u32 i, npipe = 0, ovrd = 0;
+
+	for (i = 0; i < mdata->nctl; i++) {
+		ctl = mdata->ctl_off + i;
+		if (!ctl->power_on)
+			continue;
+		ovrd |= __mdss_mdp_ctrl_perf_ovrd_helper(
+				ctl->mixer_left, &npipe);
+		ovrd |= __mdss_mdp_ctrl_perf_ovrd_helper(
+				ctl->mixer_right, &npipe);
+	}
+
+	*ab_quota = MDSS_MDP_BUS_FUDGE_FACTOR_AB(*ab_quota);
+	if (npipe > 1)
+		*ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR_HIGH_IB(*ib_quota);
+	else
+		*ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR_IB(*ib_quota);
+
+	if (ovrd && (*ib_quota < MDSS_MDP_BUS_FLOOR_BW)) {
+		*ib_quota = MDSS_MDP_BUS_FLOOR_BW;
+		pr_debug("forcing the BIMC clock to 200 MHz : %llu bytes",
+			*ib_quota);
+	} else {
+		pr_debug("ib quota : %llu bytes", *ib_quota);
+	}
+}
+
 static int mdss_mdp_ctl_perf_commit(struct mdss_data_type *mdata, u32 flags)
 {
 	struct mdss_mdp_ctl *ctl;
@@ -82,16 +153,15 @@
 		}
 	}
 	if (flags & MDSS_MDP_PERF_UPDATE_BUS) {
-		bus_ab_quota = bus_ib_quota << MDSS_MDP_BUS_FACTOR_SHIFT;
-		bus_ab_quota = MDSS_MDP_BUS_FUDGE_FACTOR_AB(bus_ab_quota);
-		bus_ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR_IB(bus_ib_quota);
+		bus_ab_quota = bus_ib_quota;
+		__mdss_mdp_ctrl_perf_ovrd(mdata, &bus_ab_quota, &bus_ib_quota);
 		bus_ib_quota <<= MDSS_MDP_BUS_FACTOR_SHIFT;
-
+		bus_ab_quota <<= MDSS_MDP_BUS_FACTOR_SHIFT;
 		mdss_mdp_bus_scale_set_quota(bus_ab_quota, bus_ib_quota);
 	}
 	if (flags & MDSS_MDP_PERF_UPDATE_CLK) {
 		clk_rate = MDSS_MDP_CLK_FUDGE_FACTOR(clk_rate);
-		pr_debug("update clk rate = %lu\n", clk_rate);
+		pr_debug("update clk rate = %lu HZ\n", clk_rate);
 		mdss_mdp_set_clk_rate(clk_rate);
 	}
 	mutex_unlock(&mdss_mdp_ctl_lock);
@@ -1337,7 +1407,7 @@
 		 * writeback block
 		*/
 		head[len] = head[len - 1];
-		head[len].num = -1;
+		head[len].num = head[len - 1].num;
 	}
 	mdata->ctl_off = head;
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 9b81633..6fb8883 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -409,11 +409,9 @@
 	if (ctx->polling_en) {
 		rc = mdss_mdp_video_pollwait(ctl);
 	} else {
-		rc = wait_for_completion_interruptible_timeout(&ctx->vsync_comp,
+		rc = wait_for_completion_timeout(&ctx->vsync_comp,
 				usecs_to_jiffies(VSYNC_TIMEOUT_US));
-		if (rc < 0) {
-			pr_warn("vsync wait interrupted ctl=%d\n", ctl->num);
-		} else if (rc == 0) {
+		if (rc == 0) {
 			pr_warn("vsync wait timeout %d, fallback to poll mode\n",
 					ctl->num);
 			ctx->polling_en++;
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 4032b91..c4dee86 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -595,7 +595,8 @@
 	mutex_lock(&mfd->lock);
 	if (pipe->play_cnt == 0) {
 		pr_debug("failed for pipe %d\n", pipe->num);
-		list_del(&pipe->used_list);
+		if (!list_empty(&pipe->used_list))
+			list_del_init(&pipe->used_list);
 		mdss_mdp_pipe_destroy(pipe);
 	}
 
@@ -827,20 +828,17 @@
 			pipe->params_changed++;
 			buf = &pipe->front_buf;
 		} else if (!pipe->params_changed) {
-			if (pipe->mixer) {
-				if (!mdss_mdp_pipe_is_staged(pipe)) {
-					list_del(&pipe->used_list);
-					list_add(&pipe->cleanup_list,
-						 &mdp5_data->pipes_cleanup);
-				}
+			if (pipe->mixer && !mdss_mdp_pipe_is_staged(pipe) &&
+			    !list_empty(&pipe->used_list)) {
+				list_del_init(&pipe->used_list);
+				list_add(&pipe->cleanup_list,
+					&mdp5_data->pipes_cleanup);
 			}
 			continue;
 		} else if (pipe->front_buf.num_planes) {
 			buf = &pipe->front_buf;
 		} else {
-			pr_warn("pipe queue w/o buffer. unstaging layer\n");
-			pipe->params_changed = 0;
-			mdss_mdp_mixer_pipe_unstage(pipe);
+			pr_warn("pipe queue w/o buffer\n");
 			continue;
 		}
 
@@ -894,9 +892,11 @@
 				continue;
 			}
 			mutex_lock(&mfd->lock);
-			list_del(&pipe->used_list);
-			list_add(&pipe->cleanup_list,
-				&mdp5_data->pipes_cleanup);
+			if (!list_empty(&pipe->used_list)) {
+				list_del_init(&pipe->used_list);
+				list_add(&pipe->cleanup_list,
+					&mdp5_data->pipes_cleanup);
+			}
 			mutex_unlock(&mfd->lock);
 			mdss_mdp_mixer_pipe_unstage(pipe);
 			mdss_mdp_pipe_unmap(pipe);
diff --git a/drivers/video/msm/mdss/mhl_msc.c b/drivers/video/msm/mdss/mhl_msc.c
index 15811bb..e7cd1be 100644
--- a/drivers/video/msm/mdss/mhl_msc.c
+++ b/drivers/video/msm/mdss/mhl_msc.c
@@ -74,6 +74,22 @@
 	}
 }
 
+static bool mhl_qualify_path_enable(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	int rc = false;
+
+	if (!mhl_ctrl)
+		return rc;
+
+	if (mhl_ctrl->tmds_en_state ||
+	    /* Identify sink with non-standard INT STAT SIZE */
+	    (mhl_ctrl->devcap[DEVCAP_OFFSET_MHL_VERSION] == 0x10 &&
+	     mhl_ctrl->devcap[DEVCAP_OFFSET_INT_STAT_SIZE] == 0x44))
+		rc = true;
+
+	return rc;
+}
+
 void mhl_register_msc(struct mhl_tx_ctrl *ctrl)
 {
 	if (ctrl)
@@ -224,12 +240,16 @@
 	case MHL_WRITE_STAT:
 		if (req->offset == MHL_STATUS_REG_LINK_MODE) {
 			if (req->payload.data[0]
-			    & MHL_STATUS_PATH_ENABLED)
+			    & MHL_STATUS_PATH_ENABLED) {
 				/* Enable TMDS output */
 				mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
-			else
+				if (mhl_ctrl->devcap_state == MHL_DEVCAP_ALL)
+					mhl_drive_hpd(mhl_ctrl, HPD_UP);
+			} else {
 				/* Disable TMDS output */
 				mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
+				mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+			}
 		}
 		break;
 	case MHL_READ_DEVCAP:
@@ -245,8 +265,14 @@
 				pr_debug("%s: devcap pow bit unset\n",
 					 __func__);
 			break;
+		case DEVCAP_OFFSET_RESERVED:
+			mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+			mhl_drive_hpd(mhl_ctrl, HPD_UP);
+			break;
 		case DEVCAP_OFFSET_MHL_VERSION:
 		case DEVCAP_OFFSET_INT_STAT_SIZE:
+			if (mhl_qualify_path_enable(mhl_ctrl))
+				mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
 			break;
 		}
 		break;
diff --git a/drivers/video/msm/mdss/mhl_sii8334.c b/drivers/video/msm/mdss/mhl_sii8334.c
index 82b56e3..c66d50d 100644
--- a/drivers/video/msm/mdss/mhl_sii8334.c
+++ b/drivers/video/msm/mdss/mhl_sii8334.c
@@ -794,11 +794,13 @@
 void mhl_tmds_ctrl(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
 {
 	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
 	if (on) {
 		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, BIT4);
-		mhl_drive_hpd(mhl_ctrl, HPD_UP);
+		mhl_ctrl->tmds_en_state = true;
 	} else {
 		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, 0x00);
+		mhl_ctrl->tmds_en_state = false;
 	}
 }
 
diff --git a/drivers/video/msm/mdss/msm_mdss_io_8974.c b/drivers/video/msm/mdss/msm_mdss_io_8974.c
index c24f643..7b89eff 100644
--- a/drivers/video/msm/mdss/msm_mdss_io_8974.c
+++ b/drivers/video/msm/mdss/msm_mdss_io_8974.c
@@ -209,61 +209,136 @@
 	clk_disable_unprepare(ctrl_pdata->ahb_clk);
 }
 
-void mdss_dsi_prepare_clocks(struct mdss_dsi_ctrl_pdata  *ctrl_pdata)
+static int mdss_dsi_clk_prepare(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
 {
-	clk_prepare(ctrl_pdata->byte_clk);
-	clk_prepare(ctrl_pdata->esc_clk);
-	clk_prepare(ctrl_pdata->pixel_clk);
-}
+	int rc = 0;
 
-void mdss_dsi_unprepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
-{
-	clk_unprepare(ctrl_pdata->esc_clk);
-	clk_unprepare(ctrl_pdata->pixel_clk);
+	rc = clk_prepare(ctrl_pdata->esc_clk);
+	if (rc) {
+		pr_err("%s: Failed to prepare dsi esc clk\n", __func__);
+		goto esc_clk_err;
+	}
+
+	rc = clk_prepare(ctrl_pdata->byte_clk);
+	if (rc) {
+		pr_err("%s: Failed to prepare dsi byte clk\n", __func__);
+		goto byte_clk_err;
+	}
+
+	rc = clk_prepare(ctrl_pdata->pixel_clk);
+	if (rc) {
+		pr_err("%s: Failed to prepare dsi pixel clk\n", __func__);
+		goto pixel_clk_err;
+	}
+
+	return rc;
+
+pixel_clk_err:
 	clk_unprepare(ctrl_pdata->byte_clk);
+byte_clk_err:
+	clk_unprepare(ctrl_pdata->esc_clk);
+esc_clk_err:
+	return rc;
 }
 
-void mdss_dsi_clk_enable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+static void mdss_dsi_clk_unprepare(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
 {
-	u32 esc_clk_rate = 19200000;
-
 	if (!ctrl_pdata) {
 		pr_err("%s: Invalid input data\n", __func__);
 		return;
 	}
 
-	if (ctrl_pdata->mdss_dsi_clk_on) {
-		pr_info("%s: mdss_dsi_clks already ON\n", __func__);
-		return;
+	clk_unprepare(ctrl_pdata->pixel_clk);
+	clk_unprepare(ctrl_pdata->byte_clk);
+	clk_unprepare(ctrl_pdata->esc_clk);
+}
+
+static int mdss_dsi_clk_set_rate(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	u32 esc_clk_rate = 19200000;
+	int rc = 0;
+
+	if (!ctrl_pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
 	}
 
 	if (!ctrl_pdata->panel_data.panel_info.cont_splash_enabled) {
 		pr_debug("%s: Set clk rates: pclk=%d, byteclk=%d escclk=%d\n",
 			__func__, ctrl_pdata->pclk_rate,
 			ctrl_pdata->byte_clk_rate, esc_clk_rate);
-		if (clk_set_rate(ctrl_pdata->esc_clk, esc_clk_rate) < 0)
+		rc = clk_set_rate(ctrl_pdata->esc_clk, esc_clk_rate);
+		if (rc) {
 			pr_err("%s: dsi_esc_clk - clk_set_rate failed\n",
 				__func__);
+			goto error;
+		}
 
-		if (clk_set_rate(ctrl_pdata->byte_clk,
-			ctrl_pdata->byte_clk_rate) < 0)
+		rc =  clk_set_rate(ctrl_pdata->byte_clk,
+			ctrl_pdata->byte_clk_rate);
+		if (rc) {
 			pr_err("%s: dsi_byte_clk - clk_set_rate failed\n",
 				__func__);
+			goto error;
+		}
 
-		if (clk_set_rate(ctrl_pdata->pixel_clk,
-			ctrl_pdata->pclk_rate) < 0)
+		rc = clk_set_rate(ctrl_pdata->pixel_clk, ctrl_pdata->pclk_rate);
+		if (rc) {
 			pr_err("%s: dsi_pixel_clk - clk_set_rate failed\n",
 				__func__);
+			goto error;
+		}
 	}
 
-	clk_enable(ctrl_pdata->esc_clk);
-	clk_enable(ctrl_pdata->byte_clk);
-	clk_enable(ctrl_pdata->pixel_clk);
-
-	ctrl_pdata->mdss_dsi_clk_on = 1;
+error:
+	return rc;
 }
 
-void mdss_dsi_clk_disable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+static int mdss_dsi_clk_enable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int rc = 0;
+
+	if (!ctrl_pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ctrl_pdata->mdss_dsi_clk_on) {
+		pr_info("%s: mdss_dsi_clks already ON\n", __func__);
+		return 0;
+	}
+
+	rc = clk_enable(ctrl_pdata->esc_clk);
+	if (rc) {
+		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+		goto esc_clk_err;
+	}
+
+	rc = clk_enable(ctrl_pdata->byte_clk);
+	if (rc) {
+		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+		goto byte_clk_err;
+	}
+
+	rc = clk_enable(ctrl_pdata->pixel_clk);
+	if (rc) {
+		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+		goto pixel_clk_err;
+	}
+
+	ctrl_pdata->mdss_dsi_clk_on = 1;
+
+	return rc;
+
+pixel_clk_err:
+	clk_disable(ctrl_pdata->byte_clk);
+byte_clk_err:
+	clk_disable(ctrl_pdata->esc_clk);
+esc_clk_err:
+	return rc;
+}
+
+static void mdss_dsi_clk_disable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
 {
 	if (!ctrl_pdata) {
 		pr_err("%s: Invalid input data\n", __func__);
@@ -275,13 +350,71 @@
 		return;
 	}
 
+	clk_disable(ctrl_pdata->esc_clk);
 	clk_disable(ctrl_pdata->pixel_clk);
 	clk_disable(ctrl_pdata->byte_clk);
-	clk_disable(ctrl_pdata->esc_clk);
 
 	ctrl_pdata->mdss_dsi_clk_on = 0;
 }
 
+int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
+{
+	int rc = 0;
+
+	mutex_lock(&ctrl->mutex);
+	if (enable) {
+		if (ctrl->clk_cnt == 0) {
+			rc = mdss_dsi_enable_bus_clocks(ctrl);
+			if (rc) {
+				pr_err("%s: failed to enable bus clks. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+
+			rc = mdss_dsi_clk_set_rate(ctrl);
+			if (rc) {
+				pr_err("%s: failed to set clk rates. rc=%d\n",
+					__func__, rc);
+				mdss_dsi_disable_bus_clocks(ctrl);
+				goto error;
+			}
+
+			rc = mdss_dsi_clk_prepare(ctrl);
+			if (rc) {
+				pr_err("%s: failed to prepare clks. rc=%d\n",
+					__func__, rc);
+				mdss_dsi_disable_bus_clocks(ctrl);
+				goto error;
+			}
+
+			rc = mdss_dsi_clk_enable(ctrl);
+			if (rc) {
+				pr_err("%s: failed to enable clks. rc=%d\n",
+					__func__, rc);
+				mdss_dsi_clk_unprepare(ctrl);
+				mdss_dsi_disable_bus_clocks(ctrl);
+				goto error;
+			}
+		}
+		ctrl->clk_cnt++;
+	} else {
+		if (ctrl->clk_cnt) {
+			ctrl->clk_cnt--;
+			if (ctrl->clk_cnt == 0) {
+				mdss_dsi_clk_disable(ctrl);
+				mdss_dsi_clk_unprepare(ctrl);
+				mdss_dsi_disable_bus_clocks(ctrl);
+			}
+		}
+	}
+	pr_debug("%s: ctrl ndx=%d enabled=%d clk_cnt=%d\n",
+			__func__, ctrl->ndx, enable, ctrl->clk_cnt);
+
+error:
+	mutex_unlock(&ctrl->mutex);
+	return rc;
+}
+
 void mdss_dsi_phy_sw_reset(unsigned char *ctrl_base)
 {
 	/* start phy sw reset */
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 21d836f..668c397 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -10,6 +10,7 @@
 #include <linux/namei.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/kmemleak.h>
 #include "internal.h"
 
 static const struct dentry_operations proc_sys_dentry_operations;
@@ -1215,6 +1216,8 @@
 	if (!header)
 		return NULL;
 
+	kmemleak_not_leak(header);
+
 	node = (struct ctl_node *)(header + 1);
 	init_header(header, root, set, node, table);
 	if (sysctl_check_table(path, table))
diff --git a/include/linux/cm36283.h b/include/linux/cm36283.h
new file mode 100644
index 0000000..cccd5ee
--- /dev/null
+++ b/include/linux/cm36283.h
@@ -0,0 +1,121 @@
+/* include/linux/cm36283.h
+ *
+ * Copyright (C) 2012 Capella Microsystems Inc.
+ * Author: Frank Hsieh <pengyueh@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_CM36283_H
+#define __LINUX_CM36283_H
+
+#define CM36283_I2C_NAME "cm36283"
+
+/* Define Slave Address*/
+#define	CM36283_slave_add	(0xC0 >> 1)
+
+#define ALS_CALIBRATED		0x6E9F
+#define PS_CALIBRATED		  0x509F
+
+/*Define Command Code*/
+#define		ALS_CONF		  0x00
+#define		ALS_THDH  	  0x01
+#define		ALS_THDL	    0x02
+#define		PS_CONF1      0x03
+#define		PS_CONF3      0x04
+#define		PS_CANC       0x05
+#define		PS_THD        0x06
+#define		RESERVED      0x07
+
+#define		PS_DATA       0x08
+#define		ALS_DATA      0x09
+#define		RESERVED2     0x0A
+#define		INT_FLAG      0x0B
+#define		ID_REG        0x0C
+
+/*cm36283*/
+/*for ALS CONF command*/
+#define CM36283_ALS_IT_80ms 	(0 << 6)
+#define CM36283_ALS_IT_160ms 	(1 << 6)
+#define CM36283_ALS_IT_320ms 	(2 << 6)
+#define CM36283_ALS_IT_640ms 	(3 << 6)
+#define CM36283_ALS_GAIN_1 		(0 << 2)
+#define CM36283_ALS_GAIN_2 		(1 << 2)
+#define CM36283_ALS_GAIN_4 		(2 << 2)
+#define CM36283_ALS_GAIN_8 		(3 << 2)
+#define CM36283_ALS_INT_EN	 	(1 << 1) /*enable/disable Interrupt*/
+#define CM36283_ALS_INT_MASK	0xFFFD
+#define CM36283_ALS_SD			  (1 << 0) /*enable/disable ALS func, 1:disable , 0: enable*/
+#define CM36283_ALS_SD_MASK		0xFFFE
+
+/*for PS CONF1 command*/
+#define CM36283_PS_ITB_1_2	 (0 << 14)
+#define CM36283_PS_ITB_1     (1 << 14)
+#define CM36283_PS_ITB_2     (2 << 14)
+#define CM36283_PS_ITB_4     (3 << 14)
+#define CM36283_PS_INT_OFF	       (0 << 8) /*enable/disable Interrupt*/
+#define CM36283_PS_INT_IN          (1 << 8)
+#define CM36283_PS_INT_OUT         (2 << 8)
+#define CM36283_PS_INT_IN_AND_OUT  (3 << 8)
+
+#define CM36283_PS_INT_MASK   0xFCFF
+
+#define CM36283_PS_DR_1_40   (0 << 6)
+#define CM36283_PS_DR_1_80   (1 << 6)
+#define CM36283_PS_DR_1_160  (2 << 6)
+#define CM36283_PS_DR_1_320  (3 << 6)
+#define CM36283_PS_IT_1T 	   (0 << 4)
+#define CM36283_PS_IT_1_3T   (1 << 4)
+#define CM36283_PS_IT_1_6T 	 (2 << 4)
+#define CM36283_PS_IT_2T 		 (3 << 4)
+#define CM36283_PS_PERS_1 	 (0 << 2)
+#define CM36283_PS_PERS_2 	 (1 << 2)
+#define CM36283_PS_PERS_3 	 (2 << 2)
+#define CM36283_PS_PERS_4 	 (3 << 2)
+#define CM36283_PS_RES_1     (1 << 1)
+#define CM36283_PS_SD	       (1 << 0)/*enable/disable PS func, 1:disable , 0: enable*/
+#define CM36283_PS_SD_MASK	 0xFFFE
+
+/*for PS CONF3 command*/
+#define CM36283_PS_MS_NORMAL        (0 << 14)
+#define CM36283_PS_MS_LOGIC_ENABLE  (1 << 14)
+#define CM36283_PS_PROL_63 	     (0 << 12)
+#define CM36283_PS_PROL_127      (1 << 12)
+#define CM36283_PS_PROL_191 	   (2 << 12)
+#define CM36283_PS_PROL_255 		 (3 << 12)
+#define CM36283_PS_SMART_PERS_ENABLE  (1 << 4)
+#define CM36283_PS_ACTIVE_FORCE_MODE  (1 << 3)
+#define CM36283_PS_ACTIVE_FORCE_TRIG  (1 << 2)
+
+/*for INT FLAG*/
+#define INT_FLAG_PS_SPFLAG           (1<<14)
+#define INT_FLAG_ALS_IF_L            (1<<13)
+#define INT_FLAG_ALS_IF_H            (1<<12)
+#define INT_FLAG_PS_IF_CLOSE         (1<<9)
+#define INT_FLAG_PS_IF_AWAY          (1<<8)  
+
+extern unsigned int ps_kparam1;
+extern unsigned int ps_kparam2;
+
+struct cm36283_platform_data {
+	int intr;
+	uint16_t levels[10];
+	uint16_t golden_adc;
+	int (*power)(int, uint8_t); /* power to the chip */
+	uint8_t slave_addr;
+	uint8_t ps_close_thd_set;
+	uint8_t ps_away_thd_set;	
+	uint16_t ls_cmd;
+	uint16_t ps_conf1_val;
+	uint16_t ps_conf3_val;	
+};
+
+#endif
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index dd675f3..02ecb0c 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -152,6 +152,8 @@
 #define DMX_IDX_VC1_FIRST_SEQ_FRAME_END     0x00800000
 #define DMX_IDX_VC1_FRAME_START             0x01000000
 #define DMX_IDX_VC1_FRAME_END               0x02000000
+#define DMX_IDX_H264_ACCESS_UNIT_DEL        0x04000000
+#define DMX_IDX_H264_SEI                    0x08000000
 
 struct dmx_pes_filter_params
 {
diff --git a/include/linux/input/synaptics_dsx.h b/include/linux/input/synaptics_dsx.h
index d121695..73016d6 100644
--- a/include/linux/input/synaptics_dsx.h
+++ b/include/linux/input/synaptics_dsx.h
@@ -35,11 +35,8 @@
  * struct synaptics_rmi4_platform_data - rmi4 platform data
  * @x_flip: x flip flag
  * @y_flip: y flip flag
- * @i2c_pull_up: pull up i2c bus with regulator
- * @power_down_enable: enable complete regulator shutdown in suspend
  * @irq_gpio: attention interrupt gpio
  * @irq_flags: flags used by the irq
- * @reset_flags: flags used by reset line
  * @reset_gpio: reset gpio
  * @panel_x: panel maximum values on the x
  * @panel_y: panel maximum values on the y
@@ -50,7 +47,6 @@
 	bool x_flip;
 	bool y_flip;
 	bool i2c_pull_up;
-	bool power_down_enable;
 	unsigned irq_gpio;
 	u32 irq_flags;
 	u32 reset_flags;
diff --git a/include/linux/mhl_8334.h b/include/linux/mhl_8334.h
index 560f75b..d1ee11c 100644
--- a/include/linux/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -99,7 +99,7 @@
 	int mhl_mode;
 	struct completion rgnd_done;
 	struct completion msc_cmd_done;
-	uint8_t devcap_state;
+	uint16_t devcap_state;
 	uint8_t path_en_state;
 	struct work_struct mhl_msc_send_work;
 	struct list_head list_cmd;
@@ -146,9 +146,10 @@
 	int current_val;
 	struct completion msc_cmd_done;
 	uint8_t devcap[16];
-	uint8_t devcap_state;
+	uint16_t devcap_state;
 	uint8_t status[2];
 	uint8_t path_en_state;
+	uint8_t tmds_en_state;
 	void *hdmi_mhl_ops;
 	struct work_struct mhl_msc_send_work;
 	struct list_head list_cmd;
diff --git a/include/linux/mhl_defs.h b/include/linux/mhl_defs.h
index f5dacfd..6177f07 100644
--- a/include/linux/mhl_defs.h
+++ b/include/linux/mhl_defs.h
@@ -132,6 +132,7 @@
 #define		MHL_SCRATCHPAD_SIZE			16
 #define		MAX_SCRATCHPAD_TRANSFER_SIZE		64
 #define		ADOPTER_ID_SIZE				2
+#define		MHL_DEVCAP_ALL		0xffff
 
 /* manually define highest number */
 #define		MHL_MAX_BUFFER_SIZE			MHL_SCRATCHPAD_SIZE
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index f8b78a4..f74fcbe 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -20,7 +20,10 @@
 #define KGSL_CONTEXT_TRASH_STATE	0x00000020
 #define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
 #define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
+#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
+
 #define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
+#define KGSL_CONTEXT_SYNC               0x00000400
 /* bits [12:15] are reserved for future use */
 #define KGSL_CONTEXT_TYPE_MASK          0x01F00000
 #define KGSL_CONTEXT_TYPE_SHIFT         20
@@ -30,6 +33,7 @@
 #define KGSL_CONTEXT_TYPE_CL		2
 #define KGSL_CONTEXT_TYPE_C2D		3
 #define KGSL_CONTEXT_TYPE_RS		4
+#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E
 
 #define KGSL_CONTEXT_INVALID 0xffffffff
 
@@ -282,7 +286,7 @@
 #define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
 	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
 
-/* issue indirect commands to the GPU.
+/* DEPRECATED: issue indirect commands to the GPU.
  * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
  * ibaddr and sizedwords must specify a subset of a buffer created
  * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
@@ -290,6 +294,9 @@
  * timestamp is a returned counter value which can be passed to
  * other ioctls to determine when the commands have been executed by
  * the GPU.
+ *
+ * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
+ * instead
  */
 struct kgsl_ringbuffer_issueibcmds {
 	unsigned int drawctxt_id;
@@ -804,6 +811,77 @@
 #define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
 	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
 
+/*
+ * struct kgsl_cmd_syncpoint_timestamp
+ * @context_id: ID of a KGSL context
+ * @timestamp: GPU timestamp
+ *
+ * This structure defines a syncpoint comprising a context/timestamp pair. A
+ * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
+ * dependencies that must be met before the command can be submitted to the
+ * hardware
+ */
+struct kgsl_cmd_syncpoint_timestamp {
+	unsigned int context_id;
+	unsigned int timestamp;
+};
+
+#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
+
+struct kgsl_cmd_syncpoint_fence {
+	int fd;
+};
+
+#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
+
+/**
+ * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
+ * @type: type of sync point defined here
+ * @priv: Pointer to the type specific buffer
+ * @size: Size of the type specific buffer
+ *
+ * This structure contains pointers defining a specific command sync point.
+ * The pointer and size should point to a type appropriate structure.
+ */
+struct kgsl_cmd_syncpoint {
+	int type;
+	void __user *priv;
+	unsigned int size;
+};
+
+/**
+ * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
+ * @context_id: KGSL context ID that owns the commands
+ * @flags:
+ * @cmdlist: User pointer to a list of kgsl_ibdesc structures
+ * @numcmds: Number of commands listed in cmdlist
+ * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
+ * @numsyncs: Number of sync points listed in synclist
+ * @timestamp: On entry a user defined timestamp, on exit the timestamp
+ * assigned to the command batch
+ *
+ * This structure specifies a command to send to the GPU hardware.  This is
+ * similar to kgsl_issueibcmds except that it doesn't support the legacy way to
+ * submit IB lists and it adds sync points to block the IB until the
+ * dependencies are satisfied.  This entry point is the new and preferred way
+ * to submit commands to the GPU.
+ */
+
+struct kgsl_submit_commands {
+	unsigned int context_id;
+	unsigned int flags;
+	struct kgsl_ibdesc __user *cmdlist;
+	unsigned int numcmds;
+	struct kgsl_cmd_syncpoint __user *synclist;
+	unsigned int numsyncs;
+	unsigned int timestamp;
+/* private: reserved for future use */
+	unsigned int __pad[4];
+};
+
+#define IOCTL_KGSL_SUBMIT_COMMANDS \
+	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
+
 #ifdef __KERNEL__
 #ifdef CONFIG_MSM_KGSL_DRM
 int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 013a778..4f39eaa 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -138,6 +138,16 @@
 
 #define QPNP_ADC_625_UV	625000
 #define QPNP_ADC_HWMON_NAME_LENGTH				64
+#define QPNP_MAX_PROP_NAME_LEN					32
+
+/* Structure device for qpnp vadc */
+struct qpnp_vadc_chip;
+
+/* Structure device for qpnp iadc */
+struct qpnp_iadc_chip;
+
+/* Structure device for qpnp adc tm */
+struct qpnp_adc_tm_chip;
 
 /**
  * enum qpnp_adc_decimation_type - Sampling rate supported.
@@ -893,7 +903,7 @@
  *	and returns the physical result
  */
 struct qpnp_vadc_scale_fn {
-	int32_t (*chan) (int32_t,
+	int32_t (*chan) (struct qpnp_vadc_chip *, int32_t,
 		const struct qpnp_adc_properties *,
 		const struct qpnp_vadc_chan_properties *,
 		struct qpnp_vadc_result *);
@@ -906,7 +916,8 @@
  *	and returns the physical result
  */
 struct qpnp_adc_tm_reverse_scale_fn {
-	int32_t (*chan) (struct qpnp_adc_tm_btm_param *,
+	int32_t (*chan) (struct qpnp_vadc_chip *,
+		struct qpnp_adc_tm_btm_param *,
 		uint32_t *, uint32_t *);
 };
 
@@ -1004,21 +1015,24 @@
 			|| defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE_MODULE)
 /**
  * qpnp_vadc_read() - Performs ADC read on the channel.
+ * @dev:	Structure device for qpnp vadc
  * @channel:	Input channel to perform the ADC read.
  * @result:	Structure pointer of type adc_chan_result
  *		in which the ADC read results are stored.
  */
-int32_t qpnp_vadc_read(enum qpnp_vadc_channels channel,
+int32_t qpnp_vadc_read(struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel,
 				struct qpnp_vadc_result *result);
 
 /**
  * qpnp_vadc_conv_seq_request() - Performs ADC read on the conversion
  *				sequencer channel.
+ * @dev:	Structure device for qpnp vadc
  * @channel:	Input channel to perform the ADC read.
  * @result:	Structure pointer of type adc_chan_result
  *		in which the ADC read results are stored.
  */
-int32_t qpnp_vadc_conv_seq_request(
+int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *dev,
 			enum qpnp_vadc_trigger trigger_channel,
 			enum qpnp_vadc_channels channel,
 			struct qpnp_vadc_result *result);
@@ -1041,6 +1055,7 @@
  * qpnp_adc_scale_default() - Scales the pre-calibrated digital output
  *		of an ADC to the ADC reference and compensates for the
  *		gain and offset.
+ * @dev:	Structure device for qpnp vadc
  * @adc_code:	pre-calibrated digital ouput of the ADC.
  * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
  *		reference voltage.
@@ -1048,7 +1063,8 @@
  *		slope and offset.
  * @chan_rslt:	Physical result to be stored.
  */
-int32_t qpnp_adc_scale_default(int32_t adc_code,
+int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt);
@@ -1057,6 +1073,7 @@
  *		of an ADC to the ADC reference and compensates for the
  *		gain and offset. Performs the AMUX out as 2mV/K and returns
  *		the temperature in milli degC.
+ * @dev:	Structure device for qpnp vadc
  * @adc_code:	pre-calibrated digital ouput of the ADC.
  * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
  *		reference voltage.
@@ -1064,7 +1081,8 @@
  *		slope and offset.
  * @chan_rslt:	Physical result to be stored.
  */
-int32_t qpnp_adc_scale_pmic_therm(int32_t adc_code,
+int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt);
@@ -1072,6 +1090,7 @@
  * qpnp_adc_scale_batt_therm() - Scales the pre-calibrated digital output
  *		of an ADC to the ADC reference and compensates for the
  *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
  * @adc_code:	pre-calibrated digital ouput of the ADC.
  * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
  *		reference voltage.
@@ -1079,7 +1098,8 @@
  *		slope and offset.
  * @chan_rslt:	physical result to be stored.
  */
-int32_t qpnp_adc_scale_batt_therm(int32_t adc_code,
+int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt);
@@ -1087,6 +1107,7 @@
  * qpnp_adc_scale_qrd_batt_therm() - Scales the pre-calibrated digital output
  *		of an ADC to the ADC reference and compensates for the
  *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
  * @adc_code:	pre-calibrated digital ouput of the ADC.
  * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
  *		reference voltage.
@@ -1094,7 +1115,8 @@
  *		slope and offset.
  * @chan_rslt:	physical result to be stored.
  */
-int32_t qpnp_adc_scale_qrd_batt_therm(int32_t adc_code,
+int32_t qpnp_adc_scale_qrd_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt);
@@ -1102,6 +1124,7 @@
  * qpnp_adc_scale_batt_id() - Scales the pre-calibrated digital output
  *		of an ADC to the ADC reference and compensates for the
  *		gain and offset.
+ * @dev:	Structure device for qpnp vadc
  * @adc_code:	pre-calibrated digital ouput of the ADC.
  * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
  *		reference voltage.
@@ -1109,7 +1132,7 @@
  *		slope and offset.
  * @chan_rslt:	physical result to be stored.
  */
-int32_t qpnp_adc_scale_batt_id(int32_t adc_code,
+int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *dev, int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt);
@@ -1118,6 +1141,7 @@
  *		of an ADC to the ADC reference and compensates for the
  *		gain and offset. Returns the temperature of the xo therm in mili
 		degC.
+ * @dev:	Structure device for qpnp vadc
  * @adc_code:	pre-calibrated digital ouput of the ADC.
  * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
  *		reference voltage.
@@ -1125,7 +1149,7 @@
  *		slope and offset.
  * @chan_rslt:	physical result to be stored.
  */
-int32_t qpnp_adc_tdkntcg_therm(int32_t adc_code,
+int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *dev, int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt);
@@ -1135,6 +1159,7 @@
  *		gain and offset. Returns the temperature of the therm in degC.
  *		It uses a mapping table computed for a 150K pull-up.
  *		Pull-up1 is an internal pull-up on the AMUX of 150K.
+ * @dev:	Structure device for qpnp vadc
  * @adc_code:	pre-calibrated digital ouput of the ADC.
  * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
  *		reference voltage.
@@ -1142,7 +1167,7 @@
  *		slope and offset.
  * @chan_rslt:	physical result to be stored.
  */
-int32_t qpnp_adc_scale_therm_pu1(int32_t adc_code,
+int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *dev, int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt);
@@ -1152,6 +1177,7 @@
  *		gain and offset. Returns the temperature of the therm in degC.
  *		It uses a mapping table computed for a 100K pull-up.
  *		Pull-up2 is an internal pull-up on the AMUX of 100K.
+ * @dev:	Structure device for qpnp vadc
  * @adc_code:	pre-calibrated digital ouput of the ADC.
  * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
  *		reference voltage.
@@ -1159,17 +1185,22 @@
  *		slope and offset.
  * @chan_rslt:	physical result to be stored.
  */
-int32_t qpnp_adc_scale_therm_pu2(int32_t adc_code,
+int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *dev, int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt);
 /**
- * qpnp_vadc_is_ready() - Clients can use this API to check if the
- *			  device is ready to use.
- * @result:	0 on success and -EPROBE_DEFER when probe for the device
- *		has not occured.
+ * qpnp_get_vadc() - Clients need to register with the vadc using the
+ *		corresponding device instance they want to read the channels
+ *		from. Read the bindings document on how to pass the phandle
+ *		for the corresponding vadc driver to register with.
+ * @dev:	Clients device structure
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the vadc
+ * @struct qpnp_vadc_chip * - On success returns the vadc device structure
+ *		pointer that needs to be used during an ADC request.
  */
-int32_t qpnp_vadc_is_ready(void);
+struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev, const char *name);
 /**
  * qpnp_adc_tm_scaler() - Performs reverse calibration.
  * @config:	Thermal monitoring configuration.
@@ -1185,17 +1216,20 @@
 /**
  * qpnp_get_vadc_gain_and_offset() - Obtains the VADC gain and offset
  *		for absolute and ratiometric calibration.
+ * @dev:	Structure device for qpnp vadc
  * @param:	The result in which the ADC offset and gain values are stored.
  * @type:	The calibration type whether client needs the absolute or
  *		ratiometric gain and offset values.
  */
-int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_linear_graph *param,
+int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *dev,
+			struct qpnp_vadc_linear_graph *param,
 			enum qpnp_adc_calib_type calib_type);
 /**
  * qpnp_adc_scale_millidegc_pmic_voltage_thr() - Performs reverse calibration
  *		on the low/high temperature threshold values passed by the
  *		client. The function coverts milldegC to voltage threshold
  *		and accounts for the corresponding channels scaling as (2mV/K).
+ * @dev:	Structure device for qpnp vadc
  * @param:	The input parameters that contain the low/high temperature
  *		values.
  * @low_threshold: The low threshold value that needs to be updated with
@@ -1203,7 +1237,7 @@
  * @high_threshold: The low threshold value that needs to be updated with
  *		the above calibrated voltage value.
  */
-int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(
+int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(struct qpnp_vadc_chip *dev,
 		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold);
 /**
@@ -1211,6 +1245,7 @@
  *		temperature threshold values passed by the client.
  *		The function maps the temperature to voltage and applies
  *		ratiometric calibration on the voltage values.
+ * @dev:	Structure device for qpnp vadc
  * @param:	The input parameters that contain the low/high temperature
  *		values.
  * @low_threshold: The low threshold value that needs to be updated with
@@ -1218,28 +1253,34 @@
  * @high_threshold: The low threshold value that needs to be updated with
  *		the above calibrated voltage value.
  */
-int32_t qpnp_adc_btm_scaler(struct qpnp_adc_tm_btm_param *param,
+int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold);
 /**
  * qpnp_adc_tm_scale_therm_voltage_pu2() - Performs reverse calibration
  *		and convert given temperature to voltage on supported
  *		thermistor channels using 100k pull-up.
+ * @dev:	Structure device for qpnp vadc
  * @param:	The input temperature values.
  */
-int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_adc_tm_config *param);
+int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *dev,
+				struct qpnp_adc_tm_config *param);
 /**
  * qpnp_adc_tm_scale_therm_voltage_pu2() - Performs reverse calibration
  *		and converts the given ADC code to temperature for
  *		thermistor channels using 100k pull-up.
+ * @dev:	Structure device for qpnp vadc
  * @reg:	The input ADC code.
  * @result:	The physical measurement temperature on the thermistor.
  */
-int32_t qpnp_adc_tm_scale_voltage_therm_pu2(uint32_t reg, int64_t *result);
+int32_t qpnp_adc_tm_scale_voltage_therm_pu2(struct qpnp_vadc_chip *dev,
+				uint32_t reg, int64_t *result);
 /**
  * qpnp_adc_usb_scaler() - Performs reverse calibration on the low/high
  *		voltage threshold values passed by the client.
  *		The function applies ratiometric calibration on the
  *		voltage values.
+ * @dev:	Structure device for qpnp vadc
  * @param:	The input parameters that contain the low/high voltage
  *		threshold values.
  * @low_threshold: The low threshold value that needs to be updated with
@@ -1247,13 +1288,15 @@
  * @high_threshold: The low threshold value that needs to be updated with
  *		the above calibrated voltage value.
  */
-int32_t qpnp_adc_usb_scaler(struct qpnp_adc_tm_btm_param *param,
+int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold);
 /**
  * qpnp_adc_vbatt_rscaler() - Performs reverse calibration on the low/high
  *		voltage threshold values passed by the client.
  *		The function applies ratiometric calibration on the
  *		voltage values.
+ * @dev:	Structure device for qpnp vadc
  * @param:	The input parameters that contain the low/high voltage
  *		threshold values.
  * @low_threshold: The low threshold value that needs to be updated with
@@ -1261,7 +1304,8 @@
  * @high_threshold: The low threshold value that needs to be updated with
  *		the above calibrated voltage value.
  */
-int32_t qpnp_adc_vbatt_rscaler(struct qpnp_adc_tm_btm_param *param,
+int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold);
 /**
  * qpnp_vadc_iadc_sync_request() - Performs Voltage ADC read and
@@ -1269,107 +1313,127 @@
  *		voltage and current request the VADC peripheral is
  *		prepared for conversion and the IADC sync conversion
  *		is done from the IADC peripheral.
+ * @dev:	Structure device for qpnp vadc
  * @channel:	Input channel to perform the voltage ADC read.
  */
-int32_t qpnp_vadc_iadc_sync_request(enum qpnp_vadc_channels channel);
+int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel);
 /**
  * qpnp_vadc_iadc_sync_complete_request() - Reads the ADC result and
  *		unlocks the peripheral.
+ * @dev:	Structure device for qpnp vadc
  * @result:	Structure pointer of type adc_chan_result
  *		in which the ADC read results are stored.
  */
-int32_t qpnp_vadc_iadc_sync_complete_request(
+int32_t qpnp_vadc_iadc_sync_complete_request(struct qpnp_vadc_chip *dev,
 	enum qpnp_vadc_channels channel, struct qpnp_vadc_result *result);
 /**
  * qpnp_vadc_sns_comp_result() - Compensate vbatt readings based on temperature
+ * @dev:	Structure device for qpnp vadc
  * @result:	Voltage in uV that needs compensation.
  */
-int32_t qpnp_vbat_sns_comp_result(int64_t *result);
+int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *dev,
+						int64_t *result);
 #else
-static inline int32_t qpnp_vadc_read(uint32_t channel,
+static inline int32_t qpnp_vadc_read(struct qpnp_vadc_chip *dev,
+				uint32_t channel,
 				struct qpnp_vadc_result *result)
 { return -ENXIO; }
-static inline int32_t qpnp_vadc_conv_seq_request(
+static inline int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *dev,
 			enum qpnp_vadc_trigger trigger_channel,
 			enum qpnp_vadc_channels channel,
 			struct qpnp_vadc_result *result)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_scale_default(int32_t adc_code,
+static inline int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_scale_pmic_therm(int32_t adc_code,
+static inline int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_scale_batt_therm(int32_t adc_code,
+static inline int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_scale_qrd_batt_therm(int32_t adc_code,
+static inline int32_t qpnp_adc_scale_qrd_batt_therm(
+			struct qpnp_vadc_chip *vadc, int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_scale_batt_id(int32_t adc_code,
+static inline int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_tdkntcg_therm(int32_t adc_code,
+static inline int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_scale_therm_pu1(int32_t adc_code,
+static inline int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_scale_therm_pu2(int32_t adc_code,
+static inline int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
-static inline int32_t qpnp_vadc_is_ready(void)
-{ return -ENXIO; }
-static inline int32_t qpnp_get_vadc_gain_and_offset(
+static inline struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev,
+							const char *name)
+{ return ERR_PTR(-ENXIO); }
+static inline int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *dev,
 			struct qpnp_vadc_linear_graph *param,
 			enum qpnp_adc_calib_type calib_type)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_usb_scaler(
+static inline int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *dev,
 		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_vbatt_rscaler(
+static inline int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *dev,
 		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_btm_scaler(
+static inline int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *dev,
 		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold)
 { return -ENXIO; }
 static inline int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(
+		struct qpnp_vadc_chip *dev,
 		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold)
 { return -ENXIO; }
 static inline int32_t qpnp_adc_tm_scale_therm_voltage_pu2(
+				struct qpnp_vadc_chip *dev,
 				struct qpnp_adc_tm_config *param)
 { return -ENXIO; }
 static inline int32_t qpnp_adc_tm_scale_voltage_therm_pu2(
+				struct qpnp_vadc_chip *dev,
 				uint32_t reg, int64_t *result)
 { return -ENXIO; }
-static inline int32_t qpnp_vadc_iadc_sync_request(
+static inline int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *dev,
 				enum qpnp_vadc_channels channel)
 { return -ENXIO; }
 static inline int32_t qpnp_vadc_iadc_sync_complete_request(
+				struct qpnp_vadc_chip *dev,
 				enum qpnp_vadc_channels channel,
 				struct qpnp_vadc_result *result)
 { return -ENXIO; }
-static inline int32_t qpnp_vbat_sns_comp_result(int64_t *result)
+static inline int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *dev,
+						int64_t *result)
 { return -ENXIO; }
 #endif
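
To make the new handle-based VADC API concrete, here is a minimal client sketch; it is illustrative only and not part of the patch. The consumer name "example" and the probe function are hypothetical, DIE_TEMP is assumed to be one of the enum qpnp_vadc_channels values, and the header path is assumed from the file being patched.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/qpnp/qpnp-adc.h>

/* Hypothetical client sketch -- not part of this patch. */
static int example_client_probe(struct platform_device *pdev)
{
	struct qpnp_vadc_chip *vadc;
	struct qpnp_vadc_result res;
	int rc;

	/*
	 * "example" is matched against the client's DT phandle property as
	 * described in the VADC bindings; qpnp_get_vadc() returns an ERR_PTR
	 * value (e.g. -EPROBE_DEFER) while the VADC device has not probed.
	 */
	vadc = qpnp_get_vadc(&pdev->dev, "example");
	if (IS_ERR(vadc))
		return PTR_ERR(vadc);

	/* DIE_TEMP is assumed to be a valid enum qpnp_vadc_channels entry */
	rc = qpnp_vadc_read(vadc, DIE_TEMP, &res);
	if (rc)
		return rc;

	dev_dbg(&pdev->dev, "die temp: %lld\n", res.physical);
	return 0;
}
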
 
@@ -1378,44 +1442,59 @@
 			|| defined(CONFIG_SENSORS_QPNP_ADC_CURRENT_MODULE)
 /**
  * qpnp_iadc_read() - Performs ADC read on the current channel.
+ * @dev:	Structure device for qpnp iadc
  * @channel:	Input channel to perform the ADC read.
  * @result:	Current across rsense in mA.
+ * @return:	0 on success.
  */
-int32_t qpnp_iadc_read(enum qpnp_iadc_channels channel,
+int32_t qpnp_iadc_read(struct qpnp_iadc_chip *dev,
+				enum qpnp_iadc_channels channel,
 				struct qpnp_iadc_result *result);
 /**
  * qpnp_iadc_get_rsense() - Reads the RDS resistance value from the
 			trim registers.
+ * @dev:	Structure device for qpnp iadc
  * @rsense:	RDS resistance in nOhms.
+ * @return:	0 on success.
  */
-int32_t qpnp_iadc_get_rsense(int32_t *rsense);
+int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *dev, int32_t *rsense);
 /**
  * qpnp_iadc_get_gain_and_offset() - Performs gain calibration
  *				over 17.8571mV and offset over selected
  *				channel. Channel can be internal rsense,
  *				external rsense and alternate lead pair.
+ * @dev:	Structure device for qpnp iadc
  * @result:	result structure where the gain and offset is stored of
  *		type qpnp_iadc_calib.
+ * @return:	0 on success.
  */
-int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_calib *result);
+int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *dev,
+					struct qpnp_iadc_calib *result);
 /**
- * qpnp_iadc_is_ready() - Clients can use this API to check if the
- *			  device is ready to use.
- * @result:	0 on success and -EPROBE_DEFER when probe for the device
- *		has not occured.
+ * qpnp_get_iadc() - Clients need to register with the iadc using the
+ *		corresponding device instance they want to read the channels
+ *		from. Read the bindings document on how to pass the phandle
+ *		for the corresponding iadc driver to register with.
+ * @dev:	Clients device structure
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the iadc
+ * @struct qpnp_iadc_chip * - On success returns the iadc device structure
+ *		pointer used every time the client makes an ADC request.
  */
-int32_t qpnp_iadc_is_ready(void);
+struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev, const char *name);
 /**
  * qpnp_iadc_vadc_sync_read() - Performs synchronous VADC and IADC read.
  *		The api is to be used only by the BMS to perform
  *		simultaneous VADC and IADC measurement for battery voltage
  *		and current.
+ * @dev:	Structure device for qpnp iadc
  * @i_channel:	Input battery current channel to perform the IADC read.
  * @i_result:	Current across the rsense in mA.
  * @v_channel:	Input battery voltage channel to perform VADC read.
  * @v_result:	Voltage on the vbatt channel with units in mV.
+ * @return:	0 on success.
  */
-int32_t qpnp_iadc_vadc_sync_read(
+int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *dev,
 	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
 	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result);
 /**
@@ -1423,31 +1502,60 @@
  *		IADC. The offset and gain values are programmed in the trim
  *		registers. The offset and the gain can be retrieved using
  *		qpnp_iadc_get_gain_and_offset
+ * @dev:	Structure device for qpnp iadc
  * @batfet_closed: batfet is opened or closed. The IADC chooses proper
  *			channel (internal/external) based on batfet status
  *			for calibration.
  * RETURNS:	0 on success.
  */
-int32_t qpnp_iadc_calibrate_for_trim(bool batfet_closed);
-int32_t qpnp_iadc_comp_result(int64_t *result);
+int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *dev,
+						bool batfet_closed);
+/**
+ * qpnp_iadc_comp_result() - Compensates the result of the current based on
+ *		the gain and offset coefficients and rsense parameters.
+ * @dev:	Structure device for qpnp iadc
+ * @result:	Current value to perform the compensation.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *dev, int64_t *result);
+/**
+ * qpnp_iadc_skip_calibration() - Clients can use this API to ask the driver
+ *				to skip iadc calibrations
+ * @dev:	Structure device for qpnp iadc
+ * @result:	0 on success and -EPROBE_DEFER when probe for the device
+ *		has not occurred.
+ */
+int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *dev);
+/**
+ * qpnp_iadc_resume_calibration() - Clients can use this API to ask the driver
+ *				to resume iadc calibrations
+ * @dev:	Structure device for qpnp iadc
+ * @result:	0 on success and -EPROBE_DEFER when probe for the device
+ *		has not occurred.
+ */
+int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *dev);
 #else
-static inline int32_t qpnp_iadc_read(enum qpnp_iadc_channels channel,
-						struct qpnp_iadc_result *result)
+static inline int32_t qpnp_iadc_read(struct qpnp_iadc_chip *iadc,
+	enum qpnp_iadc_channels channel, struct qpnp_iadc_result *result)
 { return -ENXIO; }
-static inline int32_t qpnp_iadc_get_rsense(int32_t *rsense)
+static inline int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc,
+							int32_t *rsense)
 { return -ENXIO; }
-static inline int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_calib
-									*result)
+static inline int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *iadc,
+				struct qpnp_iadc_calib *result)
 { return -ENXIO; }
-static inline int32_t qpnp_iadc_is_ready(void)
-{ return -ENXIO; }
-static inline int32_t qpnp_iadc_vadc_sync_read(
+static inline struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev,
+							const char *name)
+{ return ERR_PTR(-ENXIO); }
+static inline int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
 	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
 	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
 { return -ENXIO; }
-static inline int32_t qpnp_iadc_calibrate_for_trim(bool batfet_closed)
+static inline int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
+							bool batfet_closed)
 { return -ENXIO; }
-static inline int32_t qpnp_iadc_comp_result(int64_t *result, int32_t sign)
+static inline int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *iadc,
+						int64_t *result, int32_t sign)
 { return -ENXIO; }
 #endif
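
A corresponding sketch for the current ADC, again illustrative only and not part of the patch: the consumer name and probe function are hypothetical, and INTERNAL_RSENSE is assumed to be one of the enum qpnp_iadc_channels values.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/qpnp/qpnp-adc.h>

/* Hypothetical client sketch -- not part of this patch. */
static int example_bms_probe(struct platform_device *pdev)
{
	struct qpnp_iadc_chip *iadc;
	struct qpnp_iadc_result i_res;
	int rc;

	/* returns an ERR_PTR (e.g. -EPROBE_DEFER) until the IADC has probed */
	iadc = qpnp_get_iadc(&pdev->dev, "example");
	if (IS_ERR(iadc))
		return PTR_ERR(iadc);

	/* INTERNAL_RSENSE is assumed to be a valid enum qpnp_iadc_channels entry */
	rc = qpnp_iadc_read(iadc, INTERNAL_RSENSE, &i_res);
	if (rc)
		return rc;

	/* i_res now holds the current across rsense (see the kerneldoc above) */
	return 0;
}
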
 
@@ -1464,14 +1572,15 @@
  *		Clients pass the low/high voltage along with the threshold
  *		notification callback.
  */
-int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_btm_param *param);
+int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
 /**
  * qpnp_adc_tm_usbid_end() - Disables the monitoring of channel 0 thats
  *		assigned for monitoring USB_ID. Disables the low/high
  *		threshold activation for channel 0 as well.
  * @param:	none.
  */
-int32_t qpnp_adc_tm_usbid_end(void);
+int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip);
 /**
  * qpnp_adc_tm_channel_measure() - Configures kernel clients a channel to
  *		monitor the corresponding ADC channel for threshold detection.
@@ -1482,7 +1591,8 @@
  *		Clients pass the low/high temperature along with the threshold
  *		notification callback.
  */
-int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_btm_param *param);
+int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
 /**
  * qpnp_adc_tm_disable_chan_meas() - Disables the monitoring of channel thats
  *		assigned for monitoring kernel clients. Disables the low/high
@@ -1491,27 +1601,36 @@
  *		This is used to identify the channel for which the corresponding
  *		channels high/low threshold notification will be disabled.
  */
-int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_btm_param *param);
+int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
 /**
- * qpnp_adc_tm_is_ready() - Clients can use this API to check if the
- *			  device is ready to use.
- * @result:	0 on success and -EPROBE_DEFER when probe for the device
- *		has not occured.
+ * qpnp_get_adc_tm() - Clients need to register with the adc_tm using the
+ *		corresponding device instance they want to read the channels
+ *		from. Read the bindings document on how to pass the phandle
+ *		for the corresponding adc_tm driver to register with.
+ * @dev:	Clients device structure
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the adc_tm.
+ * @struct qpnp_adc_tm_chip * - On success returns the adc_tm device structure
+ *		pointer that needs to be used during an ADC TM request.
  */
-int32_t	qpnp_adc_tm_is_ready(void);
+struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name);
 #else
 static inline int32_t qpnp_adc_tm_usbid_configure(
+			struct qpnp_adc_tm_chip *chip,
 			struct qpnp_adc_tm_btm_param *param)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_tm_usbid_end(void)
+static inline int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip)
 { return -ENXIO; }
 static inline int32_t qpnp_adc_tm_channel_measure(
-		struct qpnp_adc_tm_btm_param *param)
+					struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_tm_disable_chan_meas(void)
+static inline int32_t qpnp_adc_tm_disable_chan_meas(
+					struct qpnp_adc_tm_chip *chip)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_tm_is_ready(void)
-{ return -ENXIO; }
+static inline struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev,
+							const char *name)
+{ return ERR_PTR(-ENXIO); }
 #endif
 
 #endif
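
For the threshold-monitoring path, a client obtains a handle the same way and passes it to the measurement calls. The sketch below is illustrative only and not part of the patch; the consumer name and wrapper function are hypothetical, and the btm parameter fields are deliberately left to the struct qpnp_adc_tm_btm_param definition rather than guessed here.

#include <linux/err.h>
#include <linux/qpnp/qpnp-adc.h>

/* Hypothetical client sketch -- not part of this patch. */
static int example_start_monitoring(struct device *dev)
{
	struct qpnp_adc_tm_chip *adc_tm;
	struct qpnp_adc_tm_btm_param param = { };

	adc_tm = qpnp_get_adc_tm(dev, "example");
	if (IS_ERR(adc_tm))
		return PTR_ERR(adc_tm);

	/*
	 * Fill in the channel, low/high thresholds and the threshold
	 * notification callback here, per struct qpnp_adc_tm_btm_param.
	 */
	return qpnp_adc_tm_channel_measure(adc_tm, &param);
}
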
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 8d104c6..59b6338 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -314,6 +314,7 @@
  * @pclk: clock struct of iface_clk.
  * @phy_reset_clk: clock struct of phy_clk.
  * @core_clk: clock struct of core_bus_clk.
+ * @sleep_clk: clock struct of sleep_clk for USB PHY.
  * @core_clk_rate: core clk max frequency
  * @regs: ioremapped register base address.
  * @inputs: OTG state machine inputs(Id, SessValid etc).
@@ -350,6 +351,7 @@
 	struct clk *pclk;
 	struct clk *phy_reset_clk;
 	struct clk *core_clk;
+	struct clk *sleep_clk;
 	long core_clk_rate;
 	struct resource *io_res;
 	void __iomem *regs;
@@ -461,11 +463,20 @@
 	bool l1_supported;
 };
 
+/**
+ * struct msm_hsic_host_platform_data - platform device data
+ *              for msm_hsic_host driver.
+ * @phy_sof_workaround: Enable ALL PHY SOF bug related workarounds for
+ *		SUSPEND, RESET and RESUME.
+ * @phy_susp_sof_workaround: Enable PHY SOF workaround only for SUSPEND.
+ *
+ */
 struct msm_hsic_host_platform_data {
 	unsigned strobe;
 	unsigned data;
 	bool ignore_cal_pad_config;
 	bool phy_sof_workaround;
+	bool phy_susp_sof_workaround;
 	u32 reset_delay;
 	int strobe_pad_offset;
 	int data_pad_offset;
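
A minimal board-data sketch for the new flag, assuming hypothetical GPIO numbers; only fields shown in this header are used, and the variable name is illustrative.

#include <linux/usb/msm_hsusb.h>

/* Hypothetical board-file sketch -- not part of this patch. */
static struct msm_hsic_host_platform_data example_hsic_pdata = {
	.strobe			 = 126,	/* assumed, board specific */
	.data			 = 127,	/* assumed, board specific */
	/* apply the PHY SOF workaround only around bus suspend */
	.phy_susp_sof_workaround = true,
};
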
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 4404df5..101325e 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -704,6 +704,7 @@
 #define V4L2_QCOM_BUF_FLAG_DECODEONLY 0x40000
 #define V4L2_QCOM_BUF_DATA_CORRUPT 0x80000
 #define V4L2_QCOM_BUF_DROP_FRAME 0x100000
+#define V4L2_QCOM_BUF_INPUT_UNSUPPORTED 0x200000
 
 /*
  *	O V E R L A Y   P R E V I E W
@@ -767,7 +768,8 @@
 #define V4L2_CAP_QCOM_FRAMESKIP	0x2000	/*  frame skipping is supported */
 
 struct v4l2_qcom_frameskip {
-	__u64		maxframeinterval;
+	__u64		   maxframeinterval;
+	__u8		   fpsvariance;
 };
 
 struct v4l2_outputparm {
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 5ae852a..ec8ec9a 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -98,6 +98,7 @@
 	struct msm_vfe_camif_cfg camif_cfg;
 	enum msm_vfe_inputmux input_mux;
 	enum ISP_START_PIXEL_PATTERN pixel_pattern;
+	uint32_t input_format;
 };
 
 struct msm_vfe_rdi_cfg {
diff --git a/include/media/msmb_pproc.h b/include/media/msmb_pproc.h
index 162729a..de42c38 100644
--- a/include/media/msmb_pproc.h
+++ b/include/media/msmb_pproc.h
@@ -13,6 +13,8 @@
 
 #define MAX_NUM_CPP_STRIPS 8
 #define MSM_CPP_MAX_NUM_PLANES 3
+#define MSM_CPP_MAX_FRAME_LENGTH 1024
+#define MSM_CPP_MAX_FW_NAME_LEN 32
 
 enum msm_cpp_frame_type {
 	MSM_CPP_OFFLINE_FRAME,
diff --git a/include/media/radio-iris.h b/include/media/radio-iris.h
index 4cbac7b..419e055 100644
--- a/include/media/radio-iris.h
+++ b/include/media/radio-iris.h
@@ -626,7 +626,8 @@
 	FM_RECV,
 	FM_TRANS,
 	FM_RESET,
-	FM_CALIB
+	FM_CALIB,
+	FM_TURNING_OFF
 };
 
 enum emphasis_type {
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 4ad1ca9..bd4cddf 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -92,7 +92,7 @@
 #define READDONE_IDX_SEQ_ID 10
 
 #define SOFT_PAUSE_PERIOD       30   /* ramp up/down for 30ms    */
-#define SOFT_PAUSE_STEP         2000 /* Step value 2ms or 2000us */
+#define SOFT_PAUSE_STEP         0 /* Step value 0ms or 0us */
 enum {
 	SOFT_PAUSE_CURVE_LINEAR = 0,
 	SOFT_PAUSE_CURVE_EXP,
@@ -100,7 +100,7 @@
 };
 
 #define SOFT_VOLUME_PERIOD       30   /* ramp up/down for 30ms    */
-#define SOFT_VOLUME_STEP         2000 /* Step value 2ms or 2000us */
+#define SOFT_VOLUME_STEP         0 /* Step value 0ms or 0us */
 enum {
 	SOFT_VOLUME_CURVE_LINEAR = 0,
 	SOFT_VOLUME_CURVE_EXP,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c69f5e2..1438de9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2310,6 +2310,19 @@
 	} while (memcg);
 }
 
+static bool zone_balanced(struct zone *zone, int order,
+			  unsigned long balance_gap, int classzone_idx)
+{
+	if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+				    balance_gap, classzone_idx, 0))
+		return false;
+
+	if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+		return false;
+
+	return true;
+}
+
 /*
  * pgdat_balanced is used when checking if a node is balanced for high-order
  * allocations. Only zones that meet watermarks and are in a zone allowed
@@ -2369,8 +2382,7 @@
 			continue;
 		}
 
-		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-							i, 0))
+		if (!zone_balanced(zone, order, 0, i))
 			all_zones_ok = false;
 		else
 			balanced += zone->present_pages;
@@ -2479,8 +2491,7 @@
 				break;
 			}
 
-			if (!zone_watermark_ok_safe(zone, order,
-					high_wmark_pages(zone), 0, 0)) {
+			if (!zone_balanced(zone, order, 0, 0)) {
 				end_zone = i;
 				break;
 			} else {
@@ -2556,9 +2567,8 @@
 				testorder = 0;
 
 			if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
-				    !zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone) + balance_gap,
-					end_zone, 0)) {
+			    !zone_balanced(zone, testorder,
+					   balance_gap, end_zone)) {
 				shrink_zone(zone, &sc);
 
 				reclaim_state->reclaimed_slab = 0;
@@ -2585,8 +2595,7 @@
 				continue;
 			}
 
-			if (!zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone), end_zone, 0)) {
+			if (!zone_balanced(zone, testorder, 0, end_zone)) {
 				all_zones_ok = 0;
 				/*
 				 * We are still under min water mark.  This
@@ -2681,22 +2690,6 @@
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable &&
-			    sc.priority != DEF_PRIORITY)
-				continue;
-
-			/* Would compaction fail due to lack of free memory? */
-			if (COMPACTION_BUILD &&
-			    compaction_suitable(zone, order) == COMPACT_SKIPPED)
-				goto loop_again;
-
-			/* Confirm the zone is balanced for order-0 */
-			if (!zone_watermark_ok(zone, 0,
-					high_wmark_pages(zone), 0, 0)) {
-				order = sc.order = 0;
-				goto loop_again;
-			}
-
 			/* Check if the memory needs to be defragmented. */
 			if (zone_watermark_ok(zone, order,
 				    low_wmark_pages(zone), *classzone_idx, 0))
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0c28508..247c69b 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -14,6 +14,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/kmemleak.h>
 
 #include <net/ip.h>
 #include <net/sock.h>
@@ -256,7 +257,7 @@
 {
 	static struct ctl_table empty[1];
 
-	register_sysctl_paths(net_core_path, empty);
+	kmemleak_not_leak(register_sysctl_paths(net_core_path, empty));
 	register_net_sysctl_rotable(net_core_path, net_core_table);
 	return register_pernet_subsys(&sysctl_core_ops);
 }
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 167ea10..d02a8da 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -109,6 +109,7 @@
 #include <net/rtnetlink.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
+#include <linux/kmemleak.h>
 #endif
 #include <net/secure_seq.h>
 
diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
index 46b0a91..8844b21 100644
--- a/sound/soc/codecs/msm8x10-wcd.c
+++ b/sound/soc/codecs/msm8x10-wcd.c
@@ -1157,6 +1157,16 @@
 	"ZERO", "DEC1", "DEC2", "RX1", "RX2", "RX3"
 };
 
+/*
+ * There is only one bit to select RX2 (0) or RX3 (1), so adding 'ZERO' does
+ * not affect selection of the right input, but it prevents lineout from
+ * being powered up when HPH is enabled, provided 'ZERO' is used in the
+ * disable sequence for lineout.
+ */
+static const char * const rx_rdac4_text[] = {
+	"ZERO", "RX3", "RX2"
+};
+
 static const struct soc_enum rx_mix1_inp1_chain_enum =
 	SOC_ENUM_SINGLE(MSM8X10_WCD_A_CDC_CONN_RX1_B1_CTL, 0, 6, rx_mix1_text);
 
@@ -1194,6 +1204,10 @@
 	SOC_ENUM_SINGLE(MSM8X10_WCD_A_CDC_CONN_EQ1_B1_CTL, 0, 6,
 	iir1_inp1_text);
 
+static const struct soc_enum rx_rdac4_enum  =
+	SOC_ENUM_SINGLE(MSM8X10_WCD_A_CDC_CONN_LO_DAC_CTL, 0, 3,
+	rx_rdac4_text);
+
 static const struct snd_kcontrol_new rx_mix1_inp1_mux =
 	SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
 
@@ -1221,6 +1235,9 @@
 static const struct snd_kcontrol_new rx2_mix2_inp1_mux =
 	SOC_DAPM_ENUM("RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
 
+static const struct snd_kcontrol_new rx_dac4_mux =
+	SOC_DAPM_ENUM("RDAC4 MUX Mux", rx_rdac4_enum);
+
 static int msm8x10_wcd_put_dec_enum(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_value *ucontrol)
 {
@@ -1312,6 +1329,10 @@
 	SOC_DAPM_SINGLE("Switch", MSM8X10_WCD_A_RX_HPH_L_DAC_CTL, 6, 1, 0)
 };
 
+static const struct snd_kcontrol_new spkr_switch[] = {
+	SOC_DAPM_SINGLE("Switch", MSM8X10_WCD_A_SPKR_DRV_DAC_CTL, 2, 1, 0)
+};
+
 static void msm8x10_wcd_codec_enable_adc_block(struct snd_soc_codec *codec,
 					 int enable)
 {
@@ -1791,13 +1812,6 @@
 	return 0;
 }
 
-static int msm8x10_wcd_spk_dac_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	dev_dbg(w->codec->dev, "%s %s %d\n", __func__, w->name, event);
-	return 0;
-}
-
 static const struct snd_soc_dapm_route audio_map[] = {
 	{"RX_I2S_CLK", NULL, "CDC_CONN"},
 	{"I2S RX1", NULL, "RX_I2S_CLK"},
@@ -1837,11 +1851,15 @@
 
 	{"LINEOUT PA", NULL, "CP"},
 	{"LINEOUT PA", NULL, "LINEOUT DAC"},
+	{"LINEOUT DAC", NULL, "RDAC4 MUX"},
+
+	{"RDAC4 MUX", "RX2", "RX2 CHAIN"},
+	{"RDAC4 MUX", "RX3", "RX3 CHAIN"},
 
 	{"CP", NULL, "CP_REGULATOR"},
 	{"CP", NULL, "RX_BIAS"},
 	{"SPK PA", NULL, "SPK DAC"},
-	{"SPK DAC", NULL, "RX3 CHAIN"},
+	{"SPK DAC", "Switch", "RX3 CHAIN"},
 
 	{"RX1 CHAIN", NULL, "RX1 CLK"},
 	{"RX2 CHAIN", NULL, "RX2 CLK"},
@@ -2270,6 +2288,9 @@
 		msm8x10_wcd_hphr_dac_event,
 		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 
+	SND_SOC_DAPM_MIXER("SPK DAC", SND_SOC_NOPM, 0, 0,
+		spkr_switch, ARRAY_SIZE(spkr_switch)),
+
 	/* Speaker */
 	SND_SOC_DAPM_OUTPUT("LINEOUT"),
 	SND_SOC_DAPM_OUTPUT("SPK_OUT"),
@@ -2289,10 +2310,6 @@
 		msm8x10_wcd_lineout_dac_event,
 		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 
-	SND_SOC_DAPM_DAC_E("SPK DAC", NULL, SND_SOC_NOPM, 0, 0,
-			   msm8x10_wcd_spk_dac_event,
-			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
 	SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
 
@@ -2340,6 +2357,8 @@
 		&rx1_mix2_inp1_mux),
 	SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
 		&rx2_mix2_inp1_mux),
+	SND_SOC_DAPM_MUX("RDAC4 MUX", SND_SOC_NOPM, 0, 0,
+		&rx_dac4_mux),
 
 	SND_SOC_DAPM_SUPPLY("MICBIAS_REGULATOR", SND_SOC_NOPM,
 		ON_DEMAND_MICBIAS, 0,
@@ -2512,6 +2531,8 @@
 	/* config DMIC clk to CLK_MODE_1 (3.2Mhz@9.6Mhz mclk) */
 	{MSM8X10_WCD_A_CDC_CLK_DMIC_B1_CTL, 0xEE, 0x22},
 
+	/* Disable REF_EN for MSM8X10_WCD_A_SPKR_DRV_DAC_CTL */
+	{MSM8X10_WCD_A_SPKR_DRV_DAC_CTL, 0x04, 0x00},
 };
 
 static void msm8x10_wcd_codec_init_reg(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index a6390dd..ff190cd 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -38,6 +38,9 @@
 #include "wcd9xxx-resmgr.h"
 #include "wcd9xxx-common.h"
 
+#define TAPAN_HPH_PA_SETTLE_COMP_ON 3000
+#define TAPAN_HPH_PA_SETTLE_COMP_OFF 13000
+
 static atomic_t kp_tapan_priv;
 static int spkr_drv_wrnd_param_set(const char *val,
 				   const struct kernel_param *kp);
@@ -224,8 +227,8 @@
 };
 
 static const u32 comp_shift[] = {
-	4, /* Compander 0's clock source is on interpolator 7 */
 	0,
+	1,
 	2,
 };
 
@@ -234,47 +237,44 @@
 	COMPANDER_1,
 	COMPANDER_2,
 	COMPANDER_2,
-	COMPANDER_2,
-	COMPANDER_2,
-	COMPANDER_0,
 	COMPANDER_MAX,
 };
 
 static const struct comp_sample_dependent_params comp_samp_params[] = {
 	{
 		/* 8 Khz */
-		.peak_det_timeout = 0x02,
+		.peak_det_timeout = 0x06,
 		.rms_meter_div_fact = 0x09,
 		.rms_meter_resamp_fact = 0x06,
 	},
 	{
 		/* 16 Khz */
-		.peak_det_timeout = 0x03,
+		.peak_det_timeout = 0x07,
 		.rms_meter_div_fact = 0x0A,
 		.rms_meter_resamp_fact = 0x0C,
 	},
 	{
 		/* 32 Khz */
-		.peak_det_timeout = 0x05,
+		.peak_det_timeout = 0x08,
 		.rms_meter_div_fact = 0x0B,
 		.rms_meter_resamp_fact = 0x1E,
 	},
 	{
 		/* 48 Khz */
-		.peak_det_timeout = 0x05,
+		.peak_det_timeout = 0x09,
 		.rms_meter_div_fact = 0x0B,
 		.rms_meter_resamp_fact = 0x28,
 	},
 	{
 		/* 96 Khz */
-		.peak_det_timeout = 0x06,
+		.peak_det_timeout = 0x0A,
 		.rms_meter_div_fact = 0x0C,
 		.rms_meter_resamp_fact = 0x50,
 	},
 	{
 		/* 192 Khz */
-		.peak_det_timeout = 0x07,
-		.rms_meter_div_fact = 0xD,
+		.peak_det_timeout = 0x0B,
+		.rms_meter_div_fact = 0xC,
 		.rms_meter_resamp_fact = 0xA0,
 	},
 };
@@ -673,6 +673,37 @@
 	dev_dbg(codec->dev, "%s: Compander %d enable current %d, new %d\n",
 		 __func__, comp, tapan->comp_enabled[comp], value);
 	tapan->comp_enabled[comp] = value;
+
+	if (comp == COMPANDER_1 &&
+			tapan->comp_enabled[comp] == 1) {
+		/* Wavegen to 5 msec */
+		snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_CTL, 0xDA);
+		snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_TIME, 0x15);
+		snd_soc_write(codec, TAPAN_A_RX_HPH_BIAS_WG_OCP, 0x2A);
+
+		/* Enable Chopper */
+		snd_soc_update_bits(codec,
+			TAPAN_A_RX_HPH_CHOP_CTL, 0x80, 0x80);
+
+		snd_soc_write(codec, TAPAN_A_NCP_DTEST, 0x20);
+		pr_debug("%s: Enabled Chopper and set wavegen to 5 msec\n",
+				__func__);
+	} else if (comp == COMPANDER_1 &&
+			tapan->comp_enabled[comp] == 0) {
+		/* Wavegen to 20 msec */
+		snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_CTL, 0xDB);
+		snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_TIME, 0x58);
+		snd_soc_write(codec, TAPAN_A_RX_HPH_BIAS_WG_OCP, 0x1A);
+
+		/* Disable CHOPPER block */
+		snd_soc_update_bits(codec,
+			TAPAN_A_RX_HPH_CHOP_CTL, 0x80, 0x00);
+
+		snd_soc_write(codec, TAPAN_A_NCP_DTEST, 0x10);
+		pr_debug("%s: Disabled Chopper and set wavegen to 20 msec\n",
+				__func__);
+	}
+
 	return 0;
 }
 
@@ -708,26 +739,52 @@
 
 static void tapan_discharge_comp(struct snd_soc_codec *codec, int comp)
 {
-	/* Update RSM to 1, DIVF to 5 */
-	snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8), 1);
+	/* Level meter DIV Factor to 5 */
 	snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8), 0xF0,
-			    1 << 5);
-	/* Wait for 1ms */
-	usleep_range(1000, 1000);
+			    0x05 << 4);
+	/* RMS meter Sampling to 0x01 */
+	snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8), 0x01);
+
+	/* Worst case timeout for compander CnP sleep timeout */
+	usleep_range(3000, 3000);
+}
+
+static enum wcd9xxx_buck_volt tapan_codec_get_buck_mv(
+	struct snd_soc_codec *codec)
+{
+	int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
+	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx_pdata *pdata = tapan->resmgr.pdata;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
+		if (!strncmp(pdata->regulator[i].name,
+					 WCD9XXX_SUPPLY_BUCK_NAME,
+					 sizeof(WCD9XXX_SUPPLY_BUCK_NAME))) {
+			if ((pdata->regulator[i].min_uV ==
+					WCD9XXX_CDC_BUCK_MV_1P8) ||
+				(pdata->regulator[i].min_uV ==
+					WCD9XXX_CDC_BUCK_MV_2P15))
+				buck_volt = pdata->regulator[i].min_uV;
+			break;
+		}
+	}
+	pr_debug("%s: S4 voltage requested is %d\n", __func__, buck_volt);
+	return buck_volt;
 }
 
 static int tapan_config_compander(struct snd_soc_dapm_widget *w,
 				  struct snd_kcontrol *kcontrol, int event)
 {
-	int mask, emask;
-	bool timedout;
-	unsigned long timeout;
+	int mask, enable_mask;
+	u8 rdac5_mux;
 	struct snd_soc_codec *codec = w->codec;
 	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
 	const int comp = w->shift;
 	const u32 rate = tapan->comp_fs[comp];
 	const struct comp_sample_dependent_params *comp_params =
 	    &comp_samp_params[rate];
+	enum wcd9xxx_buck_volt buck_mv;
 
 	dev_dbg(codec->dev, "%s: %s event %d compander %d, enabled %d",
 		__func__, w->name, event, comp, tapan->comp_enabled[comp]);
@@ -737,72 +794,105 @@
 
 	/* Compander 0 has single channel */
 	mask = (comp == COMPANDER_0 ? 0x01 : 0x03);
-	emask = (comp == COMPANDER_0 ? 0x02 : 0x03);
+	buck_mv = tapan_codec_get_buck_mv(codec);
+
+	rdac5_mux = snd_soc_read(codec, TAPAN_A_CDC_CONN_MISC);
+	rdac5_mux = (rdac5_mux & 0x04) >> 2;
+
+	if (comp == COMPANDER_0) {  /* SPK compander */
+		enable_mask = 0x02;
+	} else if (comp == COMPANDER_1) { /* HPH compander */
+		enable_mask = 0x03;
+	} else if (comp == COMPANDER_2) { /* LO compander */
+
+		if (rdac5_mux == 0) { /* DEM4 */
+
+			/* for LO Stereo SE, enable Compander 2 left
+			 * channel on RX3 interpolator Path and Compander 2
+			 * right channel on RX4 interpolator Path.
+			 */
+			enable_mask = 0x03;
+		} else if (rdac5_mux == 1) { /* DEM3_INV */
+
+			/* for LO mono differential only enable Compander 2
+			 * left channel on RX3 interpolator Path.
+			 */
+			enable_mask = 0x02;
+		} else {
+			dev_err(codec->dev, "%s: invalid rdac5_mux val %d",
+					__func__, rdac5_mux);
+			return -EINVAL;
+		}
+	} else {
+		dev_err(codec->dev, "%s: invalid compander %d", __func__, comp);
+		return -EINVAL;
+	}
 
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
+		/* Set compander Sample rate */
+		snd_soc_update_bits(codec,
+				    TAPAN_A_CDC_COMP0_FS_CFG + (comp * 8),
+				    0x07, rate);
+		/* Set the static gain offset for HPH Path */
+		if (comp == COMPANDER_1) {
+			if (buck_mv == WCD9XXX_CDC_BUCK_MV_2P15)
+				snd_soc_update_bits(codec,
+					TAPAN_A_CDC_COMP0_B4_CTL + (comp * 8),
+					0x80, 0x00);
+			else
+				snd_soc_update_bits(codec,
+					TAPAN_A_CDC_COMP0_B4_CTL + (comp * 8),
+					0x80, 0x80);
+		}
+		/* Enable RX interpolation path compander clocks */
+		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_B2_CTL,
+				    0x01 << comp_shift[comp],
+				    0x01 << comp_shift[comp]);
+
+		/* Toggle compander reset bits */
+		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
+				    0x01 << comp_shift[comp],
+				    0x01 << comp_shift[comp]);
+		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
+				    0x01 << comp_shift[comp], 0);
+
 		/* Set gain source to compander */
 		tapan_config_gain_compander(codec, comp, true);
-		/* Enable RX interpolation path clocks */
-		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_B2_CTL,
-				    mask << comp_shift[comp],
-				    mask << comp_shift[comp]);
+
+		/* Compander enable */
+		snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B1_CTL +
+				    (comp * 8), enable_mask, enable_mask);
 
 		tapan_discharge_comp(codec, comp);
 
-		/* Clear compander halt */
-		snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B1_CTL +
-					   (comp * 8),
-				    1 << 2, 0);
+		/* Set sample rate dependent parameter */
+		snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8),
+			      comp_params->rms_meter_resamp_fact);
+		snd_soc_update_bits(codec,
+				    TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
+				    0xF0, comp_params->rms_meter_div_fact << 4);
+		snd_soc_update_bits(codec,
+					TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
+					0x0F, comp_params->peak_det_timeout);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		/* Disable compander */
+		snd_soc_update_bits(codec,
+				    TAPAN_A_CDC_COMP0_B1_CTL + (comp * 8),
+				    enable_mask, 0x00);
+
 		/* Toggle compander reset bits */
 		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
 				    mask << comp_shift[comp],
 				    mask << comp_shift[comp]);
 		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
 				    mask << comp_shift[comp], 0);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		/* Set sample rate dependent paramater */
-		snd_soc_update_bits(codec,
-				    TAPAN_A_CDC_COMP0_FS_CFG + (comp * 8),
-				    0x07, rate);
-		snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8),
-			      comp_params->rms_meter_resamp_fact);
-		snd_soc_update_bits(codec,
-				    TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
-				    0x0F, comp_params->peak_det_timeout);
-		snd_soc_update_bits(codec,
-				    TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
-				    0xF0, comp_params->rms_meter_div_fact << 4);
-		/* Compander enable */
-		snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B1_CTL +
-				    (comp * 8), emask, emask);
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		/* Halt compander */
-		snd_soc_update_bits(codec,
-				    TAPAN_A_CDC_COMP0_B1_CTL + (comp * 8),
-				    1 << 2, 1 << 2);
-		/* Wait up to a second for shutdown complete */
-		timeout = jiffies + HZ;
-		do {
-			if ((snd_soc_read(codec,
-					  TAPAN_A_CDC_COMP0_SHUT_DOWN_STATUS +
-					  (comp * 8)) & mask) == mask)
-				break;
-		} while (!(timedout = time_after(jiffies, timeout)));
-		dev_dbg(codec->dev, "%s: Compander %d shutdown %s in %dms\n",
-			 __func__, comp, timedout ? "timedout" : "completed",
-			 jiffies_to_msecs(timeout - HZ - jiffies));
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		/* Disable compander */
-		snd_soc_update_bits(codec,
-				    TAPAN_A_CDC_COMP0_B1_CTL + (comp * 8),
-				    emask, 0x00);
+
 		/* Turn off the clock for compander in pair */
 		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_B2_CTL,
 				    mask << comp_shift[comp], 0);
+
 		/* Set gain source to register */
 		tapan_config_gain_compander(codec, comp, false);
 		break;
@@ -2267,6 +2357,7 @@
 	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
 	enum wcd9xxx_notify_event e_pre_on, e_post_off;
 	u8 req_clsh_state;
+	u32 pa_settle_time = TAPAN_HPH_PA_SETTLE_COMP_OFF;
 
 	dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
 	if (w->shift == 5) {
@@ -2282,23 +2373,32 @@
 		return -EINVAL;
 	}
 
+	if (tapan->comp_enabled[COMPANDER_1])
+		pa_settle_time = TAPAN_HPH_PA_SETTLE_COMP_ON;
+
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
 		/* Let MBHC module know PA is turning on */
 		wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_pre_on);
 		break;
-
 	case SND_SOC_DAPM_POST_PMU:
+		dev_dbg(codec->dev, "%s: sleep %d ms after %s PA enable.\n",
+			__func__, pa_settle_time / 1000, w->name);
+		/* Time needed for PA to settle */
+		usleep_range(pa_settle_time, pa_settle_time + 1000);
+
 		wcd9xxx_clsh_fsm(codec, &tapan->clsh_d,
 						 req_clsh_state,
 						 WCD9XXX_CLSH_REQ_ENABLE,
 						 WCD9XXX_CLSH_EVENT_POST_PA);
 
-
-		usleep_range(5000, 5010);
 		break;
-
 	case SND_SOC_DAPM_POST_PMD:
+		dev_dbg(codec->dev, "%s: sleep %d ms after %s PA disable.\n",
+			__func__, pa_settle_time / 1000, w->name);
+		/* Time needed for PA to settle */
+		usleep_range(pa_settle_time, pa_settle_time + 1000);
+
 		/* Let MBHC module know PA turned off */
 		wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_post_off);
 
@@ -2306,10 +2406,6 @@
 						 req_clsh_state,
 						 WCD9XXX_CLSH_REQ_DISABLE,
 						 WCD9XXX_CLSH_EVENT_POST_PA);
-
-		dev_dbg(codec->dev, "%s: sleep 10 ms after %s PA disable.\n",
-			 __func__, w->name);
-		usleep_range(5000, 5010);
 		break;
 	}
 	return 0;
@@ -2549,6 +2645,7 @@
 	{"RX1 MIX1", NULL, "COMP1_CLK"},
 	{"RX2 MIX1", NULL, "COMP1_CLK"},
 	{"RX3 MIX1", NULL, "COMP2_CLK"},
+	{"RX4 MIX1", NULL, "COMP0_CLK"},
 
 	{"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
 	{"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
@@ -3019,6 +3116,7 @@
 	u16 rx_mix_1_reg_1, rx_mix_1_reg_2;
 	u16 rx_fs_reg;
 	u8 rx_mix_1_reg_1_val, rx_mix_1_reg_2_val;
+	u8 rdac5_mux;
 	struct snd_soc_codec *codec = dai->codec;
 	struct wcd9xxx_ch *ch;
 	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
@@ -3036,6 +3134,9 @@
 
 		rx_mix_1_reg_1 = TAPAN_A_CDC_CONN_RX1_B1_CTL;
 
+		rdac5_mux = snd_soc_read(codec, TAPAN_A_CDC_CONN_MISC);
+		rdac5_mux = (rdac5_mux & 0x04) >> 2;
+
 		for (j = 0; j < NUM_INTERPOLATORS; j++) {
 			rx_mix_1_reg_2 = rx_mix_1_reg_1 + 1;
 
@@ -3060,9 +3161,14 @@
 				snd_soc_update_bits(codec, rx_fs_reg,
 						0xE0, rx_fs_rate_reg_val);
 
-				if (comp_rx_path[j] < COMPANDER_MAX)
-					tapan->comp_fs[comp_rx_path[j]]
-					= compander_fs;
+				if (comp_rx_path[j] < COMPANDER_MAX) {
+					if ((j == 3) && (rdac5_mux == 1))
+						tapan->comp_fs[COMPANDER_0] =
+							compander_fs;
+					else
+						tapan->comp_fs[comp_rx_path[j]]
+							= compander_fs;
+				}
 			}
 			if (j <= 1)
 				rx_mix_1_reg_1 += 3;
@@ -3893,13 +3999,13 @@
 
 	SND_SOC_DAPM_SUPPLY("COMP0_CLK", SND_SOC_NOPM, 0, 0,
 		tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+		SND_SOC_DAPM_PRE_PMD),
 	SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 1, 0,
 		tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+		SND_SOC_DAPM_PRE_PMD),
 	SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 2, 0,
 		tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+		SND_SOC_DAPM_PRE_PMD),
 
 	SND_SOC_DAPM_INPUT("AMIC1"),
 	SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 External", TAPAN_A_MICB_1_CTL, 7, 0,
@@ -4239,12 +4345,11 @@
 	TAPAN_REG_VAL(TAPAN_A_RX_HPH_CHOP_CTL, 0xF4),
 	TAPAN_REG_VAL(TAPAN_A_BIAS_CURR_CTL_2, 0x08),
 	TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_1, 0x5B),
-	TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_3, 0x60),
+	TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_3, 0x6F),
 
 	/* TODO: Check below reg writes conflict with above */
 	/* PROGRAM_THE_0P85V_VBG_REFERENCE = V_0P858V */
 	TAPAN_REG_VAL(TAPAN_A_BIAS_CURR_CTL_2, 0x04),
-	TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_4, 0x54),
 	TAPAN_REG_VAL(TAPAN_A_RX_HPH_CHOP_CTL, 0x74),
 	TAPAN_REG_VAL(TAPAN_A_RX_BUCK_BIAS1, 0x62),
 
@@ -4414,6 +4519,15 @@
 	{TAPAN_A_CDC_COMP0_B5_CTL, 0x7F, 0x7F},
 	{TAPAN_A_CDC_COMP1_B5_CTL, 0x7F, 0x7F},
 	{TAPAN_A_CDC_COMP2_B5_CTL, 0x7F, 0x7F},
+
+	/*
+	 * Setup wavegen timer to 20msec and disable chopper
+	 * as default. This corresponds to Compander OFF
+	 */
+	{TAPAN_A_RX_HPH_CNP_WG_CTL, 0xFF, 0xDB},
+	{TAPAN_A_RX_HPH_CNP_WG_TIME, 0xFF, 0x58},
+	{TAPAN_A_RX_HPH_BIAS_WG_OCP, 0xFF, 0x1A},
+	{TAPAN_A_RX_HPH_CHOP_CTL, 0xFF, 0x24},
 };
 
 static void tapan_codec_init_reg(struct snd_soc_codec *codec)
@@ -4596,30 +4710,6 @@
 	return 0;
 }
 
-static enum wcd9xxx_buck_volt tapan_codec_get_buck_mv(
-	struct snd_soc_codec *codec)
-{
-	int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
-	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
-	struct wcd9xxx_pdata *pdata = tapan->resmgr.pdata;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
-		if (!strncmp(pdata->regulator[i].name,
-					 WCD9XXX_SUPPLY_BUCK_NAME,
-					 sizeof(WCD9XXX_SUPPLY_BUCK_NAME))) {
-			if ((pdata->regulator[i].min_uV ==
-					WCD9XXX_CDC_BUCK_MV_1P8) ||
-				(pdata->regulator[i].min_uV ==
-					WCD9XXX_CDC_BUCK_MV_2P15))
-				buck_volt = pdata->regulator[i].min_uV;
-			break;
-		}
-	}
-	pr_debug("%s: S4 voltage requested is %d\n", __func__, buck_volt);
-	return buck_volt;
-}
-
 static int tapan_codec_probe(struct snd_soc_codec *codec)
 {
 	struct wcd9xxx *control;
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index 6fdebe6..f09e643 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -33,6 +33,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/kernel.h>
 #include <linux/gpio.h>
+#include <linux/input.h>
 #include "wcd9320.h"
 #include "wcd9306.h"
 #include "wcd9xxx-mbhc.h"
@@ -155,6 +156,8 @@
 
 static int wcd9xxx_detect_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
 				    uint32_t *zr);
+static s16 wcd9xxx_get_current_v(struct wcd9xxx_mbhc *mbhc,
+				 const enum wcd9xxx_current_v_idx idx);
 
 static bool wcd9xxx_mbhc_polling(struct wcd9xxx_mbhc *mbhc)
 {
@@ -185,6 +188,7 @@
 /* called under codec_resource_lock acquisition */
 static void wcd9xxx_start_hs_polling(struct wcd9xxx_mbhc *mbhc)
 {
+	s16 v_brh, v_b1_hu;
 	struct snd_soc_codec *codec = mbhc->codec;
 	int mbhc_state = mbhc->mbhc_state;
 
@@ -212,6 +216,17 @@
 		/* set to max */
 		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL, 0x7F);
 		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, 0xFF);
+
+		v_brh = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_BR_H);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
+			      (v_brh >> 8) & 0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL,
+			      v_brh & 0xFF);
+		v_b1_hu = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_HU);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL,
+			      v_b1_hu & 0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
+			      (v_b1_hu >> 8) & 0xFF);
 	}
 
 	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x1);
@@ -1000,7 +1015,7 @@
 	 * These will be released by wcd9xxx_cleanup_hs_polling
 	 */
 	WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
-	wcd9xxx_resmgr_get_bandgap(mbhc->resmgr, WCD9XXX_BANDGAP_MBHC_MODE);
+	wcd9xxx_resmgr_get_bandgap(mbhc->resmgr, WCD9XXX_BANDGAP_AUDIO_MODE);
 	wcd9xxx_resmgr_get_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO);
 	WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
 
@@ -1205,7 +1220,7 @@
 		goto exit;
 	}
 
-	for (i = 0, d = dt; i < size; i++, d++) {
+	for (i = 0, dprev = NULL, d = dt; i < size; i++, d++) {
 		if (d->vddio) {
 			dvddio = d;
 			continue;
@@ -2410,36 +2425,6 @@
 	return r;
 }
 
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_codec_drive_v_to_micbias(struct wcd9xxx_mbhc *mbhc,
-					     int usec)
-{
-	int cfilt_k_val;
-	bool set = true;
-
-	if (mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV &&
-	    mbhc->mbhc_micbias_switched) {
-		pr_debug("%s: set mic V to micbias V\n", __func__);
-		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
-				    0x2, 0x2);
-		wcd9xxx_turn_onoff_override(mbhc->codec, true);
-		while (1) {
-			cfilt_k_val =
-			    wcd9xxx_resmgr_get_k_val(mbhc->resmgr,
-						set ? mbhc->mbhc_data.micb_mv :
-						VDDIO_MICBIAS_MV);
-			snd_soc_update_bits(mbhc->codec,
-					    mbhc->mbhc_bias_regs.cfilt_val,
-					    0xFC, (cfilt_k_val << 2));
-			if (!set)
-				break;
-			usleep_range(usec, usec);
-			set = false;
-		}
-		wcd9xxx_turn_onoff_override(mbhc->codec, false);
-	}
-}
-
 static int wcd9xxx_is_fake_press(struct wcd9xxx_mbhc *mbhc)
 {
 	int i;
@@ -2564,12 +2549,43 @@
 	snd_soc_write(codec, mbhc->mbhc_bias_regs.ctl_reg, reg0);
 }
 
+/*
+ * wcd9xxx_update_rel_threshold : update mbhc release upper bound threshold
+ *				  to ceilmv + buffer
+ */
+static int wcd9xxx_update_rel_threshold(struct wcd9xxx_mbhc *mbhc, int ceilmv)
+{
+	u16 v_brh, v_b1_hu;
+	int mv;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
+	void *calibration = mbhc->mbhc_cfg->calibration;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
+	mv = ceilmv + btn_det->v_btn_press_delta_cic;
+	pr_debug("%s: reprogram vb1hu/vbrh to %dmv\n", __func__, mv);
+
+	/* update LSB first so mbhc hardware block doesn't see too low value */
+	v_b1_hu = wcd9xxx_codec_v_sta_dce(mbhc, STA, mv);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, v_b1_hu & 0xFF);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
+		      (v_b1_hu >> 8) & 0xFF);
+	v_brh = wcd9xxx_codec_v_sta_dce(mbhc, DCE, mv);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, v_brh & 0xFF);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
+		      (v_brh >> 8) & 0xFF);
+	return 0;
+}
+
 irqreturn_t wcd9xxx_dce_handler(int irq, void *data)
 {
 	int i, mask;
 	bool vddio;
 	u8 mbhc_status;
 	s16 dce_z, sta_z;
+	s32 stamv, stamv_s;
+	s16 *v_btn_high;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
 	int btn = -1, meas = 0;
 	struct wcd9xxx_mbhc *mbhc = data;
 	const struct wcd9xxx_mbhc_btn_detect_cfg *d =
@@ -2577,10 +2593,10 @@
 	short btnmeas[d->n_btn_meas + 1];
 	short dce[d->n_btn_meas + 1], sta;
 	s32 mv[d->n_btn_meas + 1], mv_s[d->n_btn_meas + 1];
-	s32 stamv, stamv_s;
 	struct snd_soc_codec *codec = mbhc->codec;
 	struct wcd9xxx *core = mbhc->resmgr->core;
 	int n_btn_meas = d->n_btn_meas;
+	void *calibration = mbhc->mbhc_cfg->calibration;
 
 	pr_debug("%s: enter\n", __func__);
 
@@ -2705,6 +2721,13 @@
 			__func__);
 			goto done;
 		}
+		btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
+		v_btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
+						       MBHC_BTN_DET_V_BTN_HIGH);
+		WARN_ON(btn >= btn_det->num_btn);
+		/* reprogram release threshold to catch voltage ramp up early */
+		wcd9xxx_update_rel_threshold(mbhc, v_btn_high[btn]);
+
 		mask = wcd9xxx_get_button_mask(btn);
 		mbhc->buttons_pressed |= mask;
 		wcd9xxx_lock_sleep(core);
@@ -2733,8 +2756,6 @@
 	WCD9XXX_BCL_LOCK(mbhc->resmgr);
 	mbhc->mbhc_state = MBHC_STATE_RELEASE;
 
-	wcd9xxx_codec_drive_v_to_micbias(mbhc, 10000);
-
 	if (mbhc->buttons_pressed & WCD9XXX_JACK_BUTTON_MASK) {
 		ret = wcd9xxx_cancel_btn_work(mbhc);
 		if (ret == 0) {
@@ -3785,7 +3806,13 @@
 	mutex_lock(&codec->mutex);
 
 	WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
-	wcd9xxx_resmgr_get_bandgap(mbhc->resmgr, WCD9XXX_BANDGAP_MBHC_MODE);
+	/*
+	 * Fast(mbhc) mode bandgap doesn't need to be enabled explicitly
+	 * since fast mode is set by MBHC hardware when override is on.
+	 * Enable bandgap mode to avoid unnecessary RCO disable and enable
+	 * during clock source change.
+	 */
+	wcd9xxx_resmgr_get_bandgap(mbhc->resmgr, WCD9XXX_BANDGAP_AUDIO_MODE);
 	wcd9xxx_resmgr_get_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO);
 	WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
 
@@ -3983,6 +4010,15 @@
 			return ret;
 		}
 
+		ret = snd_jack_set_key(mbhc->button_jack.jack,
+				       SND_JACK_BTN_0,
+				       KEY_MEDIA);
+		if (ret) {
+			pr_err("%s: Failed to set code for btn-0\n",
+				__func__);
+			return ret;
+		}
+
 		INIT_DELAYED_WORK(&mbhc->mbhc_firmware_dwork,
 				  wcd9xxx_mbhc_fw_read);
 		INIT_DELAYED_WORK(&mbhc->mbhc_btn_dwork, wcd9xxx_btn_lpress_fn);
@@ -4058,6 +4094,9 @@
 	}
 	wcd9xxx_disable_irq(codec->control_data, WCD9XXX_IRQ_HPH_PA_OCPR_FAULT);
 
+	wcd9xxx_regmgr_cond_register(resmgr, 1 << WCD9XXX_COND_HPH_MIC |
+					     1 << WCD9XXX_COND_HPH);
+
 	pr_debug("%s: leave ret %d\n", __func__, ret);
 	return ret;
 
@@ -4083,6 +4122,9 @@
 {
 	void *cdata = mbhc->codec->control_data;
 
+	wcd9xxx_regmgr_cond_deregister(mbhc->resmgr, 1 << WCD9XXX_COND_HPH_MIC |
+						     1 << WCD9XXX_COND_HPH);
+
 	wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_RELEASE, mbhc);
 	wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_POTENTIAL, mbhc);
 	wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_REMOVAL, mbhc);
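
A minimal standalone sketch of the byte ordering used by wcd9xxx_update_rel_threshold() above: the 16-bit STA/DCE threshold is written low byte first so that, per the comment in the hunk, the MBHC block never latches a value that is too low. The register names are taken from the hunk; the write helper below only prints and is not the codec API.

#include <stdint.h>
#include <stdio.h>

/* stand-in for snd_soc_write(): just show the register/value pair */
static void reg_write(const char *reg, uint8_t val)
{
	printf("%s <= 0x%02x\n", reg, val);
}

int main(void)
{
	uint16_t v_b1_hu = 0x01a4;	/* example threshold code */

	/* LSB first, as in the patch, so no intermediate low value is seen */
	reg_write("WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL", v_b1_hu & 0xFF);
	reg_write("WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL", (v_b1_hu >> 8) & 0xFF);
	return 0;
}
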
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.c b/sound/soc/codecs/wcd9xxx-resmgr.c
index be11e53..9633cc0 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr.c
@@ -652,15 +652,17 @@
 	return rc;
 }
 
-void wcd9xxx_resmgr_cond_trigger_cond(struct wcd9xxx_resmgr *resmgr,
-				      enum wcd9xxx_resmgr_cond cond)
+static void wcd9xxx_resmgr_cond_trigger_cond(struct wcd9xxx_resmgr *resmgr,
+					     enum wcd9xxx_resmgr_cond cond)
 {
 	struct list_head *l;
 	struct wcd9xxx_resmgr_cond_entry *e;
 	bool set;
 
 	pr_debug("%s: enter\n", __func__);
-	set = !!test_bit(cond, &resmgr->cond_flags);
+	/* update bit if cond isn't available or cond is set */
+	set = !test_bit(cond, &resmgr->cond_avail_flags) ||
+	      !!test_bit(cond, &resmgr->cond_flags);
 	list_for_each(l, &resmgr->update_bit_cond_h) {
 		e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list);
 		if (e->cond == cond)
@@ -672,6 +674,44 @@
 	pr_debug("%s: leave\n", __func__);
 }
 
+/*
+ * wcd9xxx_regmgr_cond_register : notify resmgr that conditions in condbits
+ *				  are available and trigger their update bits.
+ * condbits : contains bitmask of enum wcd9xxx_resmgr_cond
+ */
+void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr,
+				  unsigned long condbits)
+{
+	unsigned int cond;
+
+	for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
+		mutex_lock(&resmgr->update_bit_cond_lock);
+		WARN(test_bit(cond, &resmgr->cond_avail_flags),
+		     "Condition 0x%0x is already registered\n", cond);
+		set_bit(cond, &resmgr->cond_avail_flags);
+		wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
+		mutex_unlock(&resmgr->update_bit_cond_lock);
+		pr_debug("%s: Condition 0x%x is registered\n", __func__, cond);
+	}
+}
+
+void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr,
+				    unsigned long condbits)
+{
+	unsigned int cond;
+
+	for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
+		mutex_lock(&resmgr->update_bit_cond_lock);
+		WARN(!test_bit(cond, &resmgr->cond_avail_flags),
+		     "Condition 0x%0x isn't registered\n", cond);
+		clear_bit(cond, &resmgr->cond_avail_flags);
+		wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
+		mutex_unlock(&resmgr->update_bit_cond_lock);
+		pr_debug("%s: Condition 0x%x is deregistered\n", __func__,
+			 cond);
+	}
+}
+
 void wcd9xxx_resmgr_cond_update_cond(struct wcd9xxx_resmgr *resmgr,
 				     enum wcd9xxx_resmgr_cond cond, bool set)
 {
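
The cond_avail_flags bookkeeping added above changes how wcd9xxx_resmgr_cond_trigger_cond() evaluates a condition: a condition that has not been registered is treated as set. A small user-space sketch of that rule, with plain bit operations standing in for the kernel's test_bit()/set_bit() helpers and the condition numbers taken from the header hunk below:

#include <stdbool.h>
#include <stdio.h>

#define COND_HPH	0x01	/* headphone */
#define COND_HPH_MIC	0x02	/* microphone on the headset */

/* mirrors: set = !test_bit(cond, avail_flags) || test_bit(cond, cond_flags) */
static bool cond_effective(unsigned long avail, unsigned long flags,
			   unsigned int cond)
{
	return !(avail & (1UL << cond)) || (flags & (1UL << cond));
}

int main(void)
{
	unsigned long avail = 0, flags = 0;

	/* not registered yet: reported as set */
	printf("HPH_MIC effective: %d\n",
	       cond_effective(avail, flags, COND_HPH_MIC));

	/* registered (cond_avail_flags bit set) but condition itself clear */
	avail |= 1UL << COND_HPH_MIC;
	printf("HPH_MIC effective: %d\n",
	       cond_effective(avail, flags, COND_HPH_MIC));
	return 0;
}
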
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.h b/sound/soc/codecs/wcd9xxx-resmgr.h
index aaf7317..e6a8f5d 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr.h
+++ b/sound/soc/codecs/wcd9xxx-resmgr.h
@@ -134,6 +134,7 @@
 	struct wcd9xxx_mbhc *mbhc;
 
 	unsigned long cond_flags;
+	unsigned long cond_avail_flags;
 	struct list_head update_bit_cond_h;
 	struct mutex update_bit_cond_lock;
 
@@ -227,6 +228,10 @@
 	WCD9XXX_COND_HPH = 0x01, /* Headphone */
 	WCD9XXX_COND_HPH_MIC = 0x02, /* Microphone on the headset */
 };
+void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr,
+				  unsigned long condbits);
+void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr,
+				    unsigned long condbits);
 int wcd9xxx_resmgr_rm_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
 				       enum wcd9xxx_resmgr_cond cond,
 				       unsigned short reg, int shift,
diff --git a/sound/soc/msm/mdm9625.c b/sound/soc/msm/mdm9625.c
index ad8d85a..60e2c37 100644
--- a/sound/soc/msm/mdm9625.c
+++ b/sound/soc/msm/mdm9625.c
@@ -349,6 +349,14 @@
 	return 0;
 }
 
+static int mdm9625_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+				      struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+						      SNDRV_PCM_HW_PARAM_RATE);
+	rate->min = rate->max = 48000;
+	return 0;
+}
 
 static int mdm9625_mi2s_rx_ch_get(struct snd_kcontrol *kcontrol,
 				 struct snd_ctl_elem_value *ucontrol)
@@ -951,6 +959,45 @@
 		.be_hw_params_fixup = mdm9625_auxpcm_be_params_fixup,
 		.ops = &mdm9625_auxpcm_be_ops,
 	},
+	/* Incall Record Uplink BACK END DAI Link */
+	{
+		.name = LPASS_BE_INCALL_RECORD_TX,
+		.stream_name = "Voice Uplink Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.32772",
+		.platform_name = "msm-pcm-routing",
+		.codec_name     = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
+		.be_hw_params_fixup = mdm9625_be_hw_params_fixup,
+		.ignore_suspend = 1,
+	},
+	/* Incall Record Downlink BACK END DAI Link */
+	{
+		.name = LPASS_BE_INCALL_RECORD_RX,
+		.stream_name = "Voice Downlink Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.32771",
+		.platform_name = "msm-pcm-routing",
+		.codec_name     = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
+		.be_hw_params_fixup = mdm9625_be_hw_params_fixup,
+		.ignore_suspend = 1,
+	},
+	/* Incall Music BACK END DAI Link */
+	{
+		.name = LPASS_BE_VOICE_PLAYBACK_TX,
+		.stream_name = "Voice Farend Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.32773",
+		.platform_name = "msm-pcm-routing",
+		.codec_name     = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+		.be_hw_params_fixup = mdm9625_be_hw_params_fixup,
+		.ignore_suspend = 1,
+	},
 };
 
 static struct snd_soc_card snd_soc_card_mdm9625 = {
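
mdm9625_be_hw_params_fixup() above pins the new incall back ends to 48 kHz. As a hedged variation, the sketch below also pins the channel count; it uses the same hw_param_interval() helper, and the 2-channel value is an assumption for illustration, not something this patch sets.

/* illustrative only: constrain both rate and channel count for a back end */
static int example_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
				      struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *channels = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_CHANNELS);

	rate->min = rate->max = 48000;		/* as in the hunk above */
	channels->min = channels->max = 2;	/* assumed stereo */
	return 0;
}
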
diff --git a/sound/soc/msm/msm8226.c b/sound/soc/msm/msm8226.c
index d3ba3d5..85ed869 100644
--- a/sound/soc/msm/msm8226.c
+++ b/sound/soc/msm/msm8226.c
@@ -1146,6 +1146,22 @@
 		.ignore_pmdown_time = 1,
 		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA9,
 	},
+	{
+		.name = "VoLTE",
+		.stream_name = "VoLTE",
+		.cpu_dai_name   = "VoLTE",
+		.platform_name  = "msm-pcm-voice",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		/* this dai-link has playback support */
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_VOLTE,
+	},
 	/* Backend BT/FM DAI Links */
 	{
 		.name = LPASS_BE_INT_BT_SCO_RX,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index edb24fc..de60430 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -63,6 +63,7 @@
 static int lsm_mux_slim_port;
 static int slim0_rx_aanc_fb_port;
 static int msm_route_ec_ref_rx = 3; /* NONE */
+static uint32_t voc_session_id = ALL_SESSION_VSID;
 
 enum {
 	MADNONE,
@@ -572,7 +573,7 @@
 	}
 	if ((msm_bedais[reg].port_id == VOICE_RECORD_RX)
 			|| (msm_bedais[reg].port_id == VOICE_RECORD_TX))
-		voc_start_record(msm_bedais[reg].port_id, set);
+		voc_start_record(msm_bedais[reg].port_id, set, voc_session_id);
 
 	mutex_unlock(&routing_lock);
 }
@@ -2632,6 +2633,30 @@
 	msm_routing_put_rms_value_control),
 };
 
+static int msm_voc_session_id_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	voc_session_id = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: voc_session_id=%u\n", __func__, voc_session_id);
+
+	return 0;
+}
+
+static int msm_voc_session_id_get(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = voc_session_id;
+
+	return 0;
+}
+
+static struct snd_kcontrol_new msm_voc_session_controls[] = {
+	SOC_SINGLE_MULTI_EXT("Voc VSID", SND_SOC_NOPM, 0,
+			     0xFFFFFFFF, 0, 1, msm_voc_session_id_get,
+			     msm_voc_session_id_put),
+};
+
 static const struct snd_kcontrol_new eq_enable_mixer_controls[] = {
 	SOC_SINGLE_EXT("MultiMedia1 EQ Enable", SND_SOC_NOPM,
 	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_eq_enable_mixer,
@@ -3910,6 +3935,10 @@
 	snd_soc_add_platform_controls(platform,
 				get_rms_controls,
 			ARRAY_SIZE(get_rms_controls));
+
+	snd_soc_add_platform_controls(platform, msm_voc_session_controls,
+				      ARRAY_SIZE(msm_voc_session_controls));
+
 	return 0;
 }
 
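
The "Voc VSID" platform control above caches the session id that later voc_start_record() calls act on. A minimal alsa-lib sketch of setting it from user space is shown below; the "hw:0" card name is an assumption and the VSID value is taken from the command line rather than from any constant defined in this patch.

#include <stdio.h>
#include <stdlib.h>
#include <alsa/asoundlib.h>

int main(int argc, char **argv)
{
	snd_ctl_t *ctl;
	snd_ctl_elem_id_t *id;
	snd_ctl_elem_value_t *val;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <vsid>\n", argv[0]);
		return 1;
	}

	if (snd_ctl_open(&ctl, "hw:0", 0) < 0)	/* assumed sound card */
		return 1;

	snd_ctl_elem_id_alloca(&id);
	snd_ctl_elem_id_set_interface(id, SND_CTL_ELEM_IFACE_MIXER);
	snd_ctl_elem_id_set_name(id, "Voc VSID");

	snd_ctl_elem_value_alloca(&val);
	snd_ctl_elem_value_set_id(val, id);
	snd_ctl_elem_value_set_integer(val, 0, strtol(argv[1], NULL, 0));

	if (snd_ctl_elem_write(ctl, val) < 0)
		fprintf(stderr, "failed to set Voc VSID\n");

	snd_ctl_close(ctl);
	return 0;
}
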
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 869d642..6a34470 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -542,6 +542,7 @@
 	}
 
 	apr_deregister(ac->apr);
+	ac->apr = NULL;
 	ac->mmap_apr = NULL;
 	q6asm_session_free(ac);
 	q6asm_mmap_apr_dereg();
@@ -550,6 +551,7 @@
 
 /*done:*/
 	kfree(ac);
+	ac = NULL;
 	return;
 }
 
@@ -1327,6 +1329,11 @@
 {
 	pr_debug("%s:pkt_size=%d cmd_flg=%d session=%d\n", __func__, pkt_size,
 		cmd_flg, ac->session);
+	if (ac->apr == NULL) {
+		pr_err("%s: ac->apr is NULL", __func__);
+		return;
+	}
+
 	mutex_lock(&ac->cmd_lock);
 	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
 				APR_HDR_LEN(sizeof(struct apr_hdr)),\
@@ -1354,6 +1361,10 @@
 	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
 				APR_HDR_LEN(sizeof(struct apr_hdr)),\
 				APR_PKT_VER);
+	if (ac->apr == NULL) {
+		pr_err("%s: ac->apr is NULL", __func__);
+		return;
+	}
 	hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
 	hdr->src_domain = APR_DOMAIN_APPS;
 	hdr->dest_svc = APR_SVC_ASM;
@@ -2908,6 +2919,12 @@
 	int sz = 0;
 	int rc  = 0;
 
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
 	sz = sizeof(struct asm_volume_ctrl_lr_chan_gain);
 	q6asm_add_hdr_async(ac, &lrgain.hdr, sz, TRUE);
 	lrgain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -2950,6 +2967,12 @@
 	int sz = 0;
 	int rc  = 0;
 
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
 	sz = sizeof(struct asm_volume_ctrl_mute_config);
 	q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE);
 	mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -2991,6 +3014,12 @@
 	int sz = 0;
 	int rc  = 0;
 
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
 	sz = sizeof(struct asm_volume_ctrl_master_gain);
 	q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE);
 	vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -3034,6 +3063,12 @@
 	int sz = 0;
 	int rc  = 0;
 
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
 	sz = sizeof(struct asm_soft_pause_params);
 	q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE);
 	softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -3081,6 +3116,12 @@
 	int sz = 0;
 	int rc  = 0;
 
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
 	sz = sizeof(struct asm_soft_step_volume_params);
 	q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE);
 	softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -3127,6 +3168,12 @@
 	int sz = 0;
 	int rc  = 0;
 
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
 	if (eq_p == NULL) {
 		pr_err("%s[%d]: Invalid Eq param\n", __func__, ac->session);
 		rc = -EINVAL;
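
The q6asm hunks above clear ac->apr after apr_deregister() and then refuse to build or send commands once the handle is gone. The same clear-after-teardown-and-guard pattern in a standalone form, with heap memory standing in for the APR service handle:

#include <stdio.h>
#include <stdlib.h>

struct client {
	void *apr;	/* stands in for the APR service handle */
};

static int client_send(struct client *ac)
{
	if (!ac || ac->apr == NULL) {	/* same guard as the q6asm checks */
		fprintf(stderr, "APR handle NULL\n");
		return -1;
	}
	printf("command sent\n");
	return 0;
}

int main(void)
{
	struct client ac = { .apr = malloc(1) };

	client_send(&ac);	/* handle valid: command goes through */

	free(ac.apr);
	ac.apr = NULL;		/* clear after deregistration, as in the patch */
	client_send(&ac);	/* now rejected instead of using a stale handle */
	return 0;
}
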
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 7243f19..60dd522 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -130,6 +130,28 @@
 	return ret;
 }
 
+static bool voice_is_valid_session_id(uint32_t session_id)
+{
+	bool ret = false;
+
+	switch (session_id) {
+	case VOICE_SESSION_VSID:
+	case VOICE2_SESSION_VSID:
+	case VOLTE_SESSION_VSID:
+	case VOIP_SESSION_VSID:
+	case QCHAT_SESSION_VSID:
+	case ALL_SESSION_VSID:
+		ret = true;
+		break;
+	default:
+		pr_err("%s: Invalid session_id : %x\n", __func__, session_id);
+
+		break;
+	}
+
+	return ret;
+}
+
 static u16 voice_get_mvm_handle(struct voice_data *v)
 {
 	if (v == NULL) {
@@ -357,6 +379,21 @@
 	return false;
 }
 
+static void voc_set_error_state(uint16_t reset_proc)
+{
+	struct voice_data *v = NULL;
+	int i;
+
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		if (reset_proc == APR_DEST_MODEM  && i == VOC_PATH_FULL)
+			continue;
+
+		v = &common.voice[i];
+		if (v != NULL)
+			v->voc_state = VOC_ERROR;
+	}
+}
+
 static bool is_other_session_active(u32 session_id)
 {
 	int i;
@@ -2989,6 +3026,22 @@
 	v->music_info.force = 1;
 	voice_cvs_stop_playback(v);
 	voice_cvs_stop_record(v);
+	/* If a voice call is active during VoLTE, an SRVCC handover occurs;
+	   continue recording on the voice session if it was started on VoLTE.
+	 */
+	if (is_volte_session(v->session_id) &&
+	    ((common.voice[VOC_PATH_PASSIVE].voc_state == VOC_RUN) ||
+	     (common.voice[VOC_PATH_PASSIVE].voc_state == VOC_CHANGE))) {
+		if (v->rec_info.rec_enable) {
+			voice_cvs_start_record(
+				&common.voice[VOC_PATH_PASSIVE],
+				v->rec_info.rec_mode);
+			common.srvcc_rec_flag = true;
+
+			pr_debug("%s: switch recording, srvcc_rec_flag %d\n",
+				 __func__, common.srvcc_rec_flag);
+		}
+	}
 	/* send stop voice cmd */
 	voice_send_stop_voice_cmd(v);
 
@@ -3561,17 +3614,35 @@
 	return ret;
 }
 
-int voc_start_record(uint32_t port_id, uint32_t set)
+int voc_start_record(uint32_t port_id, uint32_t set, uint32_t session_id)
 {
 	int ret = 0;
 	int rec_mode = 0;
 	u16 cvs_handle;
-	int i, rec_set = 0;
+	int rec_set = 0;
+	struct voice_session_itr itr;
+	struct voice_data *v = NULL;
 
-	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
-		struct voice_data *v = &common.voice[i];
-		pr_debug("%s: i:%d port_id: %d, set: %d\n",
-			__func__, i, port_id, set);
+	/* check if session_id is valid */
+	if (!voice_is_valid_session_id(session_id)) {
+		pr_err("%s: Invalid session id:%u\n", __func__,
+		       session_id);
+
+		return -EINVAL;
+	}
+
+	voice_itr_init(&itr, session_id);
+	pr_debug("%s: session_id:%u\n", __func__, session_id);
+
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v == NULL) {
+			pr_err("%s: v is NULL, sessionid:%u\n", __func__,
+				session_id);
+
+			break;
+		}
+		pr_debug("%s: port_id: %d, set: %d, v: %p\n",
+			 __func__, port_id, set, v);
 
 		mutex_lock(&v->lock);
 		rec_mode = v->rec_info.rec_mode;
@@ -3579,13 +3650,11 @@
 		if (set) {
 			if ((v->rec_route_state.ul_flag != 0) &&
 				(v->rec_route_state.dl_flag != 0)) {
-				pr_debug("%s: i=%d, rec mode already set.\n",
-					__func__, i);
+				pr_debug("%s: rec mode already set.\n",
+					__func__);
+
 				mutex_unlock(&v->lock);
-				if (i < MAX_VOC_SESSIONS)
-					continue;
-				else
-					return 0;
+				continue;
 			}
 
 			if (port_id == VOICE_RECORD_TX) {
@@ -3615,13 +3684,10 @@
 		} else {
 			if ((v->rec_route_state.ul_flag == 0) &&
 				(v->rec_route_state.dl_flag == 0)) {
-				pr_debug("%s: i=%d, rec already stops.\n",
-					__func__, i);
+				pr_debug("%s: rec already stops.\n",
+					__func__);
 				mutex_unlock(&v->lock);
-				if (i < MAX_VOC_SESSIONS)
-					continue;
-				else
-					return 0;
+				continue;
 			}
 
 			if (port_id == VOICE_RECORD_TX) {
@@ -3650,8 +3716,8 @@
 				}
 			}
 		}
-		pr_debug("%s: i=%d, mode =%d, set =%d\n", __func__,
-			i, rec_mode, rec_set);
+		pr_debug("%s: mode =%d, set =%d\n", __func__,
+			 rec_mode, rec_set);
 		cvs_handle = voice_get_cvs_handle(v);
 
 		if (cvs_handle != 0) {
@@ -3661,6 +3727,18 @@
 				ret = voice_cvs_stop_record(v);
 		}
 
+		/* During SRVCC, recording switches from the VoLTE session to
+		   the voice session, so when recording is stopped it must
+		   also be stopped on the voice session.
+		 */
+		if ((!rec_set) && common.srvcc_rec_flag) {
+			pr_debug("%s, srvcc_rec_flag:%d\n",  __func__,
+				 common.srvcc_rec_flag);
+
+			voice_cvs_stop_record(&common.voice[VOC_PATH_PASSIVE]);
+			common.srvcc_rec_flag = false;
+		}
+
 		/* Cache the value */
 		v->rec_info.rec_enable = rec_set;
 		v->rec_info.rec_mode = rec_mode;
@@ -4284,7 +4362,7 @@
 	mutex_lock(&v->lock);
 
 	if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR ||
-	    v->voc_state == VOC_STANDBY) {
+	    v->voc_state == VOC_CHANGE || v->voc_state == VOC_STANDBY) {
 
 		pr_debug("%s: VOC_STATE: %d\n", __func__, v->voc_state);
 
@@ -4294,7 +4372,13 @@
 		voice_destroy_mvm_cvs_session(v);
 
 		v->voc_state = VOC_RELEASE;
+	} else {
+		pr_err("%s: Error: End voice called in state %d\n",
+			__func__, v->voc_state);
+
+		ret = -EINVAL;
 	}
+
 	mutex_unlock(&v->lock);
 	return ret;
 }
@@ -4496,8 +4580,10 @@
 		}
 
 		v->voc_state = VOC_RUN;
-	} else if (v->voc_state == VOC_STANDBY) {
-		pr_err("Error: start voice in Standby\n");
+	} else {
+		pr_err("%s: Error: Start voice called in state %d\n",
+			__func__, v->voc_state);
+
 		ret = -EINVAL;
 		goto fail;
 	}
@@ -4539,7 +4625,6 @@
 	struct common_data *c = NULL;
 	struct voice_data *v = NULL;
 	int i = 0;
-	uint32_t session_id = 0;
 
 	if ((data == NULL) || (priv == NULL)) {
 		pr_err("%s: data or priv is NULL\n", __func__);
@@ -4556,25 +4641,6 @@
 		if (data->reset_proc == APR_DEST_MODEM) {
 			pr_debug("%s: Received MODEM reset event\n", __func__);
 
-			session_id = voc_get_session_id(VOICE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOICE2_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOLTE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(QCHAT_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
 		} else {
 			pr_debug("%s: Reset event received in Voice service\n",
 				__func__);
@@ -4590,6 +4656,9 @@
 				c->voice[i].shmem_info.mem_handle = 0;
 			}
 		}
+		/* clean up srvcc rec flag */
+		c->srvcc_rec_flag = false;
+		voc_set_error_state(data->reset_proc);
 		return 0;
 	}
 
@@ -4687,7 +4756,6 @@
 	struct common_data *c = NULL;
 	struct voice_data *v = NULL;
 	int i = 0;
-	uint32_t session_id = 0;
 
 	if ((data == NULL) || (priv == NULL)) {
 		pr_err("%s: data or priv is NULL\n", __func__);
@@ -4704,25 +4772,6 @@
 		if (data->reset_proc == APR_DEST_MODEM) {
 			pr_debug("%s: Received Modem reset event\n", __func__);
 
-			session_id = voc_get_session_id(VOICE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOICE2_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOLTE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(QCHAT_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
 		} else {
 			pr_debug("%s: Reset event received in Voice service\n",
 				 __func__);
@@ -4734,6 +4783,8 @@
 			for (i = 0; i < MAX_VOC_SESSIONS; i++)
 				c->voice[i].cvs_handle = 0;
 		}
+
+		voc_set_error_state(data->reset_proc);
 		return 0;
 	}
 
@@ -4968,7 +5019,6 @@
 	struct common_data *c = NULL;
 	struct voice_data *v = NULL;
 	int i = 0;
-	uint32_t session_id = 0;
 
 	if ((data == NULL) || (priv == NULL)) {
 		pr_err("%s: data or priv is NULL\n", __func__);
@@ -4981,25 +5031,6 @@
 		if (data->reset_proc == APR_DEST_MODEM) {
 			pr_debug("%s: Received Modem reset event\n", __func__);
 
-			session_id = voc_get_session_id(VOICE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOICE2_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(VOLTE_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
-
-			session_id = voc_get_session_id(QCHAT_SESSION_NAME);
-			v = voice_get_session(session_id);
-			if (v != NULL)
-				v->voc_state = VOC_ERROR;
 		} else {
 			pr_debug("%s: Reset event received in Voice service\n",
 				 __func__);
@@ -5011,6 +5042,8 @@
 			for (i = 0; i < MAX_VOC_SESSIONS; i++)
 				c->voice[i].cvp_handle = 0;
 		}
+
+		voc_set_error_state(data->reset_proc);
 		return 0;
 	}
 
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index 1e9c813..20f2857 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -1338,6 +1338,8 @@
 	struct dtmf_driver_info dtmf_info;
 
 	struct voice_data voice[MAX_VOC_SESSIONS];
+
+	bool srvcc_rec_flag;
 };
 
 struct voice_session_itr {
@@ -1438,7 +1440,7 @@
 uint32_t voc_get_session_id(char *name);
 
 int voc_start_playback(uint32_t set, uint16_t port_id);
-int voc_start_record(uint32_t port_id, uint32_t set);
+int voc_start_record(uint32_t port_id, uint32_t set, uint32_t session_id);
 int voice_get_idx_for_session(u32 session_id);
 
 #endif
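
With the widened prototype above, in-kernel callers pass a VSID along with the port; the routing driver defaults to ALL_SESSION_VSID so the old act-on-every-session behaviour is preserved. A hedged caller sketch (the port and set values are only illustrative):

/* illustrative only: start incall TX recording across all active sessions */
static int example_start_incall_record(void)
{
	return voc_start_record(VOICE_RECORD_TX, 1, ALL_SESSION_VSID);
}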