Merge "usb: dwc3: Keep track of interrupt statistics" into msm-4.8
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 0cebc4d..0450145 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -86,11 +86,11 @@
 - MSMCOBALT
   compatible = "qcom,msmcobalt"
 
-- MSMSKUNK
-  compatible = "qcom,msmskunk"
+- SDM845
+  compatible = "qcom,sdm845"
 
-- SDMBAT
-  compatible = "qcom,sdmbat"
+- SDM830
+  compatible = "qcom,sdm830"
 
 - MSM8952
   compatible = "qcom,msm8952"
@@ -258,15 +258,15 @@
 compatible = "qcom,msmcobalt-sim"
 compatible = "qcom,msmcobalt-rumi"
 compatible = "qcom,msmcobalt-cdp"
-compatible = "qcom,msmskunk-sim"
-compatible = "qcom,msmskunk-rumi"
-compatible = "qcom,msmskunk-cdp"
-compatible = "qcom,msmskunk-mtp"
-compatible = "qcom,msmskunk-mtp"
-compatible = "qcom,sdmbat-sim"
-compatible = "qcom,sdmbat-rumi"
-compatible = "qcom,sdmbat-cdp"
-compatible = "qcom,sdmbat-mtp"
+compatible = "qcom,sdm845-sim"
+compatible = "qcom,sdm845-rumi"
+compatible = "qcom,sdm845-cdp"
+compatible = "qcom,sdm845-mtp"
+compatible = "qcom,sdm845-mtp"
+compatible = "qcom,sdm830-sim"
+compatible = "qcom,sdm830-rumi"
+compatible = "qcom,sdm830-cdp"
+compatible = "qcom,sdm830-mtp"
 compatible = "qcom,msm8952-rumi"
 compatible = "qcom,msm8952-sim"
 compatible = "qcom,msm8952-qrd"
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
index a4672e7..90ddc27 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
@@ -79,8 +79,8 @@
 		reg-names = "llcc_base";
 		status = "disabled";
 
-		llcc: qcom,msmskunk-llcc {
-			compatible = "qcom,msmskunk-llcc";
+		llcc: qcom,sdm845-llcc {
+			compatible = "qcom,sdm845-llcc";
 			#cache-cells = <1>;
 			max-slices = <32>;
 		};
diff --git a/Documentation/devicetree/bindings/arm/msm/spcom.txt b/Documentation/devicetree/bindings/arm/msm/spcom.txt
new file mode 100644
index 0000000..36a07ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/spcom.txt
@@ -0,0 +1,11 @@
+Qualcomm Technologies, Inc. Secure Processor Communication (spcom)
+
+Required properties:
+-compatible : should be "qcom,spcom"
+-qcom,spcom-ch-names: predefined channels name string
+
+Example:
+    qcom,spcom {
+            compatible = "qcom,spcom";
+            qcom,spcom-ch-names = "sp_kernel" , "sp_ssr";
+    };
diff --git a/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt b/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt
new file mode 100644
index 0000000..7b89497
--- /dev/null
+++ b/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt
@@ -0,0 +1,59 @@
+* Bluetooth Controller
+Bluetooth controller communicates with the Bluetooth Host using HCI Transport layer.
+HCI Transport layer can be based on UART or USB serial communication protocol.
+
+Required properties:
+  - compatible: Should be set to one of the following:
+	qca,ar3002
+	qca,qca6174
+	qca,wcn3990
+  - qca,bt-reset-gpio: GPIO pin to bring BT Controller out of reset
+
+Optional properties:
+  - qca,bt-vdd-pa-supply: Bluetooth VDD PA regulator handle
+  - qca,bt-vdd-io-supply: Bluetooth VDD IO regulator handle
+  - qca,bt-vdd-ldo-supply: Bluetooth VDD LDO regulator handle. Kept under optional parameters
+		as some of the chipsets don't require ldo or it may use from same vddio.
+  - qca,bt-vdd-xtal-supply: Bluetooth VDD XTAL regulator handle
+  - qca,bt-vdd-core-supply: Bluetooth VDD CORE regulator handle
+  - qca,bt-chip-pwd-supply: Chip power down gpio is required when bluetooth module
+		and other modules like wifi co-exist in a single chip and share a
+		common gpio to bring chip out of reset.
+  - qca,bt-vdd-pa-voltage-level: specifies VDD PA voltage levels for supply.
+		Should be specified in pairs (min, max), units uV
+  - qca,bt-vdd-io-voltage-level: specifies VDD IO voltage levels for supply.
+		Should be specified in pairs (min, max), units uV
+  - qca,bt-vdd-ldo-voltage-level: specifies VDD LDO voltage levels for supply.
+		Should be specified in pairs (min, max), units uV
+  - qca,bt-vdd-xtal-voltage-level: specifies VDD XTAL voltage levels for supply.
+                Should be specified in pairs (min, max), units uV
+  - qca,bt-vdd-core-voltage-level: specifies VDD CORE voltage levels for supply.
+                Should be specified in pairs (min, max), units uV
+ - qca,bt-vdd-io-current-level: specifies VDD IO current level in microamps
+ - qca,bt-vdd-xtal-current-level: specifies VDD XTAL current level in microamps
+ - qca,bt-vdd-core-current-level: specifies VDD CORE current level in microamps.
+ - qca,bt-vdd-ldo-current-level: specifies VDD LDO current level in microamps.
+ - qca,bt-vdd-pa-current-level: specifies VDD PA current level in microamps.
+ - qca,bt-chip-pwd-current-level: specifies Chip Power current level in microamps.
+
+Example:
+  bt-ar3002 {
+    compatible = "qca,ar3002";
+    qca,bt-reset-gpio = <&pm8941_gpios 34 0>;
+    qca,bt-vdd-io-supply = <&pm8941_s3>;
+    qca,bt-vdd-pa-supply = <&pm8941_l19>;
+    qca,bt-vdd-xtal-supply = <&pm8994_l30>;
+    qca,bt-vdd-core-supply = <&pm8994_s3>;
+    qca,bt-chip-pwd-supply = <&ath_chip_pwd_l>;
+
+    qca,bt-vdd-io-voltage-level = <1800000 1800000>;
+    qca,bt-vdd-pa-voltage-level = <2900000 2900000>;
+    qca,bt-vdd-xtal-voltage-level = <1800000 1800000>;
+    qca,bt-vdd-core-voltage-level = <1300000 1300000>;
+
+    qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+    qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+    qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+    qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+    qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+  };
diff --git a/Documentation/devicetree/bindings/bluetooth/btfm_slim.txt b/Documentation/devicetree/bindings/bluetooth/btfm_slim.txt
new file mode 100644
index 0000000..901db5f
--- /dev/null
+++ b/Documentation/devicetree/bindings/bluetooth/btfm_slim.txt
@@ -0,0 +1,20 @@
+* BTFM Slimbus Slave Driver
+BTFM Slimbus Slave driver configures and initializes the slimbus slave device.
+Bluetooth SCO and FM Audio data is transferred over slimbus interface.
+
+Required properties:
+  - compatible: Should be set to one of the following:
+     btfmslim_slave
+  - qcom,btfm-slim-ifd: BTFM slimbus slave device entry name
+
+Optional properties:
+  - qcom,btfm-slim-ifd-elemental-addr: BTFM slimbus slave device enumeration
+  address
+
+Example:
+  btfmslim_codec: wcn3990 {
+    compatible = "qcom,btfmslim_slave";
+    elemental-addr = [00 01 20 02 17 02];
+    qcom,btfm-slim-ifd = "btfmslim_slave_ifd";
+    qcom,btfm-slim-ifd-elemental-addr = [00 00 20 02 17 02];
+  };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 42a9ec1..7405115 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -16,7 +16,7 @@
 			"qcom,gcc-msm8974pro-ac"
 			"qcom,gcc-msm8996"
 			"qcom,gcc-mdm9615"
-			"qcom,gcc-msmskunk"
+			"qcom,gcc-sdm845"
 
 - reg : shall contain base register location and length
 - #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,videocc.txt b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
index b65de71..5dc109d 100644
--- a/Documentation/devicetree/bindings/clock/qcom,videocc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
@@ -2,7 +2,7 @@
 ----------------------------------------------------
 
 Required properties :
-- compatible : shall contain "qcom,video_cc-msmskunk"
+- compatible : shall contain "qcom,video_cc-sdm845"
 - reg : shall contain base register location and length
 - reg-names: names of registers listed in the same order as in
 	     the reg property.
@@ -14,7 +14,7 @@
 
 Example:
 	clock_videocc: qcom,videocc@ab00000 {
-		compatible = "qcom,video_cc-msmskunk";
+		compatible = "qcom,video_cc-sdm845";
 		reg = <0xab00000 0x10000>;
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pmcobalt_s9_level>;
diff --git a/Documentation/devicetree/bindings/cnss/icnss.txt b/Documentation/devicetree/bindings/cnss/icnss.txt
index e19a434..15feda3 100644
--- a/Documentation/devicetree/bindings/cnss/icnss.txt
+++ b/Documentation/devicetree/bindings/cnss/icnss.txt
@@ -12,17 +12,9 @@
   - reg-names: Names of the memory regions defined in reg entry
   - interrupts: Copy engine interrupt table
   - qcom,wlan-msa-memory: MSA memory size
-  - clocks: List of clock phandles
-  - clock-names: List of clock names corresponding to the "clocks" property
   - iommus: SMMUs and corresponding Stream IDs needed by WLAN
   - qcom,wlan-smmu-iova-address: I/O virtual address range as <start length>
     format to be used for allocations associated between WLAN and SMMU
-  - <supply-name>-supply: phandle to the regulator device tree node
-                          Required "supply-name" is "vdd-0.8-cx-mx".
-  - qcom,<supply>-config - specifies voltage levels for supply. Should be
-                           specified in pairs (min, max), units uV.  There can
-                           be optional load in uA and Regulator settle delay in
-                           uS.
 
 Optional properties:
   - qcom,icnss-vadc: VADC handle for vph_pwr read APIs.
@@ -34,8 +26,6 @@
         compatible = "qcom,icnss";
         reg = <0x0a000000 0x1000000>;
         reg-names = "membase";
-        clocks = <&clock_gcc clk_aggre2_noc_clk>;
-        clock-names = "smmu_aggre2_noc_clk";
         iommus = <&anoc2_smmu 0x1900>,
                  <&anoc2_smmu 0x1901>;
         qcom,wlan-smmu-iova-address = <0 0x10000000>;
@@ -53,6 +43,4 @@
 		   <0 140 0 /* CE10 */ >,
 		   <0 141 0 /* CE11 */ >;
         qcom,wlan-msa-memory = <0x200000>;
-	vdd-0.8-cx-mx-supply = <&pm8998_l5>;
-	qcom,vdd-0.8-cx-mx-config = <800000 800000 2400 1000>;
     };
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 8bc52d6..e4ba5be 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -147,12 +147,17 @@
 				indicating the danger luts on sspp.
 - qcom,sde-sspp-safe-lut:	A 3 cell property, with a format of <linear, tile, nrt>,
 				indicating the safe luts on sspp.
-- qcom,sde-sspp-max-rects:	Array of u32 values indicating maximum rectangles supported
-				on each sspp. This property is for multirect feature support.
-				Number of offsets defined should match the number of
-				offsets defined in property: qcom,sde-sspp-off.
 - qcom,sde-sspp-excl-rect:	Array of u32 values indicating exclusion rectangle
 				support on each sspp.
+- qcom,sde-sspp-smart-dma-priority:	Array of u32 values indicating hw pipe
+					priority of secondary rectangles when smart dma
+					is supported. Number of priority values should
+					match the number of offsets defined in
+					qcom,sde-sspp-off node. Zero indicates no support
+					for smart dma for the sspp.
+- qcom,sde-smart-dma-rev:	A string entry indicating the smart dma version
+				supported on the device. Supported entries are
+				"smart_dma_v1" and "smart_dma_v2".
 - qcom,sde-intf-type:		Array of string provides the interface type information.
 				Possible string values
 					"dsi" - dsi display interface
@@ -385,6 +390,11 @@
 				1 1 1 1
 				1 1
 				1 1>;
+    qcom,sde-sspp-smart-dma-priority = <0 0 0 0
+					0 0 0 0
+					0 0
+					1 2>;
+    qcom,sde-smart-dma-rev = "smart_dma_v2";
     qcom,sde-te-off = <0x100>;
     qcom,sde-te2-off = <0x100>;
     qcom,sde-te-size = <0xffff>;
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
new file mode 100644
index 0000000..3315304
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
@@ -0,0 +1,104 @@
+Qualcomm Technologies, Inc. QPNP PMIC current ADC driver
+
+QPNP PMIC current ADC (IADC) provides interface to clients to read
+current. A 16 bit ADC is used for current measurements. There are multiple
+peripherals to the IADC and the scope of the driver is to provide interface
+for the USR peripheral of the IADC.
+
+IADC node
+
+Required properties:
+- compatible : should be "qcom,qpnp-iadc" for Current ADC driver.
+- reg : offset and length of the PMIC Arbiter register map.
+- reg-names : resource names used for the physical base address of the PMIC IADC
+	      peripheral, the SMBB_BAT_IF_TRIM_CNST_RDS register.
+	      Should be "iadc-base" for the PMIC IADC peripheral base register.
+	      Should be "batt-id-trim-cnst-rds" for reading the
+	      SMBB_BAT_IF_TRIM_CNST_RDS register.
+- address-cells : Must be one.
+- size-cells : Must be zero.
+- interrupts : The USR bank peripheral IADC interrupt.
+- interrupt-names : Should contain "eoc-int-en-set".
+- qcom,adc-bit-resolution : Bit resolution of the ADC.
+- qcom,adc-vdd-reference : Voltage reference used by the ADC.
+
+Optional properties:
+- qcom,rsense : Use this property when external rsense should be used
+		for current calculation and specify the units in nano-ohms.
+- qcom,iadc-poll-eoc: Use polling instead of interrupts for End of Conversion completion.
+- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the
+		    PMIC type and revision for applying the appropriate temperature
+		    compensation parameters.
+- qcom,use-default-rds-trim : Add this property to check if certain conditions are to be checked
+			      reading the SMBB_BAT_IF_CNST_RDS, IADC_RDS trim register and
+			      manufacturer type. Check the driver for conditions that each of the type.
+			      0 : Select the TypeA to read the IADC and SMBB trim register and
+				  apply the default RSENSE if conditions are met.
+			      1 : Select the TypeB to read the IADC, SMBB trim register and
+				  manufacturer type and apply the default RSENSE if conditions are met.
+			      2 : Select the TypeC to read the IADC, SMBB trim register and
+				  apply the default RSENSE if conditions are met.
+
+Channel node
+NOTE: At least one Channel node is required.
+
+Client required property:
+- qcom,<consumer name>-iadc : The phandle to the corresponding iadc device.
+			The consumer name passed to the driver when calling
+			qpnp_get_iadc() is used to associate the client
+			with the corresponding device.
+
+Required properties:
+- label : Channel name used for sysfs entry.
+- reg : AMUX channel number.
+- qcom,channel-num : Channel number associated to the AMUX input.
+- qcom,decimation : Sampling rate to use for the individual channel measurement.
+		    Select from the following unsigned int.
+		    0 : 512
+		    1 : 1K
+		    2 : 2K
+		    3 : 4K
+- qcom,fast-avg-setup : Average number of samples to be used for measurement. Fast averaging
+			provides the option to obtain a single measurement from the ADC that
+			is an average of multiple samples. The value selected is 2^(value)
+			Select from the following unsigned int.
+			0 : 1
+			1 : 2
+			2 : 4
+			3 : 8
+			4 : 16
+			5 : 32
+			6 : 64
+			7 : 128
+			8 : 256
+- qcom,iadc-vadc : Corresponding phandle of the VADC device to read the die_temperature and set
+		simultaneous voltage and current conversion requests.
+
+Example:
+	/* Main Node */
+	qcom,iadc@3200 {
+                        compatible = "qcom,qpnp-iadc";
+                        reg = <0x3200 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+                        interrupts = <0 0x36 0>;
+			interrupt-names = "eoc-int-en-set";
+                        qcom,adc-bit-resolution = <16>;
+                        qcom,adc-vdd-reference = <1800>;
+			qcom,rsense = <1500>;
+			qcom,iadc-vadc = <&pm8941_vadc>;
+
+			/* Channel Node */
+                        chan@0 {
+                                label = "rsense";
+                                reg = <0>;
+                                qcom,decimation = <0>;
+                                qcom,fast-avg-setup = <0>;
+                        };
+	};
+
+Client device example:
+/* Add to the clients node that needs the IADC */
+client_node {
+	qcom,client-iadc = <&pm8941_iadc>;
+};
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
new file mode 100644
index 0000000..62ba54b
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -0,0 +1,197 @@
+Qualcomm Technologies, Inc. QPNP PMIC Voltage ADC Arbiter
+
+QPNP PMIC Voltage ADC (VADC) provides interface to clients to read
+Voltage. A 15 bit ADC is used for Voltage measurements. There are multiple
+peripherals to the VADC and the scope of the driver is to provide interface
+for the USR peripheral of the VADC.
+
+VADC node
+
+Required properties:
+- compatible : should be "qcom,qpnp-vadc" for Voltage ADC device driver and
+		"qcom,qpnp-vadc-hc" for VADC_HC voltage ADC device driver.
+- reg : offset and length of the PMIC Arbiter register map.
+- address-cells : Must be one.
+- size-cells : Must be zero.
+- interrupts : The USR bank peripheral VADC interrupt.
+- interrupt-names : Should contain "eoc-int-en-set" for EOC,
+		"high-thr-en-set" for high threshold interrupts and
+		"low-thr-en-set" for low threshold interrupts. High and low threshold
+		interrupts are to be enabled if VADC_USR needs to support recurring measurement.
+- qcom,adc-bit-resolution : Bit resolution of the ADC.
+- qcom,adc-vdd-reference : Voltage reference used by the ADC.
+
+Channel nodes
+NOTE: At least one Channel node is required.
+
+Optional properties:
+- qcom,vadc-poll-eoc: Use polling instead of interrupts for End of Conversion completion.
+- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the
+		    PMIC type and revision for applying the appropriate temperature
+		    compensation parameters.
+- qcom,vadc-meas-int-mode : Enable VADC_USR to handle requests to perform recurring measurements
+			   for any one supported channel along with supporting single conversion
+			   requests.
+- qcom,vadc-recalib-check: Add this property to check if recalibration required due to inaccuracy.
+- qcom,vadc-thermal-node : If present a thermal node is created and the channel is registered as
+			   part of the thermal sysfs which allows clients to use the thermal framework
+			   to set temperature thresholds and receive notification when the temperature
+			   crosses a set threshold, read temperature and enable/set trip types supported
+			   by the thermal framework.
+- hkadc_ldo-supply : Add this property if VADC needs to perform a Software Vote for the HKADC.
+- hkadc_ok-supply : Add this property if the VADC needs to perform a Software vote for the HKADC VREG_OK.
+- qcom,cal-val : Add this property for VADC_HC voltage ADC device to select from the following
+		unsigned int. If the property is not present the default calibration value of
+		using the timer value is chosen.
+		    0 : The calibration values used for measurement are from a timer.
+		    1 : Forces a fresh measurement for calibration values at the same time
+			measurement is taken.
+
+Client required property:
+- qcom,<consumer name>-vadc : The phandle to the corresponding vadc device.
+			The consumer name passed to the driver when calling
+			qpnp_get_vadc() is used to associate the client
+			with the corresponding device.
+
+Required properties:
+- label : Channel name used for sysfs entry.
+- reg : AMUX channel number.
+- qcom,decimation : Sampling rate to use for the individual channel measurement.
+		    Select from following unsigned int for Voltage ADC device.
+		    0 : 512
+		    1 : 1K
+		    2 : 2K
+		    3 : 4K
+		    Select from following unsigned int for VADC_HC voltage ADC device.
+		    0 : 256
+		    1 : 512
+		    2 : 1024
+- qcom,pre-div-channel-scaling : Pre-div used for the channel before the signal
+				 is being measured. Some of the AMUX channels
+				 support dividing the signal from a predetermined
+				 ratio. The configuration for this node is to know
+				 the pre-determined ratio and use it for post scaling.
+				 Select from the following unsigned int.
+				 0 : {1, 1}
+				 1 : {1, 3}
+				 2 : {1, 4}
+				 3 : {1, 6}
+				 4 : {1, 20}
+				 5 : {1, 8}
+				 6 : {10, 81}
+				 7 : {1, 10}
+- qcom,calibration-type : Reference voltage to use for channel calibration.
+			  Channel calibration is dependent on the channel.
+			  Certain channels like XO_THERM, BATT_THERM use ratiometric
+			  calibration. Most other channels fall under absolute calibration.
+			  Select from the following strings.
+			  "absolute" : Uses the 625mv and 1.25V reference channels.
+			  "ratiometric" : Uses the reference Voltage/GND for calibration.
+- qcom,scale-function : Scaling function used to convert raw ADC code to units specific to
+			a given channel.
+			Select from the following unsigned int.
+			0 : Default scaling to convert raw adc code to voltage.
+			1 : Conversion to temperature based on btm parameters.
+			2 : Returns result in degC for 100k pull-up.
+			3 : Returns current across 0.1 ohm resistor.
+			4 : Returns XO thermistor voltage in degree's Centigrade.
+			5 : Returns result in degC for 150k pull-up.
+			9 : Conversion to temperature based on -15~55 allowable
+			    battery charging temperature setting for btm parameters.
+- qcom,hw-settle-time : Settling period for the channel before ADC read.
+			Select from the following unsigned int.
+			0 : 0us
+			1 : 100us
+			2 : 200us
+			3 : 300us
+			4 : 400us
+			5 : 500us
+			6 : 600us
+			7 : 700us
+			8 : 800us
+			9 : 900us
+			0xa : 1ms
+			0xb : 2ms
+			0xc : 4ms
+			0xd : 6ms
+			0xe : 8ms
+			0xf : 10ms
+- qcom,fast-avg-setup : Average number of samples to be used for measurement. Fast averaging
+			provides the option to obtain a single measurement from the ADC that
+			is an average of multiple samples. The value selected is 2^(value)
+			Select from the following unsigned int for Voltage ADC device.
+			0 : 1
+			1 : 2
+			2 : 4
+			3 : 8
+			4 : 16
+			5 : 32
+			6 : 64
+			7 : 128
+			8 : 256
+			Select from the following unsigned int for VADC_HC ADC device.
+			0 : 1
+			1 : 2
+			2 : 4
+			3 : 8
+			4 : 16
+
+Example:
+	/* Main Node */
+	qcom,vadc@3100 {
+                        compatible = "qcom,qpnp-vadc";
+                        reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+                        interrupts = <0x0 0x31 0x0>;
+			interrupt-names = "eoc-int-en-set";
+                        qcom,adc-bit-resolution = <15>;
+                        qcom,adc-vdd-reference = <1800>;
+
+			/* Channel Node */
+                        chan@0 {
+                                label = "usb_in";
+                                reg = <0>;
+                                qcom,decimation = <0>;
+                                qcom,pre-div-channel-scaling = <4>;
+                                qcom,calibration-type = "absolute";
+                                qcom,scale-function = <0>;
+                                qcom,hw-settle-time = <0>;
+                                qcom,fast-avg-setup = <0>;
+                        };
+	};
+
+Client device example:
+/* Add to the clients node that needs the VADC channel A/D */
+client_node {
+	qcom,client-vadc = <&pm8941_vadc>;
+};
+
+/* Clients have an option of measuring an analog signal through an MPP.
+   MPP block is not part of the VADC block but is an individual PMIC
+   block that has an option to support clients to configure an MPP as
+   an analog input which can be routed through one of the VADC pre-mux
+   inputs. Here is an example of how to configure an MPP as an analog
+   input */
+
+/* Configure MPP4 as an Analog input to AMUX8 and read from channel 0x23 */
+/* MPP DT configuration in the platform DT file*/
+	mpp@a300 { /* MPP 4 */
+		qcom,mode = <4>; /* AIN input */
+		qcom,invert = <1>; /* Enable MPP */
+		qcom,ain-route = <3>; /* AMUX 8 */
+		qcom,master-en = <1>;
+		qcom,src-sel = <0>; /* Function constant */
+	};
+
+/* VADC Channel configuration */
+	chan@23 {
+		label = "mpp4_div3";
+		reg = <0x23>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
new file mode 100644
index 0000000..1ab49ed
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
@@ -0,0 +1,63 @@
+Qualcomm Technologies Inc., PMIC Round Robin ADC (RRADC)
+
+PMIC RRADC provides an interface to the clients to read
+the voltage, current and temperature for supported channels
+such as battery ID, battery thermistor, die temperature,
+charger temperature, USB_IN and DC_IN voltage and current.
+
+Main node properties:
+
+- compatible:
+    Usage: required
+    Value type: <string>
+    Definition: Should contain "qcom,rradc".
+
+- reg:
+    Usage: required
+    Value type: <prop-encoded-array>
+    Definition: RRADC base address and length in the PMIC register map.
+
+- #address-cells:
+    Usage: required
+    Value type: <u32>
+    Definition: Must be one. Child node 'channel' property should define ADC
+            channel number. For details about IIO bindings see:
+            Documentation/devicetree/bindings/iio/iio-bindings.txt
+
+- #size-cells:
+    Usage: required
+    Value type: <u32>
+    Definition: Must be zero. For details about IIO bindings see:
+            Documentation/devicetree/bindings/iio/iio-bindings.txt
+
+- #io-channel-cells:
+    Usage: required
+    Value type: <u32>
+    Definition: Must be one. For details about IIO bindings see:
+            Documentation/devicetree/bindings/iio/iio-bindings.txt
+
+IIO client nodes need to specify the RRADC channel number while requesting ADC reads.
+The channel list supported by the RRADC driver is available in the enum rradc_channel_id
+located at drivers/iio/adc/qcom-rradc.c. Clients can use this index from the enum
+as the channel number while requesting ADC reads.
+
+Optional property:
+- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the
+		    PMIC fabrication ID for applying the appropriate temperature
+		    compensation parameters.
+Example:
+
+	/* RRADC node */
+	pmic_rradc: rradc@4500 {
+		compatible = "qcom,rradc";
+		reg = <0x4500 0x100>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		#io-channel-cells = <1>;
+	};
+
+	/* IIO client node */
+	charger {
+		io-channels = <&pmic_rradc 0>;
+		io-channel-names = "rradc_batt_id";
+	};
diff --git a/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt b/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt
new file mode 100644
index 0000000..49d33a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt
@@ -0,0 +1,34 @@
+MSM MHI DEV
+
+MSM MHI DEV enables communication with the host over a PCIe link using the
+Modem Host Interface protocol. The driver interfaces with the IPA for
+enabling the HW acceleration channel path and provides interface for
+software channels to communicate between Host and device.
+
+Required properties:
+  - compatible: should be "qcom,msm-mhi-dev" for MHI device driver.
+  - reg: MHI MMIO physical register space.
+  - reg-names: resource names used for the MHI MMIO physical address region,
+	       IPA uC command and event ring doorbell mail box address.
+	       Should be "mhi_mmio_base" for MHI MMIO physical address,
+	       "ipa_uc_mbox_crdb" for IPA uC Command Ring doorbell,
+	       "ipa_uc_mbox_erdb" for IPA uC Event Ring doorbell passed to
+	       the IPA driver.
+  - qcom,mhi-ifc-id: ID of HW interface via which MHI on device side
+		communicates with host side.
+  - qcom,mhi-ep-msi: End point MSI number.
+  - qcom,mhi-version: MHI specification version supported by the device.
+
+Example:
+
+	mhi: qcom,msm-mhi-dev {
+		compatible = "qcom,msm-mhi-dev";
+		reg = <0xfc527000 0x1000>,
+		     <0xfd4fa000 0x1>,
+		     <0xfd4fa080 0x1>;
+		reg-names = "mhi_mmio_base", "ipa_uc_mbox_crdb",
+			    "ipa_uc_mbox_erdb";
+		qcom,mhi-ifc-id = <0x030017cb>;
+		qcom,mhi-ep-msi = <1>;
+		qcom,mhi-version = <0x1000000>;
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdmbat-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl
similarity index 96%
rename from Documentation/devicetree/bindings/pinctrl/qcom,sdmbat-pinctrl
rename to Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl
index 9616d9d..0fe8a1b 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sdmbat-pinctrl
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl
@@ -1,12 +1,12 @@
-Qualcomm Technologies, Inc. SDMBAT TLMM block
+Qualcomm Technologies, Inc. SDM830 TLMM block
 
 This binding describes the Top Level Mode Multiplexer block found in the
-SDMBAT platform.
+SDM830 platform.
 
 - compatible:
 	Usage: required
 	Value type: <string>
-	Definition: must be "qcom,sdmbat-pinctrl"
+	Definition: must be "qcom,sdm830-pinctrl"
 
 - reg:
 	Usage: required
@@ -136,7 +136,7 @@
 Example:
 
 	tlmm: pinctrl@03800000 {
-		compatible = "qcom,sdmbat-pinctrl";
+		compatible = "qcom,sdm830-pinctrl";
 		reg = <0x03800000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msmskunk-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
similarity index 97%
rename from Documentation/devicetree/bindings/pinctrl/qcom,msmskunk-pinctrl
rename to Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
index 10bbe56..9c26374 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msmskunk-pinctrl
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
@@ -1,12 +1,12 @@
-Qualcomm Technologies, Inc. MSMSKUNK TLMM block
+Qualcomm Technologies, Inc. SDM845 TLMM block
 
 This binding describes the Top Level Mode Multiplexer block found in the
-MSMSKUNK platform.
+SDM845 platform.
 
 - compatible:
 	Usage: required
 	Value type: <string>
-	Definition: must be "qcom,msmskunk-pinctrl"
+	Definition: must be "qcom,sdm845-pinctrl"
 
 - reg:
 	Usage: required
@@ -176,7 +176,7 @@
 Example:
 
 	tlmm: pinctrl@03400000 {
-		compatible = "qcom,msmskunk-pinctrl";
+		compatible = "qcom,sdm845-pinctrl";
 		reg = <0x03800000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index d0800d3..a1d7499 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2309,7 +2309,7 @@
 
 Required properties:
 - compatible : "qcom,msm8998-asoc-snd-stub" for MSM8998 target.
-	       "qcom,msmskunk-asoc-snd-stub" for MSMSKUNK target.
+	       "qcom,sdm845-asoc-snd-stub" for SDM845 target.
 - qcom,model : The user-visible name of this sound card.
 - qcom,tasha-mclk-clk-freq : MCLK frequency value for tasha codec
 - asoc-platform: This is phandle list containing the references to platform device
@@ -2380,10 +2380,10 @@
 		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
 	};
 
-* MSMSKUNK ASoC Machine driver
+* SDM845 ASoC Machine driver
 
 Required properties:
-- compatible : "qcom,msmskunk-asoc-snd-tavil"
+- compatible : "qcom,sdm845-asoc-snd-tavil"
 - qcom,model : The user-visible name of this sound card.
 - qcom,tavil-mclk-clk-freq : MCLK frequency value for tavil codec
 - qcom,audio-routing : A list of the connections between audio components.
@@ -2422,8 +2422,8 @@
 Example:
 
 	sound-tavil {
-		compatible = "qcom,msmskunk-asoc-snd-tavil";
-		qcom,model = "msmskunk-tavil-snd-card";
+		compatible = "qcom,sdm845-asoc-snd-tavil";
+		qcom,model = "sdm845-tavil-snd-card";
 		qcom,ext-disp-audio-rx;
 		qcom,wcn-btfm;
 		qcom,mi2s-audio-intf;
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
new file mode 100644
index 0000000..702f252
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
@@ -0,0 +1,241 @@
+Qualcomm Technologies, Inc. QPNP PMIC thermal monitor ADC driver (VADC_TM)
+
+QPNP PMIC thermal monitoring (TM) provides interface to thermal clients
+to set temperature thresholds and receive notification when the thresholds
+are crossed. A 15 bit ADC is used for measurements. The driver is part
+of the sysfs thermal framework that provides support to read the trip
+points, set threshold for the trip points and enable the trip points.
+Separate kernel APIs are provided to usb_id and batt_therm
+to set thresholds and receive threshold notifications.
+
+VADC_TM node
+
+Required properties:
+- compatible : should be "qcom,qpnp-adc-tm" for thermal ADC driver.
+	     : should be "qcom,qpnp-adc-tm-hc" for thermal ADC driver using
+	       refreshed BTM peripheral.
+- reg : offset and length of the PMIC Arbiter register map.
+- address-cells : Must be one.
+- size-cells : Must be zero.
+- interrupts : The thermal ADC bank peripheral interrupts for eoc, high and low interrupts.
+- interrupt-names : Should be "eoc-int-en-set", "high-thr-en-set" and "low-thr-en-set"
+		    for qcom,qpnp-adc-tm type.
+		  : Should be "eoc-int-en-set" for qcom,qpnp-adc-tm-hc type.
+- qcom,adc-bit-resolution : Bit resolution of the ADC.
+- qcom,adc-vdd-reference : Voltage reference used by the ADC.
+
+The following properties are required in the main node for qcom,qpnp-adc-tm-hc peripheral.
+- qcom,decimation : Should be present for qcom,qpnp-adc-tm-hc peripheral as its common setting
+		    across all the channels. Sampling rate to use for all the channel measurements.
+		    Select from the following unsigned int.
+		    0 : 512
+		    1 : 1K
+		    2 : 2K
+		    3 : 4K
+- qcom,fast-avg-setup : Should be present for qcom,qpnp-adc-tm-hc peripheral as its common setting
+		    across all the channels. Average number of samples to be used for measurement.
+		    Fast averaging provides the option to obtain a single measurement from the ADC
+		    that is an average of multiple samples. The value selected is 2^(value)
+		    Select from the following unsigned int.
+			0 : 1
+			1 : 2
+			2 : 4
+			3 : 8
+			4 : 16
+			5 : 32
+			6 : 64
+			7 : 128
+			8 : 256
+
+Optional properties:
+- qcom,thermal-node : If present a thermal node is created and the channel is registered as
+		part of the thermal sysfs which allows clients to use the thermal framework
+		to set temperature thresholds and receive notification when the temperature
+		crosses a set threshold, read temperature and enable/set trip types supported
+		by the thermal framework.
+- qcom,meas-interval-timer-idx: If present select from the following timer index to choose
+		a preset configurable measurement interval timer value. The driver defaults
+		to timer 2 with a measurement interval of 1 second if the property is not present.
+		0 : Select Timer 1 for a measurement polling interval of 3.9 milliseconds.
+		1 : Select Timer 2 for a measurement polling interval of 1 second.
+		2 : Select Timer 3 for a measurement polling interval of 4 seconds.
+- qcom,adc-tm-recalib-check: Add this property to check if recalibration required due to inaccuracy.
+- hkadc_ldo-supply : Add this property if VADC needs to perform a Software Vote for the HKADC.
+- hkadc_ok-supply : Add this property if the VADC needs to perform a Software vote for the HKADC VREG_OK.
+
+Client required property:
+- qcom,<consumer name>-adc_tm : The phandle to the corresponding adc_tm device.
+			The consumer name passed to the driver when calling
+			qpnp_get_adc_tm() is used to associate the client
+			with the corresponding device.
+
+Channel nodes
+NOTE: At least one Channel node is required.
+
+Required properties:
+- label : Channel name used for sysfs entry.
+- reg : AMUX channel number.
+- qcom,decimation : Sampling rate to use for the individual channel measurement.
+		    Select from the following unsigned int.
+		    0 : 512
+		    1 : 1K
+		    2 : 2K
+		    3 : 4K
+		    Note: This property is not required in the channel node in qcom,qpnp-adc-tm-hc
+		    peripheral.
+- qcom,pre-div-channel-scaling : Pre-div used for the channel before the signal is being measured.
+				 Select from the following unsigned int for the corresponding
+				 numerator/denominator pre-div ratio.
+				 0 : pre-div ratio of {1, 1}
+				 1 : pre-div ratio of {1, 3}
+				 2 : pre-div ratio of {1, 4}
+				 3 : pre-div ratio of {1, 6}
+				 4 : pre-div ratio of {1, 20}
+				 5 : pre-div ratio of {1, 8}
+				 6 : pre-div ratio of {10, 81}
+				 7 : pre-div ratio of {1, 10}
+- qcom,calibration-type : Reference voltage to use for channel calibration.
+			  Channel calibration is dependent on the channel.
+			  Certain channels like XO_THERM, BATT_THERM use ratiometric
+			  calibration. Most other channels fall under absolute calibration.
+			  Select from the following strings.
+			  "absolute" : Uses the 625mv and 1.25V reference channels.
+			  "ratiometric" : Uses the reference Voltage/GND for calibration.
+- qcom,scale-function : Reverse scaling function used to convert raw ADC code to units specific to
+			a given channel.
+			Select from the following unsigned int.
+			0 : Scaling to convert voltage in uV to raw adc code.
+			1 : Scaling to convert decidegC to raw adc code.
+			2 : Scaling for converting USB_ID reverse scaling.
+			3 : Scaling to convert millidegC to raw ADC code.
+			4 : Scaling to convert smb_batt_therm values to raw ADC code.
+			5 : Scaling to perform reverse calibration for absolute voltage from uV
+			    to raw ADC code.
+			6 : Scaling to convert qrd skuh battery decidegC to raw ADC code.
+- qcom,hw-settle-time : Settling period for the channel before ADC read.
+			Select from the following unsigned int.
+			0 : 0us
+			1 : 100us
+			2 : 200us
+			3 : 300us
+			4 : 400us
+			5 : 500us
+			6 : 600us
+			7 : 700us
+			8 : 800us
+			9 : 900us
+			0xa : 1ms
+			0xb : 2ms
+			0xc : 4ms
+			0xd : 6ms
+			0xe : 8ms
+			0xf : 10ms
+- qcom,fast-avg-setup : Average number of samples to be used for measurement. Fast averaging
+			provides the option to obtain a single measurement from the ADC that
+			is an average of multiple samples. The value selected is 2^(value)
+			Select from
+			0 : 1
+			1 : 2
+			2 : 4
+			3 : 8
+			4 : 16
+			5 : 32
+			6 : 64
+			7 : 128
+			8 : 256
+			Note: This property is not required in the channel node in
+			qcom,qpnp-adc-tm-hc peripheral.
+- qcom,btm-channel-number : Depending on the PMIC version, a max of up to 8 BTM channels.
+			    The BTM channel numbers are statically allocated to the
+			    corresponding channel node.
+- qcom,adc_tm-vadc : phandle to the corresponding VADC device to read the ADC channels.
+
+Client device example:
+/* Add to the clients node that needs the ADC_TM channel A/D */
+client_node {
+	qcom,client-adc_tm = <&pm8941_adc_tm>;
+};
+
+Example for "qcom,qpnp-adc-tm" device:
+	/* Main Node */
+	qcom,vadc@3400 {
+                        compatible = "qcom,qpnp-adc-tm";
+                        reg = <0x3400 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+                        interrupts = <0x0 0x34 0x0>,
+					<0x0 0x34 0x3>,
+					<0x0 0x34 0x4>;
+			interrupt-names = "eoc-int-en-set",
+					  "high-thr-en-set",
+					  "low-thr-en-set";
+                        qcom,adc-bit-resolution = <15>;
+                        qcom,adc-vdd-reference = <1800>;
+			qcom,adc_tm-vadc = <&pm8941_vadc>;
+
+			/* Channel Node to be registered as part of thermal sysfs */
+                        chan@b5 {
+                                label = "pa_therm1";
+				reg = <0xb5>;
+                                qcom,decimation = <0>;
+                                qcom,pre-div-channel-scaling = <0>;
+                                qcom,calibration-type = "absolute";
+                                qcom,scale-function = <2>;
+                                qcom,hw-settle-time = <0>;
+                                qcom,fast-avg-setup = <0>;
+				qcom,btm-channel-number = <0x70>;
+				qcom,thermal-node;
+                        };
+
+			/* Channel Node */
+			chan@6 {
+				label = "vbat_sns";
+				reg = <6>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <3>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,btm-channel-number = <0x78>;
+			};
+	};
+
+Example for "qcom,qpnp-adc-tm-hc" device:
+	/* Main Node */
+	pm8998_adc_tm: vadc@3400 {
+			compatible = "qcom,qpnp-adc-tm-hc";
+			reg = <0x3400 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1875>;
+			qcom,adc_tm-vadc = <&pm8998_vadc>;
+			qcom,decimation = <0>;
+			qcom,fast-avg-setup = <0>;
+
+			/* Channel Node to be registered as part of thermal sysfs */
+                        chan@b5 {
+                                label = "pa_therm1";
+				reg = <0xb5>;
+                                qcom,pre-div-channel-scaling = <0>;
+                                qcom,calibration-type = "absolute";
+                                qcom,scale-function = <2>;
+                                qcom,hw-settle-time = <0>;
+				qcom,btm-channel-number = <0x70>;
+				qcom,thermal-node;
+                        };
+
+			/* Channel Node */
+			chan@6 {
+				label = "vbat_sns";
+				reg = <6>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <3>;
+				qcom,hw-settle-time = <0>;
+				qcom,btm-channel-number = <0x78>;
+			};
+	};
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index 30f2f6c..af1ba92 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -10,7 +10,7 @@
 - compatible        : compatible list, contains one of the following:
 		      "qcom,ufs-phy-qmp-14nm"
 		      "qcom,ufs-phy-qmp-v3"
-		      "qcom,ufs-phy-qrbtc-msmskunk"
+		      "qcom,ufs-phy-qrbtc-sdm845"
 according to the relevant phy in use.
 - reg               : should contain PHY register address space (mandatory),
 - reg-names         : indicates various resources passed to driver (via reg proptery) by name.
diff --git a/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
new file mode 100644
index 0000000..54d342c
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
@@ -0,0 +1,67 @@
+Qualcomm Technologies, Inc. QPNP PD PHY - USB Power Delivery Physical layer
+
+Required properties:
+- compatible:		Must be "qcom,qpnp-pdphy"
+- reg:			The base address for this peripheral
+- vdd-pdphy-supply:	phandle to the VDD supply regulator node
+- interrupts:		Specifies the interrupt associated with the peripheral.
+- interrupt-names:	Specifies the interrupt names for the peripheral. Every
+			available interrupt needs to have an associated name
+			with it to identify its purpose.
+
+			The following interrupts are required:
+
+			0: sig-tx
+				Triggers when a signal (HardReset or CableReset)
+				has been sent.
+			1: sig-rx
+				Triggers when a signal has been received.
+			2: msg-tx
+				Triggers when a message has been sent and the
+				related GoodCRC has been received.
+			3: msg-rx
+				Triggers when a message has been received and
+				the related GoodCRC was sent successfully.
+			4: msg-tx-failed
+				Triggers when a message failed all its
+				transmission attempts, either due to a non-idle
+				bus or missing GoodCRC reply.
+			5: msg-tx-discarded
+				Triggers when a message is received while a
+				transmission request was in place. The request
+				itself is discarded.
+			6: msg-rx-discarded
+				Triggers when a message was received but had to
+				be discarded due to the RX buffer still in use
+				by SW.
+
+Optional properties:
+- vbus-supply:		Regulator that enables VBUS source output
+- vconn-supply:		Regulator that enables VCONN source output. This will
+			be supplied on the USB CC line that is not used for
+			communication when Ra resistance is detected.
+- qcom,vconn-uses-external-source: Indicates whether VCONN supply is sourced
+			from an external regulator. If omitted, then it is
+			assumed it is connected to VBUS.
+
+Example:
+	qcom,qpnp-pdphy@1700 {
+		compatible = "qcom,qpnp-pdphy";
+		reg = <0x1700 0x100>;
+		vdd-pdphy-supply = <&pm8998_l24>;
+		interrupts = <0x2 0x17 0x0 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x1 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x2 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x3 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x4 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x5 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x6 IRQ_TYPE_EDGE_RISING>;
+
+		interrupt-names = "sig-tx",
+				  "sig-rx",
+				  "msg-tx",
+				  "msg-rx",
+				  "msg-tx-failed",
+				  "msg-tx-discarded",
+				  "msg-rx-discarded";
+	};
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index c14034a..f7a21a6 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -119,22 +119,22 @@
 	help
 	  This enables support for the ARMv8 based Qualcomm chipsets.
 
-config ARCH_MSMSKUNK
-	bool "Enable Support for Qualcomm MSMSKUNK"
+config ARCH_SDM845
+	bool "Enable Support for Qualcomm SDM845"
 	depends on ARCH_QCOM
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
 	help
-	  This enables support for the MSMSKUNK chipset. If you do not
+	  This enables support for the SDM845 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
-config ARCH_SDMBAT
-	bool "Enable Support for Qualcomm Technologies Inc. SDMBAT"
+config ARCH_SDM830
+	bool "Enable Support for Qualcomm Technologies Inc. SDM830"
 	depends on ARCH_QCOM
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
 	help
-	  This enables support for the SDMBAT chipset. If you do not
+	  This enables support for the SDM830 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
 config ARCH_ROCKCHIP
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 3ed0b06..54acae6 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -2,15 +2,15 @@
 dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-mtp.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-db820c.dtb
 
-dtb-$(CONFIG_ARCH_MSMSKUNK) += msmskunk-sim.dtb \
-	msmskunk-rumi.dtb \
-	msmskunk-mtp.dtb \
-	msmskunk-cdp.dtb
+dtb-$(CONFIG_ARCH_SDM845) += sdm845-sim.dtb \
+	sdm845-rumi.dtb \
+	sdm845-mtp.dtb \
+	sdm845-cdp.dtb
 
-dtb-$(CONFIG_ARCH_SDMBAT) += sdmbat-sim.dtb \
-	sdmbat-rumi.dtb \
-	sdmbat-mtp.dtb \
-	sdmbat-cdp.dtb
+dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
+	sdm830-rumi.dtb \
+	sdm830-mtp.dtb \
+	sdm830-cdp.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-skunk.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msm-arm-smmu-skunk.dtsi
rename to arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msm-gdsc-skunk.dtsi b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msm-gdsc-skunk.dtsi
rename to arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dts b/arch/arm64/boot/dts/qcom/msmskunk-cdp.dts
deleted file mode 100644
index b1dd404..0000000
--- a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dts
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-
-#include "msmskunk.dtsi"
-#include "msmskunk-cdp.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. MSM skunk v1 CDP";
-	compatible = "qcom,msmskunk-cdp", "qcom,msmskunk", "qcom,cdp";
-	qcom,board-id = <1 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-mtp.dts b/arch/arm64/boot/dts/qcom/msmskunk-mtp.dts
deleted file mode 100644
index d6a6ffb..0000000
--- a/arch/arm64/boot/dts/qcom/msmskunk-mtp.dts
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-
-#include "msmskunk.dtsi"
-#include "msmskunk-mtp.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. MSM skunk v1 MTP";
-	compatible = "qcom,msmskunk-mtp", "qcom,msmskunk", "qcom,mtp";
-	qcom,board-id = <8 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-mtp.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-mtp.dtsi
deleted file mode 100644
index 930c8de..0000000
--- a/arch/arm64/boot/dts/qcom/msmskunk-mtp.dtsi
+++ /dev/null
@@ -1,14 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "msmskunk-pinctrl.dtsi"
-
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-sim.dts b/arch/arm64/boot/dts/qcom/msmskunk-sim.dts
deleted file mode 100644
index eb95256..0000000
--- a/arch/arm64/boot/dts/qcom/msmskunk-sim.dts
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-/memreserve/ 0x90000000 0x00000100;
-
-#include "msmskunk.dtsi"
-#include "msmskunk-sim.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. MSM SKUNK SIM";
-	compatible = "qcom,msmskunk-sim", "qcom,msmskunk", "qcom,sim";
-	qcom,board-id = <16 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dts b/arch/arm64/boot/dts/qcom/sdm830-cdp.dts
similarity index 85%
rename from arch/arm64/boot/dts/qcom/sdmbat-cdp.dts
rename to arch/arm64/boot/dts/qcom/sdm830-cdp.dts
index f8f916e..dab4a9d 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm830-cdp.dts
@@ -13,11 +13,11 @@
 
 /dts-v1/;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-cdp.dtsi"
+#include "sdm830.dtsi"
+#include "sdm830-cdp.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM bat v1 CDP";
-	compatible = "qcom,sdmbat-cdp", "qcom,sdmbat", "qcom,cdp";
+	compatible = "qcom,sdm830-cdp", "qcom,sdm830", "qcom,cdp";
 	qcom,board-id = <1 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi
similarity index 89%
copy from arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi
copy to arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi
index 77151c5..c7bbef0 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi
@@ -10,5 +10,5 @@
  * GNU General Public License for more details.
  */
 
-#include "msmskunk-cdp.dtsi"
-#include "sdmbat-pinctrl.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm830-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dts b/arch/arm64/boot/dts/qcom/sdm830-mtp.dts
similarity index 85%
rename from arch/arm64/boot/dts/qcom/sdmbat-mtp.dts
rename to arch/arm64/boot/dts/qcom/sdm830-mtp.dts
index fb8e85a..5da16e6 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm830-mtp.dts
@@ -13,11 +13,11 @@
 
 /dts-v1/;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-mtp.dtsi"
+#include "sdm830.dtsi"
+#include "sdm830-mtp.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM bat v1 MTP";
-	compatible = "qcom,sdmbat-mtp", "qcom,sdmbat", "qcom,mtp";
+	compatible = "qcom,sdm830-mtp", "qcom,sdm830", "qcom,mtp";
 	qcom,board-id = <8 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
similarity index 89%
rename from arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi
rename to arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
index 77151c5..b2d607d 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
@@ -10,5 +10,5 @@
  * GNU General Public License for more details.
  */
 
-#include "msmskunk-cdp.dtsi"
-#include "sdmbat-pinctrl.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm830-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
similarity index 94%
rename from arch/arm64/boot/dts/qcom/sdmbat-pinctrl.dtsi
rename to arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
index ead34a6..a8d559c 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
@@ -12,7 +12,7 @@
 
 &soc {
 	tlmm: pinctrl@03800000 {
-		compatible = "qcom,sdmbat-pinctrl";
+		compatible = "qcom,sdm830-pinctrl";
 		reg = <0x03800000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-rumi.dts b/arch/arm64/boot/dts/qcom/sdm830-rumi.dts
similarity index 78%
rename from arch/arm64/boot/dts/qcom/sdmbat-rumi.dts
rename to arch/arm64/boot/dts/qcom/sdm830-rumi.dts
index 2bf868e..2485051 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm830-rumi.dts
@@ -14,12 +14,12 @@
 /dts-v1/;
 /memreserve/ 0x90000000 0x00000100;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-rumi.dtsi"
+#include "sdm830.dtsi"
+#include "sdm830-rumi.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM BAT RUMI";
-	compatible = "qcom,sdmbat-rumi", "qcom,sdmbat", "qcom,rumi";
+	model = "Qualcomm Technologies, Inc. SDM830 RUMI";
+	compatible = "qcom,sdm830-rumi", "qcom,sdm830", "qcom,rumi";
 	qcom,board-id = <15 0>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi
similarity index 92%
rename from arch/arm64/boot/dts/qcom/sdmbat-rumi.dtsi
rename to arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi
index 11901f1..2bc5f3f 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi
@@ -13,8 +13,8 @@
 /*
  * As a general rule, only version-specific property overrides should be placed
  * inside this file. Common device definitions should be placed inside the
- * msmskunk-rumi.dtsi file.
+ * sdm845-rumi.dtsi file.
  */
 
- #include "msmskunk-rumi.dtsi"
+ #include "sdm845-rumi.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-sim.dts b/arch/arm64/boot/dts/qcom/sdm830-sim.dts
similarity index 79%
rename from arch/arm64/boot/dts/qcom/sdmbat-sim.dts
rename to arch/arm64/boot/dts/qcom/sdm830-sim.dts
index 216b3d0..57cd155 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-sim.dts
+++ b/arch/arm64/boot/dts/qcom/sdm830-sim.dts
@@ -14,12 +14,12 @@
 /dts-v1/;
 /memreserve/ 0x90000000 0x00000100;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-sim.dtsi"
+#include "sdm830.dtsi"
+#include "sdm830-sim.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM BAT SIM";
-	compatible = "qcom,sdmbat-sim", "qcom,sdmbat", "qcom,sim";
+	model = "Qualcomm Technologies, Inc. SDM830 SIM";
+	compatible = "qcom,sdm830-sim", "qcom,sdm830", "qcom,sim";
 	qcom,board-id = <16 0>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-sim.dtsi b/arch/arm64/boot/dts/qcom/sdm830-sim.dtsi
similarity index 92%
rename from arch/arm64/boot/dts/qcom/sdmbat-sim.dtsi
rename to arch/arm64/boot/dts/qcom/sdm830-sim.dtsi
index 560ad45..85e8075 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-sim.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-sim.dtsi
@@ -13,8 +13,8 @@
 /*
  * As a general rule, only version-specific property overrides should be placed
  * inside this file. Common device definitions should be placed inside the
- * msmskunk-sim.dtsi file.
+ * sdm845-sim.dtsi file.
  */
 
- #include "msmskunk-sim.dtsi"
+ #include "sdm845-sim.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/sdmbat.dtsi b/arch/arm64/boot/dts/qcom/sdm830.dtsi
similarity index 86%
rename from arch/arm64/boot/dts/qcom/sdmbat.dtsi
rename to arch/arm64/boot/dts/qcom/sdm830.dtsi
index 950d130..ff0d9a0 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830.dtsi
@@ -13,14 +13,14 @@
 /*
  * As a general rule, only version-specific property overrides should be placed
  * inside this file. Common device definitions should be placed inside the
- * msmskunk.dtsi file.
+ * sdm845.dtsi file.
  */
 
- #include "msmskunk.dtsi"
+ #include "sdm845.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM BAT";
-	compatible = "qcom,sdmbat";
+	model = "Qualcomm Technologies, Inc. SDM830";
+	compatible = "qcom,sdm830";
 	qcom,msm-id = <328 0x0>;
 
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
new file mode 100644
index 0000000..fb3d31f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -0,0 +1,1573 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+#include <dt-bindings/soc/qcom,tcs-mbox.h>
+
+&soc {
+	ad_hoc_bus: ad-hoc-bus {
+		compatible = "qcom,msm-bus-device";
+		reg = <0x016E0000 0x40000>,
+			<0x1700000 0x40000>,
+			<0x1500000 0x40000>,
+			<0x14E0000 0x40000>,
+			<0x17900000 0x40000>,
+			<0x1380000 0x40000>,
+			<0x1380000 0x40000>,
+			<0x1740000 0x40000>,
+			<0x1620000 0x40000>;
+
+		reg-names = "aggre1_noc-base", "aggre2_noc-base",
+			"config_noc-base", "dc_noc-base",
+			"gladiator_noc-base", "mc_virt-base", "mem_noc-base",
+			"mmss_noc-base", "system_noc-base";
+
+		mbox-names = "apps_rsc";
+		mboxes = <&apps_rsc 0>;
+
+	/*BCMs*/
+		bcm_acv: bcm-acv {
+			cell-id = <MSM_BUS_BCM_ACV>;
+			label = "ACV";
+			qcom,bcm-dev;
+		};
+
+		bcm_alc: bcm-alc {
+			cell-id = <MSM_BUS_BCM_ALC>;
+			label = "ALC";
+			qcom,bcm-dev;
+		};
+
+		bcm_mc0: bcm-mc0 {
+			cell-id = <MSM_BUS_BCM_MC0>;
+			label = "MC0";
+			qcom,bcm-dev;
+		};
+
+		bcm_sh0: bcm-sh0 {
+			cell-id = <MSM_BUS_BCM_SH0>;
+			label = "SH0";
+			qcom,bcm-dev;
+		};
+
+		bcm_mm0: bcm-mm0 {
+			cell-id = <MSM_BUS_BCM_MM0>;
+			label = "MM0";
+			qcom,bcm-dev;
+		};
+
+		bcm_sh1: bcm-sh1 {
+			cell-id = <MSM_BUS_BCM_SH1>;
+			label = "SH1";
+			qcom,bcm-dev;
+		};
+
+		bcm_mm1: bcm-mm1 {
+			cell-id = <MSM_BUS_BCM_MM1>;
+			label = "MM1";
+			qcom,bcm-dev;
+		};
+
+		bcm_sh2: bcm-sh2 {
+			cell-id = <MSM_BUS_BCM_SH2>;
+			label = "SH2";
+			qcom,bcm-dev;
+		};
+
+		bcm_mm2: bcm-mm2 {
+			cell-id = <MSM_BUS_BCM_MM2>;
+			label = "MM2";
+			qcom,bcm-dev;
+		};
+
+		bcm_sh3: bcm-sh3 {
+			cell-id = <MSM_BUS_BCM_SH3>;
+			label = "SH3";
+			qcom,bcm-dev;
+		};
+
+		bcm_mm3: bcm-mm3 {
+			cell-id = <MSM_BUS_BCM_MM3>;
+			label = "MM3";
+			qcom,bcm-dev;
+		};
+
+		bcm_sh4: bcm-sh4 {
+			cell-id = <MSM_BUS_BCM_SH4>;
+			label = "SH4";
+			qcom,bcm-dev;
+		};
+
+		bcm_sh5: bcm-sh5 {
+			cell-id = <MSM_BUS_BCM_SH5>;
+			label = "SH5";
+			qcom,bcm-dev;
+		};
+
+		bcm_mm5: bcm-mm5 {
+			cell-id = <MSM_BUS_BCM_MM5>;
+			label = "MM5";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn0: bcm-sn0 {
+			cell-id = <MSM_BUS_BCM_SN0>;
+			label = "SN0";
+			qcom,bcm-dev;
+		};
+
+		bcm_ce0: bcm-ce0 {
+			cell-id = <MSM_BUS_BCM_CE0>;
+			label = "CE0";
+			qcom,bcm-dev;
+		};
+
+		bcm_ip0: bcm-ip0 {
+			cell-id = <MSM_BUS_BCM_IP0>;
+			label = "IP0";
+			qcom,bcm-dev;
+		};
+
+		bcm_cn0: bcm-cn0 {
+			cell-id = <MSM_BUS_BCM_CN0>;
+			label = "CN0";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn1: bcm-sn1 {
+			cell-id = <MSM_BUS_BCM_SN1>;
+			label = "SN1";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn2: bcm-sn2 {
+			cell-id = <MSM_BUS_BCM_SN2>;
+			label = "SN2";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn3: bcm-sn3 {
+			cell-id = <MSM_BUS_BCM_SN3>;
+			label = "SN3";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn4: bcm-sn4 {
+			cell-id = <MSM_BUS_BCM_SN4>;
+			label = "SN4";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn5: bcm-sn5 {
+			cell-id = <MSM_BUS_BCM_SN5>;
+			label = "SN5";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn6: bcm-sn6 {
+			cell-id = <MSM_BUS_BCM_SN6>;
+			label = "SN6";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn7: bcm-sn7 {
+			cell-id = <MSM_BUS_BCM_SN7>;
+			label = "SN7";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn8: bcm-sn8 {
+			cell-id = <MSM_BUS_BCM_SN8>;
+			label = "SN8";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn9: bcm-sn9 {
+			cell-id = <MSM_BUS_BCM_SN9>;
+			label = "SN9";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn11: bcm-sn11 {
+			cell-id = <MSM_BUS_BCM_SN11>;
+			label = "SN11";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn12: bcm-sn12 {
+			cell-id = <MSM_BUS_BCM_SN12>;
+			label = "SN12";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn14: bcm-sn14 {
+			cell-id = <MSM_BUS_BCM_SN14>;
+			label = "SN14";
+			qcom,bcm-dev;
+		};
+
+		bcm_sn15: bcm-sn15 {
+			cell-id = <MSM_BUS_BCM_SN15>;
+			label = "SN15";
+			qcom,bcm-dev;
+		};
+
+
+		/*Buses*/
+		fab_aggre1_noc: fab-aggre1_noc {
+			cell-id = <MSM_BUS_FAB_A1_NOC>;
+			label = "fab-aggre1_noc";
+			qcom,fab-dev;
+			qcom,base-name = "aggre1_noc-base";
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_aggre2_noc: fab-aggre2_noc {
+			cell-id = <MSM_BUS_FAB_A2_NOC>;
+			label = "fab-aggre2_noc";
+			qcom,fab-dev;
+			qcom,base-name = "aggre2_noc-base";
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_config_noc: fab-config_noc {
+			cell-id = <MSM_BUS_FAB_CONFIG_NOC>;
+			label = "fab-config_noc";
+			qcom,fab-dev;
+			qcom,base-name = "config_noc-base";
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_dc_noc: fab-dc_noc {
+			cell-id = <MSM_BUS_FAB_DC_NOC>;
+			label = "fab-dc_noc";
+			qcom,fab-dev;
+			qcom,base-name = "dc_noc-base";
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_gladiator_noc: fab-gladiator_noc {
+			cell-id = <MSM_BUS_FAB_GNOC>;
+			label = "fab-gladiator_noc";
+			qcom,fab-dev;
+			qcom,base-name = "gladiator_noc-base";
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_mc_virt: fab-mc_virt {
+			cell-id = <MSM_BUS_FAB_MC_VIRT>;
+			label = "fab-mc_virt";
+			qcom,fab-dev;
+			qcom,base-name = "mc_virt-base";
+			qcom,bypass-qos-prg;
+			clocks = <>;
+		};
+
+		fab_mem_noc: fab-mem_noc {
+			cell-id = <MSM_BUS_FAB_MEM_NOC>;
+			label = "fab-mem_noc";
+			qcom,fab-dev;
+			qcom,base-name = "mem_noc-base";
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_mmss_noc: fab-mmss_noc {
+			cell-id = <MSM_BUS_FAB_MMSS_NOC>;
+			label = "fab-mmss_noc";
+			qcom,fab-dev;
+			qcom,base-name = "mmss_noc-base";
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_system_noc: fab-system_noc {
+			cell-id = <MSM_BUS_FAB_SYS_NOC>;
+			label = "fab-system_noc";
+			qcom,fab-dev;
+			qcom,base-name = "system_noc-base";
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+
+		/* Masters */
+
+		mas_qhm_a1noc_cfg: mas-qhm-a1noc-cfg {
+			cell-id = <MSM_BUS_MASTER_A1NOC_CFG>;
+			label = "mas-qhm-a1noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_aggre1_noc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+		};
+
+		mas_qhm_qup1: mas-qhm-qup1 {
+			cell-id = <MSM_BUS_MASTER_BLSP_1>;
+			label = "mas-qhm-qup1";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+		};
+
+		mas_qhm_tsif: mas-qhm-tsif {
+			cell-id = <MSM_BUS_MASTER_TSIF>;
+			label = "mas-qhm-tsif";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+		};
+
+		mas_xm_sdc2: mas-xm-sdc2 {
+			cell-id = <MSM_BUS_MASTER_SDCC_2>;
+			label = "mas-xm-sdc2";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <1>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+		};
+
+		mas_xm_sdc4: mas-xm-sdc4 {
+			cell-id = <MSM_BUS_MASTER_SDCC_4>;
+			label = "mas-xm-sdc4";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <2>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+		};
+
+		mas_xm_ufs_card: mas-xm-ufs-card {
+			cell-id = <MSM_BUS_MASTER_UFS_CARD>;
+			label = "mas-xm-ufs-card";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <3>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+		};
+
+		mas_xm_ufs_mem: mas-xm-ufs-mem {
+			cell-id = <MSM_BUS_MASTER_UFS_MEM>;
+			label = "mas-xm-ufs-mem";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <4>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+		};
+
+		mas_qhm_a2noc_cfg: mas-qhm-a2noc-cfg {
+			cell-id = <MSM_BUS_MASTER_A2NOC_CFG>;
+			label = "mas-qhm-a2noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_aggre2_noc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+		};
+
+		mas_qhm_qdss_bam: mas-qhm-qdss-bam {
+			cell-id = <MSM_BUS_MASTER_QDSS_BAM>;
+			label = "mas-qhm-qdss-bam";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+		};
+
+		mas_qhm_qup2: mas-qhm-qup2 {
+			cell-id = <MSM_BUS_MASTER_BLSP_2>;
+			label = "mas-qhm-qup2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+		};
+
+		mas_qnm_cnoc: mas-qnm-cnoc {
+			cell-id = <MSM_BUS_MASTER_CNOC_A2NOC>;
+			label = "mas-qnm-cnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <0>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+		};
+
+		mas_qxm_crypto: mas-qxm-crypto {
+			cell-id = <MSM_BUS_MASTER_CRYPTO_CORE_0>;
+			label = "mas-qxm-crypto";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <1>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,bcms = <&bcm_ce0>;
+		};
+
+		mas_qxm_ipa: mas-qxm-ipa {
+			cell-id = <MSM_BUS_MASTER_IPA>;
+			label = "mas-qxm-ipa";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <2>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,bcms = <&bcm_ip0>;
+		};
+
+		mas_xm_pcie3_1: mas-xm-pcie3-1 {
+			cell-id = <MSM_BUS_MASTER_PCIE_1>;
+			label = "mas-xm-pcie3-1";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <6>;
+			qcom,connections = <&slv_qns_pcie_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+		};
+
+		mas_xm_pcie_0: mas-xm-pcie-0 {
+			cell-id = <MSM_BUS_MASTER_PCIE>;
+			label = "mas-xm-pcie-0";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <5>;
+			qcom,connections = <&slv_qns_pcie_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+		};
+
+		mas_xm_qdss_etr: mas-xm-qdss-etr {
+			cell-id = <MSM_BUS_MASTER_QDSS_ETR>;
+			label = "mas-xm-qdss-etr";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <7>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+		};
+
+		mas_xm_usb3_0: mas-xm-usb3-0 {
+			cell-id = <MSM_BUS_MASTER_USB3>;
+			label = "mas-xm-usb3-0";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <10>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+		};
+
+		mas_xm_usb3_1: mas-xm-usb3-1 {
+			cell-id = <MSM_BUS_MASTER_USB3_1>;
+			label = "mas-xm-usb3-1";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <11>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+		};
+
+		mas_qhm_spdm: mas-qhm-spdm {
+			cell-id = <MSM_BUS_MASTER_SPDM>;
+			label = "mas-qhm-spdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_cnoc_a2noc>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		mas_qhm_tic: mas-qhm-tic {
+			cell-id = <MSM_BUS_MASTER_TIC>;
+			label = "mas-qhm-tic";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qhs_tlmm_south
+				 &slv_qhs_spss_cfg &slv_qhs_camera_cfg
+				 &slv_qhs_sdc4 &slv_qhs_sdc2
+				 &slv_qhs_mnoc_cfg &slv_qhs_ufs_mem_cfg
+				 &slv_qhs_snoc_cfg &slv_qhs_glm
+				 &slv_qhs_pdm &slv_qhs_a2_noc_cfg
+				 &slv_qhs_qdss_cfg &slv_qhs_display_cfg
+				 &slv_qhs_tcsr &slv_qhs_dcc_cfg
+				 &slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc
+				 &slv_qhs_phy_refgen_south
+				 &slv_qhs_pcie_gen3_cfg
+				 &slv_qhs_pcie0_cfg &slv_qhs_gpuss_cfg
+				 &slv_qhs_venus_cfg &slv_qhs_tsif
+				 &slv_qhs_compute_dsp_cfg &slv_qhs_aop
+				 &slv_qhs_qupv3_north &slv_qhs_usb3_0
+				 &slv_srvc_cnoc &slv_qhs_ufs_card_cfg
+				 &slv_qhs_usb3_1 &slv_qhs_ipa
+				 &slv_qhs_cpr_cx &slv_qhs_a1_noc_cfg
+				 &slv_qhs_aoss &slv_qhs_prng
+				 &slv_qhs_vsense_ctrl_cfg &slv_qhs_qupv3_south
+				 &slv_qhs_spdm &slv_qhs_crypto0_cfg
+				 &slv_qhs_pimem_cfg &slv_qhs_tlmm_north
+				 &slv_qhs_clk_ctl &slv_qhs_imem_cfg>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		mas_qnm_snoc: mas-qnm-snoc {
+			cell-id = <MSM_BUS_SNOC_CNOC_MAS>;
+			label = "mas-qnm-snoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qhs_tlmm_south
+				 &slv_qhs_spss_cfg &slv_qhs_camera_cfg
+				 &slv_qhs_sdc4 &slv_qhs_sdc2
+				 &slv_qhs_mnoc_cfg &slv_qhs_ufs_mem_cfg
+				 &slv_qhs_snoc_cfg &slv_qhs_glm
+				 &slv_qhs_pdm &slv_qhs_a2_noc_cfg
+				 &slv_qhs_qdss_cfg &slv_qhs_display_cfg
+				 &slv_qhs_tcsr &slv_qhs_dcc_cfg
+				 &slv_qhs_ddrss_cfg &slv_qhs_phy_refgen_south
+				 &slv_qhs_pcie_gen3_cfg &slv_qhs_pcie0_cfg
+				 &slv_qhs_gpuss_cfg &slv_qhs_venus_cfg
+				 &slv_qhs_tsif &slv_qhs_compute_dsp_cfg
+				 &slv_qhs_aop &slv_qhs_qupv3_north
+				 &slv_qhs_usb3_0 &slv_srvc_cnoc
+				 &slv_qhs_ufs_card_cfg &slv_qhs_usb3_1
+				 &slv_qhs_ipa &slv_qhs_cpr_cx
+				 &slv_qhs_a1_noc_cfg &slv_qhs_aoss
+				 &slv_qhs_prng &slv_qhs_vsense_ctrl_cfg
+				 &slv_qhs_qupv3_south &slv_qhs_spdm
+				 &slv_qhs_crypto0_cfg &slv_qhs_pimem_cfg
+				 &slv_qhs_tlmm_north &slv_qhs_clk_ctl
+				 &slv_qhs_imem_cfg>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		mas_xm_qdss_dap: mas-xm-qdss-dap {
+			cell-id = <MSM_BUS_MASTER_QDSS_DAP>;
+			label = "mas-xm-qdss-dap";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qhs_tlmm_south
+				 &slv_qhs_spss_cfg &slv_qhs_camera_cfg
+				 &slv_qhs_sdc4 &slv_qhs_sdc2
+				 &slv_qhs_mnoc_cfg &slv_qhs_ufs_mem_cfg
+				 &slv_qhs_snoc_cfg &slv_qhs_glm
+				 &slv_qhs_pdm &slv_qhs_a2_noc_cfg
+				 &slv_qhs_qdss_cfg &slv_qhs_display_cfg
+				 &slv_qhs_tcsr &slv_qhs_dcc_cfg
+				 &slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc
+				 &slv_qhs_phy_refgen_south
+				 &slv_qhs_pcie_gen3_cfg
+				 &slv_qhs_pcie0_cfg &slv_qhs_gpuss_cfg
+				 &slv_qhs_venus_cfg &slv_qhs_tsif
+				 &slv_qhs_compute_dsp_cfg &slv_qhs_aop
+				 &slv_qhs_qupv3_north &slv_qhs_usb3_0
+				 &slv_srvc_cnoc &slv_qhs_ufs_card_cfg
+				 &slv_qhs_usb3_1 &slv_qhs_ipa
+				 &slv_qhs_cpr_cx &slv_qhs_a1_noc_cfg
+				 &slv_qhs_aoss &slv_qhs_prng
+				 &slv_qhs_vsense_ctrl_cfg &slv_qhs_qupv3_south
+				 &slv_qhs_spdm &slv_qhs_crypto0_cfg
+				 &slv_qhs_pimem_cfg &slv_qhs_tlmm_north
+				 &slv_qhs_clk_ctl &slv_qhs_imem_cfg>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		mas_qhm_cnoc: mas-qhm-cnoc {
+			cell-id = <MSM_BUS_MASTER_CNOC_DC_NOC>;
+			label = "mas-qhm-cnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qhs_memnoc &slv_qhs_llcc>;
+			qcom,bus-dev = <&fab_dc_noc>;
+		};
+
+		mas_acm_l3: mas-acm-l3 {
+			cell-id = <MSM_BUS_MASTER_AMPSS_M0>;
+			label = "mas-acm-l3";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_gnoc
+				&slv_qns_gladiator_sodv &slv_qns_gnoc_memnoc>;
+			qcom,bus-dev = <&fab_gladiator_noc>;
+		};
+
+		mas_pm_gnoc_cfg: mas-pm-gnoc-cfg {
+			cell-id = <MSM_BUS_MASTER_GNOC_CFG>;
+			label = "mas-pm-gnoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_gnoc>;
+			qcom,bus-dev = <&fab_gladiator_noc>;
+		};
+
+		mas_llcc_mc: mas-llcc-mc {
+			cell-id = <MSM_BUS_MASTER_LLCC>;
+			label = "mas-llcc-mc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <4>;
+			qcom,connections = <&slv_ebi>;
+			qcom,bus-dev = <&fab_mc_virt>;
+		};
+
+		mas_acm_tcu: mas-acm-tcu {
+			cell-id = <MSM_BUS_MASTER_TCU_0>;
+			label = "mas-acm-tcu";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <0>;
+			qcom,connections = <&slv_qns_apps_io &slv_qns_llcc
+				&slv_qns_memnoc_snoc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,bcms = <&bcm_sh3>;
+		};
+
+		mas_qhm_memnoc_cfg: mas-qhm-memnoc-cfg {
+			cell-id = <MSM_BUS_MASTER_MEM_NOC_CFG>;
+			label = "mas-qhm-memnoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_memnoc
+				&slv_qhs_mdsp_ms_mpu_cfg>;
+			qcom,bus-dev = <&fab_mem_noc>;
+		};
+
+		mas_qnm_apps: mas-qnm-apps {
+			cell-id = <MSM_BUS_MASTER_GNOC_MEM_NOC>;
+			label = "mas-qnm-apps";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,qport = <2 3>;
+			qcom,connections = <&slv_qns_llcc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,bcms = <&bcm_sh5>;
+		};
+
+		mas_qnm_mnoc_hf: mas-qnm-mnoc-hf {
+			cell-id = <MSM_BUS_MASTER_MNOC_HF_MEM_NOC>;
+			label = "mas-qnm-mnoc-hf";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,qport = <4 5>;
+			qcom,connections = <&slv_qns_apps_io &slv_qns_llcc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+		};
+
+		mas_qnm_mnoc_sf: mas-qnm-mnoc-sf {
+			cell-id = <MSM_BUS_MASTER_MNOC_SF_MEM_NOC>;
+			label = "mas-qnm-mnoc-sf";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <7>;
+			qcom,connections = <&slv_qns_apps_io
+				 &slv_qns_llcc &slv_qns_memnoc_snoc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+		};
+
+		mas_qnm_snoc_gc: mas-qnm-snoc-gc {
+			cell-id = <MSM_BUS_MASTER_SNOC_GC_MEM_NOC>;
+			label = "mas-qnm-snoc-gc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <8>;
+			qcom,connections = <&slv_qns_llcc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+		};
+
+		mas_qnm_snoc_sf: mas-qnm-snoc-sf {
+			cell-id = <MSM_BUS_MASTER_SNOC_SF_MEM_NOC>;
+			label = "mas-qnm-snoc-sf";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <9>;
+			qcom,connections = <&slv_qns_apps_io &slv_qns_llcc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+		};
+
+		mas_qxm_gpu: mas-qxm-gpu {
+			cell-id = <MSM_BUS_MASTER_GRAPHICS_3D>;
+			label = "mas-qxm-gpu";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,qport = <10 11>;
+			qcom,connections = <&slv_qns_apps_io
+				 &slv_qns_llcc &slv_qns_memnoc_snoc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,bcms = <&bcm_sh4>;
+		};
+
+		mas_qhm_mnoc_cfg: mas-qhm-mnoc-cfg {
+			cell-id = <MSM_BUS_MASTER_CNOC_MNOC_CFG>;
+			label = "mas-qhm-mnoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_mnoc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm5>;
+		};
+
+		mas_qxm_camnoc_hf: mas-qxm-camnoc-hf {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF>;
+			label = "mas-qxm-camnoc-hf";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,qport = <1 2>;
+			qcom,connections = <&slv_qns_mem_noc_hf>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm1>;
+		};
+
+		mas_qxm_camnoc_sf: mas-qxm-camnoc-sf {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_SF>;
+			label = "mas-qxm-camnoc-sf";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <0>;
+			qcom,connections = <&slv_qns2_mem_noc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm3>;
+		};
+
+		mas_qxm_mdp0: mas-qxm-mdp0 {
+			cell-id = <MSM_BUS_MASTER_MDP_PORT0>;
+			label = "mas-qxm-mdp0";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <3>;
+			qcom,connections = <&slv_qns_mem_noc_hf>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm1>;
+		};
+
+		mas_qxm_mdp1: mas-qxm-mdp1 {
+			cell-id = <MSM_BUS_MASTER_MDP_PORT1>;
+			label = "mas-qxm-mdp1";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <4>;
+			qcom,connections = <&slv_qns_mem_noc_hf>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm1>;
+		};
+
+		mas_qxm_rot: mas-qxm-rot {
+			cell-id = <MSM_BUS_MASTER_ROTATOR>;
+			label = "mas-qxm-rot";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <5>;
+			qcom,connections = <&slv_qns2_mem_noc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm3>;
+		};
+
+		mas_qxm_venus0: mas-qxm-venus0 {
+			cell-id = <MSM_BUS_MASTER_VIDEO_P0>;
+			label = "mas-qxm-venus0";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <6>;
+			qcom,connections = <&slv_qns2_mem_noc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm3>;
+		};
+
+		mas_qxm_venus1: mas-qxm-venus1 {
+			cell-id = <MSM_BUS_MASTER_VIDEO_P1>;
+			label = "mas-qxm-venus1";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <7>;
+			qcom,connections = <&slv_qns2_mem_noc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm3>;
+		};
+
+		mas_qxm_venus_arm9: mas-qxm-venus-arm9 {
+			cell-id = <MSM_BUS_MASTER_VIDEO_PROC>;
+			label = "mas-qxm-venus-arm9";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <8>;
+			qcom,connections = <&slv_qns2_mem_noc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm3>;
+		};
+
+		mas_qhm_snoc_cfg: mas-qhm-snoc-cfg {
+			cell-id = <MSM_BUS_MASTER_SNOC_CFG>;
+			label = "mas-qhm-snoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_snoc>;
+			qcom,bus-dev = <&fab_system_noc>;
+		};
+
+		mas_qnm_aggre1_noc: mas-qnm-aggre1-noc {
+			cell-id = <MSM_BUS_A1NOC_SNOC_MAS>;
+			label = "mas-qnm-aggre1-noc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qxs_pimem
+				 &slv_qns_memnoc_sf &slv_qxs_imem
+				 &slv_qhs_apss &slv_qns_cnoc
+				 &slv_xs_qdss_stm>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn9>;
+		};
+
+		mas_qnm_aggre2_noc: mas-qnm-aggre2-noc {
+			cell-id = <MSM_BUS_A2NOC_SNOC_MAS>;
+			label = "mas-qnm-aggre2-noc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qxs_pimem
+				 &slv_qns_memnoc_sf &slv_qxs_pcie_gen3
+				 &slv_qxs_imem &slv_qhs_apss
+				 &slv_qns_cnoc &slv_qxs_pcie
+				 &slv_xs_sys_tcu_cfg &slv_xs_qdss_stm>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn11>;
+		};
+
+		mas_qnm_gladiator_sodv: mas-qnm-gladiator-sodv {
+			cell-id = <MSM_BUS_MASTER_GNOC_SNOC>;
+			label = "mas-qnm-gladiator-sodv";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qxs_pimem
+				 &slv_qxs_pcie_gen3 &slv_qxs_imem
+				 &slv_qhs_apss &slv_qns_cnoc
+				 &slv_qxs_pcie &slv_xs_sys_tcu_cfg
+				 &slv_xs_qdss_stm>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn12>;
+		};
+
+		mas_qnm_memnoc: mas-qnm-memnoc {
+			cell-id = <MSM_BUS_MASTER_MEM_NOC_SNOC>;
+			label = "mas-qnm-memnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qxs_imem
+				 &slv_qhs_apss &slv_qxs_pimem
+				 &slv_qns_cnoc &slv_xs_qdss_stm>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn15>;
+		};
+
+		mas_qnm_pcie_anoc: mas-qnm-pcie-anoc {
+			cell-id = <MSM_BUS_MASTER_ANOC_PCIE_SNOC>;
+			label = "mas-qnm-pcie-anoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qxs_imem
+				 &slv_qhs_apss &slv_qns_cnoc
+				 &slv_qns_memnoc_sf &slv_xs_qdss_stm>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn14>;
+		};
+
+		mas_qxm_pimem: mas-qxm-pimem {
+			cell-id = <MSM_BUS_MASTER_PIMEM>;
+			label = "mas-qxm-pimem";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <3>;
+			qcom,connections = <&slv_qxs_imem &slv_qns_memnoc_gc>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn4>;
+		};
+
+		/* Internal nodes */
+
+		/* Slaves */
+
+		slv_qns_a1noc_snoc:slv-qns-a1noc-snoc {
+			cell-id = <MSM_BUS_A1NOC_SNOC_SLV>;
+			label = "slv-qns-a1noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+			qcom,connections = <&mas_qnm_aggre1_noc>;
+		};
+
+		slv_srvc_aggre1_noc:slv-srvc-aggre1-noc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_A1NOC>;
+			label = "slv-srvc-aggre1-noc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+			qcom,bcms = <&bcm_sn9>;
+		};
+
+		slv_qns_a2noc_snoc:slv-qns-a2noc-snoc {
+			cell-id = <MSM_BUS_A2NOC_SNOC_SLV>;
+			label = "slv-qns-a2noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,connections = <&mas_qnm_aggre2_noc>;
+		};
+
+		slv_qns_pcie_snoc:slv-qns-pcie-snoc {
+			cell-id = <MSM_BUS_SLAVE_ANOC_PCIE_SNOC>;
+			label = "slv-qns-pcie-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,connections = <&mas_qnm_pcie_anoc>;
+		};
+
+		slv_srvc_aggre2_noc:slv-srvc-aggre2-noc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_A2NOC>;
+			label = "slv-srvc-aggre2-noc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,bcms = <&bcm_sn11>;
+		};
+
+		slv_qhs_a1_noc_cfg:slv-qhs-a1-noc-cfg {
+			cell-id = <MSM_BUS_SLAVE_A1NOC_CFG>;
+			label = "slv-qhs-a1-noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qhm_a1noc_cfg>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_a2_noc_cfg:slv-qhs-a2-noc-cfg {
+			cell-id = <MSM_BUS_SLAVE_A2NOC_CFG>;
+			label = "slv-qhs-a2-noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qhm_a2noc_cfg>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_aop:slv-qhs-aop {
+			cell-id = <MSM_BUS_SLAVE_AOP>;
+			label = "slv-qhs-aop";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_aoss:slv-qhs-aoss {
+			cell-id = <MSM_BUS_SLAVE_AOSS>;
+			label = "slv-qhs-aoss";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_camera_cfg:slv-qhs-camera-cfg {
+			cell-id = <MSM_BUS_SLAVE_CAMERA_CFG>;
+			label = "slv-qhs-camera-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_clk_ctl:slv-qhs-clk-ctl {
+			cell-id = <MSM_BUS_SLAVE_CLK_CTL>;
+			label = "slv-qhs-clk-ctl";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_compute_dsp_cfg:slv-qhs-compute-dsp-cfg {
+			cell-id = <MSM_BUS_SLAVE_CDSP_CFG>;
+			label = "slv-qhs-compute-dsp-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_cpr_cx:slv-qhs-cpr-cx {
+			cell-id = <MSM_BUS_SLAVE_RBCPR_CX_CFG>;
+			label = "slv-qhs-cpr-cx";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_crypto0_cfg:slv-qhs-crypto0-cfg {
+			cell-id = <MSM_BUS_SLAVE_CRYPTO_0_CFG>;
+			label = "slv-qhs-crypto0-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_dcc_cfg:slv-qhs-dcc-cfg {
+			cell-id = <MSM_BUS_SLAVE_DCC_CFG>;
+			label = "slv-qhs-dcc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qhm_cnoc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_ddrss_cfg:slv-qhs-ddrss-cfg {
+			cell-id = <MSM_BUS_SLAVE_CNOC_DDRSS>;
+			label = "slv-qhs-ddrss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_display_cfg:slv-qhs-display-cfg {
+			cell-id = <MSM_BUS_SLAVE_DISPLAY_CFG>;
+			label = "slv-qhs-display-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_glm:slv-qhs-glm {
+			cell-id = <MSM_BUS_SLAVE_GLM>;
+			label = "slv-qhs-glm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_gpuss_cfg:slv-qhs-gpuss-cfg {
+			cell-id = <MSM_BUS_SLAVE_GRAPHICS_3D_CFG>;
+			label = "slv-qhs-gpuss-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_imem_cfg:slv-qhs-imem-cfg {
+			cell-id = <MSM_BUS_SLAVE_IMEM_CFG>;
+			label = "slv-qhs-imem-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_ipa:slv-qhs-ipa {
+			cell-id = <MSM_BUS_SLAVE_IPA_CFG>;
+			label = "slv-qhs-ipa";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_mnoc_cfg:slv-qhs-mnoc-cfg {
+			cell-id = <MSM_BUS_SLAVE_CNOC_MNOC_CFG>;
+			label = "slv-qhs-mnoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qhm_mnoc_cfg>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_pcie0_cfg:slv-qhs-pcie0-cfg {
+			cell-id = <MSM_BUS_SLAVE_PCIE_0_CFG>;
+			label = "slv-qhs-pcie0-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_pcie_gen3_cfg:slv-qhs-pcie-gen3-cfg {
+			cell-id = <MSM_BUS_SLAVE_PCIE_1_CFG>;
+			label = "slv-qhs-pcie-gen3-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_pdm:slv-qhs-pdm {
+			cell-id = <MSM_BUS_SLAVE_PDM>;
+			label = "slv-qhs-pdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_phy_refgen_south:slv-qhs-phy-refgen-south {
+			cell-id = <MSM_BUS_SLAVE_SOUTH_PHY_CFG>;
+			label = "slv-qhs-phy-refgen-south";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_pimem_cfg:slv-qhs-pimem-cfg {
+			cell-id = <MSM_BUS_SLAVE_PIMEM_CFG>;
+			label = "slv-qhs-pimem-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_prng:slv-qhs-prng {
+			cell-id = <MSM_BUS_SLAVE_PRNG>;
+			label = "slv-qhs-prng";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_qdss_cfg:slv-qhs-qdss-cfg {
+			cell-id = <MSM_BUS_SLAVE_QDSS_CFG>;
+			label = "slv-qhs-qdss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_qupv3_north:slv-qhs-qupv3-north {
+			cell-id = <MSM_BUS_SLAVE_BLSP_2>;
+			label = "slv-qhs-qupv3-north";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_qupv3_south:slv-qhs-qupv3-south {
+			cell-id = <MSM_BUS_SLAVE_BLSP_1>;
+			label = "slv-qhs-qupv3-south";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_sdc2:slv-qhs-sdc2 {
+			cell-id = <MSM_BUS_SLAVE_SDCC_2>;
+			label = "slv-qhs-sdc2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_sdc4:slv-qhs-sdc4 {
+			cell-id = <MSM_BUS_SLAVE_SDCC_4>;
+			label = "slv-qhs-sdc4";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_snoc_cfg:slv-qhs-snoc-cfg {
+			cell-id = <MSM_BUS_SLAVE_SNOC_CFG>;
+			label = "slv-qhs-snoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qhm_snoc_cfg>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_spdm:slv-qhs-spdm {
+			cell-id = <MSM_BUS_SLAVE_SPDM_WRAPPER>;
+			label = "slv-qhs-spdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_spss_cfg:slv-qhs-spss-cfg {
+			cell-id = <MSM_BUS_SLAVE_SPSS_CFG>;
+			label = "slv-qhs-spss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_tcsr:slv-qhs-tcsr {
+			cell-id = <MSM_BUS_SLAVE_TCSR>;
+			label = "slv-qhs-tcsr";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_tlmm_north:slv-qhs-tlmm-north {
+			cell-id = <MSM_BUS_SLAVE_TLMM_NORTH>;
+			label = "slv-qhs-tlmm-north";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_tlmm_south:slv-qhs-tlmm-south {
+			cell-id = <MSM_BUS_SLAVE_TLMM_SOUTH>;
+			label = "slv-qhs-tlmm-south";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_tsif:slv-qhs-tsif {
+			cell-id = <MSM_BUS_SLAVE_TSIF>;
+			label = "slv-qhs-tsif";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_ufs_card_cfg:slv-qhs-ufs-card-cfg {
+			cell-id = <MSM_BUS_SLAVE_UFS_CARD_CFG>;
+			label = "slv-qhs-ufs-card-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_ufs_mem_cfg:slv-qhs-ufs-mem-cfg {
+			cell-id = <MSM_BUS_SLAVE_UFS_MEM_CFG>;
+			label = "slv-qhs-ufs-mem-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_usb3_0:slv-qhs-usb3-0 {
+			cell-id = <MSM_BUS_SLAVE_USB3>;
+			label = "slv-qhs-usb3-0";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_usb3_1:slv-qhs-usb3-1 {
+			cell-id = <MSM_BUS_SLAVE_USB3_1>;
+			label = "slv-qhs-usb3-1";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_venus_cfg:slv-qhs-venus-cfg {
+			cell-id = <MSM_BUS_SLAVE_VENUS_CFG>;
+			label = "slv-qhs-venus-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_vsense_ctrl_cfg:slv-qhs-vsense-ctrl-cfg {
+			cell-id = <MSM_BUS_SLAVE_VSENSE_CTRL_CFG>;
+			label = "slv-qhs-vsense-ctrl-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qns_cnoc_a2noc:slv-qns-cnoc-a2noc {
+			cell-id = <MSM_BUS_SLAVE_CNOC_A2NOC>;
+			label = "slv-qns-cnoc-a2noc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qnm_cnoc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_srvc_cnoc:slv-srvc-cnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_CNOC>;
+			label = "slv-srvc-cnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_llcc:slv-qhs-llcc {
+			cell-id = <MSM_BUS_SLAVE_LLCC_CFG>;
+			label = "slv-qhs-llcc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_dc_noc>;
+		};
+
+		slv_qhs_memnoc:slv-qhs-memnoc {
+			cell-id = <MSM_BUS_SLAVE_MEM_NOC_CFG>;
+			label = "slv-qhs-memnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_dc_noc>;
+			qcom,connections = <&mas_qhm_memnoc_cfg>;
+		};
+
+		slv_qns_gladiator_sodv:slv-qns-gladiator-sodv {
+			cell-id = <MSM_BUS_SLAVE_GNOC_SNOC>;
+			label = "slv-qns-gladiator-sodv";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_gladiator_noc>;
+			qcom,connections = <&mas_qnm_gladiator_sodv>;
+		};
+
+		slv_qns_gnoc_memnoc:slv-qns-gnoc-memnoc {
+			cell-id = <MSM_BUS_SLAVE_GNOC_MEM_NOC>;
+			label = "slv-qns-gnoc-memnoc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_gladiator_noc>;
+			qcom,connections = <&mas_qnm_apps>;
+		};
+
+		slv_srvc_gnoc:slv-srvc-gnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_GNOC>;
+			label = "slv-srvc-gnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_gladiator_noc>;
+		};
+
+		slv_ebi:slv-ebi {
+			cell-id = <MSM_BUS_SLAVE_EBI_CH0>;
+			label = "slv-ebi";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <4>;
+			qcom,bus-dev = <&fab_mc_virt>;
+			qcom,bcms = <&bcm_mc0>;
+		};
+
+		slv_qhs_mdsp_ms_mpu_cfg:slv-qhs-mdsp-ms-mpu-cfg {
+			cell-id = <MSM_BUS_SLAVE_MSS_PROC_MS_MPU_CFG>;
+			label = "slv-qhs-mdsp-ms-mpu-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mem_noc>;
+		};
+
+		slv_qns_apps_io:slv-qns-apps-io {
+			cell-id = <MSM_BUS_SLAVE_MEM_NOC_GNOC>;
+			label = "slv-qns-apps-io";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,bcms = <&bcm_sh1>;
+		};
+
+		slv_qns_llcc:slv-qns-llcc {
+			cell-id = <MSM_BUS_SLAVE_LLCC>;
+			label = "slv-qns-llcc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <4>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,connections = <&mas_llcc_mc>;
+			qcom,bcms = <&bcm_sh0>;
+		};
+
+		slv_qns_memnoc_snoc:slv-qns-memnoc-snoc {
+			cell-id = <MSM_BUS_SLAVE_MEM_NOC_SNOC>;
+			label = "slv-qns-memnoc-snoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,connections = <&mas_qnm_memnoc>;
+			qcom,bcms = <&bcm_sh2>;
+		};
+
+		slv_srvc_memnoc:slv-srvc-memnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_MEM_NOC>;
+			label = "slv-srvc-memnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mem_noc>;
+		};
+
+		slv_qns2_mem_noc:slv-qns2-mem-noc {
+			cell-id = <MSM_BUS_SLAVE_MNOC_SF_MEM_NOC>;
+			label = "slv-qns2-mem-noc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,connections = <&mas_qnm_mnoc_sf>;
+			qcom,bcms = <&bcm_mm2>;
+		};
+
+		slv_qns_mem_noc_hf:slv-qns-mem-noc-hf {
+			cell-id = <MSM_BUS_SLAVE_MNOC_HF_MEM_NOC>;
+			label = "slv-qns-mem-noc-hf";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,connections = <&mas_qnm_mnoc_hf>;
+			qcom,bcms = <&bcm_mm0>;
+		};
+
+		slv_srvc_mnoc:slv-srvc-mnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_MNOC>;
+			label = "slv-srvc-mnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+		};
+
+		slv_qhs_apss:slv-qhs-apss {
+			cell-id = <MSM_BUS_SLAVE_APPSS>;
+			label = "slv-qhs-apss";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn6>;
+		};
+
+		slv_qns_cnoc:slv-qns-cnoc {
+			cell-id = <MSM_BUS_SNOC_CNOC_SLV>;
+			label = "slv-qns-cnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,connections = <&mas_qnm_snoc>;
+			qcom,bcms = <&bcm_sn3>;
+		};
+
+		slv_qns_memnoc_gc:slv-qns-memnoc-gc {
+			cell-id = <MSM_BUS_SLAVE_SNOC_MEM_NOC_GC>;
+			label = "slv-qns-memnoc-gc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,connections = <&mas_qnm_snoc_gc>;
+			qcom,bcms = <&bcm_sn2>;
+		};
+
+		slv_qns_memnoc_sf:slv-qns-memnoc-sf {
+			cell-id = <MSM_BUS_SLAVE_SNOC_MEM_NOC_SF>;
+			label = "slv-qns-memnoc-sf";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,connections = <&mas_qnm_snoc_sf>;
+			qcom,bcms = <&bcm_sn0>;
+		};
+
+		slv_qxs_imem:slv-qxs-imem {
+			cell-id = <MSM_BUS_SLAVE_OCIMEM>;
+			label = "slv-qxs-imem";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn1>;
+		};
+
+		slv_qxs_pcie:slv-qxs-pcie {
+			cell-id = <MSM_BUS_SLAVE_PCIE_0>;
+			label = "slv-qxs-pcie";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn7>;
+		};
+
+		slv_qxs_pcie_gen3:slv-qxs-pcie-gen3 {
+			cell-id = <MSM_BUS_SLAVE_PCIE_1>;
+			label = "slv-qxs-pcie-gen3";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn8>;
+		};
+
+		slv_qxs_pimem:slv-qxs-pimem {
+			cell-id = <MSM_BUS_SLAVE_PIMEM>;
+			label = "slv-qxs-pimem";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+		};
+
+		slv_srvc_snoc:slv-srvc-snoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_SNOC>;
+			label = "slv-srvc-snoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn6>;
+		};
+
+		slv_xs_qdss_stm:slv-xs-qdss-stm {
+			cell-id = <MSM_BUS_SLAVE_QDSS_STM>;
+			label = "slv-xs-qdss-stm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn5>;
+		};
+
+		slv_xs_sys_tcu_cfg:slv-xs-sys-tcu-cfg {
+			cell-id = <MSM_BUS_SLAVE_TCU>;
+			label = "slv-xs-sys-tcu-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn6>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-camera.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp.dts
similarity index 77%
copy from arch/arm64/boot/dts/qcom/sdmbat-cdp.dts
copy to arch/arm64/boot/dts/qcom/sdm845-cdp.dts
index f8f916e..22e3aea 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dts
@@ -13,11 +13,11 @@
 
 /dts-v1/;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-cdp.dtsi"
+#include "sdm845.dtsi"
+#include "sdm845-cdp.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM bat v1 CDP";
-	compatible = "qcom,sdmbat-cdp", "qcom,sdmbat", "qcom,cdp";
+	model = "Qualcomm Technologies, Inc. MSM sdm845 v1 CDP";
+	compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
 	qcom,board-id = <1 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
similarity index 93%
rename from arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 930c8de..00bd301 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -10,5 +10,5 @@
  * GNU General Public License for more details.
  */
 
-#include "msmskunk-pinctrl.dtsi"
+#include "sdm845-pinctrl.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-coresight.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-ion.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
similarity index 77%
copy from arch/arm64/boot/dts/qcom/sdmbat-mtp.dts
copy to arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index fb8e85a..f7af60c 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -13,11 +13,11 @@
 
 /dts-v1/;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-mtp.dtsi"
+#include "sdm845.dtsi"
+#include "sdm845-mtp.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM bat v1 MTP";
-	compatible = "qcom,sdmbat-mtp", "qcom,sdmbat", "qcom,mtp";
+	model = "Qualcomm Technologies, Inc. MSM sdm845 v1 MTP";
+	compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
 	qcom,board-id = <8 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
similarity index 93%
copy from arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi
copy to arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 930c8de..00bd301 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -10,5 +10,5 @@
  * GNU General Public License for more details.
  */
 
-#include "msmskunk-pinctrl.dtsi"
+#include "sdm845-pinctrl.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
similarity index 94%
rename from arch/arm64/boot/dts/qcom/msmskunk-pinctrl.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 84010bd..a69525c 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -12,7 +12,7 @@
 
 &soc {
 	tlmm: pinctrl@03400000 {
-		compatible = "qcom,msmskunk-pinctrl";
+		compatible = "qcom,sdm845-pinctrl";
 		reg = <0x03800000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-regulator.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-rumi.dts b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/msmskunk-rumi.dts
rename to arch/arm64/boot/dts/qcom/sdm845-rumi.dts
index f22e5fd..7a4ac64 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
@@ -14,12 +14,12 @@
 /dts-v1/;
 /memreserve/ 0x90000000 0x00000100;
 
-#include "msmskunk.dtsi"
-#include "msmskunk-rumi.dtsi"
-#include "msmskunk-usb.dtsi"
+#include "sdm845.dtsi"
+#include "sdm845-rumi.dtsi"
+#include "sdm845-usb.dtsi"
 / {
-	model = "Qualcomm Technologies, Inc. MSM SKUNK RUMI";
-	compatible = "qcom,msmskunk-rumi", "qcom,msmskunk", "qcom,rumi";
+	model = "Qualcomm Technologies, Inc. SDM845 RUMI";
+	compatible = "qcom,sdm845-rumi", "qcom,sdm845", "qcom,rumi";
 	qcom,board-id = <15 0>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
similarity index 96%
rename from arch/arm64/boot/dts/qcom/msmskunk-rumi.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
index 1f235d3..3d70a17 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
@@ -11,7 +11,7 @@
  */
 
 &ufsphy_mem {
-	compatible = "qcom,ufs-phy-qrbtc-msmskunk";
+	compatible = "qcom,ufs-phy-qrbtc-sdm845";
 
 	vdda-phy-supply = <&pm8998_l1>;
 	vdda-pll-supply = <&pm8998_l2>;
@@ -64,7 +64,7 @@
 };
 
 &ufsphy_card {
-	compatible = "qcom,ufs-phy-qrbtc-msmskunk";
+	compatible = "qcom,ufs-phy-qrbtc-sdm845";
 
 	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
 	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-sde-display.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
similarity index 96%
rename from arch/arm64/boot/dts/qcom/msmskunk-sde.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index a6b7f8e..980ef8c 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -55,6 +55,8 @@
 					1 5 9 13>;
 		qcom,sde-sspp-excl-rect = <1 1 1 1
 						1 1 1 1>;
+		qcom,sde-sspp-smart-dma-priority = <5 6 7 8 1 2 3 4>;
+		qcom,sde-smart-dma-rev = "smart_dma_v2";
 
 		qcom,sde-mixer-pair-mask = <2 1 6 0 0 3>;
 
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-sim.dts b/arch/arm64/boot/dts/qcom/sdm845-sim.dts
similarity index 78%
copy from arch/arm64/boot/dts/qcom/sdmbat-sim.dts
copy to arch/arm64/boot/dts/qcom/sdm845-sim.dts
index 216b3d0..2fa77bb 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-sim.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-sim.dts
@@ -14,12 +14,11 @@
 /dts-v1/;
 /memreserve/ 0x90000000 0x00000100;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-sim.dtsi"
+#include "sdm845.dtsi"
+#include "sdm845-sim.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM BAT SIM";
-	compatible = "qcom,sdmbat-sim", "qcom,sdmbat", "qcom,sim";
+	model = "Qualcomm Technologies, Inc. SDM845 SIM";
+	compatible = "qcom,sdm845-sim", "qcom,sdm845", "qcom,sim";
 	qcom,board-id = <16 0>;
 };
-
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-sim.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sim.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-sim.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-sim.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-smp2p.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
similarity index 98%
rename from arch/arm64/boot/dts/qcom/msmskunk-usb.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index dd6d4d4..442fcff 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -11,7 +11,7 @@
  * GNU General Public License for more details.
  */
 
-#include <dt-bindings/clock/qcom,gcc-skunk.h>
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
 &soc {
 	usb3: ssusb@a600000 {
 		compatible = "qcom,dwc-usb3-msm";
diff --git a/arch/arm64/boot/dts/qcom/msmskunk.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
similarity index 96%
rename from arch/arm64/boot/dts/qcom/msmskunk.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845.dtsi
index 794e379..27d703f 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -11,19 +11,19 @@
  */
 
 #include "skeleton64.dtsi"
-#include <dt-bindings/clock/qcom,gcc-skunk.h>
-#include <dt-bindings/clock/qcom,camcc-skunk.h>
-#include <dt-bindings/clock/qcom,dispcc-skunk.h>
-#include <dt-bindings/clock/qcom,gpucc-skunk.h>
-#include <dt-bindings/clock/qcom,videocc-skunk.h>
-#include <dt-bindings/clock/qcom,cpucc-skunk.h>
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
+#include <dt-bindings/clock/qcom,videocc-sdm845.h>
+#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/soc/qcom,tcs-mbox.h>
 
 / {
-	model = "Qualcomm Technologies, Inc. MSM SKUNK";
-	compatible = "qcom,msmskunk";
+	model = "Qualcomm Technologies, Inc. SDM845";
+	compatible = "qcom,sdm845";
 	qcom,msm-id = <321 0x0>;
 	interrupt-parent = <&intc>;
 
@@ -376,9 +376,9 @@
 	};
 };
 
-#include "msm-gdsc-skunk.dtsi"
-#include "msmskunk-sde.dtsi"
-#include "msmskunk-sde-display.dtsi"
+#include "msm-gdsc-sdm845.dtsi"
+#include "sdm845-sde.dtsi"
+#include "sdm845-sde-display.dtsi"
 
 &soc {
 	#address-cells = <1>;
@@ -466,7 +466,7 @@
 	};
 
 	clock_gcc: qcom,gcc@100000 {
-		compatible = "qcom,gcc-msmskunk";
+		compatible = "qcom,gcc-sdm845";
 		reg = <0x100000 0x1f0000>;
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pm8998_s9_level>;
@@ -476,7 +476,7 @@
 	};
 
 	clock_videocc: qcom,videocc@ab00000 {
-		compatible = "qcom,video_cc-msmskunk";
+		compatible = "qcom,video_cc-sdm845";
 		reg = <0xab00000 0x10000>;
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pm8998_s9_level>;
@@ -795,6 +795,17 @@
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_3_out 0 0>;
 	};
 
+	slim_aud: slim@171c0000 {
+		cell-index = <1>;
+		compatible = "qcom,slim-ngd";
+		reg = <0x171c0000 0x2c000>,
+			<0x17184000 0x2a000>;
+		reg-names = "slimbus_physical", "slimbus_bam_physical";
+		interrupts = <0 163 0>, <0 164 0>;
+		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+		qcom,ea-pc = <0x270>;
+	};
+
 	eud: qcom,msm-eud@88e0000 {
 		compatible = "qcom,msm-eud";
 		interrupt-names = "eud_irq";
@@ -1064,8 +1075,8 @@
 		reg = <0x1300000 0x50000>;
 		reg-names = "llcc_base";
 
-		llcc: qcom,msmskunk-llcc {
-			compatible = "qcom,msmskunk-llcc";
+		llcc: qcom,sdm845-llcc {
+			compatible = "qcom,sdm845-llcc";
 			#cache-cells = <1>;
 			max-slices = <32>;
 			qcom,dump-size = <0x3c0000>;
@@ -1601,9 +1612,10 @@
 	status = "ok";
 };
 
-#include "msmskunk-regulator.dtsi"
-#include "msmskunk-coresight.dtsi"
-#include "msm-arm-smmu-skunk.dtsi"
-#include "msmskunk-ion.dtsi"
-#include "msmskunk-smp2p.dtsi"
-#include "msmskunk-camera.dtsi"
+#include "sdm845-regulator.dtsi"
+#include "sdm845-coresight.dtsi"
+#include "msm-arm-smmu-sdm845.dtsi"
+#include "sdm845-ion.dtsi"
+#include "sdm845-smp2p.dtsi"
+#include "sdm845-camera.dtsi"
+#include "sdm845-bus.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi
deleted file mode 100644
index af7a194..0000000
--- a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi
+++ /dev/null
@@ -1,14 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "msmskunk-mtp.dtsi"
-#include "sdmbat-pinctrl.dtsi"
diff --git a/arch/arm64/configs/msmskunk-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
similarity index 95%
rename from arch/arm64/configs/msmskunk-perf_defconfig
rename to arch/arm64/configs/sdm845-perf_defconfig
index f1d4395..e70996b 100644
--- a/arch/arm64/configs/msmskunk-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -41,7 +41,7 @@
 CONFIG_MODULE_SIG_SHA512=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MSMSKUNK=y
+CONFIG_ARCH_SDM845=y
 CONFIG_PCI=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
@@ -198,6 +198,7 @@
 CONFIG_RMNET_DATA=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
@@ -253,16 +254,18 @@
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
 CONFIG_SPI_SPIDEV=y
-CONFIG_SLIMBUS=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
-CONFIG_PINCTRL_MSMSKUNK=y
-CONFIG_PINCTRL_SDMBAT=y
+CONFIG_PINCTRL_SDM845=y
+CONFIG_PINCTRL_SDM830=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_RPMH=y
@@ -275,6 +278,9 @@
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
 CONFIG_DRM=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -301,7 +307,10 @@
 CONFIG_USB_DWC3_MSM=y
 CONFIG_USB_ISP1760=y
 CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
 CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_MSM_SSPHY_QMP=y
 CONFIG_MSM_QUSB_PHY=y
 CONFIG_USB_GADGET=y
@@ -337,8 +346,8 @@
 CONFIG_ION_MSM=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_MSM_GCC_SKUNK=y
-CONFIG_MSM_VIDEOCC_SKUNK=y
+CONFIG_MSM_GCC_SDM845=y
+CONFIG_MSM_VIDEOCC_SDM845=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
@@ -346,7 +355,7 @@
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_TESTS=y
 CONFIG_QCOM_LLCC=y
-CONFIG_QCOM_MSMSKUNK_LLCC=y
+CONFIG_QCOM_SDM845_LLCC=y
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -370,6 +379,7 @@
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_EXTCON=y
 CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
 CONFIG_PWM=y
 CONFIG_ARM_GIC_V3_ACL=y
 CONFIG_ANDROID=y
diff --git a/arch/arm64/configs/msmskunk_defconfig b/arch/arm64/configs/sdm845_defconfig
similarity index 96%
rename from arch/arm64/configs/msmskunk_defconfig
rename to arch/arm64/configs/sdm845_defconfig
index f3924ed..80a75a9 100644
--- a/arch/arm64/configs/msmskunk_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -42,7 +42,7 @@
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MSMSKUNK=y
+CONFIG_ARCH_SDM845=y
 CONFIG_PCI=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
@@ -204,6 +204,7 @@
 CONFIG_RMNET_DATA=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
 # CONFIG_CFG80211_CRDA_SUPPORT is not set
@@ -260,16 +261,18 @@
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
 CONFIG_SPI_SPIDEV=y
-CONFIG_SLIMBUS=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
-CONFIG_PINCTRL_MSMSKUNK=y
-CONFIG_PINCTRL_SDMBAT=y
+CONFIG_PINCTRL_SDM845=y
+CONFIG_PINCTRL_SDM830=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_RPMH=y
@@ -282,6 +285,9 @@
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
 CONFIG_DRM=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -307,7 +313,10 @@
 CONFIG_USB_DWC3_MSM=y
 CONFIG_USB_ISP1760=y
 CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
 CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_MSM_SSPHY_QMP=y
 CONFIG_MSM_QUSB_PHY=y
 CONFIG_USB_GADGET=y
@@ -348,8 +357,8 @@
 CONFIG_ION_MSM=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_MSM_GCC_SKUNK=y
-CONFIG_MSM_VIDEOCC_SKUNK=y
+CONFIG_MSM_GCC_SDM845=y
+CONFIG_MSM_VIDEOCC_SDM845=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
@@ -357,7 +366,7 @@
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_TESTS=y
 CONFIG_QCOM_LLCC=y
-CONFIG_QCOM_MSMSKUNK_LLCC=y
+CONFIG_QCOM_SDM845_LLCC=y
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -381,6 +390,7 @@
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_EXTCON=y
 CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
 CONFIG_PWM=y
 CONFIG_ARM_GIC_V3_ACL=y
 CONFIG_PHY_XGENE=y
diff --git a/drivers/Makefile b/drivers/Makefile
index e7ebee4..1419893 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -80,6 +80,7 @@
 obj-$(CONFIG_MTD)		+= mtd/
 obj-$(CONFIG_SPI)		+= spi/
 obj-$(CONFIG_SPMI)		+= spmi/
+obj-$(CONFIG_SOUNDWIRE)		+= soundwire/
 obj-$(CONFIG_SLIMBUS)		+= slimbus/
 obj-$(CONFIG_HSI)		+= hsi/
 obj-y				+= net/
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 3cc9bff..70ed8d9 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -354,4 +354,33 @@
 	  Say Y here to compile support for HCI over Qualcomm SMD into the
 	  kernel or say M to compile as a module.
 
+config MSM_BT_POWER
+	bool "MSM Bluetooth Power Control"
+	depends on ARCH_QCOM && RFKILL
+	help
+	  Provides a parameter to switch on/off power from PMIC
+	  to Bluetooth device.
+
+config BTFM_SLIM
+	bool "MSM Bluetooth/FM Slimbus Driver"
+	select SLIMBUS
+	default MSM_BT_POWER
+	help
+	  This enables BT/FM slimbus driver to get multiple audio channel.
+	  This will make use of slimbus platform driver and slimbus codec
+	  driver to communicate with slimbus machine driver and LPSS which
+	  is Slimbus master.
+
+	  Slimbus slave initialization and configuration will be done through
+	  this driver.
+
+config BTFM_SLIM_WCN3990
+	bool "MSM Bluetooth/FM WCN3990 Device"
+	default BTFM_SLIM
+	depends on BTFM_SLIM
+	help
+	  This enables specific driver handle for WCN3990 device.
+	  It is designed to adapt any future BT/FM device to implement a specific
+	  chip initialization process and control.
+
 endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b1fc29a..122b184 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -25,6 +25,12 @@
 obj-$(CONFIG_BT_RTL)		+= btrtl.o
 obj-$(CONFIG_BT_QCA)		+= btqca.o
 
+obj-$(CONFIG_MSM_BT_POWER)	+= bluetooth-power.o
+
+obj-$(CONFIG_BTFM_SLIM)		+= btfm_slim.o
+obj-$(CONFIG_BTFM_SLIM)		+= btfm_slim_codec.o
+obj-$(CONFIG_BTFM_SLIM_WCN3990)	+= btfm_slim_wcn3990.o
+
 btmrvl-y			:= btmrvl_main.o
 btmrvl-$(CONFIG_DEBUG_FS)	+= btmrvl_debugfs.o
 
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
new file mode 100644
index 0000000..bfc3648
--- /dev/null
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -0,0 +1,776 @@
+/* Copyright (c) 2009-2010, 2013-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Bluetooth Power Switch Module
+ * controls power to external Bluetooth device
+ * with interface to power management device
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/bluetooth-power.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+
+#if defined(CONFIG_CNSS)
+#include <net/cnss.h>
+#endif
+
+#include "btfm_slim.h"
+#include <linux/fs.h>
+
+#define BT_PWR_DBG(fmt, arg...)  pr_debug("%s: " fmt "\n", __func__, ## arg)
+#define BT_PWR_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
+#define BT_PWR_ERR(fmt, arg...)  pr_err("%s: " fmt "\n", __func__, ## arg)
+
+
+static const struct of_device_id bt_power_match_table[] = {
+	{	.compatible = "qca,ar3002" },
+	{	.compatible = "qca,qca6174" },
+	{	.compatible = "qca,wcn3990" },
+	{}
+};
+
+static struct bluetooth_power_platform_data *bt_power_pdata;
+static struct platform_device *btpdev;
+static bool previous;
+static int pwr_state;
+struct class *bt_class;
+static int bt_major;
+
+static int bt_vreg_init(struct bt_power_vreg_data *vreg)
+{
+	int rc = 0;
+	struct device *dev = &btpdev->dev;
+
+	BT_PWR_DBG("vreg_get for : %s", vreg->name);
+
+	/* Get the regulator handle */
+	vreg->reg = regulator_get(dev, vreg->name);
+	if (IS_ERR(vreg->reg)) {
+		rc = PTR_ERR(vreg->reg);
+		pr_err("%s: regulator_get(%s) failed. rc=%d\n",
+			__func__, vreg->name, rc);
+		goto out;
+	}
+
+	if ((regulator_count_voltages(vreg->reg) > 0)
+			&& (vreg->low_vol_level) && (vreg->high_vol_level))
+		vreg->set_voltage_sup = 1;
+
+out:
+	return rc;
+}
+
+static int bt_vreg_enable(struct bt_power_vreg_data *vreg)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("vreg_en for : %s", vreg->name);
+
+	if (!vreg->is_enabled) {
+		if (vreg->set_voltage_sup) {
+			rc = regulator_set_voltage(vreg->reg,
+						vreg->low_vol_level,
+						vreg->high_vol_level);
+			if (rc < 0) {
+				BT_PWR_ERR("vreg_set_vol(%s) failed rc=%d\n",
+						vreg->name, rc);
+				goto out;
+			}
+		}
+
+		if (vreg->load_uA >= 0) {
+			rc = regulator_set_load(vreg->reg,
+					vreg->load_uA);
+			if (rc < 0) {
+				BT_PWR_ERR("vreg_set_mode(%s) failed rc=%d\n",
+						vreg->name, rc);
+				goto out;
+			}
+		}
+
+		rc = regulator_enable(vreg->reg);
+		if (rc < 0) {
+			BT_PWR_ERR("regulator_enable(%s) failed. rc=%d\n",
+					vreg->name, rc);
+			goto out;
+		}
+		vreg->is_enabled = true;
+	}
+out:
+	return rc;
+}
+
+static int bt_vreg_disable(struct bt_power_vreg_data *vreg)
+{
+	int rc = 0;
+
+	if (!vreg)
+		return rc;
+
+	BT_PWR_DBG("vreg_disable for : %s", vreg->name);
+
+	if (vreg->is_enabled) {
+		rc = regulator_disable(vreg->reg);
+		if (rc < 0) {
+			BT_PWR_ERR("regulator_disable(%s) failed. rc=%d\n",
+					vreg->name, rc);
+			goto out;
+		}
+		vreg->is_enabled = false;
+
+		if (vreg->set_voltage_sup) {
+			/* Set the min voltage to 0 */
+			rc = regulator_set_voltage(vreg->reg, 0,
+					vreg->high_vol_level);
+			if (rc < 0) {
+				BT_PWR_ERR("vreg_set_vol(%s) failed rc=%d\n",
+						vreg->name, rc);
+				goto out;
+			}
+		}
+		if (vreg->load_uA >= 0) {
+			rc = regulator_set_load(vreg->reg, 0);
+			if (rc < 0) {
+				BT_PWR_ERR("vreg_set_mode(%s) failed rc=%d\n",
+						vreg->name, rc);
+			}
+		}
+	}
+out:
+	return rc;
+}
+
+static int bt_configure_vreg(struct bt_power_vreg_data *vreg)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("config %s", vreg->name);
+
+	/* Get the regulator handle for vreg */
+	if (!(vreg->reg)) {
+		rc = bt_vreg_init(vreg);
+		if (rc < 0)
+			return rc;
+	}
+	rc = bt_vreg_enable(vreg);
+
+	return rc;
+}
+
+static int bt_clk_enable(struct bt_power_clk_data *clk)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("%s", clk->name);
+
+	/* Get the clock handle for vreg */
+	if (!clk->clk || clk->is_enabled) {
+		BT_PWR_ERR("error - node: %p, clk->is_enabled:%d",
+			clk->clk, clk->is_enabled);
+		return -EINVAL;
+	}
+
+	rc = clk_prepare_enable(clk->clk);
+	if (rc) {
+		BT_PWR_ERR("failed to enable %s, rc(%d)\n", clk->name, rc);
+		return rc;
+	}
+
+	clk->is_enabled = true;
+	return rc;
+}
+
+static int bt_clk_disable(struct bt_power_clk_data *clk)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("%s", clk->name);
+
+	/* Get the clock handle for vreg */
+	if (!clk->clk || !clk->is_enabled) {
+		BT_PWR_ERR("error - node: %p, clk->is_enabled:%d",
+			clk->clk, clk->is_enabled);
+		return -EINVAL;
+	}
+	clk_disable_unprepare(clk->clk);
+
+	clk->is_enabled = false;
+	return rc;
+}
+
+static int bt_configure_gpios(int on)
+{
+	int rc = 0;
+	int bt_reset_gpio = bt_power_pdata->bt_gpio_sys_rst;
+
+	BT_PWR_DBG("bt_gpio= %d on: %d", bt_reset_gpio, on);
+
+	if (on) {
+		rc = gpio_request(bt_reset_gpio, "bt_sys_rst_n");
+		if (rc) {
+			BT_PWR_ERR("unable to request gpio %d (%d)\n",
+					bt_reset_gpio, rc);
+			return rc;
+		}
+
+		rc = gpio_direction_output(bt_reset_gpio, 0);
+		if (rc) {
+			BT_PWR_ERR("Unable to set direction\n");
+			return rc;
+		}
+		msleep(50);
+		rc = gpio_direction_output(bt_reset_gpio, 1);
+		if (rc) {
+			BT_PWR_ERR("Unable to set direction\n");
+			return rc;
+		}
+		msleep(50);
+	} else {
+		gpio_set_value(bt_reset_gpio, 0);
+		msleep(100);
+	}
+	return rc;
+}
+
+static int bluetooth_power(int on)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("on: %d", on);
+
+	if (on) {
+		if (bt_power_pdata->bt_vdd_io) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_io);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddio config failed");
+				goto out;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_xtal) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_xtal);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddxtal config failed");
+				goto vdd_xtal_fail;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_core) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_core);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddcore config failed");
+				goto vdd_core_fail;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_pa) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_pa);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddpa config failed");
+				goto vdd_pa_fail;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_ldo) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_ldo);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddldo config failed");
+				goto vdd_ldo_fail;
+			}
+		}
+		if (bt_power_pdata->bt_chip_pwd) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_chip_pwd);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power chippwd config failed");
+				goto chip_pwd_fail;
+			}
+		}
+		/* Parse dt_info and check if a target requires clock voting.
+		 * Enable BT clock when BT is on and disable it when BT is off
+		 */
+		if (bt_power_pdata->bt_chip_clk) {
+			rc = bt_clk_enable(bt_power_pdata->bt_chip_clk);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power gpio config failed");
+				goto clk_fail;
+			}
+		}
+		if (bt_power_pdata->bt_gpio_sys_rst > 0) {
+			rc = bt_configure_gpios(on);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power gpio config failed");
+				goto gpio_fail;
+			}
+		}
+	} else {
+		if (bt_power_pdata->bt_gpio_sys_rst > 0)
+			bt_configure_gpios(on);
+gpio_fail:
+		if (bt_power_pdata->bt_gpio_sys_rst > 0)
+			gpio_free(bt_power_pdata->bt_gpio_sys_rst);
+		if (bt_power_pdata->bt_chip_clk)
+			bt_clk_disable(bt_power_pdata->bt_chip_clk);
+clk_fail:
+		if (bt_power_pdata->bt_chip_pwd)
+			bt_vreg_disable(bt_power_pdata->bt_chip_pwd);
+chip_pwd_fail:
+		if (bt_power_pdata->bt_vdd_ldo)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_ldo);
+vdd_ldo_fail:
+		if (bt_power_pdata->bt_vdd_pa)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_pa);
+vdd_pa_fail:
+		if (bt_power_pdata->bt_vdd_core)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_core);
+vdd_core_fail:
+		if (bt_power_pdata->bt_vdd_xtal)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_xtal);
+vdd_xtal_fail:
+		if (bt_power_pdata->bt_vdd_io)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_io);
+	}
+out:
+	return rc;
+}
+
+/* rfkill set_block callback: drives BT power when the soft-block state
+ * actually changes.  @blocked == true means "radio off".  The global
+ * 'previous' latches the last state that was successfully applied.
+ */
+static int bluetooth_toggle_radio(void *data, bool blocked)
+{
+	struct bluetooth_power_platform_data *pdata = data;
+	int ret = 0;
+
+	/* Only touch the hardware on an actual state transition */
+	if (previous != blocked)
+		ret = pdata->bt_power_setup(!blocked);
+
+	/* Remember the new state only if the power switch succeeded */
+	if (!ret)
+		previous = blocked;
+
+	return ret;
+}
+
+/* rfkill operations: user-space soft-block requests toggle BT power */
+static const struct rfkill_ops bluetooth_power_rfkill_ops = {
+	.set_block = bluetooth_toggle_radio,
+};
+
+/* sysfs "extldo" show handler: reports "true" when the CNSS platform
+ * declares an external switching regulator, "false" otherwise (or when
+ * CNSS support is compiled out).
+ */
+#if defined(CONFIG_CNSS) && defined(CONFIG_CLD_LL_CORE)
+static ssize_t enable_extldo(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	int ret;
+	bool enable = false;
+	struct cnss_platform_cap cap;
+
+	ret = cnss_get_platform_cap(&cap);
+	if (ret) {
+		BT_PWR_ERR("Platform capability info from CNSS not available!");
+		enable = false;
+	/* NOTE(review): '!ret' is redundant here - this branch is only
+	 * reached when ret == 0.
+	 */
+	} else if (!ret && (cap.cap_flag & CNSS_HAS_EXTERNAL_SWREG)) {
+		enable = true;
+	}
+	/* Buffer of 6 fits the longest answer "false" plus the NUL */
+	return snprintf(buf, 6, "%s", (enable ? "true" : "false"));
+}
+#else
+static ssize_t enable_extldo(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, 6, "%s", "false");
+}
+#endif
+
+/* Read-only attribute /sys/devices/.../extldo backed by enable_extldo() */
+static DEVICE_ATTR(extldo, 0444, enable_extldo, NULL);
+
+/* Allocate and register the "bt_power" rfkill switch for this platform
+ * device.  BT starts soft-blocked so user space controls power-up.
+ * Returns 0 on success or a negative errno.
+ */
+static int bluetooth_power_rfkill_probe(struct platform_device *pdev)
+{
+	struct rfkill *rfkill;
+	int ret;
+
+	rfkill = rfkill_alloc("bt_power", &pdev->dev, RFKILL_TYPE_BLUETOOTH,
+			      &bluetooth_power_rfkill_ops,
+			      pdev->dev.platform_data);
+
+	if (!rfkill) {
+		dev_err(&pdev->dev, "rfkill allocate failed\n");
+		return -ENOMEM;
+	}
+
+	/* add file into rfkill0 to handle LDO27 */
+	ret = device_create_file(&pdev->dev, &dev_attr_extldo);
+	if (ret < 0)
+		BT_PWR_ERR("device create file error!");
+
+	/* force Bluetooth off during init to allow for user control */
+	rfkill_init_sw_state(rfkill, 1);
+	previous = 1;
+
+	ret = rfkill_register(rfkill);
+	if (ret) {
+		dev_err(&pdev->dev, "rfkill register failed=%d\n", ret);
+		/* Undo the sysfs attribute as well; the original code
+		 * leaked it on this path.
+		 */
+		device_remove_file(&pdev->dev, &dev_attr_extldo);
+		rfkill_destroy(rfkill);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, rfkill);
+
+	return 0;
+}
+
+/* Unregister and free the rfkill switch created at probe time.
+ * rfkill_destroy() tolerates a NULL argument, so it is safe outside
+ * the NULL check.
+ */
+static void bluetooth_power_rfkill_remove(struct platform_device *pdev)
+{
+	struct rfkill *rfkill;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	rfkill = platform_get_drvdata(pdev);
+	if (rfkill)
+		rfkill_unregister(rfkill);
+	rfkill_destroy(rfkill);
+	platform_set_drvdata(pdev, NULL);
+}
+
+#define MAX_PROP_SIZE 32
+/* Parse one "<vreg_name>-supply" regulator reference from the device
+ * tree along with its optional "-voltage-level" (pair of uV values) and
+ * "-current-level" (uA) properties.  On success *vreg_data holds a
+ * devm-allocated descriptor; it stays NULL when the supply is absent.
+ * Returns 0 on success (including the "not provided" case) or -ENOMEM.
+ */
+static int bt_dt_parse_vreg_info(struct device *dev,
+		struct bt_power_vreg_data **vreg_data, const char *vreg_name)
+{
+	int len, ret = 0;
+	const __be32 *prop;
+	char prop_name[MAX_PROP_SIZE];
+	struct bt_power_vreg_data *vreg;
+	struct device_node *np = dev->of_node;
+
+	BT_PWR_DBG("vreg dev tree parse for %s", vreg_name);
+
+	*vreg_data = NULL;
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+	if (of_parse_phandle(np, prop_name, 0)) {
+		vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+		if (!vreg) {
+			dev_err(dev, "No memory for vreg: %s\n", vreg_name);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		vreg->name = vreg_name;
+
+		/* Parse voltage-level from each node */
+		snprintf(prop_name, MAX_PROP_SIZE,
+				"%s-voltage-level", vreg_name);
+		prop = of_get_property(np, prop_name, &len);
+		/* Expect exactly two cells: <low_uV high_uV> */
+		if (!prop || (len != (2 * sizeof(__be32)))) {
+			dev_warn(dev, "%s %s property\n",
+				prop ? "invalid format" : "no", prop_name);
+		} else {
+			vreg->low_vol_level = be32_to_cpup(&prop[0]);
+			vreg->high_vol_level = be32_to_cpup(&prop[1]);
+		}
+
+		/* Parse current-level from each node */
+		snprintf(prop_name, MAX_PROP_SIZE,
+				"%s-current-level", vreg_name);
+		ret = of_property_read_u32(np, prop_name, &vreg->load_uA);
+		if (ret < 0) {
+			/* Current level is optional: -1 means "not set" */
+			BT_PWR_DBG("%s property is not valid\n", prop_name);
+			vreg->load_uA = -1;
+			ret = 0;
+		}
+
+		*vreg_data = vreg;
+		BT_PWR_DBG("%s: vol=[%d %d]uV, current=[%d]uA\n",
+			vreg->name, vreg->low_vol_level,
+			vreg->high_vol_level,
+			vreg->load_uA);
+	} else
+		BT_PWR_INFO("%s: is not provided in device tree", vreg_name);
+
+err:
+	return ret;
+}
+
+/* Parse the optional "clocks"/"clock-names" properties and look the
+ * clock up with devm_clk_get().  On success *clk_data points at a
+ * devm-allocated descriptor.  Returns 0 on success, -EINVAL when no
+ * clock is described in DT, or a negative errno on failure.
+ */
+static int bt_dt_parse_clk_info(struct device *dev,
+		struct bt_power_clk_data **clk_data)
+{
+	int ret = -EINVAL;
+	struct bt_power_clk_data *clk = NULL;
+	struct device_node *np = dev->of_node;
+
+	BT_PWR_DBG("");
+
+	*clk_data = NULL;
+	if (of_parse_phandle(np, "clocks", 0)) {
+		clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
+		if (!clk) {
+			BT_PWR_ERR("No memory for clocks");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		/* Parse clock name from node.  The returned string is
+		 * owned by the device tree, so no buffer needs to be
+		 * allocated (the original code leaked a 20-byte devm
+		 * buffer whose pointer was immediately overwritten).
+		 */
+		ret = of_property_read_string_index(np, "clock-names", 0,
+				&(clk->name));
+		if (ret < 0) {
+			BT_PWR_ERR("reading \"clock-names\" failed");
+			return ret;
+		}
+
+		clk->clk = devm_clk_get(dev, clk->name);
+		if (IS_ERR(clk->clk)) {
+			ret = PTR_ERR(clk->clk);
+			BT_PWR_ERR("failed to get %s, ret (%d)",
+				clk->name, ret);
+			clk->clk = NULL;
+			return ret;
+		}
+
+		*clk_data = clk;
+	} else {
+		BT_PWR_ERR("clocks is not provided in device tree");
+	}
+
+err:
+	return ret;
+}
+
+/* Fill the global bt_power_pdata from device-tree properties: the reset
+ * GPIO, all optional regulators and the optional chip clock.  Every
+ * resource is optional; parse failures are only logged.  Always installs
+ * bluetooth_power() as the power-setup callback.  Returns 0, or -ENOMEM
+ * if bt_power_pdata was never allocated.
+ */
+static int bt_power_populate_dt_pinfo(struct platform_device *pdev)
+{
+	int rc;
+
+	BT_PWR_DBG("");
+
+	if (!bt_power_pdata)
+		return -ENOMEM;
+
+	if (pdev->dev.of_node) {
+		bt_power_pdata->bt_gpio_sys_rst =
+			of_get_named_gpio(pdev->dev.of_node,
+						"qca,bt-reset-gpio", 0);
+		if (bt_power_pdata->bt_gpio_sys_rst < 0)
+			BT_PWR_ERR("bt-reset-gpio not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_core,
+					"qca,bt-vdd-core");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-core not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_io,
+					"qca,bt-vdd-io");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-io not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_xtal,
+					"qca,bt-vdd-xtal");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-xtal not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_pa,
+					"qca,bt-vdd-pa");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-pa not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_ldo,
+					"qca,bt-vdd-ldo");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-ldo not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_chip_pwd,
+					"qca,bt-chip-pwd");
+		if (rc < 0)
+			BT_PWR_ERR("bt-chip-pwd not provided in device tree");
+
+		rc = bt_dt_parse_clk_info(&pdev->dev,
+					&bt_power_pdata->bt_chip_clk);
+		if (rc < 0)
+			BT_PWR_ERR("clock not provided in device tree");
+	}
+
+	bt_power_pdata->bt_power_setup = bluetooth_power;
+
+	return 0;
+}
+
+/* Platform probe: allocate the global platform data, fill it from DT or
+ * from legacy board platform data, then register the rfkill switch.
+ * Returns 0 on success or a negative errno.
+ */
+static int bt_power_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	bt_power_pdata =
+		kzalloc(sizeof(struct bluetooth_power_platform_data),
+			GFP_KERNEL);
+
+	if (!bt_power_pdata) {
+		BT_PWR_ERR("Failed to allocate memory");
+		return -ENOMEM;
+	}
+
+	if (pdev->dev.of_node) {
+		ret = bt_power_populate_dt_pinfo(pdev);
+		if (ret < 0) {
+			BT_PWR_ERR("Failed to populate device tree info");
+			goto free_pdata;
+		}
+		pdev->dev.platform_data = bt_power_pdata;
+	} else if (pdev->dev.platform_data) {
+		/* Optional data set to default if not provided */
+		if (!((struct bluetooth_power_platform_data *)
+			(pdev->dev.platform_data))->bt_power_setup)
+			((struct bluetooth_power_platform_data *)
+				(pdev->dev.platform_data))->bt_power_setup =
+						bluetooth_power;
+
+		memcpy(bt_power_pdata, pdev->dev.platform_data,
+			sizeof(struct bluetooth_power_platform_data));
+		pwr_state = 0;
+	} else {
+		BT_PWR_ERR("Failed to get platform data");
+		/* Original code returned 0 (success) here */
+		ret = -ENODEV;
+		goto free_pdata;
+	}
+
+	/* Propagate the rfkill error; original code returned 0 here too */
+	ret = bluetooth_power_rfkill_probe(pdev);
+	if (ret < 0)
+		goto free_pdata;
+
+	btpdev = pdev;
+
+	return 0;
+
+free_pdata:
+	kfree(bt_power_pdata);
+	return ret;
+}
+
+/* Platform remove: tear down rfkill, release the chip-power regulator
+ * (if one was parsed from DT) and free the global platform data.
+ */
+static int bt_power_remove(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	bluetooth_power_rfkill_remove(pdev);
+
+	/* bt_chip_pwd is optional in DT: guard against a NULL descriptor
+	 * before dereferencing ->reg (the original code oopsed here when
+	 * qca,bt-chip-pwd was absent).
+	 */
+	if (bt_power_pdata->bt_chip_pwd && bt_power_pdata->bt_chip_pwd->reg)
+		regulator_put(bt_power_pdata->bt_chip_pwd->reg);
+
+	kfree(bt_power_pdata);
+
+	return 0;
+}
+
+/* Record the BTFM slimbus device so bt_ioctl(BT_CMD_SLIM_TEST) can hand
+ * its platform data to btfm_slim_hw_init().  Must run after probe has
+ * allocated bt_power_pdata.  Returns 0 or -EINVAL.
+ */
+int bt_register_slimdev(struct device *dev)
+{
+	BT_PWR_DBG("");
+
+	/* Reject registration when either side is missing */
+	if (dev == NULL || !bt_power_pdata) {
+		BT_PWR_ERR("Failed to allocate memory");
+		return -EINVAL;
+	}
+
+	bt_power_pdata->slim_dev = dev;
+	return 0;
+}
+
+/* Character-device ioctl handler.
+ * BT_CMD_SLIM_TEST: initialize the registered BTFM slimbus slave.
+ * BT_CMD_PWR_CTRL:  switch BT power on/off (arg is 0/1), tracking the
+ *                   current state in pwr_state to avoid redundant toggles.
+ */
+static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret = 0, pwr_cntrl = 0;
+
+	switch (cmd) {
+	case BT_CMD_SLIM_TEST:
+		if (!bt_power_pdata->slim_dev) {
+			BT_PWR_ERR("slim_dev is null\n");
+			return -EINVAL;
+		}
+		ret = btfm_slim_hw_init(
+			bt_power_pdata->slim_dev->platform_data
+		);
+		break;
+	case BT_CMD_PWR_CTRL:
+		pwr_cntrl = (int)arg;
+		BT_PWR_ERR("BT_CMD_PWR_CTRL pwr_cntrl:%d", pwr_cntrl);
+		if (pwr_state != pwr_cntrl) {
+			ret = bluetooth_power(pwr_cntrl);
+			/* Latch the state only on a successful transition */
+			if (!ret)
+				pwr_state = pwr_cntrl;
+		} else {
+			BT_PWR_ERR("BT chip state is already :%d no change d\n"
+				, pwr_state);
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return ret;
+}
+
+/* Platform driver matched via DT (bt_power_match_table) or by name */
+static struct platform_driver bt_power_driver = {
+	.probe = bt_power_probe,
+	.remove = bt_power_remove,
+	.driver = {
+		.name = "bt_power",
+		.owner = THIS_MODULE,
+		.of_match_table = bt_power_match_table,
+	},
+};
+
+/* File operations for the "btpower" character device (ioctl only) */
+static const struct file_operations bt_dev_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl = bt_ioctl,
+	.compat_ioctl = bt_ioctl,
+};
+
+/* Module init: register the platform driver, allocate a char-dev major,
+ * create the "bt-dev" class and the "btpower" device node.  Unwinds in
+ * reverse order on failure (the original code returned 0 on chrdev/class
+ * failure, unregistered a negative major and destroyed an ERR_PTR class).
+ */
+static int __init bluetooth_power_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&bt_power_driver);
+	if (ret)
+		return ret;
+
+	bt_major = register_chrdev(0, "bt", &bt_dev_fops);
+	if (bt_major < 0) {
+		BTFMSLIM_ERR("failed to allocate char dev\n");
+		ret = bt_major;
+		goto err_driver;
+	}
+
+	bt_class = class_create(THIS_MODULE, "bt-dev");
+	if (IS_ERR(bt_class)) {
+		BTFMSLIM_ERR("coudn't create class");
+		ret = PTR_ERR(bt_class);
+		goto err_chrdev;
+	}
+
+	if (device_create(bt_class, NULL, MKDEV(bt_major, 0),
+		NULL, "btpower") == NULL) {
+		BTFMSLIM_ERR("failed to allocate char dev\n");
+		ret = -ENODEV;
+		goto err_class;
+	}
+	return 0;
+
+err_class:
+	class_destroy(bt_class);
+err_chrdev:
+	unregister_chrdev(bt_major, "bt");
+err_driver:
+	platform_driver_unregister(&bt_power_driver);
+	return ret;
+}
+
+/* Module exit: tear down everything bluetooth_power_init() created, in
+ * reverse order (the original only unregistered the platform driver and
+ * leaked the device node, class and char-dev major).
+ */
+static void __exit bluetooth_power_exit(void)
+{
+	device_destroy(bt_class, MKDEV(bt_major, 0));
+	class_destroy(bt_class);
+	unregister_chrdev(bt_major, "bt");
+	platform_driver_unregister(&bt_power_driver);
+}
+
+/* Module metadata and entry/exit hooks */
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM Bluetooth power control driver");
+
+module_init(bluetooth_power_init);
+module_exit(bluetooth_power_exit);
diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c
new file mode 100644
index 0000000..dc9bb0b
--- /dev/null
+++ b/drivers/bluetooth/btfm_slim.c
@@ -0,0 +1,563 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/debugfs.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <btfm_slim.h>
+#include <btfm_slim_wcn3990.h>
+#include <linux/bluetooth-power.h>
+
+/* Write @bytes from @src to slimbus slave register @reg on either the
+ * PGD or IFD device, retrying up to SLIM_SLAVE_RW_MAX_TRIES times with a
+ * ~5ms pause between attempts.  Returns 0 on success or the last slimbus
+ * error code.
+ * NOTE(review): only start_offset/num_bytes/comp of 'msg' are set; any
+ * other slim_ele_access fields stay uninitialized stack data - confirm
+ * slim_change_val_element() ignores them.
+ */
+int btfm_slim_write(struct btfmslim *btfmslim,
+		uint16_t reg, int bytes, void *src, uint8_t pgd)
+{
+	int ret, i;
+	struct slim_ele_access msg;
+	int slim_write_tries = SLIM_SLAVE_RW_MAX_TRIES;
+
+	BTFMSLIM_DBG("Write to %s", pgd?"PGD":"IFD");
+	msg.start_offset = SLIM_SLAVE_REG_OFFSET + reg;
+	msg.num_bytes = bytes;
+	msg.comp = NULL;
+
+	/* Retry loop: xfer_lock serializes each individual transfer */
+	for ( ; slim_write_tries != 0; slim_write_tries--) {
+		mutex_lock(&btfmslim->xfer_lock);
+		ret = slim_change_val_element(pgd ? btfmslim->slim_pgd :
+			&btfmslim->slim_ifd, &msg, src, bytes);
+		mutex_unlock(&btfmslim->xfer_lock);
+		if (ret == 0)
+			break;
+		usleep_range(5000, 5100);
+	}
+
+	if (ret) {
+		BTFMSLIM_ERR("failed (%d)", ret);
+		return ret;
+	}
+
+	for (i = 0; i < bytes; i++)
+		BTFMSLIM_DBG("Write 0x%02x to reg 0x%x", ((uint8_t *)src)[i],
+			reg + i);
+	return 0;
+}
+
+/* Convenience wrapper: write to the PGD (ported generic) device */
+int btfm_slim_write_pgd(struct btfmslim *btfmslim,
+		uint16_t reg, int bytes, void *src)
+{
+	return btfm_slim_write(btfmslim, reg, bytes, src, PGD);
+}
+
+/* Convenience wrapper: write to the IFD (interface) device */
+int btfm_slim_write_inf(struct btfmslim *btfmslim,
+		uint16_t reg, int bytes, void *src)
+{
+	return btfm_slim_write(btfmslim, reg, bytes, src, IFD);
+}
+
+/* Read @bytes into @dest from slimbus slave register @reg on either the
+ * PGD or IFD device, retrying up to SLIM_SLAVE_RW_MAX_TRIES times with a
+ * ~5ms pause between attempts.  Returns 0 on success or the last slimbus
+ * error code (the original version always returned 0, even on failure,
+ * and then dumped the uninitialized destination buffer).
+ */
+int btfm_slim_read(struct btfmslim *btfmslim, unsigned short reg,
+				int bytes, void *dest, uint8_t pgd)
+{
+	int ret, i;
+	struct slim_ele_access msg;
+	int slim_read_tries = SLIM_SLAVE_RW_MAX_TRIES;
+
+	BTFMSLIM_DBG("Read from %s", pgd?"PGD":"IFD");
+	msg.start_offset = SLIM_SLAVE_REG_OFFSET + reg;
+	msg.num_bytes = bytes;
+	msg.comp = NULL;
+
+	/* Retry loop: xfer_lock serializes each individual transfer */
+	for ( ; slim_read_tries != 0; slim_read_tries--) {
+		mutex_lock(&btfmslim->xfer_lock);
+		ret = slim_request_val_element(pgd ? btfmslim->slim_pgd :
+			&btfmslim->slim_ifd, &msg, dest, bytes);
+		mutex_unlock(&btfmslim->xfer_lock);
+		if (ret == 0)
+			break;
+		usleep_range(5000, 5100);
+	}
+
+	if (ret) {
+		BTFMSLIM_ERR("failed (%d)", ret);
+		return ret;
+	}
+
+	for (i = 0; i < bytes; i++)
+		BTFMSLIM_DBG("Read 0x%02x from reg 0x%x", ((uint8_t *)dest)[i],
+			reg + i);
+
+	return 0;
+}
+
+/* Convenience wrapper: read from the PGD (ported generic) device */
+int btfm_slim_read_pgd(struct btfmslim *btfmslim,
+		uint16_t reg, int bytes, void *dest)
+{
+	return btfm_slim_read(btfmslim, reg, bytes, dest, PGD);
+}
+
+/* Convenience wrapper: read from the IFD (interface) device */
+int btfm_slim_read_inf(struct btfmslim *btfmslim,
+		uint16_t reg, int bytes, void *dest)
+{
+	return btfm_slim_read(btfmslim, reg, bytes, dest, IFD);
+}
+
+/* Define, connect and activate @nchan slimbus channels starting at @ch.
+ * @rxport selects sink (1) vs source (0) connection, @rates picks the
+ * channel rate multiplier, @grp groups two channels under one handle.
+ * Returns 0 on success; on a connection/activation failure the channel
+ * is removed before returning the error.
+ */
+int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
+	uint8_t rxport, uint32_t rates, uint8_t grp, uint8_t nchan)
+{
+	int ret, i;
+	struct slim_ch prop;
+	struct btfmslim_ch *chan = ch;
+	uint16_t ch_h[2];
+
+	if (!btfmslim || !ch)
+		return -EINVAL;
+
+	BTFMSLIM_DBG("port:%d", ch->port);
+
+	/* Define the channel with below parameters */
+	prop.prot = SLIM_AUTO_ISO;
+	prop.baser = SLIM_RATE_4000HZ;
+	/* 48kHz uses an undefined data format; everything else is LPCM */
+	prop.dataf = (rates == 48000) ? SLIM_CH_DATAF_NOT_DEFINED
+			: SLIM_CH_DATAF_LPCM_AUDIO;
+	prop.auxf = SLIM_CH_AUXF_NOT_APPLICABLE;
+	prop.ratem = (rates/4000);
+	prop.sampleszbits = 16;
+
+	ch_h[0] = ch->ch_hdl;
+	ch_h[1] = (grp) ? (ch+1)->ch_hdl : 0;
+
+	ret = slim_define_ch(btfmslim->slim_pgd, &prop, ch_h, nchan, grp,
+			&ch->grph);
+	if (ret < 0) {
+		BTFMSLIM_ERR("slim_define_ch failed ret[%d]", ret);
+		goto error;
+	}
+
+	for (i = 0; i < nchan; i++, ch++) {
+		/* Enable port through registration setting */
+		if (btfmslim->vendor_port_en) {
+			ret = btfmslim->vendor_port_en(btfmslim, ch->port,
+					rxport, 1);
+			if (ret < 0) {
+				BTFMSLIM_ERR("vendor_port_en failed ret[%d]",
+					ret);
+				goto error;
+			}
+		}
+
+		if (rxport) {
+			BTFMSLIM_INFO("slim_connect_sink(port: %d, ch: %d)",
+				ch->port, ch->ch);
+			/* Connect Port with channel given by Machine driver*/
+			ret = slim_connect_sink(btfmslim->slim_pgd,
+				&ch->port_hdl, 1, ch->ch_hdl);
+			if (ret < 0) {
+				BTFMSLIM_ERR("slim_connect_sink failed ret[%d]",
+					ret);
+				goto remove_channel;
+			}
+
+		} else {
+			BTFMSLIM_INFO("slim_connect_src(port: %d, ch: %d)",
+				ch->port, ch->ch);
+			/* Connect Port with channel given by Machine driver*/
+			ret = slim_connect_src(btfmslim->slim_pgd, ch->port_hdl,
+				ch->ch_hdl);
+			if (ret < 0) {
+				BTFMSLIM_ERR("slim_connect_src failed ret[%d]",
+					ret);
+				goto remove_channel;
+			}
+		}
+	}
+
+	/* Activate the channel immediately */
+	BTFMSLIM_INFO(
+		"port: %d, ch: %d, grp: %d, ch->grph: 0x%x, ch_hdl: 0x%x",
+		chan->port, chan->ch, grp, chan->grph, chan->ch_hdl);
+	ret = slim_control_ch(btfmslim->slim_pgd, (grp ? chan->grph :
+		chan->ch_hdl), SLIM_CH_ACTIVATE, true);
+	if (ret < 0) {
+		BTFMSLIM_ERR("slim_control_ch failed ret[%d]", ret);
+		goto remove_channel;
+	}
+
+error:
+	return ret;
+
+remove_channel:
+	/* Remove the channel immediately*/
+	ret = slim_control_ch(btfmslim->slim_pgd, (grp ? ch->grph : ch->ch_hdl),
+			SLIM_CH_REMOVE, true);
+	if (ret < 0)
+		BTFMSLIM_ERR("slim_control_ch failed ret[%d]", ret);
+
+	return ret;
+}
+
+/* Remove @nchan active channels starting at @ch and disable their ports
+ * via the vendor callback.  Returns the last error code, 0 on success.
+ * NOTE(review): slim_disconnect_ports() is only attempted when
+ * slim_control_ch() fails - confirm the ports are implicitly
+ * disconnected by SLIM_CH_REMOVE on the success path.
+ */
+int btfm_slim_disable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
+	uint8_t rxport, uint8_t grp, uint8_t nchan)
+{
+	int ret, i;
+
+	if (!btfmslim || !ch)
+		return -EINVAL;
+
+	BTFMSLIM_INFO("port:%d, grp: %d, ch->grph:0x%x, ch->ch_hdl:0x%x ",
+		ch->port, grp, ch->grph, ch->ch_hdl);
+	/* Remove the channel immediately*/
+	ret = slim_control_ch(btfmslim->slim_pgd, (grp ? ch->grph : ch->ch_hdl),
+			SLIM_CH_REMOVE, true);
+	if (ret < 0) {
+		BTFMSLIM_ERR("slim_control_ch failed ret[%d]", ret);
+		/* Fall back to force-disconnecting the port */
+		ret = slim_disconnect_ports(btfmslim->slim_pgd,
+			&ch->port_hdl, 1);
+		if (ret < 0) {
+			BTFMSLIM_ERR("slim_disconnect_ports failed ret[%d]",
+				ret);
+			goto error;
+		}
+	}
+
+	/* Disable port through registration setting */
+	for (i = 0; i < nchan; i++, ch++) {
+		if (btfmslim->vendor_port_en) {
+			ret = btfmslim->vendor_port_en(btfmslim, ch->port,
+				rxport, 0);
+			if (ret < 0) {
+				BTFMSLIM_ERR("vendor_port_en failed ret[%d]",
+					ret);
+				break;
+			}
+		}
+	}
+error:
+	return ret;
+}
+/* Poll slim_get_logical_addr() until the slave reports present or until
+ * SLIM_SLAVE_PRESENT_TIMEOUT ms elapse, sleeping ~1ms between attempts.
+ * Returns 0 once the logical address is assigned, else the last error.
+ */
+static int btfm_slim_get_logical_addr(struct slim_device *slim)
+{
+	int ret = 0;
+	const unsigned long timeout = jiffies +
+			      msecs_to_jiffies(SLIM_SLAVE_PRESENT_TIMEOUT);
+
+	do {
+		ret = slim_get_logical_addr(slim, slim->e_addr,
+			ARRAY_SIZE(slim->e_addr), &slim->laddr);
+		if (!ret)  {
+			BTFMSLIM_DBG("Assigned l-addr: 0x%x", slim->laddr);
+			break;
+		}
+		/* Give SLIMBUS time to report present and be ready. */
+		usleep_range(1000, 1100);
+		BTFMSLIM_DBG("retyring get logical addr");
+	} while (time_before(jiffies, timeout));
+
+	return ret;
+}
+
+/* Resolve slimbus port handles for every Rx (sink) and Tx (source)
+ * channel in the vendor channel tables, stopping at the
+ * BTFM_SLIM_PGD_PORT_LAST sentinel.  Returns 0 on success or the first
+ * slim_get_slaveport() error.
+ */
+static int btfm_slim_alloc_port(struct btfmslim *btfmslim)
+{
+	int ret = -EINVAL, i;
+	struct btfmslim_ch *rx_chs;
+	struct btfmslim_ch *tx_chs;
+
+	if (!btfmslim)
+		return ret;
+
+	rx_chs = btfmslim->rx_chs;
+	tx_chs = btfmslim->tx_chs;
+
+	if (!rx_chs || !tx_chs)
+		return ret;
+
+	BTFMSLIM_DBG("Rx: id\tname\tport\thdl\tch\tch_hdl");
+	for (i = 0 ; (rx_chs->port != BTFM_SLIM_PGD_PORT_LAST) &&
+		(i < BTFM_SLIM_NUM_CODEC_DAIS); i++, rx_chs++) {
+
+		/* Get Rx port handler from slimbus driver based
+		 * on port number
+		 */
+		ret = slim_get_slaveport(btfmslim->slim_pgd->laddr,
+			rx_chs->port, &rx_chs->port_hdl, SLIM_SINK);
+		if (ret < 0) {
+			/* Log the actual error; the original printed the
+			 * SLIM_SINK constant in the ret[] field.
+			 */
+			BTFMSLIM_ERR("slave port failure port#%d - ret[%d]",
+				rx_chs->port, ret);
+			return ret;
+		}
+		BTFMSLIM_DBG("    %d\t%s\t%d\t%x\t%d\t%x", rx_chs->id,
+			rx_chs->name, rx_chs->port, rx_chs->port_hdl,
+			rx_chs->ch, rx_chs->ch_hdl);
+	}
+
+	BTFMSLIM_DBG("Tx: id\tname\tport\thdl\tch\tch_hdl");
+	for (i = 0; (tx_chs->port != BTFM_SLIM_PGD_PORT_LAST) &&
+		(i < BTFM_SLIM_NUM_CODEC_DAIS); i++, tx_chs++) {
+
+		/* Get Tx port handler from slimbus driver based
+		 * on port number
+		 */
+		ret = slim_get_slaveport(btfmslim->slim_pgd->laddr,
+			tx_chs->port, &tx_chs->port_hdl, SLIM_SRC);
+		if (ret < 0) {
+			/* Same fix as above: report ret, not SLIM_SRC */
+			BTFMSLIM_ERR("slave port failure port#%d - ret[%d]",
+				tx_chs->port, ret);
+			return ret;
+		}
+		BTFMSLIM_DBG("    %d\t%s\t%d\t%x\t%d\t%x", tx_chs->id,
+			tx_chs->name, tx_chs->port, tx_chs->port_hdl,
+			tx_chs->ch, tx_chs->ch_hdl);
+	}
+	return ret;
+}
+
+/* Bring up the slimbus slave: resolve PGD and IFD logical addresses,
+ * allocate port handles and run the vendor init hook.  Idempotent -
+ * returns 0 immediately when already enabled.  Returns 0 on success.
+ */
+int btfm_slim_hw_init(struct btfmslim *btfmslim)
+{
+	int ret;
+
+	BTFMSLIM_DBG("");
+	if (!btfmslim)
+		return -EINVAL;
+
+	if (btfmslim->enabled) {
+		BTFMSLIM_DBG("Already enabled");
+		return 0;
+	}
+	mutex_lock(&btfmslim->io_lock);
+
+	/* Assign Logical Address for PGD (Ported Generic Device)
+	 * enumeration address
+	 */
+	ret = btfm_slim_get_logical_addr(btfmslim->slim_pgd);
+	if (ret) {
+		BTFMSLIM_ERR("failed to get slimbus %s logical address: %d",
+		       btfmslim->slim_pgd->name, ret);
+		goto error;
+	}
+
+	/* Assign Logical Address for Ported Generic Device
+	 * enumeration address
+	 */
+	ret = btfm_slim_get_logical_addr(&btfmslim->slim_ifd);
+	if (ret) {
+		BTFMSLIM_ERR("failed to get slimbus %s logical address: %d",
+		       btfmslim->slim_ifd.name, ret);
+		goto error;
+	}
+
+	/* Allocate ports with logical address to get port handler from
+	 * slimbus driver
+	 */
+	ret = btfm_slim_alloc_port(btfmslim);
+	if (ret)
+		goto error;
+
+	/* Start vendor specific initialization and get port information */
+	if (btfmslim->vendor_init)
+		ret = btfm_slim->vendor_init == NULL ? ret : btfmslim->vendor_init(btfmslim);
+
+	/* Only when all registers read/write successfully, it set to
+	 * enabled status (the original set enabled even when vendor_init
+	 * failed, contradicting this comment)
+	 */
+	if (!ret)
+		btfmslim->enabled = 1;
+error:
+	mutex_unlock(&btfmslim->io_lock);
+	return ret;
+}
+
+
+/* Mark the slimbus slave disabled so the next DAI startup re-runs the
+ * full btfm_slim_hw_init() sequence.  Idempotent; returns 0, or -EINVAL
+ * for a NULL device pointer.
+ */
+int btfm_slim_hw_deinit(struct btfmslim *btfmslim)
+{
+	if (!btfmslim)
+		return -EINVAL;
+
+	if (!btfmslim->enabled) {
+		BTFMSLIM_DBG("Already disabled");
+		return 0;
+	}
+
+	/* io_lock serializes the flag update against hw_init() */
+	mutex_lock(&btfmslim->io_lock);
+	btfmslim->enabled = 0;
+	mutex_unlock(&btfmslim->io_lock);
+
+	return 0;
+}
+
+/* Populate the IFD slim_device from the PGD node's device tree:
+ * "qcom,btfm-slim-ifd" gives the IFD name and
+ * "qcom,btfm-slim-ifd-elemental-addr" its 6-byte enumeration address.
+ * Returns 0 on success, -EINVAL for bad arguments, -ENODEV when the
+ * required properties are missing or malformed.
+ */
+static int btfm_slim_get_dt_info(struct btfmslim *btfmslim)
+{
+	int ret = 0;
+	struct slim_device *slim = btfmslim->slim_pgd;
+	struct slim_device *slim_ifd = &btfmslim->slim_ifd;
+	struct property *prop;
+
+	if (!slim || !slim_ifd)
+		return -EINVAL;
+
+	if (slim->dev.of_node) {
+		BTFMSLIM_DBG("Platform data from device tree (%s)",
+			slim->name);
+		ret = of_property_read_string(slim->dev.of_node,
+			"qcom,btfm-slim-ifd", &slim_ifd->name);
+		if (ret) {
+			BTFMSLIM_ERR("Looking up %s property in node %s failed",
+				"qcom,btfm-slim-ifd",
+				 slim->dev.of_node->full_name);
+			return -ENODEV;
+		}
+		BTFMSLIM_DBG("qcom,btfm-slim-ifd (%s)", slim_ifd->name);
+
+		prop = of_find_property(slim->dev.of_node,
+				"qcom,btfm-slim-ifd-elemental-addr", NULL);
+		if (!prop) {
+			BTFMSLIM_ERR("Looking up %s property in node %s failed",
+				"qcom,btfm-slim-ifd-elemental-addr",
+				slim->dev.of_node->full_name);
+			return -ENODEV;
+		} else if (prop->length != 6) {
+			/* Enumeration addresses are exactly 6 bytes */
+			BTFMSLIM_ERR(
+				"invalid codec slim ifd addr. addr length= %d",
+				prop->length);
+			return -ENODEV;
+		}
+		memcpy(slim_ifd->e_addr, prop->value, 6);
+		BTFMSLIM_DBG(
+			"PGD Enum Addr: %.02x:%.02x:%.02x:%.02x:%.02x: %.02x",
+			slim->e_addr[0], slim->e_addr[1], slim->e_addr[2],
+			slim->e_addr[3], slim->e_addr[4], slim->e_addr[5]);
+		BTFMSLIM_DBG(
+			"IFD Enum Addr: %.02x:%.02x:%.02x:%.02x:%.02x: %.02x",
+			slim_ifd->e_addr[0], slim_ifd->e_addr[1],
+			slim_ifd->e_addr[2], slim_ifd->e_addr[3],
+			slim_ifd->e_addr[4], slim_ifd->e_addr[5]);
+	} else {
+		BTFMSLIM_ERR("Platform data is not valid");
+	}
+
+	return ret;
+}
+
+/* Slimbus probe: allocate driver state, read DT info, add the IFD
+ * device, then register the ASoC codec and hand the device to the
+ * bluetooth power driver.  Returns 0 on success or a negative errno.
+ */
+static int btfm_slim_probe(struct slim_device *slim)
+{
+	int ret = 0;
+	struct btfmslim *btfm_slim;
+
+	BTFMSLIM_DBG("");
+	if (!slim->ctrl)
+		return -EINVAL;
+
+	/* Allocation btfmslim data pointer */
+	btfm_slim = kzalloc(sizeof(struct btfmslim), GFP_KERNEL);
+	if (btfm_slim == NULL) {
+		BTFMSLIM_ERR("error, allocation failed");
+		return -ENOMEM;
+	}
+	/* BTFM Slimbus driver control data configuration */
+	btfm_slim->slim_pgd = slim;
+
+	/* Assign vendor specific function */
+	btfm_slim->rx_chs = SLIM_SLAVE_RXPORT;
+	btfm_slim->tx_chs = SLIM_SLAVE_TXPORT;
+	btfm_slim->vendor_init = SLIM_SLAVE_INIT;
+	btfm_slim->vendor_port_en = SLIM_SLAVE_PORT_EN;
+
+	/* Created Mutex for slimbus data transfer */
+	mutex_init(&btfm_slim->io_lock);
+	mutex_init(&btfm_slim->xfer_lock);
+
+	/* Get Device tree node for Interface Device enumeration address */
+	ret = btfm_slim_get_dt_info(btfm_slim);
+	if (ret)
+		goto dealloc;
+
+	/* Add Interface Device for slimbus driver */
+	ret = slim_add_device(btfm_slim->slim_pgd->ctrl, &btfm_slim->slim_ifd);
+	if (ret) {
+		BTFMSLIM_ERR("error, adding SLIMBUS device failed");
+		goto dealloc;
+	}
+
+	/* Platform driver data allocation */
+	slim->dev.platform_data = btfm_slim;
+
+	/* Driver specific data allocation */
+	btfm_slim->dev = &slim->dev;
+	/* Do not discard the codec registration error (the original
+	 * overwrote it with bt_register_slimdev()'s return value).
+	 */
+	ret = btfm_slim_register_codec(&slim->dev);
+	if (ret) {
+		BTFMSLIM_ERR("error, registering codec failed");
+		goto remove_device;
+	}
+	ret = bt_register_slimdev(&slim->dev);
+	return ret;
+
+remove_device:
+	slim_remove_device(&btfm_slim->slim_ifd);
+dealloc:
+	mutex_destroy(&btfm_slim->io_lock);
+	mutex_destroy(&btfm_slim->xfer_lock);
+	kfree(btfm_slim);
+	return ret;
+}
+/* Slimbus remove: undo probe in reverse - unregister the codec, remove
+ * the IFD and PGD devices and free the driver state.
+ */
+static int btfm_slim_remove(struct slim_device *slim)
+{
+	struct btfmslim *btfm_slim = slim->dev.platform_data;
+
+	BTFMSLIM_DBG("");
+	mutex_destroy(&btfm_slim->io_lock);
+	mutex_destroy(&btfm_slim->xfer_lock);
+	snd_soc_unregister_codec(&slim->dev);
+
+	BTFMSLIM_DBG("slim_remove_device() - btfm_slim->slim_ifd");
+	slim_remove_device(&btfm_slim->slim_ifd);
+
+	kfree(btfm_slim);
+
+	BTFMSLIM_DBG("slim_remove_device() - btfm_slim->slim_pgd");
+	slim_remove_device(slim);
+	return 0;
+}
+
+/* Device match table: matched by the vendor-specific compatible string */
+static const struct slim_device_id btfm_slim_id[] = {
+	{SLIM_SLAVE_COMPATIBLE_STR, 0},
+	{}
+};
+
+/* Slimbus driver registration glue */
+static struct slim_driver btfm_slim_driver = {
+	.driver = {
+		.name = "btfmslim-driver",
+		.owner = THIS_MODULE,
+	},
+	.probe = btfm_slim_probe,
+	.remove = btfm_slim_remove,
+	.id_table = btfm_slim_id
+};
+
+/* Module entry: register the BTFM slimbus slave driver with the core. */
+static int __init btfm_slim_init(void)
+{
+	int ret;
+
+	BTFMSLIM_DBG("");
+
+	ret = slim_driver_register(&btfm_slim_driver);
+	if (ret != 0)
+		BTFMSLIM_ERR("Failed to register slimbus driver: %d", ret);
+
+	return ret;
+}
+
+/* Module exit: unregister the slimbus driver */
+static void __exit btfm_slim_exit(void)
+{
+	BTFMSLIM_DBG("");
+	slim_driver_unregister(&btfm_slim_driver);
+}
+
+module_init(btfm_slim_init);
+module_exit(btfm_slim_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("BTFM Slimbus Slave driver");
diff --git a/drivers/bluetooth/btfm_slim.h b/drivers/bluetooth/btfm_slim.h
new file mode 100644
index 0000000..00d46a5
--- /dev/null
+++ b/drivers/bluetooth/btfm_slim.h
@@ -0,0 +1,164 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef BTFM_SLIM_H
+#define BTFM_SLIM_H
+#include <linux/slimbus/slimbus.h>
+
+/* Logging helpers: prefix every message with the function name */
+#define BTFMSLIM_DBG(fmt, arg...)  pr_debug("%s: " fmt "\n", __func__, ## arg)
+#define BTFMSLIM_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
+#define BTFMSLIM_ERR(fmt, arg...)  pr_err("%s: " fmt "\n", __func__, ## arg)
+
+/* Vendor specific defines
+ * This should redefines in slimbus slave specific header
+ */
+#define SLIM_SLAVE_COMPATIBLE_STR	"btfmslim_slave"
+#define SLIM_SLAVE_REG_OFFSET		0x0000
+#define SLIM_SLAVE_RXPORT		NULL
+#define SLIM_SLAVE_TXPORT		NULL
+#define SLIM_SLAVE_INIT			NULL
+#define SLIM_SLAVE_PORT_EN		NULL
+
+/* Misc defines */
+#define SLIM_SLAVE_RW_MAX_TRIES		3
+#define SLIM_SLAVE_PRESENT_TIMEOUT	100
+
+/* Device selector for btfm_slim_read/write: ported generic vs interface */
+#define PGD	1
+#define IFD	0
+
+
+/* Codec driver defines */
+enum {
+	BTFM_FM_SLIM_TX = 0,
+	BTFM_BT_SCO_SLIM_TX,
+	BTFM_BT_SCO_A2DP_SLIM_RX,
+	BTFM_BT_SPLIT_A2DP_SLIM_RX,
+	BTFM_SLIM_NUM_CODEC_DAIS
+};
+
+/* Slimbus Port defines - This should be redefined in specific device file */
+#define BTFM_SLIM_PGD_PORT_LAST				0xFF
+
+/* Per-DAI slimbus channel descriptor; handles are filled at hw_init */
+struct btfmslim_ch {
+	int id;			/* codec DAI id (see enum above) */
+	char *name;
+	uint32_t port_hdl;	/* slimbus port handler */
+	uint16_t port;		/* slimbus port number */
+
+	uint8_t ch;		/* slimbus channel number */
+	uint16_t ch_hdl;	/* slimbus channel handler */
+	uint16_t grph;	/* slimbus group channel handler */
+};
+
+/* Driver state shared between the slimbus slave and the ASoC codec */
+struct btfmslim {
+	struct device *dev;
+	struct slim_device *slim_pgd;	/* ported generic device */
+	struct slim_device slim_ifd;	/* interface device */
+	struct mutex io_lock;		/* serializes hw_init/hw_deinit */
+	struct mutex xfer_lock;		/* serializes element transfers */
+	uint8_t enabled;		/* nonzero once hw_init succeeded */
+
+	uint32_t num_rx_port;
+	uint32_t num_tx_port;
+
+	struct btfmslim_ch *rx_chs;	/* vendor Rx channel table */
+	struct btfmslim_ch *tx_chs;	/* vendor Tx channel table */
+
+	/* vendor hooks: chip-specific init and per-port enable/disable */
+	int (*vendor_init)(struct btfmslim *btfmslim);
+	int (*vendor_port_en)(struct btfmslim *btfmslim, uint8_t port_num,
+		uint8_t rxport, uint8_t enable);
+};
+
+/**
+ * btfm_slim_hw_init: Initialize slimbus slave device
+ * @btfmslim: slimbus slave device data pointer.
+ * Returns:
+ * 0: Success
+ * else: Fail
+ */
+int btfm_slim_hw_init(struct btfmslim *btfmslim);
+
+/**
+ * btfm_slim_hw_deinit: Deinitialize slimbus slave device
+ * @btfmslim: slimbus slave device data pointer.
+ * Returns:
+ * 0: Success
+ * else: Fail
+ */
+int btfm_slim_hw_deinit(struct btfmslim *btfmslim);
+
+/**
+ * btfm_slim_write: write value to pgd or ifd device
+ * @btfmslim: slimbus slave device data pointer.
+ * @reg: slimbus slave register address
+ * @bytes: length of data
+ * @src: data pointer to write
+ * @pgd: selection for device: either PGD or IFD
+ * Returns:
+ * -EINVAL
+ * -ETIMEDOUT
+ * -ENOMEM
+ */
+int btfm_slim_write(struct btfmslim *btfmslim,
+	uint16_t reg, int bytes, void *src, uint8_t pgd);
+
+
+
+/**
+ * btfm_slim_read: read value from pgd or ifd device
+ * @btfmslim: slimbus slave device data pointer.
+ * @reg: slimbus slave register address
+ * @bytes: length of data
+ * @dest: data pointer to read
+ * @pgd: selection for device: either PGD or IFD
+ * Returns:
+ * -EINVAL
+ * -ETIMEDOUT
+ * -ENOMEM
+ */
+int btfm_slim_read(struct btfmslim *btfmslim,
+	uint16_t reg, int bytes, void *dest, uint8_t pgd);
+
+
+/**
+ * btfm_slim_enable_ch: enable channel for slimbus slave port
+ * @btfmslim: slimbus slave device data pointer.
+ * @ch: slimbus slave channel pointer
+ * @rxport: rxport (1) or txport (0)
+ * @rates: sample rate in Hz (sets the channel rate multiplier)
+ * @grp: nonzero to group two channels under one handle
+ * @nchan: number of channels to enable
+ * Returns:
+ * -EINVAL
+ * -ETIMEDOUT
+ * -ENOMEM
+ */
+int btfm_slim_enable_ch(struct btfmslim *btfmslim,
+	struct btfmslim_ch *ch, uint8_t rxport, uint32_t rates,
+	uint8_t grp, uint8_t nchan);
+
+/**
+ * btfm_slim_disable_ch: disable channel for slimbus slave port
+ * @btfmslim: slimbus slave device data pointer.
+ * @ch: slimbus slave channel pointer
+ * @rxport: rxport (1) or txport (0)
+ * @grp: nonzero when the channels were grouped at enable time
+ * @nchan: number of channels to disable
+ * Returns:
+ * -EINVAL
+ * -ETIMEDOUT
+ * -ENOMEM
+ */
+int btfm_slim_disable_ch(struct btfmslim *btfmslim,
+	struct btfmslim_ch *ch, uint8_t rxport, uint8_t grp, uint8_t nchan);
+
+/**
+ * btfm_slim_register_codec: Register codec driver in slimbus device node
+ * @dev: device node
+ * Returns:
+ * -ENOMEM
+ * 0
+ */
+int btfm_slim_register_codec(struct device *dev);
+#endif /* BTFM_SLIM_H */
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
new file mode 100644
index 0000000..86760cd
--- /dev/null
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/debugfs.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <btfm_slim.h>
+
+/* ASoC register-I/O stubs: the BTFM codec exposes no register map to ASoC
+ * (all slimbus slave access goes through btfm_slim_read/write), so these
+ * accept and discard every access.
+ */
+static int btfm_slim_codec_write(struct snd_soc_codec *codec, unsigned int reg,
+	unsigned int value)
+{
+	return 0;
+}
+
+static unsigned int btfm_slim_codec_read(struct snd_soc_codec *codec,
+				unsigned int reg)
+{
+	return 0;
+}
+
+/* Codec probe/remove: no per-codec state to set up or tear down */
+static int btfm_slim_codec_probe(struct snd_soc_codec *codec)
+{
+	return 0;
+}
+
+static int btfm_slim_codec_remove(struct snd_soc_codec *codec)
+{
+	return 0;
+}
+
+/* DAI startup: initialize the slimbus slave hardware before any stream
+ * activity. Returns whatever btfm_slim_hw_init() returns.
+ * NOTE(review): assumes dai->dev->platform_data was set to the btfmslim
+ * context by the probe path -- not visible here, confirm non-NULL.
+ */
+static int btfm_slim_dai_startup(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	int ret;
+	struct btfmslim *btfmslim = dai->dev->platform_data;
+
+	BTFMSLIM_DBG("substream = %s  stream = %d",
+		 substream->name, substream->stream);
+	ret = btfm_slim_hw_init(btfmslim);
+	return ret;
+}
+
+/* DAI shutdown: counterpart of btfm_slim_dai_startup(); releases whatever
+ * btfm_slim_hw_init() acquired. Return value of deinit is ignored by the
+ * ASoC shutdown contract (void).
+ */
+static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct btfmslim *btfmslim = dai->dev->platform_data;
+
+	BTFMSLIM_DBG("substream = %s  stream = %d",
+		 substream->name, substream->stream);
+	btfm_slim_hw_deinit(btfmslim);
+}
+
+/* DAI hw_params: only logs the negotiated rate/channels. The actual
+ * slimbus channel setup is deferred to the prepare callback, which reads
+ * dai->rate instead of the params here.
+ */
+static int btfm_slim_dai_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	BTFMSLIM_DBG("dai_name = %s DAI-ID %x rate %d num_ch %d",
+		dai->name, dai->id, params_rate(params),
+		params_channels(params));
+
+	return 0;
+}
+
+/* DAI prepare: look up the channel-table entry for this DAI and enable
+ * the matching slimbus port/channel. FM Tx is the only grouped (stereo,
+ * two-channel) case.
+ * NOTE(review): only referenced via btfmslim_dai_ops below -- looks like
+ * it could be static; confirm nothing externs it.
+ */
+int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
+	struct snd_soc_dai *dai)
+{
+	int i, ret = -EINVAL;
+	struct btfmslim *btfmslim = dai->dev->platform_data;
+	struct btfmslim_ch *ch;
+	uint8_t rxport, grp = false, nchan = 1;
+
+	BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+		dai->id, dai->rate);
+
+	switch (dai->id) {
+	case BTFM_FM_SLIM_TX:
+		/* FM capture is a two-channel group */
+		grp = true; nchan = 2;
+		ch = btfmslim->tx_chs;
+		rxport = 0;
+		break;
+	case BTFM_BT_SCO_SLIM_TX:
+		ch = btfmslim->tx_chs;
+		rxport = 0;
+		break;
+	case BTFM_BT_SCO_A2DP_SLIM_RX:
+	case BTFM_BT_SPLIT_A2DP_SLIM_RX:
+		ch = btfmslim->rx_chs;
+		rxport = 1;
+		break;
+	case BTFM_SLIM_NUM_CODEC_DAIS:
+	default:
+		BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
+		return ret;
+	}
+
+	/* Search for dai->id matched port handler */
+	for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) &&
+		(ch->id != BTFM_SLIM_NUM_CODEC_DAIS) &&
+		(ch->id != dai->id); ch++, i++)
+		;
+
+	/* Tables are terminated by id == BTFM_SLIM_NUM_CODEC_DAIS /
+	 * port == BTFM_SLIM_PGD_PORT_LAST; hitting either means no match.
+	 */
+	if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
+		(ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
+		BTFMSLIM_ERR("ch is invalid!!");
+		return ret;
+	}
+
+	ret = btfm_slim_enable_ch(btfmslim, ch, rxport, dai->rate, grp, nchan);
+	return ret;
+}
+
+/* DAI hw_free: mirror of btfm_slim_dai_prepare() -- resolve the same
+ * channel-table entry and disable the slimbus port/channel.
+ * NOTE(review): only referenced via btfmslim_dai_ops below -- looks like
+ * it could be static; confirm nothing externs it.
+ */
+int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
+	struct snd_soc_dai *dai)
+{
+	int i, ret = -EINVAL;
+	struct btfmslim *btfmslim = dai->dev->platform_data;
+	struct btfmslim_ch *ch;
+	uint8_t rxport, grp = false, nchan = 1;
+
+	BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+		dai->id, dai->rate);
+
+	switch (dai->id) {
+	case BTFM_FM_SLIM_TX:
+		/* FM capture is a two-channel group */
+		grp = true; nchan = 2;
+		ch = btfmslim->tx_chs;
+		rxport = 0;
+		break;
+	case BTFM_BT_SCO_SLIM_TX:
+		ch = btfmslim->tx_chs;
+		rxport = 0;
+		break;
+	case BTFM_BT_SCO_A2DP_SLIM_RX:
+	case BTFM_BT_SPLIT_A2DP_SLIM_RX:
+		ch = btfmslim->rx_chs;
+		rxport = 1;
+		break;
+	case BTFM_SLIM_NUM_CODEC_DAIS:
+	default:
+		BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
+		return ret;
+	}
+
+	/* Search for dai->id matched port handler */
+	for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) &&
+		(ch->id != BTFM_SLIM_NUM_CODEC_DAIS) &&
+		(ch->id != dai->id); ch++, i++)
+		;
+
+	/* Sentinel entry (LAST port / NUM_CODEC_DAIS id) means no match */
+	if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
+		(ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
+		BTFMSLIM_ERR("ch is invalid!!");
+		return ret;
+	}
+	ret = btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+	return ret;
+}
+
+/* This function will be called once during boot up */
+/* Store the machine-driver-assigned slimbus channel numbers in the Rx/Tx
+ * channel tables and resolve each to a channel handle via slim_query_ch().
+ * NOTE(review): ret stays -EINVAL if both loops run zero iterations (e.g.
+ * rx_num == tx_num == 0) -- confirm callers never pass empty maps.
+ */
+static int btfm_slim_dai_set_channel_map(struct snd_soc_dai *dai,
+				unsigned int tx_num, unsigned int *tx_slot,
+				unsigned int rx_num, unsigned int *rx_slot)
+{
+	int ret = -EINVAL, i;
+	struct btfmslim *btfmslim = dai->dev->platform_data;
+	struct btfmslim_ch *rx_chs;
+	struct btfmslim_ch *tx_chs;
+
+	BTFMSLIM_DBG("");
+
+	if (!btfmslim)
+		return ret;
+
+	rx_chs = btfmslim->rx_chs;
+	tx_chs = btfmslim->tx_chs;
+
+	if (!rx_chs || !tx_chs)
+		return ret;
+
+	BTFMSLIM_DBG("Rx: id\tname\tport\thdl\tch\tch_hdl");
+	for (i = 0; (rx_chs->port != BTFM_SLIM_PGD_PORT_LAST) && (i < rx_num);
+		i++, rx_chs++) {
+		/* Set Rx Channel number from machine driver and
+		 * get channel handler from slimbus driver
+		 */
+		rx_chs->ch = *(uint8_t *)(rx_slot + i);
+		ret = slim_query_ch(btfmslim->slim_pgd, rx_chs->ch,
+			&rx_chs->ch_hdl);
+		if (ret < 0) {
+			BTFMSLIM_ERR("slim_query_ch failure ch#%d - ret[%d]",
+				rx_chs->ch, ret);
+			goto error;
+		}
+		BTFMSLIM_DBG("    %d\t%s\t%d\t%x\t%d\t%x", rx_chs->id,
+			rx_chs->name, rx_chs->port, rx_chs->port_hdl,
+			rx_chs->ch, rx_chs->ch_hdl);
+	}
+
+	BTFMSLIM_DBG("Tx: id\tname\tport\thdl\tch\tch_hdl");
+	for (i = 0; (tx_chs->port != BTFM_SLIM_PGD_PORT_LAST) && (i < tx_num);
+		i++, tx_chs++) {
+		/* Set Tx Channel number from machine driver and
+		 * get channel handler from slimbus driver
+		 */
+		tx_chs->ch = *(uint8_t *)(tx_slot + i);
+		ret = slim_query_ch(btfmslim->slim_pgd, tx_chs->ch,
+			&tx_chs->ch_hdl);
+		if (ret < 0) {
+			BTFMSLIM_ERR("slim_query_ch failure ch#%d - ret[%d]",
+				tx_chs->ch, ret);
+			goto error;
+		}
+		BTFMSLIM_DBG("    %d\t%s\t%d\t%x\t%d\t%x", tx_chs->id,
+			tx_chs->name, tx_chs->port, tx_chs->port_hdl,
+			tx_chs->ch, tx_chs->ch_hdl);
+	}
+
+error:
+	return ret;
+}
+
+/* Report the slimbus channel numbers backing this DAI to the machine
+ * driver. Writes up to 'num' channel numbers into the slot array for the
+ * DAI's direction and zeroes the opposite direction's outputs (only when
+ * those pointers are provided -- callers interested in one direction may
+ * legitimately pass NULL for the other, so never dereference unchecked).
+ * Returns 0 on success, -EINVAL on bad arguments or missing channel table.
+ */
+static int btfm_slim_dai_get_channel_map(struct snd_soc_dai *dai,
+				 unsigned int *tx_num, unsigned int *tx_slot,
+				 unsigned int *rx_num, unsigned int *rx_slot)
+{
+	int i, ret = -EINVAL, j = 0, num = 1;
+	unsigned int *slot = NULL;
+	struct btfmslim *btfmslim = dai->dev->platform_data;
+	struct btfmslim_ch *ch = NULL;
+
+	if (!btfmslim)
+		return ret;
+
+	switch (dai->id) {
+	case BTFM_FM_SLIM_TX:
+		num = 2;	/* FM capture is stereo: two Tx channels */
+		/* fallthrough - FM shares the Tx handling below */
+	case BTFM_BT_SCO_SLIM_TX:
+		if (!tx_slot || !tx_num) {
+			BTFMSLIM_ERR("Invalid tx_slot %p or tx_num %p",
+				tx_slot, tx_num);
+			return -EINVAL;
+		}
+		ch = btfmslim->tx_chs;
+		if (!ch)
+			return -EINVAL;
+		slot = tx_slot;
+		if (rx_slot)
+			*rx_slot = 0;
+		*tx_num = num;
+		if (rx_num)
+			*rx_num = 0;
+		break;
+	case BTFM_BT_SCO_A2DP_SLIM_RX:
+	case BTFM_BT_SPLIT_A2DP_SLIM_RX:
+		if (!rx_slot || !rx_num) {
+			BTFMSLIM_ERR("Invalid rx_slot %p or rx_num %p",
+				 rx_slot, rx_num);
+			return -EINVAL;
+		}
+		ch = btfmslim->rx_chs;
+		if (!ch)
+			return -EINVAL;
+		slot = rx_slot;
+		if (tx_slot)
+			*tx_slot = 0;
+		if (tx_num)
+			*tx_num = 0;
+		*rx_num = num;
+		break;
+	}
+
+	do {
+		if (!ch)
+			return -EINVAL;
+		/* Find the table entry matching dai->id; tables end with a
+		 * sentinel whose id is BTFM_SLIM_NUM_CODEC_DAIS.
+		 */
+		for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) && (ch->id !=
+			BTFM_SLIM_NUM_CODEC_DAIS) && (ch->id != dai->id);
+			ch++, i++)
+			;
+
+		if (ch->id == BTFM_SLIM_NUM_CODEC_DAIS ||
+			i == BTFM_SLIM_NUM_CODEC_DAIS) {
+			BTFMSLIM_ERR(
+				"No channel has been allocated for dai (%d)",
+				dai->id);
+			return -EINVAL;
+		}
+		if (!slot)
+			return -EINVAL;
+		*(slot + j) = ch->ch;
+		BTFMSLIM_DBG("id:%d, port:%d, ch:%d, slot: %d", ch->id,
+			ch->port, ch->ch, *(slot + j));
+
+		/* In case it has multiple channels */
+		if (++j < num)
+			ch++;
+	} while (j < num);
+
+	return 0;
+}
+
+/* DAI callbacks shared by every BTFM DAI below; file-local, so static */
+static struct snd_soc_dai_ops btfmslim_dai_ops = {
+	.startup = btfm_slim_dai_startup,
+	.shutdown = btfm_slim_dai_shutdown,
+	.hw_params = btfm_slim_dai_hw_params,
+	.prepare = btfm_slim_dai_prepare,
+	.hw_free = btfm_slim_dai_hw_free,
+	.set_channel_map = btfm_slim_dai_set_channel_map,
+	.get_channel_map = btfm_slim_dai_get_channel_map,
+};
+
+/* DAI descriptors registered with ASoC; file-local, so static */
+static struct snd_soc_dai_driver btfmslim_dai[] = {
+	{	/* FM Audio data multiple channel  : FM -> qdsp */
+		.name = "btfm_fm_slim_tx",
+		.id = BTFM_FM_SLIM_TX,
+		.capture = {
+			.stream_name = "FM TX Capture",
+			.rates = SNDRV_PCM_RATE_48000, /* 48 KHz */
+			.formats = SNDRV_PCM_FMTBIT_S16_LE, /* 16 bits */
+			.rate_max = 48000,
+			.rate_min = 48000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &btfmslim_dai_ops,
+	},
+	{	/* Bluetooth SCO voice uplink: bt -> modem */
+		.name = "btfm_bt_sco_slim_tx",
+		.id = BTFM_BT_SCO_SLIM_TX,
+		.capture = {
+			.stream_name = "SCO TX Capture",
+			/* 8 KHz or 16 KHz */
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE, /* 16 bits */
+			.rate_max = 16000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 1,
+		},
+		.ops = &btfmslim_dai_ops,
+	},
+	{	/* Bluetooth SCO voice downlink: modem -> bt or A2DP Playback */
+		.name = "btfm_bt_sco_a2dp_slim_rx",
+		.id = BTFM_BT_SCO_A2DP_SLIM_RX,
+		.playback = {
+			.stream_name = "SCO A2DP RX Playback",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000
+				| SNDRV_PCM_RATE_48000, /* 8 or 16 or 48 Khz*/
+			.formats = SNDRV_PCM_FMTBIT_S16_LE, /* 16 bits */
+			.rate_max = 48000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 1,
+		},
+		.ops = &btfmslim_dai_ops,
+	},
+	{	/* Bluetooth Split A2DP data: qdsp -> bt */
+		.name = "btfm_bt_split_a2dp_slim_rx",
+		.id = BTFM_BT_SPLIT_A2DP_SLIM_RX,
+		.playback = {
+			.stream_name = "SPLIT A2DP Playback",
+			.rates = SNDRV_PCM_RATE_48000, /* 48 KHz */
+			.formats = SNDRV_PCM_FMTBIT_S16_LE, /* 16 bits */
+			.rate_max = 48000,
+			.rate_min = 48000,
+			.channels_min = 1,
+			.channels_max = 1,
+		},
+		.ops = &btfmslim_dai_ops,
+	},
+};
+
+/* Codec descriptor passed to snd_soc_register_codec(); file-local */
+static struct snd_soc_codec_driver btfmslim_codec = {
+	.probe	= btfm_slim_codec_probe,
+	.remove	= btfm_slim_codec_remove,
+	.read		= btfm_slim_codec_read,
+	.write	= btfm_slim_codec_write,
+};
+
+/* Register the BTFM codec and its DAIs with ASoC on the given device.
+ * Declared in btfm_slim.h; returns 0 on success or the negative error
+ * from snd_soc_register_codec().
+ */
+int btfm_slim_register_codec(struct device *dev)
+{
+	int ret = 0;
+
+	BTFMSLIM_DBG("");
+	/* Register Codec driver */
+	ret = snd_soc_register_codec(dev, &btfmslim_codec,
+		btfmslim_dai, ARRAY_SIZE(btfmslim_dai));
+
+	if (ret)
+		BTFMSLIM_ERR("failed to register codec (%d)", ret);
+
+	return ret;
+}
+
+MODULE_DESCRIPTION("BTFM Slimbus Codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
new file mode 100644
index 0000000..c2d5b7b
--- /dev/null
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -0,0 +1,130 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slimbus/slimbus.h>
+#include <btfm_slim.h>
+#include <btfm_slim_wcn3990.h>
+
+/* WCN3990 Port assignment */
+/* WCN3990 Port assignment */
+/* These tables are referenced via extern in btfm_slim_wcn3990.h (the
+ * SLIM_SLAVE_RXPORT/TXPORT hooks), so they must stay non-static. Each
+ * table ends with a sentinel entry (id BTFM_SLIM_NUM_CODEC_DAIS, port
+ * BTFM_SLIM_PGD_PORT_LAST) that the lookup loops key off.
+ */
+struct btfmslim_ch wcn3990_rxport[] = {
+	{.id = BTFM_BT_SCO_A2DP_SLIM_RX, .name = "SCO_A2P_Rx",
+	.port = CHRK_SB_PGD_PORT_RX_SCO},
+	{.id = BTFM_BT_SPLIT_A2DP_SLIM_RX, .name = "A2P_Rx",
+	.port = CHRK_SB_PGD_PORT_RX_A2P},
+	{.id = BTFM_SLIM_NUM_CODEC_DAIS, .name = "",
+	.port = BTFM_SLIM_PGD_PORT_LAST},
+};
+
+struct btfmslim_ch wcn3990_txport[] = {
+	{.id = BTFM_FM_SLIM_TX, .name = "FM_Tx1",
+	.port = CHRK_SB_PGD_PORT_TX1_FM},
+	{.id = BTFM_FM_SLIM_TX, .name = "FM_Tx2",
+	.port = CHRK_SB_PGD_PORT_TX2_FM},
+	{.id = BTFM_BT_SCO_SLIM_TX, .name = "SCO_Tx",
+	.port = CHRK_SB_PGD_PORT_TX_SCO},
+	{.id = BTFM_SLIM_NUM_CODEC_DAIS, .name = "",
+	.port = BTFM_SLIM_PGD_PORT_LAST},
+};
+
+/* Function description */
+int btfm_slim_chrk_hw_init(struct btfmslim *btfmslim)
+{
+	int ret = 0;
+	uint8_t reg_val;
+
+	BTFMSLIM_DBG("");
+
+	if (!btfmslim)
+		return -EINVAL;
+
+	/* Get SB_SLAVE_HW_REV_MSB value*/
+	ret = btfm_slim_read(btfmslim, CHRK_SB_SLAVE_HW_REV_MSB,  1,
+		&reg_val, IFD);
+	if (ret) {
+		BTFMSLIM_ERR("failed to read (%d)", ret);
+		goto error;
+	}
+	BTFMSLIM_DBG("Major Rev: 0x%x, Minor Rev: 0x%x",
+		(reg_val & 0xF0) >> 4, (reg_val & 0x0F));
+
+	/* Get SB_SLAVE_HW_REV_LSB value*/
+	ret = btfm_slim_read(btfmslim, CHRK_SB_SLAVE_HW_REV_LSB,  1,
+		&reg_val, IFD);
+	if (ret) {
+		BTFMSLIM_ERR("failed to read (%d)", ret);
+		goto error;
+	}
+	BTFMSLIM_DBG("Step Rev: 0x%x", reg_val);
+
+error:
+	return ret;
+}
+
+
+/* Enable or disable a WCN3990 PGD port. For Tx ports this also programs
+ * FM multi-channel grouping (TX1+TX2) and overrun/underrun auto-recovery
+ * before touching the port-enable register. Returns 0 on success or a
+ * btfm_slim_write() error.
+ */
+int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
+	uint8_t rxport, uint8_t enable)
+{
+	int ret = 0;
+	uint8_t reg_val = 0;
+	uint16_t reg;
+
+	BTFMSLIM_DBG("enable(%d)", enable);
+	if (rxport) {
+		/* Port enable */
+		/* Rx CFGN registers are indexed from the first Rx port
+		 * (0x10 == CHRK_SB_PGD_PORT_RX_SCO), hence the offset.
+		 */
+		reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
+	} else { /* txport */
+		/* Multiple Channel Setting - only FM Tx will be multiple
+		 * channel
+		 */
+		if (enable && (port_num == CHRK_SB_PGD_PORT_TX1_FM ||
+			port_num == CHRK_SB_PGD_PORT_TX2_FM)) {
+
+			/* Group both FM Tx ports into one multi-channel set */
+			reg_val = (0x1 << CHRK_SB_PGD_PORT_TX1_FM) |
+				(0x1 << CHRK_SB_PGD_PORT_TX2_FM);
+			reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
+			ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
+			if (ret) {
+				BTFMSLIM_ERR("failed to write (%d)", ret);
+				goto error;
+			}
+		}
+
+		/* Enable Tx port hw auto recovery for underrun or
+		 * overrun error
+		 */
+		reg_val = (enable) ? (CHRK_ENABLE_OVERRUN_AUTO_RECOVERY |
+				CHRK_ENABLE_UNDERRUN_AUTO_RECOVERY) : 0x0;
+
+		ret = btfm_slim_write(btfmslim,
+			CHRK_SB_PGD_PORT_TX_OR_UR_CFGN(port_num), 1,
+			&reg_val, IFD);
+		if (ret) {
+			BTFMSLIM_ERR("failed to write (%d)", ret);
+			goto error;
+		}
+
+		/* Port enable */
+		reg = CHRK_SB_PGD_PORT_TX_CFGN(port_num);
+	}
+
+	if (enable)
+		/* Set water mark to 1 and enable the port */
+		reg_val = CHRK_SB_PGD_PORT_ENABLE | CHRK_SB_PGD_PORT_WM_LB;
+	else
+		reg_val = CHRK_SB_PGD_PORT_DISABLE;
+
+	ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
+	if (ret)
+		BTFMSLIM_ERR("failed to write (%d)", ret);
+
+error:
+	return ret;
+}
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.h b/drivers/bluetooth/btfm_slim_wcn3990.h
new file mode 100644
index 0000000..6bbdb6b
--- /dev/null
+++ b/drivers/bluetooth/btfm_slim_wcn3990.h
@@ -0,0 +1,140 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef BTFM_SLIM_WCN3990_H
+#define BTFM_SLIM_WCN3990_H
+//#ifdef CONFIG_BTFM_SLIM_WCN3990
+#include <btfm_slim.h>
+
+/* Registers Address */
+/* Function-like macros parenthesize their argument so expression
+ * arguments (e.g. port_num - 0x10) expand with the intended precedence.
+ */
+#define CHRK_SB_COMP_TEST			0x00000000
+#define CHRK_SB_SLAVE_HW_REV_MSB		0x00000001
+#define CHRK_SB_SLAVE_HW_REV_LSB		0x00000002
+#define CHRK_SB_DEBUG_FEATURES			0x00000005
+#define CHRK_SB_INTF_INT_EN			0x00000010
+#define CHRK_SB_INTF_INT_STATUS			0x00000011
+#define CHRK_SB_INTF_INT_CLR			0x00000012
+#define CHRK_SB_FRM_CFG				0x00000013
+#define CHRK_SB_FRM_STATUS			0x00000014
+#define CHRK_SB_FRM_INT_EN			0x00000015
+#define CHRK_SB_FRM_INT_STATUS			0x00000016
+#define CHRK_SB_FRM_INT_CLR			0x00000017
+#define CHRK_SB_FRM_WAKEUP			0x00000018
+#define CHRK_SB_FRM_CLKCTL_DONE			0x00000019
+#define CHRK_SB_FRM_IE_STATUS			0x0000001A
+#define CHRK_SB_FRM_VE_STATUS			0x0000001B
+#define CHRK_SB_PGD_TX_CFG_STATUS		0x00000020
+#define CHRK_SB_PGD_RX_CFG_STATUS		0x00000021
+#define CHRK_SB_PGD_DEV_INT_EN			0x00000022
+#define CHRK_SB_PGD_DEV_INT_STATUS		0x00000023
+#define CHRK_SB_PGD_DEV_INT_CLR			0x00000024
+#define CHRK_SB_PGD_PORT_INT_EN_RX_0		0x00000030
+#define CHRK_SB_PGD_PORT_INT_EN_RX_1		0x00000031
+#define CHRK_SB_PGD_PORT_INT_EN_TX_0		0x00000032
+#define CHRK_SB_PGD_PORT_INT_EN_TX_1		0x00000033
+#define CHRK_SB_PGD_PORT_INT_STATUS_RX_0	0x00000034
+#define CHRK_SB_PGD_PORT_INT_STATUS_RX_1	0x00000035
+#define CHRK_SB_PGD_PORT_INT_STATUS_TX_0	0x00000036
+#define CHRK_SB_PGD_PORT_INT_STATUS_TX_1	0x00000037
+#define CHRK_SB_PGD_PORT_INT_CLR_RX_0		0x00000038
+#define CHRK_SB_PGD_PORT_INT_CLR_RX_1		0x00000039
+#define CHRK_SB_PGD_PORT_INT_CLR_TX_0		0x0000003A
+#define CHRK_SB_PGD_PORT_INT_CLR_TX_1		0x0000003B
+#define CHRK_SB_PGD_PORT_RX_CFGN(n)		(0x00000040 + (n))
+#define CHRK_SB_PGD_PORT_TX_CFGN(n)		(0x00000050 + (n))
+#define CHRK_SB_PGD_PORT_INT_RX_SOURCEN(n)	(0x00000060 + (n))
+#define CHRK_SB_PGD_PORT_INT_TX_SOURCEN(n)	(0x00000070 + (n))
+#define CHRK_SB_PGD_PORT_RX_STATUSN(n)		(0x00000080 + (n))
+#define CHRK_SB_PGD_PORT_TX_STATUSN(n)		(0x00000090 + (n))
+#define CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(n)	(0x00000100 + 0x4 * (n))
+#define CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_1(n)	(0x00000101 + 0x4 * (n))
+#define CHRK_SB_PGD_RX_PORTn_MULTI_CHNL_0(n)	(0x00000180 + 0x4 * (n))
+#define CHRK_SB_PGD_RX_PORTn_MULTI_CHNL_1(n)	(0x00000181 + 0x4 * (n))
+#define CHRK_SB_PGD_PORT_TX_OR_UR_CFGN(n)	(0x000001F0 + (n))
+
+/* Register Bit Setting */
+#define CHRK_ENABLE_OVERRUN_AUTO_RECOVERY	(0x1 << 1)
+#define CHRK_ENABLE_UNDERRUN_AUTO_RECOVERY	(0x1 << 0)
+#define CHRK_SB_PGD_PORT_ENABLE			(0x1 << 0)
+#define CHRK_SB_PGD_PORT_DISABLE		(0x0 << 0)
+#define CHRK_SB_PGD_PORT_WM_L1			(0x1 << 1)
+#define CHRK_SB_PGD_PORT_WM_L2			(0x2 << 1)
+#define CHRK_SB_PGD_PORT_WM_L3			(0x3 << 1)
+#define CHRK_SB_PGD_PORT_WM_LB			(0xB << 1)
+
+#define CHRK_SB_PGD_PORT_RX_NUM			16
+#define CHRK_SB_PGD_PORT_TX_NUM			16
+
+/* PGD Port Map */
+#define CHRK_SB_PGD_PORT_TX_SCO			0
+#define CHRK_SB_PGD_PORT_TX1_FM			1
+#define CHRK_SB_PGD_PORT_TX2_FM			2
+#define CHRK_SB_PGD_PORT_RX_SCO			16
+#define CHRK_SB_PGD_PORT_RX_A2P			17
+
+
+/* Function Prototype */
+
+/*
+ * btfm_slim_chrk_hw_init: Initialize wcn3990 specific slimbus slave device
+ * @btfmslim: slimbus slave device data pointer.
+ * Returns:
+ * 0: Success
+ * else: Fail
+ */
+int btfm_slim_chrk_hw_init(struct btfmslim *btfmslim);
+
+/*
+ * btfm_slim_chrk_enable_rxport: Enable wcn3990 Rx port by given port number
+ * @btfmslim: slimbus slave device data pointer.
+ * @portNum: slimbus slave port number to enable
+ * @rxport: rxport or txport
+ * @enable: enable port or disable port
+ * Returns:
+ * 0: Success
+ * else: Fail
+ */
+int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t portNum,
+	uint8_t rxport, uint8_t enable);
+
+/* Specific defines for wcn3990 slimbus device */
+#define WCN3990_SLIM_REG_OFFSET		0x0800
+
+#ifdef SLIM_SLAVE_REG_OFFSET
+#undef SLIM_SLAVE_REG_OFFSET
+#define SLIM_SLAVE_REG_OFFSET		WCN3990_SLIM_REG_OFFSET
+#endif
+
+/* Assign vendor specific function */
+extern struct btfmslim_ch wcn3990_txport[];
+extern struct btfmslim_ch wcn3990_rxport[];
+
+#ifdef SLIM_SLAVE_RXPORT
+#undef SLIM_SLAVE_RXPORT
+#define SLIM_SLAVE_RXPORT (&wcn3990_rxport[0])
+#endif
+
+#ifdef SLIM_SLAVE_TXPORT
+#undef SLIM_SLAVE_TXPORT
+#define SLIM_SLAVE_TXPORT (&wcn3990_txport[0])
+#endif
+
+#ifdef SLIM_SLAVE_INIT
+#undef SLIM_SLAVE_INIT
+#define SLIM_SLAVE_INIT btfm_slim_chrk_hw_init
+#endif
+
+#ifdef SLIM_SLAVE_PORT_EN
+#undef SLIM_SLAVE_PORT_EN
+#define SLIM_SLAVE_PORT_EN btfm_slim_chrk_enable_port
+#endif
+//#endif /* CONFIG_BTFM_WCN3990 */
+#endif /* BTFM_SLIM_WCN3990_H */
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 1238349..83db1416 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -460,6 +460,26 @@
 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
 
 /*
+ * Aggregate the rate of all child nodes which are enabled and exclude the
+ * child node which requests for clk_aggregate_rate.
+ */
+unsigned long clk_aggregate_rate(struct clk_hw *hw,
+					const struct clk_core *parent)
+{
+	struct clk_core *child;
+	unsigned long aggre_rate = 0;
+
+	/* Highest rate among the parent's enabled children, skipping the
+	 * requesting clock itself (matched by its init name).
+	 */
+	hlist_for_each_entry(child, &parent->children, child_node) {
+		if (child->enable_count &&
+				strcmp(child->name, hw->init->name))
+			aggre_rate = max(child->rate, aggre_rate);
+	}
+
+	return aggre_rate;
+}
+EXPORT_SYMBOL_GPL(clk_aggregate_rate);
+
+/*
  * Helper for finding best parent to provide a given frequency. This can be used
  * directly as a determine_rate callback (e.g. for a mux), or from a more
  * complex clock that may combine a mux with other operations.
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index e864e8b..f734b76 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -151,20 +151,20 @@
 	  Say Y if you want to support multimedia devices such as display,
 	  graphics, video encode/decode, camera, etc.
 
-config MSM_GCC_SKUNK
-	tristate "MSMSKUNK Global Clock Controller"
+config MSM_GCC_SDM845
+	tristate "SDM845 Global Clock Controller"
 	depends on COMMON_CLK_QCOM
 	help
 	  Support for the global clock controller on Qualcomm Technologies, Inc
-	  MSMskunk devices.
+	  sdm845 devices.
 	  Say Y if you want to use peripheral devices such as UART, SPI,
 	  i2c, USB, UFS, SD/eMMC, PCIe, etc.
 
-config MSM_VIDEOCC_SKUNK
-	tristate "MSMSKUNK Video Clock Controller"
+config MSM_VIDEOCC_SDM845
+	tristate "SDM845 Video Clock Controller"
 	depends on COMMON_CLK_QCOM
 	help
 	  Support for the video clock controller on Qualcomm Technologies, Inc
-	  MSMskunk devices.
+	  sdm845 devices.
 	  Say Y if you want to support video devices and functionality such as
 	  video encode/decode.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 5ab4129..62bdf21 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -9,7 +9,7 @@
 clk-qcom-y += clk-branch.o
 clk-qcom-y += clk-regmap-divider.o
 clk-qcom-y += clk-regmap-mux.o
-clk-qcom-y += reset.o
+clk-qcom-y += reset.o clk-voter.o
 clk-qcom-y += clk-dummy.o
 clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o gdsc-regulator.o
 
@@ -26,8 +26,8 @@
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
 obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
-obj-$(CONFIG_MSM_GCC_SKUNK) += gcc-msmskunk.o
-obj-$(CONFIG_MSM_VIDEOCC_SKUNK) += videocc-msmskunk.o
+obj-$(CONFIG_MSM_GCC_SDM845) += gcc-sdm845.o
+obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
 obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
diff --git a/drivers/clk/qcom/clk-voter.c b/drivers/clk/qcom/clk-voter.c
new file mode 100644
index 0000000..b0c7e4a
--- /dev/null
+++ b/drivers/clk/qcom/clk-voter.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+
+#include "clk-voter.h"
+
+/*
+ * Record this voter's requested rate. When the voter is enabled, the
+ * aggregate of all sibling votes is recomputed and the parent clock is
+ * only re-rated if the winning rate actually changes. Branch voters
+ * carry no rate vote and return immediately.
+ */
+static int voter_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	int ret = 0;
+	struct clk_voter *v = to_clk_voter(hw);
+	unsigned long cur_rate, new_rate, other_rate = 0;
+
+	if (v->is_branch)
+		return ret;
+
+	if (v->enabled) {
+		struct clk_hw *parent = clk_hw_get_parent(hw);
+
+		if (!parent)
+			return -EINVAL;
+
+		/*
+		 * Get the aggregate rate without this clock's vote and update
+		 * if the new rate is different than the current rate.
+		 */
+		other_rate = clk_aggregate_rate(hw, parent->core);
+
+		cur_rate = max(other_rate, clk_get_rate(hw->clk));
+		new_rate = max(other_rate, rate);
+
+		if (new_rate != cur_rate) {
+			ret = clk_set_rate(parent->clk, new_rate);
+			if (ret)
+				return ret;
+		}
+	}
+	/* Remember the vote even while disabled; prepare() will apply it */
+	v->rate = rate;
+
+	return ret;
+}
+
+/* Apply this voter's stored rate vote when it becomes enabled: raise the
+ * parent only if this vote exceeds the current aggregate of the other
+ * enabled siblings. Branch voters just flip their enabled flag.
+ * NOTE(review): a parent is required even for branch voters (-EINVAL
+ * without one) -- confirm branch voters are always parented.
+ */
+static int voter_clk_prepare(struct clk_hw *hw)
+{
+	int ret = 0;
+	unsigned long cur_rate;
+	struct clk_hw *parent;
+	struct clk_voter *v = to_clk_voter(hw);
+
+	parent = clk_hw_get_parent(hw);
+	if (!parent)
+		return -EINVAL;
+
+	if (v->is_branch) {
+		v->enabled = true;
+		return ret;
+	}
+
+	/*
+	 * Increase the rate if this clock is voting for a higher rate
+	 * than the current rate.
+	 */
+	cur_rate = clk_aggregate_rate(hw, parent->core);
+
+	if (v->rate > cur_rate) {
+		ret = clk_set_rate(parent->clk, v->rate);
+		if (ret)
+			return ret;
+	}
+	v->enabled = true;
+
+	return ret;
+}
+
+/* Withdraw this voter's rate vote: clear the enabled flag first so the
+ * aggregate below no longer counts this clock, then lower the parent if
+ * this voter alone was holding the highest rate. Errors from
+ * clk_set_rate() cannot be reported (unprepare is void).
+ */
+static void voter_clk_unprepare(struct clk_hw *hw)
+{
+	unsigned long cur_rate, new_rate;
+	struct clk_hw *parent;
+	struct clk_voter *v = to_clk_voter(hw);
+
+
+	parent = clk_hw_get_parent(hw);
+	if (!parent)
+		return;
+	/*
+	 * Decrease the rate if this clock was the only one voting for
+	 * the highest rate.
+	 */
+	v->enabled = false;
+	if (v->is_branch)
+		return;
+
+	new_rate = clk_aggregate_rate(hw, parent->core);
+	cur_rate = max(new_rate, v->rate);
+
+	if (new_rate < cur_rate)
+		clk_set_rate(parent->clk, new_rate);
+}
+
+/* A voter is "enabled" when its vote is active, not when hardware gates */
+static int voter_clk_is_enabled(struct clk_hw *hw)
+{
+	struct clk_voter *v = to_clk_voter(hw);
+
+	return v->enabled;
+}
+
+/* Rate requests are constrained by what the parent can actually run at */
+static long voter_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long *parent_rate)
+{
+	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+	if (!parent_hw)
+		return -EINVAL;
+
+	return clk_hw_round_rate(parent_hw, rate);
+}
+
+/* Report this voter's own vote, not the parent's (possibly higher) rate */
+static unsigned long voter_clk_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_voter *v = to_clk_voter(hw);
+
+	return v->rate;
+}
+
+/* Referenced via extern in clk-voter.h (used by the DEFINE_CLK_VOTER
+ * macros in other files), so this must stay non-static.
+ */
+const struct clk_ops clk_ops_voter = {
+	.prepare = voter_clk_prepare,
+	.unprepare = voter_clk_unprepare,
+	.set_rate = voter_clk_set_rate,
+	.is_enabled = voter_clk_is_enabled,
+	.round_rate = voter_clk_round_rate,
+	.recalc_rate = voter_clk_recalc_rate,
+};
diff --git a/drivers/clk/qcom/clk-voter.h b/drivers/clk/qcom/clk-voter.h
new file mode 100644
index 0000000..0ae2b1a
--- /dev/null
+++ b/drivers/clk/qcom/clk-voter.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __QCOM_CLK_VOTER_H__
+#define __QCOM_CLK_VOTER_H__
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+struct clk_voter {
+	int is_branch;
+	bool enabled;
+	struct clk_hw hw;
+	unsigned long rate;
+};
+
+extern const struct clk_ops clk_ops_voter;
+
+#define to_clk_voter(_hw) container_of(_hw, struct clk_voter, hw)
+
+/* Declare a named clk_voter. Arguments are parenthesized on expansion
+ * (_default_rate was previously expanded bare, unlike _is_branch) so an
+ * expression argument evaluates with the intended precedence.
+ */
+#define __DEFINE_CLK_VOTER(clk_name, _parent_name, _default_rate, _is_branch) \
+	struct clk_voter clk_name = {					 \
+		.is_branch = (_is_branch),				  \
+		.rate = (_default_rate),				   \
+		.hw.init = &(struct clk_init_data){			   \
+			.ops = &clk_ops_voter,				   \
+			.name = #clk_name,				   \
+			.parent_names = (const char *[]){ #_parent_name }, \
+			.num_parents = 1,				   \
+		},							   \
+	}
+
+#define DEFINE_CLK_VOTER(clk_name, _parent_name, _default_rate) \
+	 __DEFINE_CLK_VOTER(clk_name, _parent_name, _default_rate, 0)
+
+#define DEFINE_CLK_BRANCH_VOTER(clk_name, _parent_name) \
+	 __DEFINE_CLK_VOTER(clk_name, _parent_name, 1000, 1)
+
+#endif
diff --git a/drivers/clk/qcom/gcc-msmskunk.c b/drivers/clk/qcom/gcc-sdm845.c
similarity index 97%
rename from drivers/clk/qcom/gcc-msmskunk.c
rename to drivers/clk/qcom/gcc-sdm845.c
index 59eb0ec..92e0ffa 100644
--- a/drivers/clk/qcom/gcc-msmskunk.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -23,7 +23,7 @@
 #include <linux/regmap.h>
 #include <linux/reset-controller.h>
 
-#include <dt-bindings/clock/qcom,gcc-skunk.h>
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
 
 #include "common.h"
 #include "clk-regmap.h"
@@ -32,7 +32,7 @@
 #include "clk-branch.h"
 #include "reset.h"
 #include "clk-alpha-pll.h"
-#include "vdd-level-skunk.h"
+#include "vdd-level-sdm845.h"
 
 #define GCC_APCS_CLOCK_SLEEP_ENA_VOTE_OFFSET	0x52008
 #define CPUSS_AHB_CLK_SLEEP_ENA			BIT(21)
@@ -3302,11 +3302,11 @@
 	},
 };
 
-struct clk_hw *gcc_msmskunk_hws[] = {
+struct clk_hw *gcc_sdm845_hws[] = {
 	[GCC_XO] =      &bi_tcxo.hw,
 };
 
-static struct clk_regmap *gcc_msmskunk_clocks[] = {
+static struct clk_regmap *gcc_sdm845_clocks[] = {
 	[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
 	[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
 	[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
@@ -3497,7 +3497,7 @@
 	[GPLL1] = &gpll1.clkr,
 };
 
-static const struct qcom_reset_map gcc_msmskunk_resets[] = {
+static const struct qcom_reset_map gcc_sdm845_resets[] = {
 	[GCC_GPU_BCR] = { 0x71000 },
 	[GCC_MMSS_BCR] = { 0xb000 },
 	[GCC_PCIE_0_BCR] = { 0x6b000 },
@@ -3517,7 +3517,7 @@
 	[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
 };
 
-static const struct regmap_config gcc_msmskunk_regmap_config = {
+static const struct regmap_config gcc_sdm845_regmap_config = {
 	.reg_bits	= 32,
 	.reg_stride	= 4,
 	.val_bits	= 32,
@@ -3525,33 +3525,33 @@
 	.fast_io	= true,
 };
 
-static const struct qcom_cc_desc gcc_msmskunk_desc = {
-	.config = &gcc_msmskunk_regmap_config,
-	.clks = gcc_msmskunk_clocks,
-	.num_clks = ARRAY_SIZE(gcc_msmskunk_clocks),
-	.resets = gcc_msmskunk_resets,
-	.num_resets = ARRAY_SIZE(gcc_msmskunk_resets),
+static const struct qcom_cc_desc gcc_sdm845_desc = {
+	.config = &gcc_sdm845_regmap_config,
+	.clks = gcc_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(gcc_sdm845_clocks),
+	.resets = gcc_sdm845_resets,
+	.num_resets = ARRAY_SIZE(gcc_sdm845_resets),
 };
 
-static const struct of_device_id gcc_msmskunk_match_table[] = {
-	{ .compatible = "qcom,gcc-msmskunk" },
+static const struct of_device_id gcc_sdm845_match_table[] = {
+	{ .compatible = "qcom,gcc-sdm845" },
 	{ }
 };
-MODULE_DEVICE_TABLE(of, gcc_msmskunk_match_table);
+MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
 
-static int gcc_msmskunk_probe(struct platform_device *pdev)
+static int gcc_sdm845_probe(struct platform_device *pdev)
 {
 	struct clk *clk;
 	struct regmap *regmap;
 	int ret = 0, i;
 
-	regmap = qcom_cc_map(pdev, &gcc_msmskunk_desc);
+	regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
 	if (IS_ERR(regmap))
 		return PTR_ERR(regmap);
 
 	/* register hardware clocks */
-	for (i = 0; i < ARRAY_SIZE(gcc_msmskunk_hws); i++) {
-		clk = devm_clk_register(&pdev->dev, gcc_msmskunk_hws[i]);
+	for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
+		clk = devm_clk_register(&pdev->dev, gcc_sdm845_hws[i]);
 		if (IS_ERR(clk))
 			return PTR_ERR(clk);
 	}
@@ -3579,7 +3579,7 @@
 		return PTR_ERR(vdd_cx_ao.regulator[0]);
 	}
 
-	ret = qcom_cc_really_probe(pdev, &gcc_msmskunk_desc, regmap);
+	ret = qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register GCC clocks\n");
 		return ret;
@@ -3617,26 +3617,26 @@
 	return ret;
 }
 
-static struct platform_driver gcc_msmskunk_driver = {
-	.probe		= gcc_msmskunk_probe,
+static struct platform_driver gcc_sdm845_driver = {
+	.probe		= gcc_sdm845_probe,
 	.driver		= {
-		.name	= "gcc-msmskunk",
-		.of_match_table = gcc_msmskunk_match_table,
+		.name	= "gcc-sdm845",
+		.of_match_table = gcc_sdm845_match_table,
 	},
 };
 
-static int __init gcc_msmskunk_init(void)
+static int __init gcc_sdm845_init(void)
 {
-	return platform_driver_register(&gcc_msmskunk_driver);
+	return platform_driver_register(&gcc_sdm845_driver);
 }
-core_initcall(gcc_msmskunk_init);
+core_initcall(gcc_sdm845_init);
 
-static void __exit gcc_msmskunk_exit(void)
+static void __exit gcc_sdm845_exit(void)
 {
-	platform_driver_unregister(&gcc_msmskunk_driver);
+	platform_driver_unregister(&gcc_sdm845_driver);
 }
-module_exit(gcc_msmskunk_exit);
+module_exit(gcc_sdm845_exit);
 
-MODULE_DESCRIPTION("QTI GCC MSMSKUNK Driver");
+MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:gcc-msmskunk");
+MODULE_ALIAS("platform:gcc-sdm845");
diff --git a/drivers/clk/qcom/vdd-level-skunk.h b/drivers/clk/qcom/vdd-level-sdm845.h
similarity index 100%
rename from drivers/clk/qcom/vdd-level-skunk.h
rename to drivers/clk/qcom/vdd-level-sdm845.h
diff --git a/drivers/clk/qcom/videocc-msmskunk.c b/drivers/clk/qcom/videocc-sdm845.c
similarity index 85%
rename from drivers/clk/qcom/videocc-msmskunk.c
rename to drivers/clk/qcom/videocc-sdm845.c
index 670efb5..0e9cf88 100644
--- a/drivers/clk/qcom/videocc-msmskunk.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -23,7 +23,7 @@
 #include <linux/regmap.h>
 #include <linux/reset-controller.h>
 
-#include <dt-bindings/clock/qcom,videocc-skunk.h>
+#include <dt-bindings/clock/qcom,videocc-sdm845.h>
 
 #include "common.h"
 #include "clk-regmap.h"
@@ -32,7 +32,7 @@
 #include "clk-branch.h"
 #include "reset.h"
 #include "clk-alpha-pll.h"
-#include "vdd-level-skunk.h"
+#include "vdd-level-sdm845.h"
 
 #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
 
@@ -298,7 +298,7 @@
 	},
 };
 
-static struct clk_regmap *video_cc_msmskunk_clocks[] = {
+static struct clk_regmap *video_cc_sdm845_clocks[] = {
 	[VIDEO_CC_APB_CLK] = &video_cc_apb_clk.clkr,
 	[VIDEO_CC_AT_CLK] = &video_cc_at_clk.clkr,
 	[VIDEO_CC_DEBUG_CLK] = &video_cc_debug_clk.clkr,
@@ -315,14 +315,14 @@
 	[VIDEO_PLL0] = &video_pll0.clkr,
 };
 
-static const struct qcom_reset_map video_cc_msmskunk_resets[] = {
+static const struct qcom_reset_map video_cc_sdm845_resets[] = {
 	[VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
 	[VIDEO_CC_VCODEC0_BCR] = { 0x870 },
 	[VIDEO_CC_VCODEC1_BCR] = { 0x8b0 },
 	[VIDEO_CC_VENUS_BCR] = { 0x810 },
 };
 
-static const struct regmap_config video_cc_msmskunk_regmap_config = {
+static const struct regmap_config video_cc_sdm845_regmap_config = {
 	.reg_bits	= 32,
 	.reg_stride	= 4,
 	.val_bits	= 32,
@@ -330,26 +330,26 @@
 	.fast_io	= true,
 };
 
-static const struct qcom_cc_desc video_cc_msmskunk_desc = {
-	.config = &video_cc_msmskunk_regmap_config,
-	.clks = video_cc_msmskunk_clocks,
-	.num_clks = ARRAY_SIZE(video_cc_msmskunk_clocks),
-	.resets = video_cc_msmskunk_resets,
-	.num_resets = ARRAY_SIZE(video_cc_msmskunk_resets),
+static const struct qcom_cc_desc video_cc_sdm845_desc = {
+	.config = &video_cc_sdm845_regmap_config,
+	.clks = video_cc_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(video_cc_sdm845_clocks),
+	.resets = video_cc_sdm845_resets,
+	.num_resets = ARRAY_SIZE(video_cc_sdm845_resets),
 };
 
-static const struct of_device_id video_cc_msmskunk_match_table[] = {
-	{ .compatible = "qcom,video_cc-msmskunk" },
+static const struct of_device_id video_cc_sdm845_match_table[] = {
+	{ .compatible = "qcom,video_cc-sdm845" },
 	{ }
 };
-MODULE_DEVICE_TABLE(of, video_cc_msmskunk_match_table);
+MODULE_DEVICE_TABLE(of, video_cc_sdm845_match_table);
 
-static int video_cc_msmskunk_probe(struct platform_device *pdev)
+static int video_cc_sdm845_probe(struct platform_device *pdev)
 {
 	struct regmap *regmap;
 	int ret = 0;
 
-	regmap = qcom_cc_map(pdev, &video_cc_msmskunk_desc);
+	regmap = qcom_cc_map(pdev, &video_cc_sdm845_desc);
 	if (IS_ERR(regmap)) {
 		pr_err("Failed to map the Video CC registers\n");
 		return PTR_ERR(regmap);
@@ -365,7 +365,7 @@
 
 	clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
 
-	ret = qcom_cc_really_probe(pdev, &video_cc_msmskunk_desc, regmap);
+	ret = qcom_cc_really_probe(pdev, &video_cc_sdm845_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register Video CC clocks\n");
 		return ret;
@@ -375,26 +375,26 @@
 	return ret;
 }
 
-static struct platform_driver video_cc_msmskunk_driver = {
-	.probe		= video_cc_msmskunk_probe,
+static struct platform_driver video_cc_sdm845_driver = {
+	.probe		= video_cc_sdm845_probe,
 	.driver		= {
-		.name	= "video_cc-msmskunk",
-		.of_match_table = video_cc_msmskunk_match_table,
+		.name	= "video_cc-sdm845",
+		.of_match_table = video_cc_sdm845_match_table,
 	},
 };
 
-static int __init video_cc_msmskunk_init(void)
+static int __init video_cc_sdm845_init(void)
 {
-	return platform_driver_register(&video_cc_msmskunk_driver);
+	return platform_driver_register(&video_cc_sdm845_driver);
 }
-core_initcall(video_cc_msmskunk_init);
+core_initcall(video_cc_sdm845_init);
 
-static void __exit video_cc_msmskunk_exit(void)
+static void __exit video_cc_sdm845_exit(void)
 {
-	platform_driver_unregister(&video_cc_msmskunk_driver);
+	platform_driver_unregister(&video_cc_sdm845_driver);
 }
-module_exit(video_cc_msmskunk_exit);
+module_exit(video_cc_sdm845_exit);
 
-MODULE_DESCRIPTION("QCOM VIDEO_CC MSMSKUNK Driver");
+MODULE_DESCRIPTION("QCOM VIDEO_CC SDM845 Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:video_cc-msmskunk");
+MODULE_ALIAS("platform:video_cc-sdm845");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 06e9b53..499dfcc 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -64,7 +64,7 @@
 
 #define NUM_DOMAINS    4    /* one for KMS, then one per gpu core (?) */
 #define MAX_CRTCS      8
-#define MAX_PLANES     12
+#define MAX_PLANES     20
 #define MAX_ENCODERS   8
 #define MAX_BRIDGES    8
 #define MAX_CONNECTORS 8
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index b7c0e68..008a527 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -33,6 +33,7 @@
 	struct list_head active_list;
 	struct list_head dirty_list;
 	bool is_dspp_feature;
+	u32 prop_blob_sz;
 };
 
 struct sde_cp_prop_attach {
@@ -253,6 +254,12 @@
 		DRM_ERROR("invalid blob id %lld\n", val);
 		return -EINVAL;
 	}
+	if (blob->length != prop_node->prop_blob_sz) {
+		DRM_ERROR("invalid blob len %zd exp %d feature %d\n",
+		    blob->length, prop_node->prop_blob_sz, prop_node->feature);
+		drm_property_unreference_blob(blob);
+		return -EINVAL;
+	}
 	/* Release refernce to existing payload of the property */
 	if (prop_node->blob_ptr)
 		drm_property_unreference_blob(prop_node->blob_ptr);
@@ -406,7 +413,7 @@
 }
 
 static void sde_cp_crtc_create_blob_property(struct drm_crtc *crtc, char *name,
-					     u32 feature)
+			u32 feature, u32 blob_sz)
 {
 	struct drm_property *prop;
 	struct sde_cp_node *prop_node = NULL;
@@ -440,6 +447,7 @@
 
 	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
 				feature, val);
+	prop_node->prop_blob_sz = blob_sz;
 
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
@@ -892,7 +900,7 @@
 	switch (version) {
 	case 1:
 		sde_cp_crtc_create_blob_property(crtc, feature_name,
-					SDE_CP_CRTC_DSPP_PCC);
+			SDE_CP_CRTC_DSPP_PCC, sizeof(struct drm_msm_pcc));
 		break;
 	default:
 		DRM_ERROR("version %d not supported\n", version);
@@ -987,7 +995,7 @@
 	switch (version) {
 	case 1:
 		sde_cp_crtc_create_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_LM_GC);
+			SDE_CP_CRTC_LM_GC, sizeof(struct drm_msm_pgc_lut));
 		break;
 	default:
 		DRM_ERROR("version %d not supported\n", version);
@@ -1011,7 +1019,8 @@
 	switch (version) {
 	case 4:
 		sde_cp_crtc_create_blob_property(crtc, feature_name,
-					SDE_CP_CRTC_DSPP_GAMUT);
+			SDE_CP_CRTC_DSPP_GAMUT,
+			sizeof(struct drm_msm_3d_gamut));
 		break;
 	default:
 		DRM_ERROR("version %d not supported\n", version);
@@ -1035,7 +1044,7 @@
 	switch (version) {
 	case 1:
 		sde_cp_crtc_create_blob_property(crtc, feature_name,
-					SDE_CP_CRTC_DSPP_GC);
+			SDE_CP_CRTC_DSPP_GC, sizeof(struct drm_msm_pgc_lut));
 		break;
 	default:
 		DRM_ERROR("version %d not supported\n", version);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index b0bde45..9832ca5 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -270,6 +270,9 @@
 
 		stage_cfg->stage[lm_idx][pstate->stage][idx] =
 							sde_plane_pipe(plane);
+		stage_cfg->multirect_index
+				[lm_idx][pstate->stage][idx] =
+				pstate->multirect_index;
 		mixer[lm_idx].flush_mask |= flush_mask;
 
 		SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
@@ -300,6 +303,9 @@
 			idx = right_crtc_zpos_cnt[pstate->stage]++;
 			stage_cfg->stage[RIGHT_MIXER][pstate->stage][idx] =
 							sde_plane_pipe(plane);
+			stage_cfg->multirect_index
+				[RIGHT_MIXER][pstate->stage][idx] =
+				pstate->multirect_index;
 			mixer[RIGHT_MIXER].flush_mask |= flush_mask;
 
 			/* blend config update */
@@ -1156,8 +1162,8 @@
 struct plane_state {
 	struct sde_plane_state *sde_pstate;
 	const struct drm_plane_state *drm_pstate;
-
 	int stage;
+	u32 pipe_id;
 };
 
 static int pstate_cmp(const void *a, const void *b)
@@ -1182,7 +1188,7 @@
 		struct drm_crtc_state *state)
 {
 	struct sde_crtc *sde_crtc;
-	struct plane_state pstates[SDE_STAGE_MAX * 2];
+	struct plane_state pstates[SDE_STAGE_MAX * 4];
 	struct sde_crtc_state *cstate;
 
 	const struct drm_plane_state *pstate;
@@ -1190,8 +1196,12 @@
 	struct drm_display_mode *mode;
 
 	int cnt = 0, rc = 0, mixer_width, i, z_pos;
+
 	int left_crtc_zpos_cnt[SDE_STAGE_MAX] = {0};
 	int right_crtc_zpos_cnt[SDE_STAGE_MAX] = {0};
+	struct sde_multirect_plane_states multirect_plane[SDE_STAGE_MAX * 2];
+	int multirect_count = 0;
+	const struct drm_plane_state *pipe_staged[SSPP_MAX];
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -1209,6 +1219,8 @@
 	mode = &state->adjusted_mode;
 	SDE_DEBUG("%s: check", sde_crtc->name);
 
+	memset(pipe_staged, 0, sizeof(pipe_staged));
+
 	mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
 
 	 /* get plane state for all drm planes associated with crtc state */
@@ -1226,6 +1238,7 @@
 		pstates[cnt].drm_pstate = pstate;
 		pstates[cnt].stage = sde_plane_get_property(
 				pstates[cnt].sde_pstate, PLANE_PROP_ZPOS);
+		pstates[cnt].pipe_id = sde_plane_pipe(plane);
 
 		/* check dim layer stage with every plane */
 		for (i = 0; i < cstate->num_dim_layers; i++) {
@@ -1238,6 +1251,17 @@
 			}
 		}
 
+		if (pipe_staged[pstates[cnt].pipe_id]) {
+			multirect_plane[multirect_count].r0 =
+				pipe_staged[pstates[cnt].pipe_id];
+			multirect_plane[multirect_count].r1 = pstate;
+			multirect_count++;
+
+			pipe_staged[pstates[cnt].pipe_id] = NULL;
+		} else {
+			pipe_staged[pstates[cnt].pipe_id] = pstate;
+		}
+
 		cnt++;
 
 		if (CHECK_LAYER_BOUNDS(pstate->crtc_y, pstate->crtc_h,
@@ -1253,6 +1277,15 @@
 		}
 	}
 
+	for (i = 1; i < SSPP_MAX; i++) {
+		if (pipe_staged[i] &&
+			is_sde_plane_virtual(pipe_staged[i]->plane)) {
+			SDE_ERROR("invalid use of virtual plane: %d\n",
+					pipe_staged[i]->plane->base.id);
+			goto end;
+		}
+	}
+
 	/* Check dim layer rect bounds and stage */
 	for (i = 0; i < cstate->num_dim_layers; i++) {
 		if ((CHECK_LAYER_BOUNDS(cstate->dim_layer[i].rect.y,
@@ -1300,7 +1333,7 @@
 			goto end;
 		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
 			if (left_crtc_zpos_cnt[z_pos] == 2) {
-				SDE_ERROR("> 2 plane @ stage%d on left\n",
+				SDE_ERROR("> 2 planes @ stage %d on left\n",
 					z_pos);
 				rc = -EINVAL;
 				goto end;
@@ -1308,7 +1341,7 @@
 			left_crtc_zpos_cnt[z_pos]++;
 		} else {
 			if (right_crtc_zpos_cnt[z_pos] == 2) {
-				SDE_ERROR("> 2 plane @ stage%d on right\n",
+				SDE_ERROR("> 2 planes @ stage %d on right\n",
 					z_pos);
 				rc = -EINVAL;
 				goto end;
@@ -1319,6 +1352,17 @@
 		SDE_DEBUG("%s: zpos %d", sde_crtc->name, z_pos);
 	}
 
+	for (i = 0; i < multirect_count; i++) {
+		if (sde_plane_validate_multirect_v2(&multirect_plane[i])) {
+			SDE_ERROR(
+			"multirect validation failed for planes (%d - %d)\n",
+					multirect_plane[i].r0->plane->base.id,
+					multirect_plane[i].r1->plane->base.id);
+			rc = -EINVAL;
+			break;
+		}
+	}
+
 end:
 	return rc;
 }
@@ -1427,6 +1471,16 @@
 		sde_kms_info_add_keystr(info, "qseed_type", "qseed2");
 	if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3)
 		sde_kms_info_add_keystr(info, "qseed_type", "qseed3");
+
+	if (sde_is_custom_client()) {
+		if (catalog->smart_dma_rev == SDE_SSPP_SMART_DMA_V1)
+			sde_kms_info_add_keystr(info,
+					"smart_dma_rev", "smart_dma_v1");
+		if (catalog->smart_dma_rev == SDE_SSPP_SMART_DMA_V2)
+			sde_kms_info_add_keystr(info,
+					"smart_dma_rev", "smart_dma_v2");
+	}
+
 	sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
 	msm_property_set_blob(&sde_crtc->property_info, &sde_crtc->blob_info,
 			info->data, info->len, CRTC_PROP_INFO);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 533147c..c53a373 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -46,9 +46,6 @@
 /* default hardware block size if dtsi entry is not present */
 #define DEFAULT_SDE_HW_BLOCK_LEN 0x100
 
-/* default rects for multi rect case */
-#define DEFAULT_SDE_SSPP_MAX_RECTS 1
-
 /* total number of intf - dp, dsi, hdmi */
 #define INTF_COUNT			3
 
@@ -106,6 +103,7 @@
 	CDP,
 	SRC_SPLIT,
 	DIM_LAYER,
+	SMART_DMA_REV,
 	SDE_PROP_MAX,
 };
 
@@ -118,11 +116,11 @@
 	SSPP_CLK_STATUS,
 	SSPP_DANGER,
 	SSPP_SAFE,
-	SSPP_MAX_RECTS,
 	SSPP_SCALE_SIZE,
 	SSPP_VIG_BLOCKS,
 	SSPP_RGB_BLOCKS,
 	SSPP_EXCL_RECT,
+	SSPP_SMART_DMA,
 	SSPP_PROP_MAX,
 };
 
@@ -283,6 +281,7 @@
 	{CDP, "qcom,sde-has-cdp", false, PROP_TYPE_BOOL},
 	{SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
 	{DIM_LAYER, "qcom,sde-has-dim-layer", false, PROP_TYPE_BOOL},
+	{SMART_DMA_REV, "qcom,sde-smart-dma-rev", false, PROP_TYPE_STRING},
 };
 
 static struct sde_prop_type sspp_prop[] = {
@@ -296,11 +295,12 @@
 		PROP_TYPE_BIT_OFFSET_ARRAY},
 	{SSPP_DANGER, "qcom,sde-sspp-danger-lut", false, PROP_TYPE_U32_ARRAY},
 	{SSPP_SAFE, "qcom,sde-sspp-safe-lut", false, PROP_TYPE_U32_ARRAY},
-	{SSPP_MAX_RECTS, "qcom,sde-sspp-max-rects", false, PROP_TYPE_U32_ARRAY},
 	{SSPP_SCALE_SIZE, "qcom,sde-sspp-scale-size", false, PROP_TYPE_U32},
 	{SSPP_VIG_BLOCKS, "qcom,sde-sspp-vig-blocks", false, PROP_TYPE_NODE},
 	{SSPP_RGB_BLOCKS, "qcom,sde-sspp-rgb-blocks", false, PROP_TYPE_NODE},
 	{SSPP_EXCL_RECT, "qcom,sde-sspp-excl-rect", false, PROP_TYPE_U32_ARRAY},
+	{SSPP_SMART_DMA, "qcom,sde-sspp-smart-dma-priority", false,
+		PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type vig_prop[] = {
@@ -900,6 +900,13 @@
 		sblk->maxlinewidth = sde_cfg->max_sspp_linewidth;
 
 		set_bit(SDE_SSPP_SRC, &sspp->features);
+
+		sblk->smart_dma_priority =
+			PROP_VALUE_ACCESS(prop_value, SSPP_SMART_DMA, i);
+
+		if (sblk->smart_dma_priority && sde_cfg->smart_dma_rev)
+			set_bit(sde_cfg->smart_dma_rev, &sspp->features);
+
 		sblk->src_blk.id = SDE_SSPP_SRC;
 
 		of_property_read_string_index(np,
@@ -1815,7 +1822,7 @@
 
 static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
 {
-	int rc, len, prop_count[SDE_PROP_MAX];
+	int rc, dma_rc, len, prop_count[SDE_PROP_MAX];
 	struct sde_prop_value *prop_value = NULL;
 	bool prop_exists[SDE_PROP_MAX];
 	const char *type;
@@ -1879,16 +1886,38 @@
 		cfg->mdp[0].highest_bank_bit = DEFAULT_SDE_HIGHEST_BANK_BIT;
 
 	rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
-	if (!rc && !strcmp(type, "qseedv3"))
+	if (!rc && !strcmp(type, "qseedv3")) {
 		cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
-	else if (!rc && !strcmp(type, "qseedv2"))
+	} else if (!rc && !strcmp(type, "qseedv2")) {
 		cfg->qseed_type = SDE_SSPP_SCALER_QSEED2;
+	} else if (rc) {
+		SDE_DEBUG("invalid QSEED configuration\n");
+		rc = 0;
+	}
 
 	rc = of_property_read_string(np, sde_prop[CSC_TYPE].prop_name, &type);
-	if (!rc && !strcmp(type, "csc"))
+	if (!rc && !strcmp(type, "csc")) {
 		cfg->csc_type = SDE_SSPP_CSC;
-	else if (!rc && !strcmp(type, "csc-10bit"))
+	} else if (!rc && !strcmp(type, "csc-10bit")) {
 		cfg->csc_type = SDE_SSPP_CSC_10BIT;
+	} else if (rc) {
+		SDE_DEBUG("invalid csc configuration\n");
+		rc = 0;
+	}
+
+	/*
+	 * Current SDE support only Smart DMA 2.0.
+	 * No support for Smart DMA 1.0 yet.
+	 */
+	cfg->smart_dma_rev = 0;
+	dma_rc = of_property_read_string(np, sde_prop[SMART_DMA_REV].prop_name,
+			&type);
+	if (!dma_rc && !strcmp(type, "smart_dma_v2")) {
+		cfg->smart_dma_rev = SDE_SSPP_SMART_DMA_V2;
+	} else if (!dma_rc && !strcmp(type, "smart_dma_v1")) {
+		SDE_ERROR("smart dma 1.0 is not supported in SDE\n");
+		cfg->smart_dma_rev = 0;
+	}
 
 	cfg->has_src_split = PROP_VALUE_ACCESS(prop_value, SRC_SPLIT, 0);
 	cfg->has_dim_layer = PROP_VALUE_ACCESS(prop_value, DIM_LAYER, 0);
@@ -1940,7 +1969,7 @@
 		break;
 	case SDE_HW_VER_300:
 	case SDE_HW_VER_400:
-		/* update msm8998 and skunk target here */
+		/* update msm8998 and sdm845 target here */
 		break;
 	}
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index ef69ae0..d28be49a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -42,9 +42,9 @@
 #define SDE_HW_VER_171	SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */
 #define SDE_HW_VER_172	SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */
 #define SDE_HW_VER_300	SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */
-#define SDE_HW_VER_400	SDE_HW_VER(4, 0, 0) /* msmskunk v1.0 */
+#define SDE_HW_VER_400	SDE_HW_VER(4, 0, 0) /* sdm845 v1.0 */
 
-#define IS_MSMSKUNK_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400)
+#define IS_SDM845_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400)
 
 #define MAX_IMG_WIDTH 0x3fff
 #define MAX_IMG_HEIGHT 0x3fff
@@ -93,6 +93,8 @@
  * @SDE_SSPP_CURSOR,         SSPP can be used as a cursor layer
  * @SDE_SSPP_QOS,            SSPP support QoS control, danger/safe/creq
  * @SDE_SSPP_EXCL_RECT,      SSPP supports exclusion rect
+ * @SDE_SSPP_SMART_DMA_V1,   SmartDMA 1.0 support
+ * @SDE_SSPP_SMART_DMA_V2,   SmartDMA 2.0 support
  * @SDE_SSPP_MAX             maximum value
  */
 enum {
@@ -109,6 +111,8 @@
 	SDE_SSPP_CURSOR,
 	SDE_SSPP_QOS,
 	SDE_SSPP_EXCL_RECT,
+	SDE_SSPP_SMART_DMA_V1,
+	SDE_SSPP_SMART_DMA_V2,
 	SDE_SSPP_MAX
 };
 
@@ -317,6 +321,7 @@
  * @creq_vblank: creq priority during vertical blanking
  * @danger_vblank: danger priority during vertical blanking
  * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
  * @src_blk:
  * @scaler_blk:
  * @csc_blk:
@@ -342,6 +347,7 @@
 	u32 maxupscale;
 	u32 maxhdeciexp; /* max decimation is 2^value */
 	u32 maxvdeciexp; /* max decimation is 2^value */
+	u32 smart_dma_priority;
 	struct sde_src_blk src_blk;
 	struct sde_scaler_blk scaler_blk;
 	struct sde_pp_blk csc_blk;
@@ -627,6 +633,7 @@
  * @highest_bank_bit   highest memory bit setting for tile buffers.
  * @qseed_type         qseed2 or qseed3 support.
  * @csc_type           csc or csc_10bit support.
+ * @smart_dma_rev      Supported version of SmartDMA feature.
  * @has_src_split      source split feature status
  * @has_cdp            Client driver prefetch feature status
  */
@@ -640,6 +647,7 @@
 	u32 highest_bank_bit;
 	u32 qseed_type;
 	u32 csc_type;
+	u32 smart_dma_rev;
 	bool has_src_split;
 	bool has_cdp;
 	bool has_dim_layer;
@@ -721,4 +729,13 @@
  */
 void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg);
 
+/**
+ * sde_hw_sspp_multirect_enabled - check multirect enabled for the sspp
+ * @cfg:          pointer to sspp cfg
+ */
+static inline bool sde_hw_sspp_multirect_enabled(const struct sde_sspp_cfg *cfg)
+{
+	return test_bit(SDE_SSPP_SMART_DMA_V1, &cfg->features) ||
+			 test_bit(SDE_SSPP_SMART_DMA_V2, &cfg->features);
+}
 #endif /* _SDE_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
index c7cbb93..dad039e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -55,19 +55,6 @@
  */
 static u32 offsite_v_coeff[] = {0x00060002};
 
-/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
-static struct sde_csc_cfg rgb2yuv_cfg = {
-	{
-		0x0083, 0x0102, 0x0032,
-		0x1fb5, 0x1f6c, 0x00e1,
-		0x00e1, 0x1f45, 0x1fdc
-	},
-	{ 0x00, 0x00, 0x00 },
-	{ 0x0040, 0x0200, 0x0200 },
-	{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
-	{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
-};
-
 static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
@@ -327,12 +314,6 @@
 	_setup_cdm_ops(&c->ops, c->cdm_hw_cap->features);
 	c->hw_mdp = hw_mdp;
 
-	/*
-	 * Perform any default initialization for the chroma down module
-	 * @setup default csc coefficients
-	 */
-	sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
-
 	return c;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
index f1f66f3..ab2c473 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -101,6 +101,14 @@
 #define SSPP	0
 #define DSPP	1
 
+#define PGC_C0_OFF 0x4
+#define PGC_C0_INDEX_OFF 0x8
+#define PGC_8B_ROUND_EN BIT(1)
+#define PGC_EN BIT(0)
+#define PGC_TBL_NUM 3
+#define PGC_LUT_SWAP_OFF 0x1c
+
+
 static void __setup_pa_hue(struct sde_hw_blk_reg_map *hw,
 			const struct sde_pp_blk *blk, uint32_t hue,
 			int location)
@@ -451,3 +459,49 @@
 	op_mode |= DSPP_OP_PA_EN | DSPP_OP_PA_LUTV_EN;
 	SDE_REG_WRITE(&ctx->hw, base, op_mode);
 }
+
+void sde_setup_dspp_gc_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct drm_msm_pgc_lut *payload = NULL;
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	u32 c0_off, c1_off, c2_off, i;
+
+	if (!hw_cfg || (hw_cfg->payload && hw_cfg->len !=
+			sizeof(struct drm_msm_pgc_lut))) {
+		DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
+			  hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
+			  ((hw_cfg) ? hw_cfg->len : 0),
+			  sizeof(struct drm_msm_pgc_lut));
+		return;
+	}
+
+	if (!hw_cfg->payload) {
+		DRM_DEBUG_DRIVER("Disable pgc feature\n");
+		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gc.base, 0);
+		return;
+	}
+	payload = hw_cfg->payload;
+
+	/* Initialize index offsets */
+	c0_off = ctx->cap->sblk->gc.base + PGC_C0_INDEX_OFF;
+	c1_off = c0_off + (sizeof(u32) * 2);
+	c2_off = c1_off + (sizeof(u32) * 2);
+	SDE_REG_WRITE(&ctx->hw, c0_off, 0);
+	SDE_REG_WRITE(&ctx->hw, c1_off, 0);
+	SDE_REG_WRITE(&ctx->hw, c2_off, 0);
+
+	/* Initialize table offsets */
+	c0_off = ctx->cap->sblk->gc.base + PGC_C0_OFF;
+	c1_off = c0_off + (sizeof(u32) * 2);
+	c2_off = c1_off + (sizeof(u32) * 2);
+
+	for (i = 0; i < PGC_TBL_LEN; i++) {
+		SDE_REG_WRITE(&ctx->hw, c0_off, payload->c0[i]);
+		SDE_REG_WRITE(&ctx->hw, c1_off, payload->c1[i]);
+		SDE_REG_WRITE(&ctx->hw, c2_off, payload->c2[i]);
+	}
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gc.base + PGC_LUT_SWAP_OFF,
+			BIT(0));
+	i = BIT(0) | ((payload->flags & PGC_8B_ROUND) ? BIT(1) : 0);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gc.base, i);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
index 0f9bc0e..25e446b7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -75,4 +75,11 @@
  */
 void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg);
 
+/**
+ * sde_setup_dspp_gc_v1_7 - setup DSPP gc feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to gc data
+ */
+void sde_setup_dspp_gc_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
 #endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 56d9f2a..19e3a7a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -20,6 +20,8 @@
 	(0x40 + (((lm) - LM_0) * 0x004))
 #define   CTL_LAYER_EXT2(lm)             \
 	(0x70 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT3(lm)             \
+	(0xA0 + (((lm) - LM_0) * 0x004))
 #define   CTL_TOP                       0x014
 #define   CTL_FLUSH                     0x018
 #define   CTL_START                     0x01C
@@ -281,7 +283,8 @@
 	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index)
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	u32 mixercfg, mixercfg_ext, mix, ext, mixercfg_ext2;
+	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
 	int i, j;
 	u8 stages;
 	int pipes_per_stage;
@@ -300,8 +303,6 @@
 		pipes_per_stage = 1;
 
 	mixercfg = BIT(24); /* always set BORDER_OUT */
-	mixercfg_ext = 0;
-	mixercfg_ext2 = 0;
 
 	for (i = 0; i <= stages; i++) {
 		/* overflow to ext register if 'i + 1 > 7' */
@@ -309,22 +310,41 @@
 		ext = i >= 7;
 
 		for (j = 0 ; j < pipes_per_stage; j++) {
+			enum sde_sspp_multirect_index rect_index =
+				stage_cfg->multirect_index[index][i][j];
+
 			switch (stage_cfg->stage[index][i][j]) {
 			case SSPP_VIG0:
-				mixercfg |= mix << 0;
-				mixercfg_ext |= ext << 0;
+				if (rect_index == SDE_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+				} else {
+					mixercfg |= mix << 0;
+					mixercfg_ext |= ext << 0;
+				}
 				break;
 			case SSPP_VIG1:
-				mixercfg |= mix << 3;
-				mixercfg_ext |= ext << 2;
+				if (rect_index == SDE_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+				} else {
+					mixercfg |= mix << 3;
+					mixercfg_ext |= ext << 2;
+				}
 				break;
 			case SSPP_VIG2:
-				mixercfg |= mix << 6;
-				mixercfg_ext |= ext << 4;
+				if (rect_index == SDE_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+				} else {
+					mixercfg |= mix << 6;
+					mixercfg_ext |= ext << 4;
+				}
 				break;
 			case SSPP_VIG3:
-				mixercfg |= mix << 26;
-				mixercfg_ext |= ext << 6;
+				if (rect_index == SDE_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+				} else {
+					mixercfg |= mix << 26;
+					mixercfg_ext |= ext << 6;
+				}
 				break;
 			case SSPP_RGB0:
 				mixercfg |= mix << 9;
@@ -343,20 +363,36 @@
 				mixercfg_ext |= ext << 14;
 				break;
 			case SSPP_DMA0:
-				mixercfg |= mix << 18;
-				mixercfg_ext |= ext << 16;
+				if (rect_index == SDE_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
+				} else {
+					mixercfg |= mix << 18;
+					mixercfg_ext |= ext << 16;
+				}
 				break;
 			case SSPP_DMA1:
-				mixercfg |= mix << 21;
-				mixercfg_ext |= ext << 18;
+				if (rect_index == SDE_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
+				} else {
+					mixercfg |= mix << 21;
+					mixercfg_ext |= ext << 18;
+				}
 				break;
 			case SSPP_DMA2:
-				mix = (i + 1) & 0xf;
-				mixercfg_ext2 |= mix << 0;
+				if (rect_index == SDE_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
+				} else {
+					mix |= (i + 1) & 0xF;
+					mixercfg_ext2 |= mix << 0;
+				}
 				break;
 			case SSPP_DMA3:
-				mix = (i + 1) & 0xf;
-				mixercfg_ext2 |= mix << 4;
+				if (rect_index == SDE_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
+				} else {
+					mix |= (i + 1) & 0xF;
+					mixercfg_ext2 |= mix << 4;
+				}
 				break;
 			case SSPP_CURSOR0:
 				mixercfg_ext |= ((i + 1) & 0xF) << 20;
@@ -373,6 +409,7 @@
 	SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
 	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
 	SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+	SDE_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
 }
 
 static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 2fb7b37..670a03d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
 #include "sde_hw_mdss.h"
 #include "sde_hw_util.h"
 #include "sde_hw_catalog.h"
+#include "sde_hw_sspp.h"
 
 /**
  * sde_ctl_mode_sel: Interface mode selection
@@ -30,10 +31,13 @@
 struct sde_hw_ctl;
 /**
  * struct sde_hw_stage_cfg - blending stage cfg
- * @stage
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
  */
 struct sde_hw_stage_cfg {
 	enum sde_sspp stage[CRTC_DUAL_MIXERS][SDE_STAGE_MAX][PIPES_PER_STAGE];
+	enum sde_sspp_multirect_index multirect_index[CRTC_DUAL_MIXERS]
+					[SDE_STAGE_MAX][PIPES_PER_STAGE];
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index 0f853db..51ab26e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -86,6 +86,12 @@
 				if (!ret)
 					c->ops.setup_gc =
 						reg_dmav1_setup_dspp_gcv18;
+				/** programming for v18 through ahb is same
+				 * as v17 hence assign v17 function
+				 */
+				else
+					c->ops.setup_gc =
+						sde_setup_dspp_gc_v1_7;
 			}
 			break;
 		default:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index 4993036..e68e3c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -817,21 +817,11 @@
 
 static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
 {
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
-		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
-
 	return 0;
 }
 
 static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
 {
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
-		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 6552326..a471dad 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -109,7 +109,7 @@
 	}
 }
 
-static void sde_hw_lm_setup_blend_config_msmskunk(struct sde_hw_mixer *ctx,
+static void sde_hw_lm_setup_blend_config_sdm845(struct sde_hw_mixer *ctx,
 	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
@@ -227,8 +227,8 @@
 		unsigned long features)
 {
 	ops->setup_mixer_out = sde_hw_lm_setup_out;
-	if (IS_MSMSKUNK_TARGET(m->hwversion))
-		ops->setup_blend_config = sde_hw_lm_setup_blend_config_msmskunk;
+	if (IS_SDM845_TARGET(m->hwversion))
+		ops->setup_blend_config = sde_hw_lm_setup_blend_config_sdm845;
 	else
 		ops->setup_blend_config = sde_hw_lm_setup_blend_config;
 	ops->setup_alpha_out = sde_hw_lm_setup_color3;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 88dc98f..1b98683 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -32,8 +32,21 @@
 #define SSPP_SRC_FORMAT                    0x30
 #define SSPP_SRC_UNPACK_PATTERN            0x34
 #define SSPP_SRC_OP_MODE                   0x38
-#define MDSS_MDP_OP_DEINTERLACE            BIT(22)
 
+/* SSPP_MULTIRECT*/
+#define SSPP_SRC_SIZE_REC1                 0x16C
+#define SSPP_SRC_XY_REC1                   0x168
+#define SSPP_OUT_SIZE_REC1                 0x160
+#define SSPP_OUT_XY_REC1                   0x164
+#define SSPP_SRC_FORMAT_REC1               0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1       0x178
+#define SSPP_SRC_OP_MODE_REC1              0x17C
+#define SSPP_MULTIRECT_OPMODE              0x170
+#define SSPP_SRC_CONSTANT_COLOR_REC1       0x180
+#define SSPP_EXCL_REC_SIZE_REC1            0x184
+#define SSPP_EXCL_REC_XY_REC1              0x188
+
+#define MDSS_MDP_OP_DEINTERLACE            BIT(22)
 #define MDSS_MDP_OP_DEINTERLACE_ODD        BIT(23)
 #define MDSS_MDP_OP_IGC_ROM_1              BIT(18)
 #define MDSS_MDP_OP_IGC_ROM_0              BIT(17)
@@ -205,6 +218,32 @@
 	return rc;
 }
 
+static void sde_hw_sspp_setup_multirect(struct sde_hw_pipe *ctx,
+		enum sde_sspp_multirect_index index,
+		enum sde_sspp_multirect_mode mode)
+{
+	u32 mode_mask;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	if (index == SDE_SSPP_RECT_SOLO) {
+		/*
+		 * If the rect index is RECT_SOLO, no virtual plane
+		 * can be sharing the same SSPP id, so disable
+		 * multirect entirely.
+		 */
+		mode_mask = 0;
+	} else {
+		mode_mask = SDE_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
+		mode_mask |= index;
+		mode_mask |= (mode == SDE_SSPP_MULTIRECT_TIME_MX) ? 0x4 : 0x0;
+	}
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
+}
+
 static void _sspp_setup_opmode(struct sde_hw_pipe *ctx,
 		u32 mask, u8 en)
 {
@@ -248,24 +287,44 @@
  * Setup source pixel format, flip,
  */
 static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
-		const struct sde_format *fmt, u32 flags)
+		const struct sde_format *fmt, u32 flags,
+		enum sde_sspp_multirect_index rect_mode)
 {
 	struct sde_hw_blk_reg_map *c;
 	u32 chroma_samp, unpack, src_format;
 	u32 secure = 0;
 	u32 opmode = 0;
+	u32 op_mode_off, unpack_pat_off, format_off;
 	u32 idx;
 
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !fmt)
 		return;
 
+	if (rect_mode == SDE_SSPP_RECT_SOLO || rect_mode == SDE_SSPP_RECT_0) {
+		op_mode_off = SSPP_SRC_OP_MODE;
+		unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+		format_off = SSPP_SRC_FORMAT;
+	} else {
+		op_mode_off = SSPP_SRC_OP_MODE_REC1;
+		unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+		format_off = SSPP_SRC_FORMAT_REC1;
+	}
+
 	c = &ctx->hw;
-	opmode = SDE_REG_READ(c, SSPP_SRC_OP_MODE + idx);
+	opmode = SDE_REG_READ(c, op_mode_off + idx);
 	opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
 			MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
 
-	if (flags & SDE_SSPP_SECURE_OVERLAY_SESSION)
-		secure = 0xF;
+	if (flags & SDE_SSPP_SECURE_OVERLAY_SESSION) {
+		secure = SDE_REG_READ(c, SSPP_SRC_ADDR_SW_STATUS + idx);
+
+		if (rect_mode == SDE_SSPP_RECT_SOLO)
+			secure |= 0xF;
+		else if (rect_mode == SDE_SSPP_RECT_0)
+			secure |= 0x5;
+		else if (rect_mode == SDE_SSPP_RECT_1)
+			secure |= 0xA;
+	}
 
 	if (flags & SDE_SSPP_FLIP_LR)
 		opmode |= MDSS_MDP_OP_FLIP_LR;
@@ -327,9 +386,9 @@
 			VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
 			SDE_FORMAT_IS_YUV(fmt));
 
-	SDE_REG_WRITE(c, SSPP_SRC_FORMAT + idx, src_format);
-	SDE_REG_WRITE(c, SSPP_SRC_UNPACK_PATTERN + idx, unpack);
-	SDE_REG_WRITE(c, SSPP_SRC_OP_MODE + idx, opmode);
+	SDE_REG_WRITE(c, format_off + idx, src_format);
+	SDE_REG_WRITE(c, unpack_pat_off + idx, unpack);
+	SDE_REG_WRITE(c, op_mode_off + idx, opmode);
 	SDE_REG_WRITE(c, SSPP_SRC_ADDR_SW_STATUS + idx, secure);
 
 	/* clear previous UBWC error */
@@ -692,10 +751,12 @@
 static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx,
 		struct sde_hw_pipe_cfg *cfg,
 		struct sde_hw_pixel_ext *pe_ext,
+		enum sde_sspp_multirect_index rect_index,
 		void *scale_cfg)
 {
 	struct sde_hw_blk_reg_map *c;
 	u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+	u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
 	u32 decimation = 0;
 	u32 idx;
 
@@ -704,6 +765,18 @@
 
 	c = &ctx->hw;
 
+	if (rect_index == SDE_SSPP_RECT_SOLO || rect_index == SDE_SSPP_RECT_0) {
+		src_size_off = SSPP_SRC_SIZE;
+		src_xy_off = SSPP_SRC_XY;
+		out_size_off = SSPP_OUT_SIZE;
+		out_xy_off = SSPP_OUT_XY;
+	} else {
+		src_size_off = SSPP_SRC_SIZE_REC1;
+		src_xy_off = SSPP_SRC_XY_REC1;
+		out_size_off = SSPP_OUT_SIZE_REC1;
+		out_xy_off = SSPP_OUT_XY_REC1;
+	}
+
 	/* program pixel extension override */
 	if (pe_ext)
 		sde_hw_sspp_setup_pe_config(ctx, pe_ext);
@@ -714,10 +787,23 @@
 	dst_xy = (cfg->dst_rect.y << 16) | (cfg->dst_rect.x);
 	dst_size = (cfg->dst_rect.h << 16) | (cfg->dst_rect.w);
 
-	ystride0 = (cfg->layout.plane_pitch[0]) |
+	if (rect_index == SDE_SSPP_RECT_SOLO) {
+		ystride0 = (cfg->layout.plane_pitch[0]) |
 			(cfg->layout.plane_pitch[1] << 16);
-	ystride1 = (cfg->layout.plane_pitch[2]) |
+		ystride1 = (cfg->layout.plane_pitch[2]) |
 			(cfg->layout.plane_pitch[3] << 16);
+	} else {
+		ystride0 = SDE_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
+		ystride1 = SDE_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
+
+		if (rect_index == SDE_SSPP_RECT_0) {
+			ystride0 |= cfg->layout.plane_pitch[0];
+			ystride1 |= cfg->layout.plane_pitch[2];
+		} else {
+			ystride0 |= cfg->layout.plane_pitch[0] << 16;
+			ystride1 |= cfg->layout.plane_pitch[2] << 16;
+		}
+	}
 
 	/* program scaler, phase registers, if pipes supporting scaling */
 	if (ctx->cap->features & SDE_SSPP_SCALER) {
@@ -728,10 +814,10 @@
 	}
 
 	/* rectangle register programming */
-	SDE_REG_WRITE(c, SSPP_SRC_SIZE + idx, src_size);
-	SDE_REG_WRITE(c, SSPP_SRC_XY + idx, src_xy);
-	SDE_REG_WRITE(c, SSPP_OUT_SIZE + idx, dst_size);
-	SDE_REG_WRITE(c, SSPP_OUT_XY + idx, dst_xy);
+	SDE_REG_WRITE(c, src_size_off + idx, src_size);
+	SDE_REG_WRITE(c, src_xy_off + idx, src_xy);
+	SDE_REG_WRITE(c, out_size_off + idx, dst_size);
+	SDE_REG_WRITE(c, out_xy_off + idx, dst_xy);
 
 	SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
 	SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
@@ -744,31 +830,48 @@
  * @excl_rect: Exclusion rect configs
  */
 static void _sde_hw_sspp_setup_excl_rect(struct sde_hw_pipe *ctx,
-		struct sde_rect *excl_rect)
+		struct sde_rect *excl_rect,
+		enum sde_sspp_multirect_index rect_index)
 {
 	struct sde_hw_blk_reg_map *c;
 	u32 size, xy;
 	u32 idx;
+	u32 reg_xy, reg_size;
+	u32 excl_ctrl, enable_bit;
 
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !excl_rect)
 		return;
 
+	if (rect_index == SDE_SSPP_RECT_0 || rect_index == SDE_SSPP_RECT_SOLO) {
+		reg_xy = SSPP_EXCL_REC_XY;
+		reg_size = SSPP_EXCL_REC_SIZE;
+		enable_bit = BIT(0);
+	} else {
+		reg_xy = SSPP_EXCL_REC_XY_REC1;
+		reg_size = SSPP_EXCL_REC_SIZE_REC1;
+		enable_bit = BIT(1);
+	}
+
 	c = &ctx->hw;
 
 	xy = (excl_rect->y << 16) | (excl_rect->x);
 	size = (excl_rect->h << 16) | (excl_rect->w);
 
+	excl_ctrl = SDE_REG_READ(c, SSPP_EXCL_REC_CTL + idx);
 	if (!size) {
-		SDE_REG_WRITE(c, SSPP_EXCL_REC_CTL + idx, 0);
+		SDE_REG_WRITE(c, SSPP_EXCL_REC_CTL + idx,
+				excl_ctrl & ~enable_bit);
 	} else {
-		SDE_REG_WRITE(c, SSPP_EXCL_REC_CTL + idx, BIT(0));
-		SDE_REG_WRITE(c, SSPP_EXCL_REC_SIZE + idx, size);
-		SDE_REG_WRITE(c, SSPP_EXCL_REC_XY + idx, xy);
+		SDE_REG_WRITE(c, SSPP_EXCL_REC_CTL + idx,
+				excl_ctrl | enable_bit);
+		SDE_REG_WRITE(c, reg_size + idx, size);
+		SDE_REG_WRITE(c, reg_xy + idx, xy);
 	}
 }
 
 static void sde_hw_sspp_setup_sourceaddress(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_cfg *cfg)
+		struct sde_hw_pipe_cfg *cfg,
+		enum sde_sspp_multirect_index rect_mode)
 {
 	int i;
 	u32 idx;
@@ -776,9 +879,21 @@
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
 		return;
 
-	for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
-		SDE_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
-			cfg->layout.plane_addr[i]);
+	if (rect_mode == SDE_SSPP_RECT_SOLO) {
+		for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+			SDE_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+					cfg->layout.plane_addr[i]);
+	} else if (rect_mode == SDE_SSPP_RECT_0) {
+		SDE_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
+				cfg->layout.plane_addr[0]);
+		SDE_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
+				cfg->layout.plane_addr[2]);
+	} else {
+		SDE_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
+				cfg->layout.plane_addr[0]);
+		SDE_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
+				cfg->layout.plane_addr[2]);
+	}
 }
 
 static void sde_hw_sspp_setup_csc(struct sde_hw_pipe *ctx,
@@ -813,14 +928,19 @@
 	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0xC, cfg->noise_thr);
 }
 
-static void sde_hw_sspp_setup_solidfill(struct sde_hw_pipe *ctx, u32 color)
+static void sde_hw_sspp_setup_solidfill(struct sde_hw_pipe *ctx, u32 color,
+		enum sde_sspp_multirect_index rect_index)
 {
 	u32 idx;
 
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
 		return;
 
-	SDE_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+	if (rect_index == SDE_SSPP_RECT_SOLO || rect_index == SDE_SSPP_RECT_0)
+		SDE_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+	else
+		SDE_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
+				color);
 }
 
 static void sde_hw_sspp_setup_danger_safe_lut(struct sde_hw_pipe *ctx,
@@ -898,6 +1018,9 @@
 	if (test_bit(SDE_SSPP_SCALER_QSEED2, &features))
 		c->ops.setup_sharpening = sde_hw_sspp_setup_sharpening;
 
+	if (sde_hw_sspp_multirect_enabled(c->cap))
+		c->ops.setup_multirect = sde_hw_sspp_setup_multirect;
+
 	if (test_bit(SDE_SSPP_SCALER_QSEED3, &features))
 		c->ops.setup_scaler = _sde_hw_sspp_setup_scaler3;
 	else
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index d7101fd..d81f673 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -50,6 +50,28 @@
 	SDE_SSPP_COMP_MAX
 };
 
+/**
+ * SDE_SSPP_RECT_SOLO - multirect disabled
+ * SDE_SSPP_RECT_0 - rect0 of a multirect pipe
+ * SDE_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1. Considering no benefit of such configs over
+ * SOLO mode and to keep the plane management simple,
+ * we don't support single rect multirect configs.
+ */
+enum sde_sspp_multirect_index {
+	SDE_SSPP_RECT_SOLO = 0,
+	SDE_SSPP_RECT_0,
+	SDE_SSPP_RECT_1,
+};
+
+enum sde_sspp_multirect_mode {
+	SDE_SSPP_MULTIRECT_NONE = 0,
+	SDE_SSPP_MULTIRECT_PARALLEL,
+	SDE_SSPP_MULTIRECT_TIME_MX,
+};
+
 enum {
 	SDE_FRAME_LINEAR,
 	SDE_FRAME_TILE_A4X,
@@ -252,6 +274,8 @@
  *              4: Read 1 line/pixel drop 3  lines/pixels
  *              8: Read 1 line/pixel drop 7 lines/pixels
  *              16: Read 1 line/pixel drop 15 line/pixels
+ * @index:     index of the rectangle of SSPP
+ * @mode:      parallel or time multiplex multirect mode
  */
 struct sde_hw_pipe_cfg {
 	struct sde_hw_fmt_layout layout;
@@ -259,6 +283,8 @@
 	struct sde_rect dst_rect;
 	u8 horz_decimation;
 	u8 vert_decimation;
+	enum sde_sspp_multirect_index index;
+	enum sde_sspp_multirect_mode mode;
 };
 
 /**
@@ -292,37 +318,45 @@
 	 * @ctx: Pointer to pipe context
 	 * @cfg: Pointer to pipe config structure
 	 * @flags: Extra flags for format config
+	 * @index: rectangle index in multirect
 	 */
 	void (*setup_format)(struct sde_hw_pipe *ctx,
-			const struct sde_format *fmt, u32 flags);
+			const struct sde_format *fmt, u32 flags,
+			enum sde_sspp_multirect_index index);
 
 	/**
 	 * setup_rects - setup pipe ROI rectangles
 	 * @ctx: Pointer to pipe context
 	 * @cfg: Pointer to pipe config structure
 	 * @pe_ext: Pointer to pixel ext settings
+	 * @index: rectangle index in multirect
 	 * @scale_cfg: Pointer to scaler settings
 	 */
 	void (*setup_rects)(struct sde_hw_pipe *ctx,
 			struct sde_hw_pipe_cfg *cfg,
 			struct sde_hw_pixel_ext *pe_ext,
+			enum sde_sspp_multirect_index index,
 			void *scale_cfg);
 
 	/**
 	 * setup_excl_rect - setup pipe exclusion rectangle
 	 * @ctx: Pointer to pipe context
 	 * @excl_rect: Pointer to exclclusion rect structure
+	 * @index: rectangle index in multirect
 	 */
 	void (*setup_excl_rect)(struct sde_hw_pipe *ctx,
-			struct sde_rect *excl_rect);
+			struct sde_rect *excl_rect,
+			enum sde_sspp_multirect_index index);
 
 	/**
 	 * setup_sourceaddress - setup pipe source addresses
 	 * @ctx: Pointer to pipe context
 	 * @cfg: Pointer to pipe config structure
+	 * @index: rectangle index in multirect
 	 */
 	void (*setup_sourceaddress)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pipe_cfg *cfg);
+			struct sde_hw_pipe_cfg *cfg,
+			enum sde_sspp_multirect_index index);
 
 	/**
 	 * setup_csc - setup color space coversion
@@ -336,8 +370,21 @@
 	 * @ctx: Pointer to pipe context
 	 * @const_color: Fill color value
 	 * @flags: Pipe flags
+	 * @index: rectangle index in multirect
 	 */
-	void (*setup_solidfill)(struct sde_hw_pipe *ctx, u32 color);
+	void (*setup_solidfill)(struct sde_hw_pipe *ctx, u32 color,
+			enum sde_sspp_multirect_index index);
+
+	/**
+	 * setup_multirect - setup multirect configuration
+	 * @ctx: Pointer to pipe context
+	 * @index: rectangle index in multirect
+	 * @mode: parallel fetch / time multiplex multirect mode
+	 */
+
+	void (*setup_multirect)(struct sde_hw_pipe *ctx,
+			enum sde_sspp_multirect_index index,
+			enum sde_sspp_multirect_mode mode);
 
 	/**
 	 * setup_sharpening - setup sharpening
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index cb3daba..5aadae0 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -716,8 +716,12 @@
 	struct msm_drm_private *priv;
 	struct sde_mdss_cfg *catalog;
 
-	int primary_planes_idx, i, ret;
-	int max_crtc_count, max_plane_count;
+	int primary_planes_idx = 0, i, ret;
+	int max_crtc_count;
+
+	u32 sspp_id[MAX_PLANES];
+	u32 master_plane_id[MAX_PLANES];
+	u32 num_virt_planes = 0;
 
 	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
 		SDE_ERROR("invalid sde_kms\n");
@@ -736,11 +740,9 @@
 		(void)_sde_kms_setup_displays(dev, priv, sde_kms);
 
 	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
-	max_plane_count = min_t(u32, catalog->sspp_count, MAX_PLANES);
 
 	/* Create the planes */
-	primary_planes_idx = 0;
-	for (i = 0; i < max_plane_count; i++) {
+	for (i = 0; i < catalog->sspp_count; i++) {
 		bool primary = true;
 
 		if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
@@ -748,7 +750,7 @@
 			primary = false;
 
 		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
-				(1UL << max_crtc_count) - 1);
+				(1UL << max_crtc_count) - 1, 0);
 		if (IS_ERR(plane)) {
 			SDE_ERROR("sde_plane_init failed\n");
 			ret = PTR_ERR(plane);
@@ -758,6 +760,27 @@
 
 		if (primary)
 			primary_planes[primary_planes_idx++] = plane;
+
+		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
+			sde_is_custom_client()) {
+			int priority =
+				catalog->sspp[i].sblk->smart_dma_priority;
+			sspp_id[priority - 1] = catalog->sspp[i].id;
+			master_plane_id[priority - 1] = plane->base.id;
+			num_virt_planes++;
+		}
+	}
+
+	/* Initialize smart DMA virtual planes */
+	for (i = 0; i < num_virt_planes; i++) {
+		plane = sde_plane_init(dev, sspp_id[i], false,
+			(1UL << max_crtc_count) - 1, master_plane_id[i]);
+		if (IS_ERR(plane)) {
+			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
+			ret = PTR_ERR(plane);
+			goto fail;
+		}
+		priv->planes[priv->num_planes++] = plane;
 	}
 
 	max_crtc_count = min(max_crtc_count, primary_planes_idx);
@@ -928,7 +951,7 @@
 /* the caller api needs to turn on clock before calling it */
 static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
 {
-	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
+	return;
 }
 
 static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 746c19d..8ff08dc 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -23,11 +23,13 @@
 #include <uapi/drm/msm_drm_pp.h>
 
 #include "msm_prop.h"
+#include "msm_drv.h"
 
 #include "sde_kms.h"
 #include "sde_fence.h"
 #include "sde_formats.h"
 #include "sde_hw_sspp.h"
+#include "sde_hw_catalog_format.h"
 #include "sde_trace.h"
 #include "sde_crtc.h"
 #include "sde_vbif.h"
@@ -54,6 +56,15 @@
 
 #define SDE_PLANE_COLOR_FILL_FLAG	BIT(31)
 
+/* multirect rect index */
+enum {
+	R0,
+	R1,
+	R_MAX
+};
+
+#define TX_MODE_BUFFER_LINE_THRES 2
+
 /* dirty bits for update function */
 #define SDE_PLANE_DIRTY_RECTS	0x1
 #define SDE_PLANE_DIRTY_FORMAT	0x2
@@ -103,6 +114,7 @@
 	uint32_t color_fill;
 	bool is_error;
 	bool is_rt_pipe;
+	bool is_virtual;
 
 	struct sde_hw_pixel_ext pixel_ext;
 	bool pixel_ext_usr;
@@ -586,7 +598,8 @@
 	else if (ret)
 		SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
 	else if (psde->pipe_hw->ops.setup_sourceaddress)
-		psde->pipe_hw->ops.setup_sourceaddress(psde->pipe_hw, pipe_cfg);
+		psde->pipe_hw->ops.setup_sourceaddress(psde->pipe_hw, pipe_cfg,
+						pstate->multirect_index);
 }
 
 static int _sde_plane_setup_scaler3_lut(struct sde_plane *psde,
@@ -1055,6 +1068,8 @@
 		uint32_t color, uint32_t alpha)
 {
 	const struct sde_format *fmt;
+	const struct drm_plane *plane;
+	const struct sde_plane_state *pstate;
 
 	if (!psde) {
 		SDE_ERROR("invalid plane\n");
@@ -1066,6 +1081,9 @@
 		return -EINVAL;
 	}
 
+	plane = &psde->base;
+	pstate = to_sde_plane_state(plane->state);
+
 	SDE_DEBUG_PLANE(psde, "\n");
 
 	/*
@@ -1077,7 +1095,8 @@
 	/* update sspp */
 	if (fmt && psde->pipe_hw->ops.setup_solidfill) {
 		psde->pipe_hw->ops.setup_solidfill(psde->pipe_hw,
-				(color & 0xFFFFFF) | ((alpha & 0xFF) << 24));
+				(color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+				pstate->multirect_index);
 
 		/* override scaler/decimation if solid fill */
 		psde->pipe_cfg.src_rect.x = 0;
@@ -1089,11 +1108,13 @@
 
 		if (psde->pipe_hw->ops.setup_format)
 			psde->pipe_hw->ops.setup_format(psde->pipe_hw,
-					fmt, SDE_SSPP_SOLID_FILL);
+					fmt, SDE_SSPP_SOLID_FILL,
+					pstate->multirect_index);
 
 		if (psde->pipe_hw->ops.setup_rects)
 			psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
 					&psde->pipe_cfg, &psde->pixel_ext,
+					pstate->multirect_index,
 					psde->scaler3_cfg);
 	}
 
@@ -1221,13 +1242,21 @@
 
 			psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
 					&psde->pipe_cfg, &psde->pixel_ext,
+					pstate->multirect_index,
 					psde->scaler3_cfg);
 		}
 
 		/* update excl rect */
 		if (psde->pipe_hw->ops.setup_excl_rect)
 			psde->pipe_hw->ops.setup_excl_rect(psde->pipe_hw,
-					&pstate->excl_rect);
+					&pstate->excl_rect,
+					pstate->multirect_index);
+
+		if (psde->pipe_hw->ops.setup_multirect)
+			psde->pipe_hw->ops.setup_multirect(
+					psde->pipe_hw,
+					pstate->multirect_index,
+					pstate->multirect_mode);
 	}
 
 	if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) &&
@@ -1243,7 +1272,8 @@
 			src_flags |= SDE_SSPP_FLIP_UD;
 
 		/* update format */
-		psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags);
+		psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags,
+				pstate->multirect_index);
 
 		/* update csc */
 		if (SDE_FORMAT_IS_YUV(fmt))
@@ -1277,6 +1307,98 @@
 	/* clear dirty */
 	pstate->dirty = 0x0;
 
+	/* clear multirect mode*/
+	pstate->multirect_index = SDE_SSPP_RECT_SOLO;
+	pstate->multirect_mode = SDE_SSPP_MULTIRECT_NONE;
+	return 0;
+}
+
+int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane)
+{
+	struct sde_plane_state *pstate[R_MAX];
+	const struct drm_plane_state *drm_state[R_MAX];
+	struct sde_rect src[R_MAX], dst[R_MAX];
+	struct sde_plane *sde_plane[R_MAX];
+	const struct sde_format *fmt[R_MAX];
+	bool q16_data = true;
+	int i, max_sspp_linewidth;
+	int buffer_lines = TX_MODE_BUFFER_LINE_THRES;
+
+	for (i = 0; i < R_MAX; i++) {
+		const struct msm_format *msm_fmt;
+
+		drm_state[i] = i ? plane->r1 : plane->r0;
+		pstate[i] = to_sde_plane_state(drm_state[i]);
+		sde_plane[i] = to_sde_plane(drm_state[i]->plane);
+
+		if (pstate[i] == NULL) {
+			SDE_ERROR("SDE plane state of plane id %d is NULL\n",
+				drm_state[i]->plane->base.id);
+			return -EINVAL;
+		}
+
+		POPULATE_RECT(&src[i], drm_state[i]->src_x, drm_state[i]->src_y,
+			drm_state[i]->src_w, drm_state[i]->src_h, q16_data);
+		POPULATE_RECT(&dst[i], drm_state[i]->crtc_x,
+				drm_state[i]->crtc_y, drm_state[i]->crtc_w,
+				drm_state[i]->crtc_h, !q16_data);
+
+		if (src[i].w != dst[i].w || src[i].h != dst[i].h) {
+			SDE_ERROR_PLANE(sde_plane[i],
+				"scaling is not supported in multirect mode\n");
+			return -EINVAL;
+		}
+
+		msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+		fmt[i] = to_sde_format(msm_fmt);
+		if (SDE_FORMAT_IS_YUV(fmt[i])) {
+			SDE_ERROR_PLANE(sde_plane[i],
+				"Unsupported format for multirect mode\n");
+			return -EINVAL;
+		}
+	}
+
+	max_sspp_linewidth = sde_plane[R0]->pipe_sblk->maxlinewidth;
+
+	/* Validate RECT's and set the mode */
+
+	/* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+	if (src[R0].w <= max_sspp_linewidth/2 &&
+			src[R1].w <= max_sspp_linewidth/2) {
+		if (dst[R0].x <= dst[R1].x) {
+			pstate[R0]->multirect_index = SDE_SSPP_RECT_0;
+			pstate[R1]->multirect_index = SDE_SSPP_RECT_1;
+		} else {
+			pstate[R0]->multirect_index = SDE_SSPP_RECT_1;
+			pstate[R1]->multirect_index = SDE_SSPP_RECT_0;
+		}
+
+		pstate[R0]->multirect_mode = SDE_SSPP_MULTIRECT_PARALLEL;
+		pstate[R1]->multirect_mode = SDE_SSPP_MULTIRECT_PARALLEL;
+		goto done;
+	}
+
+	/* TIME_MX Mode */
+	if (SDE_FORMAT_IS_UBWC(fmt[R0]))
+		buffer_lines = 2 * fmt[R0]->tile_height;
+
+	if (dst[R1].y >= dst[R0].y + dst[R0].h + buffer_lines) {
+		pstate[R0]->multirect_index = SDE_SSPP_RECT_0;
+		pstate[R1]->multirect_index = SDE_SSPP_RECT_1;
+	} else if (dst[R0].y >= dst[R1].y + dst[R1].h + buffer_lines) {
+		pstate[R0]->multirect_index = SDE_SSPP_RECT_1;
+		pstate[R1]->multirect_index = SDE_SSPP_RECT_0;
+	} else {
+		SDE_ERROR(
+			"No multirect mode possible for the planes (%d - %d)\n",
+			drm_state[R0]->plane->base.id,
+			drm_state[R1]->plane->base.id);
+		return -EINVAL;
+	}
+
+	pstate[R0]->multirect_mode = SDE_SSPP_MULTIRECT_TIME_MX;
+	pstate[R1]->multirect_mode = SDE_SSPP_MULTIRECT_TIME_MX;
+done:
 	return 0;
 }
 
@@ -1343,6 +1465,10 @@
 		   pstate->excl_rect.y != old_pstate->excl_rect.y) {
 		SDE_DEBUG_PLANE(psde, "excl rect updated\n");
 		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+	} else if (pstate->multirect_index != old_pstate->multirect_index ||
+			pstate->multirect_mode != old_pstate->multirect_mode) {
+		SDE_DEBUG_PLANE(psde, "multirect config updated\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
 	}
 
 	if (!state->fb || !old_state->fb) {
@@ -1614,7 +1740,7 @@
 
 /* helper to install properties which are common to planes and crtcs */
 static void _sde_plane_install_properties(struct drm_plane *plane,
-	struct sde_mdss_cfg *catalog)
+	struct sde_mdss_cfg *catalog, u32 master_plane_id)
 {
 	static const struct drm_prop_enum_list e_blend_op[] = {
 		{SDE_DRM_BLEND_OP_NOT_DEFINED,    "not_defined"},
@@ -1666,62 +1792,71 @@
 	msm_property_install_range(&psde->property_info, "input_fence",
 		0x0, 0, INR_OPEN_MAX, 0, PLANE_PROP_INPUT_FENCE);
 
-	if (psde->pipe_sblk->maxhdeciexp) {
-		msm_property_install_range(&psde->property_info, "h_decimate",
-			0x0, 0, psde->pipe_sblk->maxhdeciexp, 0,
-			PLANE_PROP_H_DECIMATE);
-	}
+	if (!master_plane_id) {
+		if (psde->pipe_sblk->maxhdeciexp) {
+			msm_property_install_range(&psde->property_info,
+					"h_decimate", 0x0, 0,
+					psde->pipe_sblk->maxhdeciexp, 0,
+					PLANE_PROP_H_DECIMATE);
+		}
 
-	if (psde->pipe_sblk->maxvdeciexp) {
-		msm_property_install_range(&psde->property_info, "v_decimate",
-				0x0, 0, psde->pipe_sblk->maxvdeciexp, 0,
-				PLANE_PROP_V_DECIMATE);
-	}
+		if (psde->pipe_sblk->maxvdeciexp) {
+			msm_property_install_range(&psde->property_info,
+					"v_decimate", 0x0, 0,
+					psde->pipe_sblk->maxvdeciexp, 0,
+					PLANE_PROP_V_DECIMATE);
+		}
 
-	if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
-		msm_property_install_volatile_range(&psde->property_info,
-			"scaler_v2", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V2);
-		msm_property_install_blob(&psde->property_info, "lut_ed", 0,
-			PLANE_PROP_SCALER_LUT_ED);
-		msm_property_install_blob(&psde->property_info, "lut_cir", 0,
-			PLANE_PROP_SCALER_LUT_CIR);
-		msm_property_install_blob(&psde->property_info, "lut_sep", 0,
-			PLANE_PROP_SCALER_LUT_SEP);
-	} else if (psde->features & SDE_SSPP_SCALER) {
-		msm_property_install_volatile_range(&psde->property_info,
-			"scaler_v1", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V1);
-	}
+		if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+			msm_property_install_volatile_range(
+					&psde->property_info, "scaler_v2",
+					0x0, 0, ~0, 0, PLANE_PROP_SCALER_V2);
+			msm_property_install_blob(&psde->property_info,
+					"lut_ed", 0, PLANE_PROP_SCALER_LUT_ED);
+			msm_property_install_blob(&psde->property_info,
+					"lut_cir", 0,
+					PLANE_PROP_SCALER_LUT_CIR);
+			msm_property_install_blob(&psde->property_info,
+					"lut_sep", 0,
+					PLANE_PROP_SCALER_LUT_SEP);
+		} else if (psde->features & SDE_SSPP_SCALER) {
+			msm_property_install_volatile_range(
+					&psde->property_info, "scaler_v1", 0x0,
+					0, ~0, 0, PLANE_PROP_SCALER_V1);
+		}
 
-	if (psde->features & BIT(SDE_SSPP_CSC)) {
-		msm_property_install_volatile_range(&psde->property_info,
-			"csc_v1", 0x0, 0, ~0, 0, PLANE_PROP_CSC_V1);
-	}
+		if (psde->features & BIT(SDE_SSPP_CSC) ||
+		    psde->features & BIT(SDE_SSPP_CSC_10BIT))
+			msm_property_install_volatile_range(
+					&psde->property_info, "csc_v1", 0x0,
+					0, ~0, 0, PLANE_PROP_CSC_V1);
 
-	if (psde->features & BIT(SDE_SSPP_HSIC)) {
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_SSPP_HUE_V",
-			psde->pipe_sblk->hsic_blk.version >> 16);
-		msm_property_install_range(&psde->property_info,
-			feature_name, 0, 0, 0xFFFFFFFF, 0,
-			PLANE_PROP_HUE_ADJUST);
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_SSPP_SATURATION_V",
-			psde->pipe_sblk->hsic_blk.version >> 16);
-		msm_property_install_range(&psde->property_info,
-			feature_name, 0, 0, 0xFFFFFFFF, 0,
-			PLANE_PROP_SATURATION_ADJUST);
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_SSPP_VALUE_V",
-			psde->pipe_sblk->hsic_blk.version >> 16);
-		msm_property_install_range(&psde->property_info,
-			feature_name, 0, 0, 0xFFFFFFFF, 0,
-			PLANE_PROP_VALUE_ADJUST);
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_SSPP_CONTRAST_V",
-			psde->pipe_sblk->hsic_blk.version >> 16);
-		msm_property_install_range(&psde->property_info,
-			feature_name, 0, 0, 0xFFFFFFFF, 0,
-			PLANE_PROP_CONTRAST_ADJUST);
+		if (psde->features & BIT(SDE_SSPP_HSIC)) {
+			snprintf(feature_name, sizeof(feature_name), "%s%d",
+				"SDE_SSPP_HUE_V",
+				psde->pipe_sblk->hsic_blk.version >> 16);
+			msm_property_install_range(&psde->property_info,
+				feature_name, 0, 0, 0xFFFFFFFF, 0,
+				PLANE_PROP_HUE_ADJUST);
+			snprintf(feature_name, sizeof(feature_name), "%s%d",
+				"SDE_SSPP_SATURATION_V",
+				psde->pipe_sblk->hsic_blk.version >> 16);
+			msm_property_install_range(&psde->property_info,
+				feature_name, 0, 0, 0xFFFFFFFF, 0,
+				PLANE_PROP_SATURATION_ADJUST);
+			snprintf(feature_name, sizeof(feature_name), "%s%d",
+				"SDE_SSPP_VALUE_V",
+				psde->pipe_sblk->hsic_blk.version >> 16);
+			msm_property_install_range(&psde->property_info,
+				feature_name, 0, 0, 0xFFFFFFFF, 0,
+				PLANE_PROP_VALUE_ADJUST);
+			snprintf(feature_name, sizeof(feature_name), "%s%d",
+				"SDE_SSPP_CONTRAST_V",
+				psde->pipe_sblk->hsic_blk.version >> 16);
+			msm_property_install_range(&psde->property_info,
+				feature_name, 0, 0, 0xFFFFFFFF, 0,
+				PLANE_PROP_CONTRAST_ADJUST);
+		}
 	}
 
 	if (psde->features & BIT(SDE_SSPP_EXCL_RECT))
@@ -1754,6 +1889,13 @@
 	sde_kms_info_reset(info);
 
 	format_list = psde->pipe_sblk->format_list;
+
+	if (master_plane_id) {
+		sde_kms_info_add_keyint(info, "primary_smart_plane_id",
+				master_plane_id);
+		format_list = plane_formats;
+	}
+
 	if (format_list) {
 		sde_kms_info_start(info, "pixel_formats");
 		while (format_list->fourcc_format) {
@@ -2248,6 +2390,11 @@
 	return plane ? to_sde_plane(plane)->pipe : SSPP_NONE;
 }
 
+bool is_sde_plane_virtual(struct drm_plane *plane)
+{
+	return plane ? to_sde_plane(plane)->is_virtual : false;
+}
+
 static ssize_t _sde_plane_danger_read(struct file *file,
 			char __user *buff, size_t count, loff_t *ppos)
 {
@@ -2411,9 +2558,10 @@
 /* initialize plane */
 struct drm_plane *sde_plane_init(struct drm_device *dev,
 		uint32_t pipe, bool primary_plane,
-		unsigned long possible_crtcs)
+		unsigned long possible_crtcs, u32 master_plane_id)
 {
 	struct drm_plane *plane = NULL;
+	const struct sde_format_extended *format_list;
 	struct sde_plane *psde;
 	struct msm_drm_private *priv;
 	struct sde_kms *kms;
@@ -2454,6 +2602,7 @@
 	plane = &psde->base;
 	psde->pipe = pipe;
 	psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
+	psde->is_virtual = (master_plane_id != 0);
 
 	/* initialize underlying h/w driver */
 	psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog);
@@ -2485,11 +2634,15 @@
 		}
 	}
 
-	/* add plane to DRM framework */
-	psde->nformats = sde_populate_formats(psde->pipe_sblk->format_list,
-			psde->formats,
-			0,
-			ARRAY_SIZE(psde->formats));
+	format_list = psde->pipe_sblk->format_list;
+
+	if (master_plane_id)
+		format_list = plane_formats;
+
+	psde->nformats = sde_populate_formats(format_list,
+				psde->formats,
+				0,
+				ARRAY_SIZE(psde->formats));
 
 	if (!psde->nformats) {
 		SDE_ERROR("[%u]no valid formats for plane\n", pipe);
@@ -2516,7 +2669,7 @@
 			PLANE_PROP_COUNT, PLANE_PROP_BLOBCOUNT,
 			sizeof(struct sde_plane_state));
 
-	_sde_plane_install_properties(plane, kms->catalog);
+	_sde_plane_install_properties(plane, kms->catalog, master_plane_id);
 
 	/* save user friendly pipe name for later */
 	snprintf(psde->pipe_name, SDE_NAME_SIZE, "plane%u", plane->base.id);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index bbf6af6..9d7056e 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -33,6 +33,8 @@
  * @stage:	assigned by crtc blender
  * @excl_rect:	exclusion rect values
  * @dirty:	bitmask for which pipe h/w config functions need to be updated
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
  * @pending:	whether the current update is still pending
  */
 struct sde_plane_state {
@@ -43,9 +45,21 @@
 	enum sde_stage stage;
 	struct sde_rect excl_rect;
 	uint32_t dirty;
+	uint32_t multirect_index;
+	uint32_t multirect_mode;
 	bool pending;
 };
 
+/**
+ * struct sde_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane configured on rect 0
+ * @r1: drm plane configured on rect 1
+ */
+struct sde_multirect_plane_states {
+	const struct drm_plane_state *r0;
+	const struct drm_plane_state *r1;
+};
+
 #define to_sde_plane_state(x) \
 	container_of(x, struct sde_plane_state, base)
 
@@ -66,6 +80,14 @@
 enum sde_sspp sde_plane_pipe(struct drm_plane *plane);
 
 /**
+ * is_sde_plane_virtual - check for virtual plane
+ * @plane: Pointer to DRM plane object
+ * returns: true - if the plane is virtual
+ *          false - if the plane is primary
+ */
+bool is_sde_plane_virtual(struct drm_plane *plane);
+
+/**
  * sde_plane_flush - final plane operations before commit flush
  * @plane: Pointer to drm plane structure
  */
@@ -77,10 +99,22 @@
  * @pipe:  sde hardware pipe identifier
  * @primary_plane: true if this pipe is primary plane for crtc
  * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
+ *                   a regular plane initialization. A non-zero primary plane
+ *                   id will be passed for a virtual pipe initialization.
+ *
  */
 struct drm_plane *sde_plane_init(struct drm_device *dev,
 		uint32_t pipe, bool primary_plane,
-		unsigned long possible_crtcs);
+		unsigned long possible_crtcs, u32 master_plane_id);
+
+/**
+ * sde_plane_validate_multirect_v2 - validate the multirect planes
+ *				      against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+
+int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane);
 
 /**
  * sde_plane_wait_input_fence - wait for input fence object
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 45cef3d..c8174f9 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1243,6 +1243,26 @@
 	  These devices are hard to detect and rarely found on mainstream
 	  hardware.  If unsure, say N.
 
+config SENSORS_QPNP_ADC_VOLTAGE
+	tristate "Support for Qualcomm Technologies Inc. QPNP Voltage ADC"
+	depends on SPMI
+	help
+	  This is the VADC arbiter driver for Qualcomm Technologies Inc. QPNP ADC Chip.
+
+	  The driver supports reading the HKADC, XOADC through the ADC AMUX arbiter.
+	  The VADC includes support for the conversion sequencer. The driver supports
+	  reading the ADC through the AMUX channels for external pull-ups simultaneously.
+
+config SENSORS_QPNP_ADC_CURRENT
+	tristate "Support for Qualcomm Technologies Inc. QPNP current ADC"
+	depends on SPMI
+	help
+	  This is the IADC driver for Qualcomm Technologies Inc. QPNP ADC Chip.
+
+	  The driver supports single mode operation to read from upto seven channel
+	  configuration that include reading the external/internal Rsense, CSP_EX,
+	  CSN_EX pair along with the gain and offset calibration.
+
 source drivers/hwmon/pmbus/Kconfig
 
 config SENSORS_PWM_FAN
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index aecf4ba..7b9a113 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -166,6 +166,8 @@
 obj-$(CONFIG_SENSORS_WM831X)	+= wm831x-hwmon.o
 obj-$(CONFIG_SENSORS_WM8350)	+= wm8350-hwmon.o
 obj-$(CONFIG_SENSORS_XGENE)	+= xgene-hwmon.o
+obj-$(CONFIG_SENSORS_QPNP_ADC_VOLTAGE)	+= qpnp-adc-voltage.o qpnp-adc-common.o
+obj-$(CONFIG_SENSORS_QPNP_ADC_CURRENT)	+= qpnp-adc-current.o qpnp-adc-common.o
 
 obj-$(CONFIG_PMBUS)		+= pmbus/
 
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
new file mode 100644
index 0000000..33b760f
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -0,0 +1,2101 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/qpnp/qpnp-adc.h>
+
+#define KELVINMIL_DEGMIL	273160
+#define QPNP_VADC_LDO_VOLTAGE_MIN	1800000
+#define QPNP_VADC_LDO_VOLTAGE_MAX	1800000
+#define QPNP_VADC_OK_VOLTAGE_MIN	1000000
+#define QPNP_VADC_OK_VOLTAGE_MAX	1000000
+#define PMI_CHG_SCALE_1		-138890
+#define PMI_CHG_SCALE_2		391750000000
+#define QPNP_VADC_HC_VREF_CODE		0x4000
+#define QPNP_VADC_HC_VDD_REFERENCE_MV	1875
+/* Clamp negative ADC code to 0 */
+#define QPNP_VADC_HC_MAX_CODE		0x7FFF
+
+/*
+ * Units for temperature below (on x axis) is in 0.1DegC as
+ * required by the battery driver. Note the resolution used
+ * here to compute the table was done for DegC to milli-volts.
+ * In consideration to limit the size of the table for the given
+ * temperature range below, the result is linearly interpolated
+ * and provided to the battery driver in the units desired for
+ * their framework which is 0.1DegC. True resolution of 0.1DegC
+ * will result in the below table size to increase by 10 times.
+ */
+static const struct qpnp_vadc_map_pt adcmap_btm_threshold[] = {
+	{-300,	1642},
+	{-200,	1544},
+	{-100,	1414},
+	{0,	1260},
+	{10,	1244},
+	{20,	1228},
+	{30,	1212},
+	{40,	1195},
+	{50,	1179},
+	{60,	1162},
+	{70,	1146},
+	{80,	1129},
+	{90,	1113},
+	{100,	1097},
+	{110,	1080},
+	{120,	1064},
+	{130,	1048},
+	{140,	1032},
+	{150,	1016},
+	{160,	1000},
+	{170,	985},
+	{180,	969},
+	{190,	954},
+	{200,	939},
+	{210,	924},
+	{220,	909},
+	{230,	894},
+	{240,	880},
+	{250,	866},
+	{260,	852},
+	{270,	838},
+	{280,	824},
+	{290,	811},
+	{300,	798},
+	{310,	785},
+	{320,	773},
+	{330,	760},
+	{340,	748},
+	{350,	736},
+	{360,	725},
+	{370,	713},
+	{380,	702},
+	{390,	691},
+	{400,	681},
+	{410,	670},
+	{420,	660},
+	{430,	650},
+	{440,	640},
+	{450,	631},
+	{460,	622},
+	{470,	613},
+	{480,	604},
+	{490,	595},
+	{500,	587},
+	{510,	579},
+	{520,	571},
+	{530,	563},
+	{540,	556},
+	{550,	548},
+	{560,	541},
+	{570,	534},
+	{580,	527},
+	{590,	521},
+	{600,	514},
+	{610,	508},
+	{620,	502},
+	{630,	496},
+	{640,	490},
+	{650,	485},
+	{660,	281},
+	{670,	274},
+	{680,	267},
+	{690,	260},
+	{700,	254},
+	{710,	247},
+	{720,	241},
+	{730,	235},
+	{740,	229},
+	{750,	224},
+	{760,	218},
+	{770,	213},
+	{780,	208},
+	{790,	203}
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_btm_threshold[] = {
+	{-200,	1540},
+	{-180,	1517},
+	{-160,	1492},
+	{-140,	1467},
+	{-120,	1440},
+	{-100,	1412},
+	{-80,	1383},
+	{-60,	1353},
+	{-40,	1323},
+	{-20,	1292},
+	{0,	1260},
+	{20,	1228},
+	{40,	1196},
+	{60,	1163},
+	{80,	1131},
+	{100,	1098},
+	{120,	1066},
+	{140,	1034},
+	{160,	1002},
+	{180,	971},
+	{200,	941},
+	{220,	911},
+	{240,	882},
+	{260,	854},
+	{280,	826},
+	{300,	800},
+	{320,	774},
+	{340,	749},
+	{360,	726},
+	{380,	703},
+	{400,	681},
+	{420,	660},
+	{440,	640},
+	{460,	621},
+	{480,	602},
+	{500,	585},
+	{520,	568},
+	{540,	552},
+	{560,	537},
+	{580,	523},
+	{600,	510},
+	{620,	497},
+	{640,	485},
+	{660,	473},
+	{680,	462},
+	{700,	452},
+	{720,	442},
+	{740,	433},
+	{760,	424},
+	{780,	416},
+	{800,	408},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skuaa_btm_threshold[] = {
+	{-200,	1476},
+	{-180,	1450},
+	{-160,	1422},
+	{-140,	1394},
+	{-120,	1365},
+	{-100,	1336},
+	{-80,	1306},
+	{-60,	1276},
+	{-40,	1246},
+	{-20,	1216},
+	{0,	1185},
+	{20,	1155},
+	{40,	1126},
+	{60,	1096},
+	{80,	1068},
+	{100,	1040},
+	{120,	1012},
+	{140,	986},
+	{160,	960},
+	{180,	935},
+	{200,	911},
+	{220,	888},
+	{240,	866},
+	{260,	844},
+	{280,	824},
+	{300,	805},
+	{320,	786},
+	{340,	769},
+	{360,	752},
+	{380,	737},
+	{400,	722},
+	{420,	707},
+	{440,	694},
+	{460,	681},
+	{480,	669},
+	{500,	658},
+	{520,	648},
+	{540,	637},
+	{560,	628},
+	{580,	619},
+	{600,	611},
+	{620,	603},
+	{640,	595},
+	{660,	588},
+	{680,	582},
+	{700,	575},
+	{720,	569},
+	{740,	564},
+	{760,	559},
+	{780,	554},
+	{800,	549},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skug_btm_threshold[] = {
+	{-200,	1338},
+	{-180,	1307},
+	{-160,	1276},
+	{-140,	1244},
+	{-120,	1213},
+	{-100,	1182},
+	{-80,	1151},
+	{-60,	1121},
+	{-40,	1092},
+	{-20,	1063},
+	{0,	1035},
+	{20,	1008},
+	{40,	982},
+	{60,	957},
+	{80,	933},
+	{100,	910},
+	{120,	889},
+	{140,	868},
+	{160,	848},
+	{180,	830},
+	{200,	812},
+	{220,	795},
+	{240,	780},
+	{260,	765},
+	{280,	751},
+	{300,	738},
+	{320,	726},
+	{340,	714},
+	{360,	704},
+	{380,	694},
+	{400,	684},
+	{420,	675},
+	{440,	667},
+	{460,	659},
+	{480,	652},
+	{500,	645},
+	{520,	639},
+	{540,	633},
+	{560,	627},
+	{580,	622},
+	{600,	617},
+	{620,	613},
+	{640,	608},
+	{660,	604},
+	{680,	600},
+	{700,	597},
+	{720,	593},
+	{740,	590},
+	{760,	587},
+	{780,	585},
+	{800,	582},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skuh_btm_threshold[] = {
+	{-200,	1531},
+	{-180,	1508},
+	{-160,	1483},
+	{-140,	1458},
+	{-120,	1432},
+	{-100,	1404},
+	{-80,	1377},
+	{-60,	1348},
+	{-40,	1319},
+	{-20,	1290},
+	{0,	1260},
+	{20,	1230},
+	{40,	1200},
+	{60,	1171},
+	{80,	1141},
+	{100,	1112},
+	{120,	1083},
+	{140,	1055},
+	{160,	1027},
+	{180,	1000},
+	{200,	973},
+	{220,	948},
+	{240,	923},
+	{260,	899},
+	{280,	876},
+	{300,	854},
+	{320,	832},
+	{340,	812},
+	{360,	792},
+	{380,	774},
+	{400,	756},
+	{420,	739},
+	{440,	723},
+	{460,	707},
+	{480,	692},
+	{500,	679},
+	{520,	665},
+	{540,	653},
+	{560,	641},
+	{580,	630},
+	{600,	619},
+	{620,	609},
+	{640,	600},
+	{660,	591},
+	{680,	583},
+	{700,	575},
+	{720,	567},
+	{740,	560},
+	{760,	553},
+	{780,	547},
+	{800,	541},
+	{820,	535},
+	{840,	530},
+	{860,	524},
+	{880,	520},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skut1_btm_threshold[] = {
+	{-400,	1759},
+	{-350,	1742},
+	{-300,	1720},
+	{-250,	1691},
+	{-200,	1654},
+	{-150,	1619},
+	{-100,	1556},
+	{-50,	1493},
+	{0,	1422},
+	{50,	1345},
+	{100,	1264},
+	{150,	1180},
+	{200,	1097},
+	{250,	1017},
+	{300,	942},
+	{350,	873},
+	{400,	810},
+	{450,	754},
+	{500,	706},
+	{550,	664},
+	{600,	627},
+	{650,	596},
+	{700,	570},
+	{750,	547},
+	{800,	528},
+	{850,	512},
+	{900,	499},
+	{950,	487},
+	{1000,	477},
+};
+
+/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb[] = {
+	{1758,	-40},
+	{1742,	-35},
+	{1719,	-30},
+	{1691,	-25},
+	{1654,	-20},
+	{1608,	-15},
+	{1551,	-10},
+	{1483,	-5},
+	{1404,	0},
+	{1315,	5},
+	{1218,	10},
+	{1114,	15},
+	{1007,	20},
+	{900,	25},
+	{795,	30},
+	{696,	35},
+	{605,	40},
+	{522,	45},
+	{448,	50},
+	{383,	55},
+	{327,	60},
+	{278,	65},
+	{237,	70},
+	{202,	75},
+	{172,	80},
+	{146,	85},
+	{125,	90},
+	{107,	95},
+	{92,	100},
+	{79,	105},
+	{68,	110},
+	{59,	115},
+	{51,	120},
+	{44,	125}
+};
+
+/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_150k_104ef_104fb[] = {
+	{1738,	-40},
+	{1714,	-35},
+	{1682,	-30},
+	{1641,	-25},
+	{1589,	-20},
+	{1526,	-15},
+	{1451,	-10},
+	{1363,	-5},
+	{1266,	0},
+	{1159,	5},
+	{1048,	10},
+	{936,	15},
+	{825,	20},
+	{720,	25},
+	{622,	30},
+	{533,	35},
+	{454,	40},
+	{385,	45},
+	{326,	50},
+	{275,	55},
+	{232,	60},
+	{195,	65},
+	{165,	70},
+	{139,	75},
+	{118,	80},
+	{100,	85},
+	{85,	90},
+	{73,	95},
+	{62,	100},
+	{53,	105},
+	{46,	110},
+	{40,	115},
+	{34,	120},
+	{30,	125}
+};
+
+static const struct qpnp_vadc_map_pt adcmap_smb_batt_therm[] = {
+	{-300,	1625},
+	{-200,	1515},
+	{-100,	1368},
+	{0,	1192},
+	{10,	1173},
+	{20,	1154},
+	{30,	1135},
+	{40,	1116},
+	{50,	1097},
+	{60,	1078},
+	{70,	1059},
+	{80,	1040},
+	{90,	1020},
+	{100,	1001},
+	{110,	982},
+	{120,	963},
+	{130,	944},
+	{140,	925},
+	{150,	907},
+	{160,	888},
+	{170,	870},
+	{180,	851},
+	{190,	833},
+	{200,	815},
+	{210,	797},
+	{220,	780},
+	{230,	762},
+	{240,	745},
+	{250,	728},
+	{260,	711},
+	{270,	695},
+	{280,	679},
+	{290,	663},
+	{300,	647},
+	{310,	632},
+	{320,	616},
+	{330,	602},
+	{340,	587},
+	{350,	573},
+	{360,	559},
+	{370,	545},
+	{380,	531},
+	{390,	518},
+	{400,	505},
+	{410,	492},
+	{420,	480},
+	{430,	465},
+	{440,	456},
+	{450,	445},
+	{460,	433},
+	{470,	422},
+	{480,	412},
+	{490,	401},
+	{500,	391},
+	{510,	381},
+	{520,	371},
+	{530,	362},
+	{540,	352},
+	{550,	343},
+	{560,	335},
+	{570,	326},
+	{580,	318},
+	{590,	309},
+	{600,	302},
+	{610,	294},
+	{620,	286},
+	{630,	279},
+	{640,	272},
+	{650,	265},
+	{660,	258},
+	{670,	252},
+	{680,	245},
+	{690,	239},
+	{700,	233},
+	{710,	227},
+	{720,	221},
+	{730,	216},
+	{740,	211},
+	{750,	205},
+	{760,	200},
+	{770,	195},
+	{780,	190},
+	{790,	186}
+};
+
+/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_ncp03wf683[] = {
+	{1742,	-40},
+	{1718,	-35},
+	{1687,	-30},
+	{1647,	-25},
+	{1596,	-20},
+	{1534,	-15},
+	{1459,	-10},
+	{1372,	-5},
+	{1275,	0},
+	{1169,	5},
+	{1058,	10},
+	{945,	15},
+	{834,	20},
+	{729,	25},
+	{630,	30},
+	{541,	35},
+	{461,	40},
+	{392,	45},
+	{332,	50},
+	{280,	55},
+	{236,	60},
+	{199,	65},
+	{169,	70},
+	{142,	75},
+	{121,	80},
+	{102,	85},
+	{87,	90},
+	{74,	95},
+	{64,	100},
+	{55,	105},
+	{47,	110},
+	{40,	115},
+	{35,	120},
+	{30,	125}
+};
+
+/*
+ * Voltage to temperature table for 100k pull up for NTCG104EF104 with
+ * 1.875V reference.
+ */
+static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb_1875_vref[] = {
+	{ 1831,	-40 },
+	{ 1814,	-35 },
+	{ 1791,	-30 },
+	{ 1761,	-25 },
+	{ 1723,	-20 },
+	{ 1675,	-15 },
+	{ 1616,	-10 },
+	{ 1545,	-5 },
+	{ 1463,	0 },
+	{ 1370,	5 },
+	{ 1268,	10 },
+	{ 1160,	15 },
+	{ 1049,	20 },
+	{ 937,	25 },
+	{ 828,	30 },
+	{ 726,	35 },
+	{ 630,	40 },
+	{ 544,	45 },
+	{ 467,	50 },
+	{ 399,	55 },
+	{ 340,	60 },
+	{ 290,	65 },
+	{ 247,	70 },
+	{ 209,	75 },
+	{ 179,	80 },
+	{ 153,	85 },
+	{ 130,	90 },
+	{ 112,	95 },
+	{ 96,	100 },
+	{ 82,	105 },
+	{ 71,	110 },
+	{ 62,	115 },
+	{ 53,	120 },
+	{ 46,	125 },
+};
+
+static int32_t qpnp_adc_map_voltage_temp(const struct qpnp_vadc_map_pt *pts,
+		uint32_t tablesize, int32_t input, int64_t *output)
+{
+	bool descending = 1;
+	uint32_t i = 0;
+
+	if (pts == NULL)
+		return -EINVAL;
+
+	/* Check if table is descending or ascending */
+	if (tablesize > 1) {
+		if (pts[0].x < pts[1].x)
+			descending = 0;
+	}
+
+	while (i < tablesize) {
+		if ((descending == 1) && (pts[i].x < input)) {
+			/*
+			 * table entry is less than measured
+			 * value and table is descending, stop.
+			 */
+			break;
+		} else if ((descending == 0) &&
+				(pts[i].x > input)) {
+			/*
+			 * table entry is greater than measured
+			 * value and table is ascending, stop.
+			 */
+			break;
+		}
+		i++;
+	}
+
+	if (i == 0)
+		*output = pts[0].y;
+	else if (i == tablesize)
+		*output = pts[tablesize-1].y;
+	else {
+		/* result is between search_index and search_index-1 */
+		/* interpolate linearly */
+		*output = (((int32_t) ((pts[i].y - pts[i-1].y)*
+			(input - pts[i-1].x))/
+			(pts[i].x - pts[i-1].x))+
+			pts[i-1].y);
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_adc_map_temp_voltage(const struct qpnp_vadc_map_pt *pts,
+		uint32_t tablesize, int32_t input, int64_t *output)
+{
+	bool descending = 1;
+	uint32_t i = 0;
+
+	if (pts == NULL)
+		return -EINVAL;
+
+	/* Check if table is descending or ascending */
+	if (tablesize > 1) {
+		if (pts[0].y < pts[1].y)
+			descending = 0;
+	}
+
+	while (i < tablesize) {
+		if ((descending == 1) && (pts[i].y < input)) {
+			/* Table entry is less than measured value. */
+			/* Table is descending, stop. */
+			break;
+		} else if ((descending == 0) && (pts[i].y > input)) {
+			/* Table entry is greater than measured value. */
+			/* Table is ascending, stop. */
+			break;
+		}
+		i++;
+	}
+
+	if (i == 0) {
+		*output = pts[0].x;
+	} else if (i == tablesize) {
+		*output = pts[tablesize-1].x;
+	} else {
+		/* result is between search_index and search_index-1 */
+		/* interpolate linearly */
+		*output = (((int32_t) ((pts[i].x - pts[i-1].x)*
+			(input - pts[i-1].y))/
+			(pts[i].y - pts[i-1].y))+
+			pts[i-1].x);
+	}
+
+	return 0;
+}
+
+static void qpnp_adc_scale_with_calib_param(int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		int64_t *scale_voltage)
+{
+	*scale_voltage = (adc_code -
+		chan_properties->adc_graph[chan_properties->calib_type].adc_gnd)
+		* chan_properties->adc_graph[chan_properties->calib_type].dx;
+	*scale_voltage = div64_s64(*scale_voltage,
+		chan_properties->adc_graph[chan_properties->calib_type].dy);
+
+	if (chan_properties->calib_type == CALIB_ABSOLUTE)
+		*scale_voltage +=
+		chan_properties->adc_graph[chan_properties->calib_type].dx;
+
+	if (*scale_voltage < 0)
+		*scale_voltage = 0;
+}
+
+int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t pmic_voltage = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	if (adc_properties->adc_hc) {
+		/* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+		if (adc_code > QPNP_VADC_HC_MAX_CODE)
+			adc_code = 0;
+		pmic_voltage = (int64_t) adc_code;
+		pmic_voltage *= (int64_t) (adc_properties->adc_vdd_reference
+							* 1000);
+		pmic_voltage = div64_s64(pmic_voltage,
+					QPNP_VADC_HC_VREF_CODE);
+	} else {
+		if (!chan_properties->adc_graph[CALIB_ABSOLUTE].dy)
+			return -EINVAL;
+		qpnp_adc_scale_with_calib_param(adc_code, adc_properties,
+					chan_properties, &pmic_voltage);
+	}
+
+	if (pmic_voltage > 0) {
+		/* 2mV/K */
+		adc_chan_result->measurement = pmic_voltage*
+			chan_properties->offset_gain_denominator;
+
+		do_div(adc_chan_result->measurement,
+			chan_properties->offset_gain_numerator * 2);
+	} else
+		adc_chan_result->measurement = 0;
+
+	/* Change to .001 deg C */
+	adc_chan_result->measurement -= KELVINMIL_DEGMIL;
+	adc_chan_result->physical = (int32_t) adc_chan_result->measurement;
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_pmic_therm);
+
+int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0, sign = 0;
+
+	/* Convert to Kelvin and account for voltage to be written as 2mV/K */
+	low_output = (param->low_temp + KELVINMIL_DEGMIL) * 2;
+	/* Convert to Kelvin and account for voltage to be written as 2mV/K */
+	high_output = (param->high_temp + KELVINMIL_DEGMIL) * 2;
+
+	if (param->adc_tm_hc) {
+		low_output *= QPNP_VADC_HC_VREF_CODE;
+		do_div(low_output, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+		high_output *= QPNP_VADC_HC_VREF_CODE;
+		do_div(high_output, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+	} else {
+		rc = qpnp_get_vadc_gain_and_offset(chip, &btm_param,
+							CALIB_ABSOLUTE);
+		if (rc < 0) {
+			pr_err("Could not acquire gain and offset\n");
+			return rc;
+		}
+
+		/* Convert to voltage threshold */
+		low_output = (low_output - QPNP_ADC_625_UV) * btm_param.dy;
+		if (low_output < 0) {
+			sign = 1;
+			low_output = -low_output;
+		}
+		do_div(low_output, QPNP_ADC_625_UV);
+		if (sign)
+			low_output = -low_output;
+		low_output += btm_param.adc_gnd;
+
+		sign = 0;
+		/* Convert to voltage threshold */
+		high_output = (high_output - QPNP_ADC_625_UV) * btm_param.dy;
+		if (high_output < 0) {
+			sign = 1;
+			high_output = -high_output;
+		}
+		do_div(high_output, QPNP_ADC_625_UV);
+		if (sign)
+			high_output = -high_output;
+		high_output += btm_param.adc_gnd;
+	}
+
+	*low_threshold = (uint32_t) low_output;
+	*high_threshold = (uint32_t) high_output;
+
+	pr_debug("high_temp:%d, low_temp:%d\n", param->high_temp,
+				param->low_temp);
+	pr_debug("adc_code_high:%x, adc_code_low:%x\n", *high_threshold,
+				*low_threshold);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_millidegc_pmic_voltage_thr);
+
+/* Scales the ADC code to degC using the mapping
+ * table for the XO thermistor.
+ */
+int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t xo_thm_voltage = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	if (adc_properties->adc_hc) {
+		/* (ADC code * vref_vadc (1.875V) * 1000) / (0x4000 * 1000) */
+		if (adc_code > QPNP_VADC_HC_MAX_CODE)
+			adc_code = 0;
+		xo_thm_voltage = (int64_t) adc_code;
+		xo_thm_voltage *= (int64_t) (adc_properties->adc_vdd_reference
+							* 1000);
+		xo_thm_voltage = div64_s64(xo_thm_voltage,
+					QPNP_VADC_HC_VREF_CODE * 1000);
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+			xo_thm_voltage, &adc_chan_result->physical);
+	} else {
+		qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &xo_thm_voltage);
+
+		if (chan_properties->calib_type == CALIB_ABSOLUTE)
+			do_div(xo_thm_voltage, 1000);
+
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb),
+			xo_thm_voltage, &adc_chan_result->physical);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_tdkntcg_therm);
+
+int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	adc_chan_result->measurement = bat_voltage;
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_btm_threshold,
+			ARRAY_SIZE(adcmap_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	adc_chan_result->measurement = bat_voltage;
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_qrd_btm_threshold,
+			ARRAY_SIZE(adcmap_qrd_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skuaa_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	adc_chan_result->measurement = bat_voltage;
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_qrd_skuaa_btm_threshold,
+			ARRAY_SIZE(adcmap_qrd_skuaa_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skuaa_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skug_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+	adc_chan_result->measurement = bat_voltage;
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_qrd_skug_btm_threshold,
+			ARRAY_SIZE(adcmap_qrd_skug_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skug_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skuh_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_qrd_skuh_btm_threshold,
+			ARRAY_SIZE(adcmap_qrd_skuh_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skuh_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skut1_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_qrd_skut1_btm_threshold,
+			ARRAY_SIZE(adcmap_qrd_skut1_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skut1_batt_therm);
+
+int32_t qpnp_adc_scale_smb_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_smb_batt_therm,
+			ARRAY_SIZE(adcmap_smb_batt_therm),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_smb_batt_therm);
+
+int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t therm_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &therm_voltage);
+
+	qpnp_adc_map_voltage_temp(adcmap_150k_104ef_104fb,
+		ARRAY_SIZE(adcmap_150k_104ef_104fb),
+		therm_voltage, &adc_chan_result->physical);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_therm_pu1);
+
+int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t therm_voltage = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties)
+		return -EINVAL;
+
+	if (adc_properties->adc_hc) {
+		/* (ADC code * vref_vadc (1.875V) * 1000) / (0x4000 * 1000) */
+		if (adc_code > QPNP_VADC_HC_MAX_CODE)
+			adc_code = 0;
+		therm_voltage = (int64_t) adc_code;
+		therm_voltage *= (int64_t) (adc_properties->adc_vdd_reference
+							* 1000);
+		therm_voltage = div64_s64(therm_voltage,
+					(QPNP_VADC_HC_VREF_CODE * 1000));
+
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+			therm_voltage, &adc_chan_result->physical);
+	} else {
+		qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &therm_voltage);
+
+		if (chan_properties->calib_type == CALIB_ABSOLUTE)
+			do_div(therm_voltage, 1000);
+
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb),
+			therm_voltage, &adc_chan_result->physical);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_therm_pu2);
+
+int32_t qpnp_adc_tm_scale_voltage_therm_pu2(struct qpnp_vadc_chip *chip,
+		const struct qpnp_adc_properties *adc_properties,
+					uint32_t reg, int64_t *result)
+{
+	int64_t adc_voltage = 0;
+	struct qpnp_vadc_linear_graph param1;
+	int negative_offset = 0;
+
+	if (adc_properties->adc_hc) {
+		/* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+		if (reg > QPNP_VADC_HC_MAX_CODE)
+			reg = 0;
+		adc_voltage = (int64_t) reg;
+		adc_voltage *= QPNP_VADC_HC_VDD_REFERENCE_MV;
+		adc_voltage = div64_s64(adc_voltage,
+					QPNP_VADC_HC_VREF_CODE);
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+			adc_voltage, result);
+	} else {
+		qpnp_get_vadc_gain_and_offset(chip, &param1, CALIB_RATIOMETRIC);
+
+		adc_voltage = (reg - param1.adc_gnd) * param1.adc_vref;
+		if (adc_voltage < 0) {
+			negative_offset = 1;
+			adc_voltage = -adc_voltage;
+		}
+
+		do_div(adc_voltage, param1.dy);
+
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb),
+			adc_voltage, result);
+		if (negative_offset)
+			adc_voltage = -adc_voltage;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_tm_scale_voltage_therm_pu2);
+
+int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *chip,
+			const struct qpnp_adc_properties *adc_properties,
+				struct qpnp_adc_tm_config *param)
+{
+	struct qpnp_vadc_linear_graph param1;
+	int rc;
+
+	if (adc_properties->adc_hc) {
+		rc = qpnp_adc_map_temp_voltage(
+			adcmap_100k_104ef_104fb_1875_vref,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+			param->low_thr_temp, &param->low_thr_voltage);
+		if (rc)
+			return rc;
+		param->low_thr_voltage *= QPNP_VADC_HC_VREF_CODE;
+		do_div(param->low_thr_voltage, QPNP_VADC_HC_VDD_REFERENCE_MV);
+
+		rc = qpnp_adc_map_temp_voltage(
+			adcmap_100k_104ef_104fb_1875_vref,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+			param->high_thr_temp, &param->high_thr_voltage);
+		if (rc)
+			return rc;
+		param->high_thr_voltage *= QPNP_VADC_HC_VREF_CODE;
+		do_div(param->high_thr_voltage, QPNP_VADC_HC_VDD_REFERENCE_MV);
+	} else {
+		qpnp_get_vadc_gain_and_offset(chip, &param1, CALIB_RATIOMETRIC);
+
+		rc = qpnp_adc_map_temp_voltage(adcmap_100k_104ef_104fb,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb),
+			param->low_thr_temp, &param->low_thr_voltage);
+		if (rc)
+			return rc;
+
+		param->low_thr_voltage *= param1.dy;
+		do_div(param->low_thr_voltage, param1.adc_vref);
+		param->low_thr_voltage += param1.adc_gnd;
+
+		rc = qpnp_adc_map_temp_voltage(adcmap_100k_104ef_104fb,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb),
+			param->high_thr_temp, &param->high_thr_voltage);
+		if (rc)
+			return rc;
+
+		param->high_thr_voltage *= param1.dy;
+		do_div(param->high_thr_voltage, param1.adc_vref);
+		param->high_thr_voltage += param1.adc_gnd;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_tm_scale_therm_voltage_pu2);
+
+int32_t qpnp_adc_scale_therm_ncp03(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t therm_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &therm_voltage);
+
+	qpnp_adc_map_voltage_temp(adcmap_ncp03wf683,
+		ARRAY_SIZE(adcmap_ncp03wf683),
+		therm_voltage, &adc_chan_result->physical);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_therm_ncp03);
+
+int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t batt_id_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &batt_id_voltage);
+
+	adc_chan_result->measurement = batt_id_voltage;
+	adc_chan_result->physical = adc_chan_result->measurement;
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_batt_id);
+
+/*
+ * qpnp_adc_scale_default() - Generic raw-code to voltage conversion.
+ *
+ * For ADC_HC hardware the code scales linearly against the VDD
+ * reference (codes above QPNP_VADC_HC_MAX_CODE are clamped to 0).
+ * Otherwise the per-channel calibration graph is applied, with a
+ * mV -> uV conversion for non-absolute calibration.  The channel
+ * pre-divider is then undone, and ->physical mirrors ->measurement
+ * (a scale/ratio relative to the ADC reference input).
+ *
+ * Fix: the calibration check was written as
+ * "!chan_properties->calib_type == CALIB_ABSOLUTE", which applies the
+ * logical NOT before the comparison and only behaved as intended
+ * because CALIB_ABSOLUTE happens to be 0.  Spell the intended test out
+ * as "calib_type != CALIB_ABSOLUTE".
+ */
+int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t scale_voltage = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	if (adc_properties->adc_hc) {
+		/* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+		if (adc_code > QPNP_VADC_HC_MAX_CODE)
+			adc_code = 0;
+		scale_voltage = (int64_t) adc_code;
+		scale_voltage *= (adc_properties->adc_vdd_reference * 1000);
+		scale_voltage = div64_s64(scale_voltage,
+						QPNP_VADC_HC_VREF_CODE);
+	} else {
+		qpnp_adc_scale_with_calib_param(adc_code, adc_properties,
+					chan_properties, &scale_voltage);
+		if (chan_properties->calib_type != CALIB_ABSOLUTE)
+			scale_voltage *= 1000;
+	}
+
+	/* Undo the channel pre-divider. */
+	scale_voltage *= chan_properties->offset_gain_denominator;
+	scale_voltage = div64_s64(scale_voltage,
+				chan_properties->offset_gain_numerator);
+	adc_chan_result->measurement = scale_voltage;
+	/*
+	 * Note: adc_chan_result->measurement is in the unit of
+	 * adc_properties.adc_reference. For generic channel processing,
+	 * channel measurement is a scale/ratio relative to the adc
+	 * reference input
+	 */
+	adc_chan_result->physical = adc_chan_result->measurement;
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_default);
+
+/*
+ * qpnp_adc_usb_scaler() - Translate USB detection voltage thresholds
+ * (param->low_thr / param->high_thr) into raw ADC threshold codes
+ * using the ratiometric calibration graph.
+ *
+ * NOTE(review): do_div() is applied here to 32-bit lvalues
+ * (*low_threshold / *high_threshold); the macro contract expects a
+ * 64-bit dividend — confirm this is safe on all supported
+ * architectures.
+ */
+int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph usb_param;
+
+	qpnp_get_vadc_gain_and_offset(chip, &usb_param, CALIB_RATIOMETRIC);
+
+	/* code = (voltage * dy) / vref + gnd offset */
+	*low_threshold = param->low_thr * usb_param.dy;
+	do_div(*low_threshold, usb_param.adc_vref);
+	*low_threshold += usb_param.adc_gnd;
+
+	*high_threshold = param->high_thr * usb_param.dy;
+	do_div(*high_threshold, usb_param.adc_vref);
+	*high_threshold += usb_param.adc_gnd;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
+				param->low_thr);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_usb_scaler);
+
+/*
+ * qpnp_adc_absolute_rthr() - Translate absolute voltage thresholds
+ * (param->low_thr / param->high_thr, pre-divided by gain_den/gain_num)
+ * into raw ADC threshold codes.
+ *
+ * For ADC_TM_HC the conversion is a straight linear scale against the
+ * VDD reference.  Otherwise the absolute calibration graph is used,
+ * with explicit sign handling because do_div() requires a non-negative
+ * dividend.
+ */
+int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph vbatt_param;
+	int rc = 0, sign = 0;
+	int64_t low_thr = 0, high_thr = 0;
+
+	if (param->adc_tm_hc) {
+		low_thr = (param->low_thr/param->gain_den);
+		low_thr *= param->gain_num;
+		low_thr *= QPNP_VADC_HC_VREF_CODE;
+		do_div(low_thr, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+		*low_threshold = low_thr;
+
+		high_thr = (param->high_thr/param->gain_den);
+		high_thr *= param->gain_num;
+		high_thr *= QPNP_VADC_HC_VREF_CODE;
+		do_div(high_thr, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+		*high_threshold = high_thr;
+	} else {
+		rc = qpnp_get_vadc_gain_and_offset(chip, &vbatt_param,
+							CALIB_ABSOLUTE);
+		if (rc < 0)
+			return rc;
+
+		/* Work on the magnitude; restore the sign after do_div(). */
+		low_thr = (((param->low_thr/param->gain_den) -
+				QPNP_ADC_625_UV) * vbatt_param.dy);
+		if (low_thr < 0) {
+			sign = 1;
+			low_thr = -low_thr;
+		}
+		low_thr = low_thr * param->gain_num;
+		do_div(low_thr, QPNP_ADC_625_UV);
+		if (sign)
+			low_thr = -low_thr;
+		*low_threshold = low_thr + vbatt_param.adc_gnd;
+
+		sign = 0;
+		high_thr = (((param->high_thr/param->gain_den) -
+				QPNP_ADC_625_UV) * vbatt_param.dy);
+		if (high_thr < 0) {
+			sign = 1;
+			high_thr = -high_thr;
+		}
+		high_thr = high_thr * param->gain_num;
+		do_div(high_thr, QPNP_ADC_625_UV);
+		if (sign)
+			high_thr = -high_thr;
+		*high_threshold = high_thr + vbatt_param.adc_gnd;
+	}
+
+	pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
+				param->low_thr);
+	pr_debug("adc_code_high:%x, adc_code_low:%x\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_absolute_rthr);
+
+/*
+ * qpnp_adc_vbatt_rscaler() - Legacy battery-voltage threshold scaler;
+ * kept for existing callers and identical to qpnp_adc_absolute_rthr().
+ */
+int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	return qpnp_adc_absolute_rthr(chip, param, low_threshold,
+							high_threshold);
+}
+EXPORT_SYMBOL(qpnp_adc_vbatt_rscaler);
+
+/*
+ * qpnp_vadc_absolute_rthr() - Per-channel variant of the absolute
+ * threshold scaler: uses the channel's own offset_gain_numerator /
+ * offset_gain_denominator (from chan_prop) rather than the gain fields
+ * in param.  Sign is handled explicitly because do_div() requires a
+ * non-negative dividend.
+ */
+int32_t qpnp_vadc_absolute_rthr(struct qpnp_vadc_chip *chip,
+		const struct qpnp_vadc_chan_properties *chan_prop,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph vbatt_param;
+	int rc = 0, sign = 0;
+	int64_t low_thr = 0, high_thr = 0;
+
+	if (!chan_prop || !chan_prop->offset_gain_numerator ||
+		!chan_prop->offset_gain_denominator)
+		return -EINVAL;
+
+	rc = qpnp_get_vadc_gain_and_offset(chip, &vbatt_param, CALIB_ABSOLUTE);
+	if (rc < 0)
+		return rc;
+
+	/* Work on the magnitude; restore the sign after do_div(). */
+	low_thr = (((param->low_thr)/(int)chan_prop->offset_gain_denominator
+					- QPNP_ADC_625_UV) * vbatt_param.dy);
+	if (low_thr < 0) {
+		sign = 1;
+		low_thr = -low_thr;
+	}
+	low_thr = low_thr * chan_prop->offset_gain_numerator;
+	do_div(low_thr, QPNP_ADC_625_UV);
+	if (sign)
+		low_thr = -low_thr;
+	*low_threshold = low_thr + vbatt_param.adc_gnd;
+
+	sign = 0;
+	high_thr = (((param->high_thr)/(int)chan_prop->offset_gain_denominator
+					- QPNP_ADC_625_UV) * vbatt_param.dy);
+	if (high_thr < 0) {
+		sign = 1;
+		high_thr = -high_thr;
+	}
+	high_thr = high_thr * chan_prop->offset_gain_numerator;
+	do_div(high_thr, QPNP_ADC_625_UV);
+	if (sign)
+		high_thr = -high_thr;
+	*high_threshold = high_thr + vbatt_param.adc_gnd;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
+				param->low_thr);
+	pr_debug("adc_code_high:%x, adc_code_low:%x\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_vadc_absolute_rthr);
+
+/*
+ * qpnp_adc_btm_scaler() - Translate battery-temperature thresholds
+ * (param->low_temp / param->high_temp) into raw ADC threshold codes
+ * using the adcmap_btm_threshold table and ratiometric calibration.
+ * Because the thermistor is NTC, the low/high outputs are swapped when
+ * written back to the caller's thresholds.
+ */
+int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0;
+
+	if (param->adc_tm_hc) {
+		pr_err("Update scaling for VADC_TM_HC\n");
+		return -EINVAL;
+	}
+
+	qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+	pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+				param->low_temp);
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_btm_threshold,
+		ARRAY_SIZE(adcmap_btm_threshold),
+		(param->low_temp),
+		&low_output);
+	if (rc) {
+		pr_debug("low_temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("low_output:%lld\n", low_output);
+	low_output *= btm_param.dy;
+	do_div(low_output, btm_param.adc_vref);
+	low_output += btm_param.adc_gnd;
+
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_btm_threshold,
+		ARRAY_SIZE(adcmap_btm_threshold),
+		(param->high_temp),
+		&high_output);
+	if (rc) {
+		pr_debug("high temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("high_output:%lld\n", high_output);
+	high_output *= btm_param.dy;
+	do_div(high_output, btm_param.adc_vref);
+	high_output += btm_param.adc_gnd;
+
+	/* btm low temperature corresponds to high voltage threshold */
+	*low_threshold = high_output;
+	/* btm high temperature corresponds to low voltage threshold */
+	*high_threshold = low_output;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_btm_scaler);
+
+/*
+ * qpnp_adc_qrd_skuh_btm_scaler() - Same as qpnp_adc_btm_scaler() but
+ * for the QRD SKUH board thermistor, using its own lookup table
+ * (adcmap_qrd_skuh_btm_threshold).  NTC: low/high outputs are swapped
+ * when written back to the caller's thresholds.
+ */
+int32_t qpnp_adc_qrd_skuh_btm_scaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0;
+
+	if (param->adc_tm_hc) {
+		pr_err("Update scaling for VADC_TM_HC\n");
+		return -EINVAL;
+	}
+
+	qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+	pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+				param->low_temp);
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_qrd_skuh_btm_threshold,
+		ARRAY_SIZE(adcmap_qrd_skuh_btm_threshold),
+		(param->low_temp),
+		&low_output);
+	if (rc) {
+		pr_debug("low_temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("low_output:%lld\n", low_output);
+	low_output *= btm_param.dy;
+	do_div(low_output, btm_param.adc_vref);
+	low_output += btm_param.adc_gnd;
+
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_qrd_skuh_btm_threshold,
+		ARRAY_SIZE(adcmap_qrd_skuh_btm_threshold),
+		(param->high_temp),
+		&high_output);
+	if (rc) {
+		pr_debug("high temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("high_output:%lld\n", high_output);
+	high_output *= btm_param.dy;
+	do_div(high_output, btm_param.adc_vref);
+	high_output += btm_param.adc_gnd;
+
+	/* btm low temperature corresponds to high voltage threshold */
+	*low_threshold = high_output;
+	/* btm high temperature corresponds to low voltage threshold */
+	*high_threshold = low_output;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_qrd_skuh_btm_scaler);
+
+/*
+ * qpnp_adc_qrd_skut1_btm_scaler() - Same as qpnp_adc_btm_scaler() but
+ * for the QRD SKUT1 board thermistor, using its own lookup table
+ * (adcmap_qrd_skut1_btm_threshold).  NTC: low/high outputs are swapped
+ * when written back to the caller's thresholds.
+ */
+int32_t qpnp_adc_qrd_skut1_btm_scaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0;
+
+	if (param->adc_tm_hc) {
+		pr_err("Update scaling for VADC_TM_HC\n");
+		return -EINVAL;
+	}
+
+	qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+	pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+				param->low_temp);
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_qrd_skut1_btm_threshold,
+		ARRAY_SIZE(adcmap_qrd_skut1_btm_threshold),
+		(param->low_temp),
+		&low_output);
+	if (rc) {
+		pr_debug("low_temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("low_output:%lld\n", low_output);
+	low_output *= btm_param.dy;
+	do_div(low_output, btm_param.adc_vref);
+	low_output += btm_param.adc_gnd;
+
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_qrd_skut1_btm_threshold,
+		ARRAY_SIZE(adcmap_qrd_skut1_btm_threshold),
+		(param->high_temp),
+		&high_output);
+	if (rc) {
+		pr_debug("high temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("high_output:%lld\n", high_output);
+	high_output *= btm_param.dy;
+	do_div(high_output, btm_param.adc_vref);
+	high_output += btm_param.adc_gnd;
+
+	/* btm low temperature corresponds to high voltage threshold */
+	*low_threshold = high_output;
+	/* btm high temperature corresponds to low voltage threshold */
+	*high_threshold = low_output;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_qrd_skut1_btm_scaler);
+
+/*
+ * qpnp_adc_smb_btm_rscaler() - Same as qpnp_adc_btm_scaler() but for
+ * the SMB charger battery thermistor, using its own lookup table
+ * (adcmap_smb_batt_therm).  NTC: low/high outputs are swapped when
+ * written back to the caller's thresholds.
+ */
+int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0;
+
+	if (param->adc_tm_hc) {
+		pr_err("Update scaling for VADC_TM_HC\n");
+		return -EINVAL;
+	}
+
+	qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+	pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+				param->low_temp);
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_smb_batt_therm,
+		ARRAY_SIZE(adcmap_smb_batt_therm),
+		(param->low_temp),
+		&low_output);
+	if (rc) {
+		pr_debug("low_temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("low_output:%lld\n", low_output);
+	low_output *= btm_param.dy;
+	do_div(low_output, btm_param.adc_vref);
+	low_output += btm_param.adc_gnd;
+
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_smb_batt_therm,
+		ARRAY_SIZE(adcmap_smb_batt_therm),
+		(param->high_temp),
+		&high_output);
+	if (rc) {
+		pr_debug("high temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("high_output:%lld\n", high_output);
+	high_output *= btm_param.dy;
+	do_div(high_output, btm_param.adc_vref);
+	high_output += btm_param.adc_gnd;
+
+	/* btm low temperature corresponds to high voltage threshold */
+	*low_threshold = high_output;
+	/* btm high temperature corresponds to low voltage threshold */
+	*high_threshold = low_output;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_smb_btm_rscaler);
+
+/*
+ * qpnp_adc_scale_pmi_chg_temp() - Scale the PMI charger die-temperature
+ * channel: run the generic voltage conversion, then apply the linear
+ * PMI_CHG_SCALE_1/PMI_CHG_SCALE_2 transform and divide down to the
+ * reported unit.
+ *
+ * Fix: removed the redundant self-assignment
+ * "physical = (int64_t) physical" (a no-op cast of a field to its own
+ * type) and folded the remaining chained assignments.
+ */
+int32_t qpnp_adc_scale_pmi_chg_temp(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int rc = 0;
+
+	rc = qpnp_adc_scale_default(vadc, adc_code, adc_properties,
+			chan_properties, adc_chan_result);
+	if (rc < 0)
+		return rc;
+
+	pr_debug("raw_code:%x, v_adc:%lld\n", adc_code,
+						adc_chan_result->physical);
+	/* temp = (SCALE_1 * 2 * v_adc + SCALE_2) / 1000000 */
+	adc_chan_result->physical = (int64_t) ((PMI_CHG_SCALE_1) *
+					(adc_chan_result->physical * 2));
+	adc_chan_result->physical += PMI_CHG_SCALE_2;
+	adc_chan_result->physical = div64_s64(adc_chan_result->physical,
+								1000000);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_pmi_chg_temp);
+
+/*
+ * qpnp_adc_enable_voltage() - Enable the optional HKADC LDO and the
+ * HKADC "ok" signal regulator, in that order.  Either regulator may be
+ * absent (NULL), in which case it is skipped.
+ *
+ * Fix: if enabling hkadc_ldo_ok fails, the original returned with
+ * hkadc_ldo still enabled, leaking an enable count; disable it on the
+ * error path so the function is all-or-nothing.
+ *
+ * Return: 0 on success or the regulator_enable() error code.
+ */
+int32_t qpnp_adc_enable_voltage(struct qpnp_adc_drv *adc)
+{
+	int rc = 0;
+
+	if (adc->hkadc_ldo) {
+		rc = regulator_enable(adc->hkadc_ldo);
+		if (rc < 0) {
+			pr_err("Failed to enable hkadc ldo\n");
+			return rc;
+		}
+	}
+
+	if (adc->hkadc_ldo_ok) {
+		rc = regulator_enable(adc->hkadc_ldo_ok);
+		if (rc < 0) {
+			pr_err("Failed to enable hkadc ok signal\n");
+			/* Undo the first enable so counts stay balanced. */
+			if (adc->hkadc_ldo)
+				regulator_disable(adc->hkadc_ldo);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_adc_enable_voltage);
+
+/*
+ * qpnp_adc_disable_voltage() - Disable the optional HKADC LDO and the
+ * HKADC "ok" signal regulator.  Counterpart of
+ * qpnp_adc_enable_voltage(); absent (NULL) regulators are skipped.
+ */
+void qpnp_adc_disable_voltage(struct qpnp_adc_drv *adc)
+{
+	if (adc->hkadc_ldo)
+		regulator_disable(adc->hkadc_ldo);
+
+	if (adc->hkadc_ldo_ok)
+		regulator_disable(adc->hkadc_ldo_ok);
+
+}
+EXPORT_SYMBOL(qpnp_adc_disable_voltage);
+
+/*
+ * qpnp_adc_free_voltage_resource() - Release the regulator references
+ * acquired in qpnp_adc_get_devicetree_data().
+ *
+ * NOTE(review): regulator_put() already tolerates NULL, so the guards
+ * are likely redundant — confirm against the kernel version in use.
+ */
+void qpnp_adc_free_voltage_resource(struct qpnp_adc_drv *adc)
+{
+	if (adc->hkadc_ldo)
+		regulator_put(adc->hkadc_ldo);
+
+	if (adc->hkadc_ldo_ok)
+		regulator_put(adc->hkadc_ldo_ok);
+}
+EXPORT_SYMBOL(qpnp_adc_free_voltage_resource);
+
+/*
+ * qpnp_adc_get_revid_version() - Resolve the QPNP_REV_ID_* identifier
+ * for the PMIC referenced by the "qcom,pmic-revid" phandle, by matching
+ * the revision registers and subtype reported by the revid driver.
+ *
+ * Fix: of_parse_phandle() returns the node with an elevated refcount
+ * which was never dropped; release it with of_node_put() once
+ * get_revid_data() has been called.  The separate IS_ERR() and NULL
+ * checks are folded into IS_ERR_OR_NULL().
+ *
+ * Return: a QPNP_REV_ID_* constant, or -EINVAL if the phandle is
+ * missing, the revid lookup fails, or no known revision matches.
+ */
+int qpnp_adc_get_revid_version(struct device *dev)
+{
+	struct pmic_revid_data *revid_data;
+	struct device_node *revid_dev_node;
+
+	revid_dev_node = of_parse_phandle(dev->of_node,
+						"qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_debug("Missing qcom,pmic-revid property\n");
+		return -EINVAL;
+	}
+
+	revid_data = get_revid_data(revid_dev_node);
+	of_node_put(revid_dev_node);
+	if (IS_ERR_OR_NULL(revid_data)) {
+		pr_debug("revid error rc = %ld\n", PTR_ERR(revid_data));
+		return -EINVAL;
+	}
+
+	/* Match rev1..rev4 + subtype against each known PMIC revision. */
+	if ((revid_data->rev1 == PM8941_V3P1_REV1) &&
+		(revid_data->rev2 == PM8941_V3P1_REV2) &&
+		(revid_data->rev3 == PM8941_V3P1_REV3) &&
+		(revid_data->rev4 == PM8941_V3P1_REV4) &&
+		(revid_data->pmic_subtype == PM8941_SUBTYPE))
+		return QPNP_REV_ID_8941_3_1;
+	else if ((revid_data->rev1 == PM8941_V3P0_REV1) &&
+		(revid_data->rev2 == PM8941_V3P0_REV2) &&
+		(revid_data->rev3 == PM8941_V3P0_REV3) &&
+		(revid_data->rev4 == PM8941_V3P0_REV4) &&
+		(revid_data->pmic_subtype == PM8941_SUBTYPE))
+		return QPNP_REV_ID_8941_3_0;
+	else if ((revid_data->rev1 == PM8941_V2P0_REV1) &&
+		(revid_data->rev2 == PM8941_V2P0_REV2) &&
+		(revid_data->rev3 == PM8941_V2P0_REV3) &&
+		(revid_data->rev4 == PM8941_V2P0_REV4) &&
+		(revid_data->pmic_subtype == PM8941_SUBTYPE))
+		return QPNP_REV_ID_8941_2_0;
+	else if ((revid_data->rev1 == PM8226_V2P2_REV1) &&
+		(revid_data->rev2 == PM8226_V2P2_REV2) &&
+		(revid_data->rev3 == PM8226_V2P2_REV3) &&
+		(revid_data->rev4 == PM8226_V2P2_REV4) &&
+		(revid_data->pmic_subtype == PM8226_SUBTYPE))
+		return QPNP_REV_ID_8026_2_2;
+	else if ((revid_data->rev1 == PM8226_V2P1_REV1) &&
+		(revid_data->rev2 == PM8226_V2P1_REV2) &&
+		(revid_data->rev3 == PM8226_V2P1_REV3) &&
+		(revid_data->rev4 == PM8226_V2P1_REV4) &&
+		(revid_data->pmic_subtype == PM8226_SUBTYPE))
+		return QPNP_REV_ID_8026_2_1;
+	else if ((revid_data->rev1 == PM8226_V2P0_REV1) &&
+		(revid_data->rev2 == PM8226_V2P0_REV2) &&
+		(revid_data->rev3 == PM8226_V2P0_REV3) &&
+		(revid_data->rev4 == PM8226_V2P0_REV4) &&
+		(revid_data->pmic_subtype == PM8226_SUBTYPE))
+		return QPNP_REV_ID_8026_2_0;
+	else if ((revid_data->rev1 == PM8226_V1P0_REV1) &&
+		(revid_data->rev2 == PM8226_V1P0_REV2) &&
+		(revid_data->rev3 == PM8226_V1P0_REV3) &&
+		(revid_data->rev4 == PM8226_V1P0_REV4) &&
+		(revid_data->pmic_subtype == PM8226_SUBTYPE))
+		return QPNP_REV_ID_8026_1_0;
+	else if ((revid_data->rev1 == PM8110_V1P0_REV1) &&
+		(revid_data->rev2 == PM8110_V1P0_REV2) &&
+		(revid_data->rev3 == PM8110_V1P0_REV3) &&
+		(revid_data->rev4 == PM8110_V1P0_REV4) &&
+		(revid_data->pmic_subtype == PM8110_SUBTYPE))
+		return QPNP_REV_ID_8110_1_0;
+	else if ((revid_data->rev1 == PM8110_V2P0_REV1) &&
+		(revid_data->rev2 == PM8110_V2P0_REV2) &&
+		(revid_data->rev3 == PM8110_V2P0_REV3) &&
+		(revid_data->rev4 == PM8110_V2P0_REV4) &&
+		(revid_data->pmic_subtype == PM8110_SUBTYPE))
+		return QPNP_REV_ID_8110_2_0;
+	else if ((revid_data->rev1 == PM8916_V1P0_REV1) &&
+		(revid_data->rev2 == PM8916_V1P0_REV2) &&
+		(revid_data->rev3 == PM8916_V1P0_REV3) &&
+		(revid_data->rev4 == PM8916_V1P0_REV4) &&
+		(revid_data->pmic_subtype == PM8916_SUBTYPE))
+		return QPNP_REV_ID_8916_1_0;
+	else if ((revid_data->rev1 == PM8916_V1P1_REV1) &&
+		(revid_data->rev2 == PM8916_V1P1_REV2) &&
+		(revid_data->rev3 == PM8916_V1P1_REV3) &&
+		(revid_data->rev4 == PM8916_V1P1_REV4) &&
+		(revid_data->pmic_subtype == PM8916_SUBTYPE))
+		return QPNP_REV_ID_8916_1_1;
+	else if ((revid_data->rev1 == PM8916_V2P0_REV1) &&
+		(revid_data->rev2 == PM8916_V2P0_REV2) &&
+		(revid_data->rev3 == PM8916_V2P0_REV3) &&
+		(revid_data->rev4 == PM8916_V2P0_REV4) &&
+		(revid_data->pmic_subtype == PM8916_SUBTYPE))
+		return QPNP_REV_ID_8916_2_0;
+	else if ((revid_data->rev1 == PM8909_V1P0_REV1) &&
+		(revid_data->rev2 == PM8909_V1P0_REV2) &&
+		(revid_data->rev3 == PM8909_V1P0_REV3) &&
+		(revid_data->rev4 == PM8909_V1P0_REV4) &&
+		(revid_data->pmic_subtype == PM8909_SUBTYPE))
+		return QPNP_REV_ID_8909_1_0;
+	else if ((revid_data->rev1 == PM8909_V1P1_REV1) &&
+		(revid_data->rev2 == PM8909_V1P1_REV2) &&
+		(revid_data->rev3 == PM8909_V1P1_REV3) &&
+		(revid_data->rev4 == PM8909_V1P1_REV4) &&
+		(revid_data->pmic_subtype == PM8909_SUBTYPE))
+		return QPNP_REV_ID_8909_1_1;
+	else if ((revid_data->rev4 == PM8950_V1P0_REV4) &&
+		(revid_data->pmic_subtype == PM8950_SUBTYPE))
+		return QPNP_REV_ID_PM8950_1_0;
+	else
+		return -EINVAL;
+}
+EXPORT_SYMBOL(qpnp_adc_get_revid_version);
+
+/*
+ * qpnp_adc_get_devicetree_data() - Parse the ADC device-tree node:
+ * per-channel properties (decimation, scaling, calibration, settle and
+ * fast-average settings), the VDD reference and bit resolution, the
+ * peripheral base address and EOC interrupt, and the optional HKADC
+ * regulators.  Results are stored into adc_qpnp.
+ *
+ * NOTE(review): the early "return -EINVAL" paths inside the
+ * for_each_child_of_node() loop do not of_node_put() the 'child'
+ * reference, and failures after regulator_get() succeed do not release
+ * the regulator — confirm and fix these leaks separately.
+ *
+ * Return: 0 on success, -EINVAL/-ENOMEM/-ENXIO or a regulator error
+ * code on failure.
+ */
+int32_t qpnp_adc_get_devicetree_data(struct platform_device *pdev,
+			struct qpnp_adc_drv *adc_qpnp)
+{
+	struct device_node *node = pdev->dev.of_node;
+	unsigned int base;
+	struct device_node *child;
+	struct qpnp_adc_amux *adc_channel_list;
+	struct qpnp_adc_properties *adc_prop;
+	struct qpnp_adc_amux_properties *amux_prop;
+	int count_adc_channel_list = 0, decimation = 0, rc = 0, i = 0;
+	int decimation_tm_hc = 0, fast_avg_setup_tm_hc = 0, cal_val_hc = 0;
+	bool adc_hc;
+
+	if (!node)
+		return -EINVAL;
+
+	/* Count the channel child nodes to size the channel array. */
+	for_each_child_of_node(node, child)
+		count_adc_channel_list++;
+
+	if (!count_adc_channel_list) {
+		pr_err("No channel listing\n");
+		return -EINVAL;
+	}
+
+	adc_qpnp->pdev = pdev;
+
+	adc_prop = devm_kzalloc(&pdev->dev,
+				sizeof(struct qpnp_adc_properties),
+					GFP_KERNEL);
+	if (!adc_prop)
+		return -ENOMEM;
+
+	adc_channel_list = devm_kzalloc(&pdev->dev,
+		((sizeof(struct qpnp_adc_amux)) * count_adc_channel_list),
+				GFP_KERNEL);
+	if (!adc_channel_list)
+		return -ENOMEM;
+
+	amux_prop = devm_kzalloc(&pdev->dev,
+		sizeof(struct qpnp_adc_amux_properties) +
+		sizeof(struct qpnp_vadc_chan_properties), GFP_KERNEL);
+	if (!amux_prop) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	adc_qpnp->adc_channels = adc_channel_list;
+	adc_qpnp->amux_prop = amux_prop;
+	adc_hc = adc_qpnp->adc_hc;
+	adc_prop->adc_hc = adc_hc;
+
+	/* ADC_TM_HC: decimation and fast-avg come from the parent node. */
+	if (of_device_is_compatible(node, "qcom,qpnp-adc-tm-hc")) {
+		rc = of_property_read_u32(node, "qcom,decimation",
+						&decimation_tm_hc);
+		if (rc) {
+			pr_err("Invalid decimation property\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(node,
+			"qcom,fast-avg-setup", &fast_avg_setup_tm_hc);
+		if (rc) {
+			pr_err("Invalid fast average setup with %d\n", rc);
+			return -EINVAL;
+		}
+
+		if ((fast_avg_setup_tm_hc) > ADC_FAST_AVG_SAMPLE_16) {
+			pr_err("Max average support is 2^16\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Parse each channel child node into adc_channel_list[i]. */
+	for_each_child_of_node(node, child) {
+		int channel_num, scaling = 0, post_scaling = 0;
+		/* NOTE(review): this 'rc' shadows the function-scope rc. */
+		int fast_avg_setup, calib_type = 0, rc, hw_settle_time = 0;
+		const char *calibration_param, *channel_name;
+
+		channel_name = of_get_property(child,
+				"label", NULL) ? : child->name;
+		if (!channel_name) {
+			pr_err("Invalid channel name\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child, "reg", &channel_num);
+		if (rc) {
+			pr_err("Invalid channel num\n");
+			return -EINVAL;
+		}
+
+		/* IADC channels carry no scaling/settle/calibration info. */
+		if (!of_device_is_compatible(node, "qcom,qpnp-iadc")) {
+			rc = of_property_read_u32(child,
+				"qcom,hw-settle-time", &hw_settle_time);
+			if (rc) {
+				pr_err("Invalid channel hw settle time property\n");
+				return -EINVAL;
+			}
+			rc = of_property_read_u32(child,
+				"qcom,pre-div-channel-scaling", &scaling);
+			if (rc) {
+				pr_err("Invalid channel scaling property\n");
+				return -EINVAL;
+			}
+			rc = of_property_read_u32(child,
+				"qcom,scale-function", &post_scaling);
+			if (rc) {
+				pr_err("Invalid channel post scaling property\n");
+				return -EINVAL;
+			}
+			rc = of_property_read_string(child,
+				"qcom,calibration-type", &calibration_param);
+			if (rc) {
+				pr_err("Invalid calibration type\n");
+				return -EINVAL;
+			}
+
+			/* Map the calibration string to the HC or legacy enum. */
+			if (!strcmp(calibration_param, "absolute")) {
+				if (adc_hc)
+					calib_type = ADC_HC_ABS_CAL;
+				else
+					calib_type = CALIB_ABSOLUTE;
+			} else if (!strcmp(calibration_param, "ratiometric")) {
+				if (adc_hc)
+					calib_type = ADC_HC_RATIO_CAL;
+				else
+					calib_type = CALIB_RATIOMETRIC;
+			} else if (!strcmp(calibration_param, "no_cal")) {
+				if (adc_hc)
+					calib_type = ADC_HC_NO_CAL;
+				else {
+					pr_err("%s: Invalid calibration property\n",
+						__func__);
+					return -EINVAL;
+				}
+			} else {
+				pr_err("%s: Invalid calibration property\n",
+						__func__);
+				return -EINVAL;
+			}
+		}
+
+		/* ADC_TM_HC fast avg setting is common across channels */
+		if (!of_device_is_compatible(node, "qcom,qpnp-adc-tm-hc")) {
+			rc = of_property_read_u32(child,
+				"qcom,fast-avg-setup", &fast_avg_setup);
+			if (rc) {
+				pr_err("Invalid channel fast average setup\n");
+				return -EINVAL;
+			}
+		} else {
+			fast_avg_setup = fast_avg_setup_tm_hc;
+		}
+
+		/* ADC_TM_HC decimation setting is common across channels */
+		if (!of_device_is_compatible(node, "qcom,qpnp-adc-tm-hc")) {
+			rc = of_property_read_u32(child,
+				"qcom,decimation", &decimation);
+			if (rc) {
+				pr_err("Invalid decimation\n");
+				return -EINVAL;
+			}
+		} else {
+			decimation = decimation_tm_hc;
+		}
+
+		/* VADC_HC: optional per-channel calibration value. */
+		if (of_device_is_compatible(node, "qcom,qpnp-vadc-hc")) {
+			rc = of_property_read_u32(child, "qcom,cal-val",
+							&cal_val_hc);
+			if (rc) {
+				pr_debug("Use calibration value from timer\n");
+				adc_channel_list[i].cal_val = ADC_TIMER_CAL;
+			} else {
+				adc_channel_list[i].cal_val = cal_val_hc;
+			}
+		}
+
+		/* Individual channel properties */
+		adc_channel_list[i].name = (char *)channel_name;
+		adc_channel_list[i].channel_num = channel_num;
+		adc_channel_list[i].adc_decimation = decimation;
+		adc_channel_list[i].fast_avg_setup = fast_avg_setup;
+		if (!of_device_is_compatible(node, "qcom,qpnp-iadc")) {
+			adc_channel_list[i].chan_path_prescaling = scaling;
+			adc_channel_list[i].adc_scale_fn = post_scaling;
+			adc_channel_list[i].hw_settle_time = hw_settle_time;
+			adc_channel_list[i].calib_type = calib_type;
+		}
+		i++;
+	}
+
+	/* Get the ADC VDD reference voltage and ADC bit resolution */
+	rc = of_property_read_u32(node, "qcom,adc-vdd-reference",
+			&adc_prop->adc_vdd_reference);
+	if (rc) {
+		pr_err("Invalid adc vdd reference property\n");
+		return -EINVAL;
+	}
+	rc = of_property_read_u32(node, "qcom,adc-bit-resolution",
+			&adc_prop->bitresolution);
+	if (rc) {
+		pr_err("Invalid adc bit resolution property\n");
+		return -EINVAL;
+	}
+	adc_qpnp->adc_prop = adc_prop;
+
+	/* Get the peripheral address */
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+
+	adc_qpnp->slave = to_spmi_device(pdev->dev.parent)->usid;
+	adc_qpnp->offset = base;
+
+	/* Register the ADC peripheral interrupt */
+	adc_qpnp->adc_irq_eoc = platform_get_irq_byname(pdev,
+							"eoc-int-en-set");
+	if (adc_qpnp->adc_irq_eoc < 0) {
+		pr_err("Invalid irq\n");
+		return -ENXIO;
+	}
+
+	init_completion(&adc_qpnp->adc_rslt_completion);
+
+	/* Optional HKADC LDO supply. */
+	if (of_get_property(node, "hkadc_ldo-supply", NULL)) {
+		adc_qpnp->hkadc_ldo = regulator_get(&pdev->dev, "hkadc_ldo");
+		if (IS_ERR(adc_qpnp->hkadc_ldo)) {
+			pr_err("hkadc_ldo-supply node not found\n");
+			return -EINVAL;
+		}
+
+		rc = regulator_set_voltage(adc_qpnp->hkadc_ldo,
+				QPNP_VADC_LDO_VOLTAGE_MIN,
+				QPNP_VADC_LDO_VOLTAGE_MAX);
+		if (rc < 0) {
+			pr_err("setting voltage for hkadc_ldo failed\n");
+			return rc;
+		}
+
+		rc = regulator_set_load(adc_qpnp->hkadc_ldo, 100000);
+		if (rc < 0) {
+			pr_err("hkadc_ldo optimum mode failed%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* Optional HKADC "ok" signal supply. */
+	if (of_get_property(node, "hkadc_ok-supply", NULL)) {
+		adc_qpnp->hkadc_ldo_ok = regulator_get(&pdev->dev,
+				"hkadc_ok");
+		if (IS_ERR(adc_qpnp->hkadc_ldo_ok)) {
+			pr_err("hkadc_ok node not found\n");
+			return -EINVAL;
+		}
+
+		rc = regulator_set_voltage(adc_qpnp->hkadc_ldo_ok,
+				QPNP_VADC_OK_VOLTAGE_MIN,
+				QPNP_VADC_OK_VOLTAGE_MAX);
+		if (rc < 0) {
+			pr_err("setting voltage for hkadc-ldo-ok failed\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_get_devicetree_data);
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
new file mode 100644
index 0000000..a597ef9
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -0,0 +1,1658 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#ifdef CONFIG_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/platform_device.h>
+#ifdef CONFIG_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+/* QPNP IADC register definition */
+#define QPNP_IADC_REVISION1				0x0
+#define QPNP_IADC_REVISION2				0x1
+#define QPNP_IADC_REVISION3				0x2
+#define QPNP_IADC_REVISION4				0x3
+#define QPNP_IADC_PERPH_TYPE				0x4
+#define QPNP_IADC_PERH_SUBTYPE				0x5
+
+#define QPNP_IADC_SUPPORTED_REVISION2			1
+
+#define QPNP_STATUS1					0x8
+#define QPNP_STATUS1_OP_MODE				4
+#define QPNP_STATUS1_MULTI_MEAS_EN			BIT(3)
+#define QPNP_STATUS1_MEAS_INTERVAL_EN_STS		BIT(2)
+#define QPNP_STATUS1_REQ_STS				BIT(1)
+#define QPNP_STATUS1_EOC				BIT(0)
+#define QPNP_STATUS1_REQ_STS_EOC_MASK			0x3
+#define QPNP_STATUS2					0x9
+#define QPNP_STATUS2_CONV_SEQ_STATE_SHIFT		4
+#define QPNP_STATUS2_FIFO_NOT_EMPTY_FLAG		BIT(1)
+#define QPNP_STATUS2_CONV_SEQ_TIMEOUT_STS		BIT(0)
+#define QPNP_CONV_TIMEOUT_ERR				2
+
+#define QPNP_IADC_MODE_CTL				0x40
+#define QPNP_OP_MODE_SHIFT				4
+#define QPNP_USE_BMS_DATA				BIT(4)
+#define QPNP_VADC_SYNCH_EN				BIT(2)
+#define QPNP_OFFSET_RMV_EN				BIT(1)
+#define QPNP_ADC_TRIM_EN				BIT(0)
+#define QPNP_IADC_EN_CTL1				0x46
+#define QPNP_IADC_ADC_EN				BIT(7)
+#define QPNP_ADC_CH_SEL_CTL				0x48
+#define QPNP_ADC_DIG_PARAM				0x50
+#define QPNP_ADC_CLK_SEL_MASK				0x3
+#define QPNP_ADC_DEC_RATIO_SEL_MASK			0xc
+#define QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT		2
+
+#define QPNP_CONV_REQ					0x52
+#define QPNP_CONV_REQ_SET				BIT(7)
+#define QPNP_CONV_SEQ_CTL				0x54
+#define QPNP_CONV_SEQ_HOLDOFF_SHIFT			4
+#define QPNP_CONV_SEQ_TRIG_CTL				0x55
+#define QPNP_FAST_AVG_CTL				0x5a
+
+#define QPNP_M0_LOW_THR_LSB				0x5c
+#define QPNP_M0_LOW_THR_MSB				0x5d
+#define QPNP_M0_HIGH_THR_LSB				0x5e
+#define QPNP_M0_HIGH_THR_MSB				0x5f
+#define QPNP_M1_LOW_THR_LSB				0x69
+#define QPNP_M1_LOW_THR_MSB				0x6a
+#define QPNP_M1_HIGH_THR_LSB				0x6b
+#define QPNP_M1_HIGH_THR_MSB				0x6c
+
+#define QPNP_DATA0					0x60
+#define QPNP_DATA1					0x61
+#define QPNP_CONV_TIMEOUT_ERR				2
+
+#define QPNP_IADC_SEC_ACCESS				0xD0
+#define QPNP_IADC_SEC_ACCESS_DATA			0xA5
+#define QPNP_IADC_MSB_OFFSET				0xF2
+#define QPNP_IADC_LSB_OFFSET				0xF3
+#define QPNP_IADC_NOMINAL_RSENSE			0xF4
+#define QPNP_IADC_ATE_GAIN_CALIB_OFFSET			0xF5
+#define QPNP_INT_TEST_VAL				0xE1
+
+#define QPNP_IADC_ADC_CH_SEL_CTL			0x48
+#define QPNP_IADC_ADC_CHX_SEL_SHIFT			3
+
+#define QPNP_IADC_ADC_DIG_PARAM				0x50
+#define QPNP_IADC_CLK_SEL_SHIFT				1
+#define QPNP_IADC_DEC_RATIO_SEL				3
+
+#define QPNP_IADC_CONV_REQUEST				0x52
+#define QPNP_IADC_CONV_REQ				BIT(7)
+
+#define QPNP_IADC_DATA0					0x60
+#define QPNP_IADC_DATA1					0x61
+
+#define QPNP_ADC_CONV_TIME_MIN				2000
+#define QPNP_ADC_CONV_TIME_MAX				2100
+#define QPNP_ADC_ERR_COUNT				20
+
+#define QPNP_ADC_GAIN_NV				17857
+#define QPNP_OFFSET_CALIBRATION_SHORT_CADC_LEADS_IDEAL	0
+#define QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR		10000000
+#define QPNP_IADC_NANO_VOLTS_FACTOR			1000000
+#define QPNP_IADC_CALIB_SECONDS				300000
+#define QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT		15625
+#define QPNP_IADC_DIE_TEMP_CALIB_OFFSET			5000
+
+#define QPNP_RAW_CODE_16_BIT_MSB_MASK			0xff00
+#define QPNP_RAW_CODE_16_BIT_LSB_MASK			0xff
+#define QPNP_BIT_SHIFT_8				8
+#define QPNP_RSENSE_MSB_SIGN_CHECK			0x80
+#define QPNP_ADC_COMPLETION_TIMEOUT			HZ
+#define SMBB_BAT_IF_TRIM_CNST_RDS_MASK			0x7
+#define SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_0		0
+#define SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2		2
+#define QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST	127
+#define QPNP_IADC_RSENSE_DEFAULT_VALUE			7800000
+#define QPNP_IADC_RSENSE_DEFAULT_TYPEB_GF		9000000
+#define QPNP_IADC_RSENSE_DEFAULT_TYPEB_SMIC		9700000
+
+/*
+ * struct qpnp_iadc_comp - per-chip data feeding current compensation.
+ * @ext_rsense:         true when an external sense resistor is in use
+ *                      (mirrors qpnp_iadc_chip.external_rsense).
+ * @id:                 fab/process id, read from QPNP_INT_TEST_VAL.
+ * @sys_gain:           ATE gain calibration byte, read from
+ *                      QPNP_IADC_ATE_GAIN_CALIB_OFFSET.
+ * @revision_dig_major: digital revision, read from QPNP_IADC_REVISION2.
+ * @revision_ana_minor: analog revision, read from QPNP_IADC_REVISION3.
+ *
+ * Populated once by qpnp_iadc_comp_info() and consumed by
+ * qpnp_iadc_comp() to pick the revision-specific coefficients.
+ */
+struct qpnp_iadc_comp {
+	bool	ext_rsense;
+	u8	id;
+	u8	sys_gain;
+	u8	revision_dig_major;
+	u8	revision_ana_minor;
+};
+
+/*
+ * struct qpnp_iadc_chip - per-device state for one QPNP IADC peripheral.
+ * @dev:			parent device (used for pm_stay_awake/pm_relax).
+ * @adc:			shared QPNP ADC core state (regmap, base offset,
+ *				channel table, result completion and lock).
+ * @rsense:			external sense-resistor value, returned as-is by
+ *				qpnp_iadc_get_rsense() when @external_rsense.
+ * @external_rsense:		conversion/compensation uses an external rsense.
+ * @default_internal_rsense:	set by qpnp_iadc_rds_trim_update_check() when
+ *				the trim registers match a known default pattern;
+ *				then @rsense_workaround_value is used instead of
+ *				the fused QPNP_IADC_NOMINAL_RSENSE value.
+ * @iadc_hwmon:			registered hwmon device handle.
+ * @list:			node in the file-global qpnp_iadc_device_list.
+ * @die_temp:			die temperature captured at last calibration;
+ *				compared in qpnp_check_pmic_temp().
+ * @iadc_work:			periodic recalibration work (qpnp_iadc_work).
+ * @iadc_mode_sel:		when true, conversions run VADC-synchronized
+ *				(QPNP_VADC_SYNCH_EN set in the mode register).
+ * @iadc_comp:			compensation parameters, see struct qpnp_iadc_comp.
+ * @vadc_dev:			companion VADC used for DIE_TEMP readings.
+ * @trigger_completion_work:	worker that completes the EOC completion from
+ *				process context (scheduled by the ISR).
+ * @skip_auto_calibrations:	suppresses the periodic/temp-driven recalibration.
+ * @iadc_poll_eoc:		poll STATUS1 for EOC instead of waiting on the
+ *				interrupt-driven completion.
+ * @batt_id_trim_cnst_rds:	absolute register address of the SMBB BAT_IF
+ *				CNST_RDS trim (read via raw regmap_read).
+ * @rds_trim_default_type:	one of enum qpnp_iadc_rsense_rds_workaround.
+ * @max_channels_available:	number of entries in adc->adc_channels.
+ * @rds_trim_default_check:	enables qpnp_iadc_rds_trim_update_check().
+ * @rsense_workaround_value:	rsense (n-ohm) substituted when the default
+ *				internal-rsense workaround is active.
+ * @sens_attr:			trailing zero-length array of hwmon sysfs
+ *				attributes; presumably over-allocated at probe
+ *				time for the channel count -- TODO confirm, the
+ *				allocation site is not in this view.
+ */
+struct qpnp_iadc_chip {
+	struct device				*dev;
+	struct qpnp_adc_drv			*adc;
+	int32_t					rsense;
+	bool					external_rsense;
+	bool					default_internal_rsense;
+	struct device				*iadc_hwmon;
+	struct list_head			list;
+	int64_t					die_temp;
+	struct delayed_work			iadc_work;
+	bool					iadc_mode_sel;
+	struct qpnp_iadc_comp			iadc_comp;
+	struct qpnp_vadc_chip			*vadc_dev;
+	struct work_struct			trigger_completion_work;
+	bool					skip_auto_calibrations;
+	bool					iadc_poll_eoc;
+	u16					batt_id_trim_cnst_rds;
+	int					rds_trim_default_type;
+	int					max_channels_available;
+	bool					rds_trim_default_check;
+	int32_t					rsense_workaround_value;
+	struct sensor_device_attribute		sens_attr[0];
+};
+
+LIST_HEAD(qpnp_iadc_device_list);
+
+/*
+ * enum qpnp_iadc_rsense_rds_workaround - trim-pattern classes recognised by
+ * qpnp_iadc_rds_trim_update_check(); each type matches a different
+ * combination of the SMBB CNST_RDS trim and QPNP_IADC_NOMINAL_RSENSE value
+ * and selects the rsense workaround accordingly.
+ */
+enum qpnp_iadc_rsense_rds_workaround {
+	QPNP_IADC_RDS_DEFAULT_TYPEA,
+	QPNP_IADC_RDS_DEFAULT_TYPEB,
+	QPNP_IADC_RDS_DEFAULT_TYPEC,
+};
+
+/*
+ * qpnp_iadc_read_reg() - read one byte from an IADC peripheral register.
+ * @iadc: IADC chip handle.
+ * @reg:  register offset relative to the peripheral base (adc->offset).
+ * @data: filled with the byte read on success.
+ *
+ * Return: 0 on success, negative regmap error code otherwise.
+ */
+static int32_t qpnp_iadc_read_reg(struct qpnp_iadc_chip *iadc,
+						uint32_t reg, u8 *data)
+{
+	int rc;
+	uint val;
+
+	rc = regmap_read(iadc->adc->regmap, (iadc->adc->offset + reg), &val);
+	if (rc < 0) {
+		pr_err("qpnp iadc read reg %d failed with %d\n", reg, rc);
+		return rc;
+	}
+	/* regmap_read() yields an unsigned int; registers are 8 bits wide. */
+	*data = (u8)val;
+
+	return 0;
+}
+
+/*
+ * qpnp_iadc_write_reg() - write one byte to an IADC peripheral register.
+ * @iadc: IADC chip handle.
+ * @reg:  register offset relative to the peripheral base (adc->offset).
+ * @data: byte value to write.
+ *
+ * Return: 0 on success, negative regmap error code otherwise.
+ */
+static int32_t qpnp_iadc_write_reg(struct qpnp_iadc_chip *iadc,
+						uint32_t reg, u8 data)
+{
+	int rc;
+	u8 *buf;
+
+	/* The buf indirection is redundant; *buf is simply data. */
+	buf = &data;
+	rc = regmap_write(iadc->adc->regmap, (iadc->adc->offset + reg), *buf);
+	if (rc < 0) {
+		pr_err("qpnp iadc write reg %d failed with %d\n", reg, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * qpnp_iadc_is_valid() - check that @iadc is a registered chip.
+ *
+ * Return: 0 when @iadc is present in qpnp_iadc_device_list,
+ * -EINVAL otherwise. Callers use a negative result to return
+ * -EPROBE_DEFER to their own callers.
+ */
+static int qpnp_iadc_is_valid(struct qpnp_iadc_chip *iadc)
+{
+	struct qpnp_iadc_chip *iadc_chip = NULL;
+
+	list_for_each_entry(iadc_chip, &qpnp_iadc_device_list, list)
+		if (iadc == iadc_chip)
+			return 0;
+
+	return -EINVAL;
+}
+
+/*
+ * qpnp_iadc_trigger_completion() - work handler that signals end of
+ * conversion. Runs in process context (scheduled from qpnp_iadc_isr())
+ * and completes adc_rslt_completion, waking the waiter in
+ * qpnp_iadc_configure(). Bails out silently if the chip is not (or no
+ * longer) registered.
+ */
+static void qpnp_iadc_trigger_completion(struct work_struct *work)
+{
+	struct qpnp_iadc_chip *iadc = container_of(work,
+			struct qpnp_iadc_chip, trigger_completion_work);
+
+	if (qpnp_iadc_is_valid(iadc) < 0)
+		return;
+
+	complete(&iadc->adc->adc_rslt_completion);
+}
+
+/*
+ * qpnp_iadc_isr() - EOC interrupt handler.
+ *
+ * Defers the completion signalling to the system workqueue rather than
+ * completing from hard-irq context directly.
+ */
+static irqreturn_t qpnp_iadc_isr(int irq, void *dev_id)
+{
+	struct qpnp_iadc_chip *iadc = dev_id;
+
+	schedule_work(&iadc->trigger_completion_work);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * qpnp_iadc_enable() - turn the IADC converter on or off.
+ * @dev:   IADC chip handle.
+ * @state: true to enable, false to disable.
+ *
+ * Writes QPNP_IADC_ADC_EN into QPNP_IADC_EN_CTL1 to enable; the disable
+ * path writes (~data & QPNP_IADC_ADC_EN), i.e. zero, clearing the bit.
+ *
+ * Return: 0 on success, negative error code from the register write.
+ */
+static int32_t qpnp_iadc_enable(struct qpnp_iadc_chip *dev, bool state)
+{
+	int rc = 0;
+	u8 data = 0;
+
+	data = QPNP_IADC_ADC_EN;
+	if (state) {
+		rc = qpnp_iadc_write_reg(dev, QPNP_IADC_EN_CTL1,
+					data);
+		if (rc < 0) {
+			pr_err("IADC enable failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_iadc_write_reg(dev, QPNP_IADC_EN_CTL1,
+					(~data & QPNP_IADC_ADC_EN));
+		if (rc < 0) {
+			pr_err("IADC disable failed\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * qpnp_iadc_status_debug() - dump conversion-state registers after a
+ * missed end-of-conversion, then disable the converter.
+ *
+ * Reads mode, digital-param, channel-select, STATUS1 and EN_CTL1 and
+ * logs them at debug level before forcing the ADC off so the peripheral
+ * is left in a known state.
+ *
+ * Return: 0 on success, negative error code from the first failing
+ * register access.
+ */
+static int32_t qpnp_iadc_status_debug(struct qpnp_iadc_chip *dev)
+{
+	int rc = 0;
+	u8 mode = 0, status1 = 0, chan = 0, dig = 0, en = 0;
+
+	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_MODE_CTL, &mode);
+	if (rc < 0) {
+		pr_err("mode ctl register read failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_iadc_read_reg(dev, QPNP_ADC_DIG_PARAM, &dig);
+	if (rc < 0) {
+		pr_err("digital param read failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_ADC_CH_SEL_CTL, &chan);
+	if (rc < 0) {
+		pr_err("channel read failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_iadc_read_reg(dev, QPNP_STATUS1, &status1);
+	if (rc < 0) {
+		pr_err("status1 read failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_EN_CTL1, &en);
+	if (rc < 0) {
+		pr_err("en read failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("EOC not set with status:%x, dig:%x, ch:%x, mode:%x, en:%x\n",
+			status1, dig, chan, mode, en);
+
+	rc = qpnp_iadc_enable(dev, false);
+	if (rc < 0) {
+		pr_err("IADC disable failed with %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * qpnp_iadc_read_conversion_result() - fetch the 16-bit raw conversion
+ * code from DATA0/DATA1 and disable the converter.
+ * @iadc: IADC chip handle.
+ * @data: filled with (DATA1 << 8) | DATA0 on success.
+ *
+ * NOTE(review): the parameter is int16_t * but the only caller,
+ * qpnp_iadc_configure(), passes a uint16_t *raw_code -- a pointer
+ * signedness mismatch (-Wpointer-sign); confirm the intended type.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int32_t qpnp_iadc_read_conversion_result(struct qpnp_iadc_chip *iadc,
+								int16_t *data)
+{
+	uint8_t rslt_lsb, rslt_msb;
+	uint16_t rslt;
+	int32_t rc;
+
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_DATA0, &rslt_lsb);
+	if (rc < 0) {
+		pr_err("qpnp adc result read failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_DATA1, &rslt_msb);
+	if (rc < 0) {
+		pr_err("qpnp adc result read failed with %d\n", rc);
+		return rc;
+	}
+
+	rslt = (rslt_msb << 8) | rslt_lsb;
+	*data = rslt;
+
+	/* Conversion done; power the ADC back down. */
+	rc = qpnp_iadc_enable(iadc, false);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+#define QPNP_IADC_PM8026_2_REV2	4
+#define QPNP_IADC_PM8026_2_REV3	2
+
+#define QPNP_COEFF_1					969000
+#define QPNP_COEFF_2					32
+#define QPNP_COEFF_3_TYPEA				1700000
+#define QPNP_COEFF_3_TYPEB				1000000
+#define QPNP_COEFF_4					100
+#define QPNP_COEFF_5					15
+#define QPNP_COEFF_6					100000
+#define QPNP_COEFF_7					21
+#define QPNP_COEFF_8					100000000
+#define QPNP_COEFF_9					38
+#define QPNP_COEFF_10					40
+#define QPNP_COEFF_11					7
+#define QPNP_COEFF_12					11
+#define QPNP_COEFF_13					37
+#define QPNP_COEFF_14					39
+#define QPNP_COEFF_15					9
+#define QPNP_COEFF_16					11
+#define QPNP_COEFF_17					851200
+#define QPNP_COEFF_18					296500
+#define QPNP_COEFF_19					222400
+#define QPNP_COEFF_20					813800
+#define QPNP_COEFF_21					1059100
+#define QPNP_COEFF_22					5000000
+#define QPNP_COEFF_23					3722500
+#define QPNP_COEFF_24					84
+#define QPNP_COEFF_25					33
+#define QPNP_COEFF_26					22
+#define QPNP_COEFF_27					53
+#define QPNP_COEFF_28					48
+
+/*
+ * qpnp_iadc_comp() - apply die-temperature and system-gain compensation
+ * to a current reading.
+ * @result:   in/out current value; scaled up by 1e6 internally and scaled
+ *            back down by the div64_s64() calls at the end.
+ * @iadc:     IADC chip handle (supplies fab id, sys_gain, ext_rsense).
+ * @die_temp: die temperature used in the linear correction term.
+ *
+ * Selects linear coefficients (coeff_a, coeff_b) keyed on the PMIC
+ * revision id and fab id, further split by internal vs external rsense
+ * and by sign of the reading (charge vs discharge). The correction is
+ * temp_var = 1000 * (1e6 - (coeff_a * die_temp + coeff_b) / 100), and
+ * the result is divided by it; external rsense additionally folds in the
+ * ATE system-gain term.
+ *
+ * Return: 0 always; silently returns without compensating when the
+ * revision id is unavailable (-EINVAL from qpnp_adc_get_revid_version).
+ */
+static int32_t qpnp_iadc_comp(int64_t *result, struct qpnp_iadc_chip *iadc,
+							int64_t die_temp)
+{
+	int64_t temp_var = 0, sys_gain_coeff = 0, old;
+	int32_t coeff_a = 0, coeff_b = 0;
+	int version = 0;
+
+	version = qpnp_adc_get_revid_version(iadc->dev);
+	if (version == -EINVAL)
+		return 0;
+
+	old = *result;
+	*result = *result * 1000000;
+
+	/* sys_gain is a signed-magnitude-style byte: >127 means negative. */
+	if (iadc->iadc_comp.sys_gain > 127)
+		sys_gain_coeff = -QPNP_COEFF_6 *
+				(iadc->iadc_comp.sys_gain - 128);
+	else
+		sys_gain_coeff = QPNP_COEFF_6 *
+				iadc->iadc_comp.sys_gain;
+
+	switch (version) {
+	case QPNP_REV_ID_8941_3_1:
+		switch (iadc->iadc_comp.id) {
+		case COMP_ID_GF:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				coeff_a = QPNP_COEFF_2;
+				coeff_b = -QPNP_COEFF_3_TYPEA;
+			} else {
+				if (*result < 0) {
+					/* charge */
+					coeff_a = QPNP_COEFF_5;
+					coeff_b = QPNP_COEFF_6;
+				} else {
+					/* discharge */
+					coeff_a = -QPNP_COEFF_7;
+					coeff_b = QPNP_COEFF_6;
+				}
+			}
+			break;
+		case COMP_ID_TSMC:
+		default:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				coeff_a = QPNP_COEFF_2;
+				coeff_b = -QPNP_COEFF_3_TYPEB;
+			} else {
+				if (*result < 0) {
+					/* charge */
+					coeff_a = QPNP_COEFF_5;
+					coeff_b = QPNP_COEFF_6;
+				} else {
+					/* discharge */
+					coeff_a = -QPNP_COEFF_7;
+					coeff_b = QPNP_COEFF_6;
+				}
+			}
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8026_2_1:
+	case QPNP_REV_ID_8026_2_2:
+		/* pm8026 rev 2.1 and 2.2 */
+		switch (iadc->iadc_comp.id) {
+		case COMP_ID_GF:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					coeff_a = QPNP_COEFF_25;
+					coeff_b = 0;
+				}
+			} else {
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					/* discharge */
+					coeff_a = 0;
+					coeff_b = 0;
+				}
+			}
+			break;
+		case COMP_ID_TSMC:
+		default:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					coeff_a = QPNP_COEFF_26;
+					coeff_b = 0;
+				}
+			} else {
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					/* discharge */
+					coeff_a = 0;
+					coeff_b = 0;
+				}
+			}
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8026_1_0:
+		/* pm8026 rev 1.0 */
+		switch (iadc->iadc_comp.id) {
+		case COMP_ID_GF:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = QPNP_COEFF_9;
+					coeff_b = -QPNP_COEFF_17;
+				} else {
+					coeff_a = QPNP_COEFF_10;
+					coeff_b = QPNP_COEFF_18;
+				}
+			} else {
+				if (*result < 0) {
+					/* charge */
+					coeff_a = -QPNP_COEFF_11;
+					coeff_b = 0;
+				} else {
+					/* discharge */
+					coeff_a = -QPNP_COEFF_17;
+					coeff_b = -QPNP_COEFF_19;
+				}
+			}
+			break;
+		case COMP_ID_TSMC:
+		default:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = QPNP_COEFF_13;
+					coeff_b = -QPNP_COEFF_20;
+				} else {
+					coeff_a = QPNP_COEFF_14;
+					coeff_b = QPNP_COEFF_21;
+				}
+			} else {
+				if (*result < 0) {
+					/* charge */
+					coeff_a = -QPNP_COEFF_15;
+					coeff_b = 0;
+				} else {
+					/* discharge */
+					coeff_a = -QPNP_COEFF_12;
+					coeff_b = -QPNP_COEFF_19;
+				}
+			}
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8110_1_0:
+		/* pm8110 rev 1.0 */
+		switch (iadc->iadc_comp.id) {
+		case COMP_ID_GF:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = QPNP_COEFF_24;
+					coeff_b = -QPNP_COEFF_22;
+				} else {
+					coeff_a = QPNP_COEFF_24;
+					coeff_b = -QPNP_COEFF_23;
+				}
+			}
+			break;
+		case COMP_ID_SMIC:
+		default:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = QPNP_COEFF_24;
+					coeff_b = -QPNP_COEFF_22;
+				} else {
+					coeff_a = QPNP_COEFF_24;
+					coeff_b = -QPNP_COEFF_23;
+				}
+			}
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8110_2_0:
+		/* pm8110 rev 2.0 compensates relative to 25C. */
+		die_temp -= 25000;
+		switch (iadc->iadc_comp.id) {
+		case COMP_ID_GF:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					coeff_a = QPNP_COEFF_27;
+					coeff_b = 0;
+				}
+			}
+			break;
+		case COMP_ID_SMIC:
+		default:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					coeff_a = QPNP_COEFF_28;
+					coeff_b = 0;
+				}
+			}
+			break;
+		}
+		break;
+	default:
+	case QPNP_REV_ID_8026_2_0:
+		/* pm8026 rev 1.0 */
+		coeff_a = 0;
+		coeff_b = 0;
+		break;
+	}
+
+	temp_var = (coeff_a * die_temp) + coeff_b;
+	temp_var = div64_s64(temp_var, QPNP_COEFF_4);
+	temp_var = 1000 * (1000000 - temp_var);
+
+	if (!iadc->iadc_comp.ext_rsense) {
+		/* internal rsense */
+		*result = div64_s64(*result * 1000, temp_var);
+	}
+
+	if (iadc->iadc_comp.ext_rsense) {
+		/* external rsense */
+		sys_gain_coeff = (1000000 +
+			div64_s64(sys_gain_coeff, QPNP_COEFF_4));
+		temp_var = div64_s64(temp_var * sys_gain_coeff, 1000000);
+		*result = div64_s64(*result * 1000, temp_var);
+	}
+	pr_debug("%lld compensated into %lld, a: %d, b: %d, sys_gain: %lld\n",
+			old, *result, coeff_a, coeff_b, sys_gain_coeff);
+
+	return 0;
+}
+
+/*
+ * qpnp_iadc_comp_result() - compensate @result using the die temperature
+ * cached at the last calibration (iadc->die_temp).
+ */
+int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *iadc, int64_t *result)
+{
+	return qpnp_iadc_comp(result, iadc, iadc->die_temp);
+}
+EXPORT_SYMBOL(qpnp_iadc_comp_result);
+
+/*
+ * qpnp_iadc_rds_trim_update_check() - detect parts fused with default
+ * (untrimmed) internal rsense values and arm a software workaround.
+ *
+ * Compares QPNP_IADC_NOMINAL_RSENSE against the full-scale constant and
+ * the SMBB BAT_IF CNST_RDS trim against type-specific masks; on a match
+ * it sets default_internal_rsense and picks rsense_workaround_value,
+ * which qpnp_iadc_get_rsense() then returns instead of the fused value.
+ * No-op unless rds_trim_default_check was set from DT.
+ *
+ * Return: 0 on success (including "no workaround needed"), negative
+ * error code on register read failure.
+ */
+static int qpnp_iadc_rds_trim_update_check(struct qpnp_iadc_chip *iadc)
+{
+	int rc = 0;
+	u8 trim2_val = 0;
+	uint smbb_batt_trm_data = 0;
+	u8 smbb_batt_trm_cnst_rds = 0;
+
+	if (!iadc->rds_trim_default_check) {
+		pr_debug("No internal rds trim check needed\n");
+		return 0;
+	}
+
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE, &trim2_val);
+	if (rc < 0) {
+		pr_err("qpnp adc trim2_fullscale1 reg read failed %d\n", rc);
+		return rc;
+	}
+
+	/* CNST_RDS lives outside the IADC block: absolute-address read. */
+	rc = regmap_read(iadc->adc->regmap, iadc->batt_id_trim_cnst_rds,
+			 &smbb_batt_trm_data);
+	if (rc < 0) {
+		pr_err("batt_id trim_cnst rds reg read failed %d\n", rc);
+		return rc;
+	}
+
+	smbb_batt_trm_cnst_rds = (u8)smbb_batt_trm_data &
+				SMBB_BAT_IF_TRIM_CNST_RDS_MASK;
+
+	pr_debug("n_trim:0x%x smb_trm:0x%02x\n", trim2_val, smbb_batt_trm_data);
+
+	if (iadc->rds_trim_default_type == QPNP_IADC_RDS_DEFAULT_TYPEA) {
+
+		if ((smbb_batt_trm_cnst_rds ==
+				SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
+		(trim2_val == QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
+			iadc->rsense_workaround_value =
+					QPNP_IADC_RSENSE_DEFAULT_VALUE;
+			iadc->default_internal_rsense = true;
+		}
+	} else if (iadc->rds_trim_default_type ==
+						QPNP_IADC_RDS_DEFAULT_TYPEB) {
+		if ((smbb_batt_trm_cnst_rds >=
+				SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
+		(trim2_val == QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
+			iadc->rsense_workaround_value =
+					QPNP_IADC_RSENSE_DEFAULT_VALUE;
+				iadc->default_internal_rsense = true;
+		} else if ((smbb_batt_trm_cnst_rds <
+				SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
+			(trim2_val ==
+				QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
+			/* Type B below CONST_2: default depends on fab id. */
+			if (iadc->iadc_comp.id == COMP_ID_GF) {
+				iadc->rsense_workaround_value =
+					QPNP_IADC_RSENSE_DEFAULT_TYPEB_GF;
+				iadc->default_internal_rsense = true;
+			} else if (iadc->iadc_comp.id == COMP_ID_SMIC) {
+				iadc->rsense_workaround_value =
+					QPNP_IADC_RSENSE_DEFAULT_TYPEB_SMIC;
+				iadc->default_internal_rsense = true;
+			}
+		}
+	} else if (iadc->rds_trim_default_type == QPNP_IADC_RDS_DEFAULT_TYPEC) {
+
+		if ((smbb_batt_trm_cnst_rds >
+				SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_0) &&
+		(smbb_batt_trm_cnst_rds <=
+				SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
+		(trim2_val == QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
+			iadc->rsense_workaround_value =
+					QPNP_IADC_RSENSE_DEFAULT_VALUE;
+			iadc->default_internal_rsense = true;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * qpnp_iadc_comp_info() - populate iadc->iadc_comp from hardware.
+ *
+ * Reads the fab id (QPNP_INT_TEST_VAL), digital/analog revisions and
+ * ATE system gain, and copies external_rsense into the comp struct.
+ *
+ * Return: 0 on success, negative error code on the first failing read.
+ */
+static int32_t qpnp_iadc_comp_info(struct qpnp_iadc_chip *iadc)
+{
+	int rc = 0;
+
+	rc = qpnp_iadc_read_reg(iadc, QPNP_INT_TEST_VAL, &iadc->iadc_comp.id);
+	if (rc < 0) {
+		pr_err("qpnp adc comp id failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION2,
+					&iadc->iadc_comp.revision_dig_major);
+	if (rc < 0) {
+		pr_err("qpnp adc revision2 read failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION3,
+					&iadc->iadc_comp.revision_ana_minor);
+	if (rc < 0) {
+		pr_err("qpnp adc revision3 read failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_ATE_GAIN_CALIB_OFFSET,
+						&iadc->iadc_comp.sys_gain);
+	if (rc < 0) {
+		pr_err("full scale read failed with %d\n", rc);
+		return rc;
+	}
+
+	if (iadc->external_rsense)
+		iadc->iadc_comp.ext_rsense = true;
+
+	pr_debug("fab id = %u, revision_dig_major = %u, revision_ana_minor = %u sys gain = %u, external_rsense = %d\n",
+			iadc->iadc_comp.id,
+			iadc->iadc_comp.revision_dig_major,
+			iadc->iadc_comp.revision_ana_minor,
+			iadc->iadc_comp.sys_gain,
+			iadc->iadc_comp.ext_rsense);
+	return rc;
+}
+
+/*
+ * qpnp_iadc_configure() - program the IADC for one conversion on
+ * @channel and return the raw result.
+ * @iadc:     IADC chip handle.
+ * @channel:  IADC channel to convert.
+ * @raw_code: filled with the 16-bit raw conversion code.
+ * @mode_sel: unused by this implementation (sync mode comes from
+ *            iadc->iadc_mode_sel instead) -- TODO confirm it can go.
+ *
+ * Sequence: write mode/channel/decimation/fast-average registers, enable
+ * the ADC, then set CONV_REQ. Completion is awaited either by polling
+ * STATUS1 for EOC (iadc_poll_eoc, up to QPNP_ADC_ERR_COUNT iterations)
+ * or by waiting on the interrupt-driven completion with a 1 s timeout;
+ * on timeout STATUS1 is re-checked once before declaring failure via
+ * qpnp_iadc_status_debug(). Caller must hold adc->adc_lock.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int32_t qpnp_iadc_configure(struct qpnp_iadc_chip *iadc,
+					enum qpnp_iadc_channels channel,
+					uint16_t *raw_code, uint32_t mode_sel)
+{
+	u8 qpnp_iadc_mode_reg = 0, qpnp_iadc_ch_sel_reg = 0;
+	u8 qpnp_iadc_conv_req = 0, qpnp_iadc_dig_param_reg = 0;
+	u8 status1 = 0;
+	uint32_t count = 0;
+	int32_t rc = 0;
+
+	qpnp_iadc_ch_sel_reg = channel;
+
+	qpnp_iadc_dig_param_reg |= iadc->adc->amux_prop->decimation <<
+					QPNP_IADC_DEC_RATIO_SEL;
+	if (iadc->iadc_mode_sel)
+		qpnp_iadc_mode_reg |= (QPNP_ADC_TRIM_EN | QPNP_VADC_SYNCH_EN);
+	else
+		qpnp_iadc_mode_reg |= QPNP_ADC_TRIM_EN;
+
+	qpnp_iadc_conv_req = QPNP_IADC_CONV_REQ;
+
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MODE_CTL, qpnp_iadc_mode_reg);
+	if (rc) {
+		pr_err("qpnp adc read adc failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_ADC_CH_SEL_CTL,
+						qpnp_iadc_ch_sel_reg);
+	if (rc) {
+		pr_err("qpnp adc read adc failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_iadc_write_reg(iadc, QPNP_ADC_DIG_PARAM,
+						qpnp_iadc_dig_param_reg);
+	if (rc) {
+		pr_err("qpnp adc read adc failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_iadc_write_reg(iadc, QPNP_FAST_AVG_CTL,
+					iadc->adc->amux_prop->fast_avg_setup);
+	if (rc < 0) {
+		pr_err("qpnp adc fast averaging configure error\n");
+		return rc;
+	}
+
+	/* Re-arm the completion before triggering, or the irq is lost. */
+	if (!iadc->iadc_poll_eoc)
+		reinit_completion(&iadc->adc->adc_rslt_completion);
+
+	rc = qpnp_iadc_enable(iadc, true);
+	if (rc)
+		return rc;
+
+	rc = qpnp_iadc_write_reg(iadc, QPNP_CONV_REQ, qpnp_iadc_conv_req);
+	if (rc) {
+		pr_err("qpnp adc read adc failed with %d\n", rc);
+		return rc;
+	}
+
+	if (iadc->iadc_poll_eoc) {
+		while (status1 != QPNP_STATUS1_EOC) {
+			rc = qpnp_iadc_read_reg(iadc, QPNP_STATUS1, &status1);
+			if (rc < 0)
+				return rc;
+			status1 &= QPNP_STATUS1_REQ_STS_EOC_MASK;
+			usleep_range(QPNP_ADC_CONV_TIME_MIN,
+					QPNP_ADC_CONV_TIME_MAX);
+			count++;
+			if (count > QPNP_ADC_ERR_COUNT) {
+				pr_err("retry error exceeded\n");
+				rc = qpnp_iadc_status_debug(iadc);
+				if (rc < 0)
+					pr_err("IADC status debug failed\n");
+				rc = -EINVAL;
+				return rc;
+			}
+		}
+	} else {
+		rc = wait_for_completion_timeout(
+				&iadc->adc->adc_rslt_completion,
+				QPNP_ADC_COMPLETION_TIMEOUT);
+		if (!rc) {
+			/* Timeout: conversion may still have finished. */
+			rc = qpnp_iadc_read_reg(iadc, QPNP_STATUS1, &status1);
+			if (rc < 0)
+				return rc;
+			status1 &= QPNP_STATUS1_REQ_STS_EOC_MASK;
+			if (status1 == QPNP_STATUS1_EOC)
+				pr_debug("End of conversion status set\n");
+			else {
+				rc = qpnp_iadc_status_debug(iadc);
+				if (rc < 0) {
+					pr_err("status debug failed %d\n", rc);
+					return rc;
+				}
+				return -EINVAL;
+			}
+		}
+	}
+
+	rc = qpnp_iadc_read_conversion_result(iadc, raw_code);
+	if (rc) {
+		pr_err("qpnp adc read adc failed with %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+#define IADC_CENTER	0xC000
+#define IADC_READING_RESOLUTION_N	542535
+#define IADC_READING_RESOLUTION_D	100000
+/*
+ * qpnp_convert_raw_offset_voltage() - derive calib.offset_uv and
+ * calib.gain_uv (microvolts) from the raw offset/gain codes.
+ *
+ * Scale: uv = code_delta * IADC_READING_RESOLUTION_N /
+ * IADC_READING_RESOLUTION_D, with the offset referenced to IADC_CENTER.
+ *
+ * Return: 0 on success, -EINVAL when gain_raw == offset_raw (degenerate
+ * calibration that would make later gain divisions divide by zero).
+ */
+static int32_t qpnp_convert_raw_offset_voltage(struct qpnp_iadc_chip *iadc)
+{
+	s64 numerator;
+
+	if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
+		pr_err("raw offset errors! raw_gain:0x%x and raw_offset:0x%x\n",
+			iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);
+		return -EINVAL;
+	}
+
+	numerator = iadc->adc->calib.offset_raw - IADC_CENTER;
+	numerator *= IADC_READING_RESOLUTION_N;
+	iadc->adc->calib.offset_uv = div_s64(numerator,
+						IADC_READING_RESOLUTION_D);
+
+	numerator = iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw;
+	numerator *= IADC_READING_RESOLUTION_N;
+
+	iadc->adc->calib.gain_uv = div_s64(numerator,
+						IADC_READING_RESOLUTION_D);
+
+	pr_debug("gain_uv:%d offset_uv:%d\n",
+			iadc->adc->calib.gain_uv, iadc->adc->calib.offset_uv);
+	return 0;
+}
+
+#define IADC_IDEAL_RAW_GAIN	3291
+/*
+ * qpnp_iadc_calibrate_for_trim() - run a gain + offset calibration and
+ * program the resulting offset trim into the secure trim registers.
+ * @iadc:          IADC chip handle.
+ * @batfet_closed: whether the battery FET is closed; with it open on
+ *                 PM8941 <= 3.1 the internal-rsense offset channel is
+ *                 rerouted, so the external offset channel is used.
+ *
+ * Converts GAIN_CALIBRATION_17P857MV for the gain point, then one of the
+ * two offset-calibration channels depending on rsense type and batfet
+ * state; derives microvolt values and writes the offset trim MSB/LSB,
+ * each preceded by the SEC_ACCESS unlock write. Holds adc_lock and, in
+ * poll-EOC mode, a wakeup source for the duration.
+ *
+ * Return: 0 on success, -EPROBE_DEFER if the chip is not registered,
+ * negative error code otherwise.
+ */
+int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
+							bool batfet_closed)
+{
+	uint8_t rslt_lsb, rslt_msb;
+	int32_t rc = 0, version = 0;
+	uint16_t raw_data;
+	uint32_t mode_sel = 0;
+	bool iadc_offset_ch_batfet_check;
+
+	if (qpnp_iadc_is_valid(iadc) < 0)
+		return -EPROBE_DEFER;
+
+	mutex_lock(&iadc->adc->adc_lock);
+
+	if (iadc->iadc_poll_eoc) {
+		pr_debug("acquiring iadc eoc wakelock\n");
+		pm_stay_awake(iadc->dev);
+	}
+
+	iadc->adc->amux_prop->decimation = DECIMATION_TYPE1;
+	iadc->adc->amux_prop->fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+
+	rc = qpnp_iadc_configure(iadc, GAIN_CALIBRATION_17P857MV,
+						&raw_data, mode_sel);
+	if (rc < 0) {
+		pr_err("qpnp adc result read failed with %d\n", rc);
+		goto fail;
+	}
+
+	iadc->adc->calib.gain_raw = raw_data;
+
+	/*
+	 * there is a features on PM8941 in the BMS where if the batfet is
+	 * opened the BMS reads from INTERNAL_RSENSE (channel 0) actually go to
+	 * OFFSET_CALIBRATION_CSP_CSN (channel 5). Hence if batfet is opened
+	 * we have to calibrate based on OFFSET_CALIBRATION_CSP_CSN even for
+	 * internal rsense.
+	 */
+	version = qpnp_adc_get_revid_version(iadc->dev);
+	if ((version == QPNP_REV_ID_8941_3_1) ||
+			(version == QPNP_REV_ID_8941_3_0) ||
+			(version == QPNP_REV_ID_8941_2_0))
+		iadc_offset_ch_batfet_check = true;
+	else
+		iadc_offset_ch_batfet_check = false;
+
+	if ((iadc_offset_ch_batfet_check && !batfet_closed) ||
+						(iadc->external_rsense)) {
+		/* external offset calculation */
+		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP_CSN,
+						&raw_data, mode_sel);
+		if (rc < 0) {
+			pr_err("qpnp adc result read failed with %d\n", rc);
+			goto fail;
+		}
+	} else {
+		/* internal offset calculation */
+		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP2_CSN2,
+						&raw_data, mode_sel);
+		if (rc < 0) {
+			pr_err("qpnp adc result read failed with %d\n", rc);
+			goto fail;
+		}
+	}
+
+	iadc->adc->calib.offset_raw = raw_data;
+	/* NOTE(review): dead check -- rc is >= 0 on both paths above. */
+	if (rc < 0) {
+		pr_err("qpnp adc offset/gain calculation failed\n");
+		goto fail;
+	}
+
+	/* This silicon revision uses a fixed ideal gain above the offset. */
+	if (iadc->iadc_comp.revision_dig_major == QPNP_IADC_PM8026_2_REV2
+		&& iadc->iadc_comp.revision_ana_minor ==
+						QPNP_IADC_PM8026_2_REV3)
+		iadc->adc->calib.gain_raw =
+			iadc->adc->calib.offset_raw + IADC_IDEAL_RAW_GAIN;
+
+	pr_debug("raw gain:0x%x, raw offset:0x%x\n",
+		iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);
+
+	rc = qpnp_convert_raw_offset_voltage(iadc);
+	if (rc < 0) {
+		pr_err("qpnp raw_voltage conversion failed\n");
+		goto fail;
+	}
+
+	rslt_msb = (raw_data & QPNP_RAW_CODE_16_BIT_MSB_MASK) >>
+							QPNP_BIT_SHIFT_8;
+	rslt_lsb = raw_data & QPNP_RAW_CODE_16_BIT_LSB_MASK;
+
+	pr_debug("trim values:lsb:0x%x and msb:0x%x\n", rslt_lsb, rslt_msb);
+
+	/* Each trim write must be preceded by a SEC_ACCESS unlock. */
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
+					QPNP_IADC_SEC_ACCESS_DATA);
+	if (rc < 0) {
+		pr_err("qpnp iadc configure error for sec access\n");
+		goto fail;
+	}
+
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MSB_OFFSET,
+						rslt_msb);
+	if (rc < 0) {
+		pr_err("qpnp iadc configure error for MSB write\n");
+		goto fail;
+	}
+
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
+					QPNP_IADC_SEC_ACCESS_DATA);
+	if (rc < 0) {
+		pr_err("qpnp iadc configure error for sec access\n");
+		goto fail;
+	}
+
+	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_LSB_OFFSET,
+						rslt_lsb);
+	if (rc < 0) {
+		pr_err("qpnp iadc configure error for LSB write\n");
+		goto fail;
+	}
+fail:
+	if (iadc->iadc_poll_eoc) {
+		pr_debug("releasing iadc eoc wakelock\n");
+		pm_relax(iadc->dev);
+	}
+	mutex_unlock(&iadc->adc->adc_lock);
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_iadc_calibrate_for_trim);
+
+/*
+ * qpnp_iadc_work() - periodic self-calibration worker.
+ *
+ * Recalibrates (unless skip_auto_calibrations) and re-queues itself
+ * every QPNP_IADC_CALIB_SECONDS ms (300000, i.e. 5 minutes).
+ */
+static void qpnp_iadc_work(struct work_struct *work)
+{
+	struct qpnp_iadc_chip *iadc = container_of(work,
+			struct qpnp_iadc_chip, iadc_work.work);
+	int rc = 0;
+
+	if (!iadc->skip_auto_calibrations) {
+		rc = qpnp_iadc_calibrate_for_trim(iadc, true);
+		if (rc)
+			pr_debug("periodic IADC calibration failed\n");
+	}
+
+	schedule_delayed_work(&iadc->iadc_work,
+		round_jiffies_relative(msecs_to_jiffies
+				(QPNP_IADC_CALIB_SECONDS)));
+}
+
+/*
+ * qpnp_iadc_version_check() - reject unsupported silicon.
+ *
+ * Return: 0 when REVISION2 >= QPNP_IADC_SUPPORTED_REVISION2,
+ * -EINVAL for older revisions, or the register-read error code.
+ */
+static int32_t qpnp_iadc_version_check(struct qpnp_iadc_chip *iadc)
+{
+	uint8_t revision;
+	int rc;
+
+	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION2, &revision);
+	if (rc < 0) {
+		pr_err("qpnp adc result read failed with %d\n", rc);
+		return rc;
+	}
+
+	if (revision < QPNP_IADC_SUPPORTED_REVISION2) {
+		pr_err("IADC Version not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * qpnp_get_iadc() - resolve a client's "qcom,<name>-iadc" DT phandle to
+ * a registered IADC chip.
+ * @dev:  consumer device whose of_node carries the phandle property.
+ * @name: property name stem.
+ *
+ * Return: the matching chip, ERR_PTR(-ENODEV) when the property is
+ * absent, or ERR_PTR(-EPROBE_DEFER) when the referenced IADC has not
+ * probed yet.
+ */
+struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev, const char *name)
+{
+	struct qpnp_iadc_chip *iadc;
+	struct device_node *node = NULL;
+	char prop_name[QPNP_MAX_PROP_NAME_LEN];
+
+	snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-iadc", name);
+
+	node = of_parse_phandle(dev->of_node, prop_name, 0);
+	if (node == NULL)
+		return ERR_PTR(-ENODEV);
+
+	list_for_each_entry(iadc, &qpnp_iadc_device_list, list)
+		if (iadc->adc->pdev->dev.of_node == node)
+			return iadc;
+	return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(qpnp_get_iadc);
+
+/*
+ * qpnp_iadc_get_rsense() - report the effective sense-resistor value.
+ * @iadc:   IADC chip handle.
+ * @rsense: filled with the resistance in nano-ohms.
+ *
+ * Priority: external rsense from DT, then the default-trim workaround
+ * value, else decode the fused QPNP_IADC_NOMINAL_RSENSE register
+ * (bit 7 is a sign flag; remaining bits are LSBs of 15625 n-ohm applied
+ * around the 10 m-ohm nominal).
+ *
+ * Return: 0 on success, -EPROBE_DEFER if unregistered, or a register
+ * read error.
+ */
+int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc, int32_t *rsense)
+{
+	uint8_t	rslt_rsense = 0;
+	int32_t	rc = 0, sign_bit = 0;
+
+	if (qpnp_iadc_is_valid(iadc) < 0)
+		return -EPROBE_DEFER;
+
+	if (iadc->external_rsense) {
+		*rsense = iadc->rsense;
+	} else if (iadc->default_internal_rsense) {
+		*rsense = iadc->rsense_workaround_value;
+	} else {
+
+		rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE,
+							&rslt_rsense);
+		if (rc < 0) {
+			pr_err("qpnp adc rsense read failed with %d\n", rc);
+			return rc;
+		}
+
+		pr_debug("rsense:0%x\n", rslt_rsense);
+
+		if (rslt_rsense & QPNP_RSENSE_MSB_SIGN_CHECK)
+			sign_bit = 1;
+
+		rslt_rsense &= ~QPNP_RSENSE_MSB_SIGN_CHECK;
+
+		if (sign_bit)
+			*rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR -
+			(rslt_rsense * QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT);
+		else
+			*rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR +
+			(rslt_rsense * QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT);
+	}
+	pr_debug("rsense value is %d\n", *rsense);
+
+	if (*rsense == 0)
+		pr_err("incorrect rsens value:%d rslt_rsense:%d\n",
+				*rsense, rslt_rsense);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_iadc_get_rsense);
+
+/*
+ * qpnp_check_pmic_temp() - recalibrate when die temperature has drifted.
+ *
+ * Reads DIE_TEMP via the companion VADC and, when it differs from the
+ * last calibration temperature by more than
+ * QPNP_IADC_DIE_TEMP_CALIB_OFFSET (5000 units of the VADC physical
+ * reading -- presumably millidegrees C, confirm against the VADC
+ * scaling), caches the new temperature and reruns the trim calibration
+ * (unless skip_auto_calibrations).
+ *
+ * Return: 0 on success or the VADC/calibration error code.
+ */
+static int32_t qpnp_check_pmic_temp(struct qpnp_iadc_chip *iadc)
+{
+	struct qpnp_vadc_result result_pmic_therm;
+	int64_t die_temp_offset;
+	int rc = 0;
+
+	rc = qpnp_vadc_read(iadc->vadc_dev, DIE_TEMP, &result_pmic_therm);
+	if (rc < 0)
+		return rc;
+
+	die_temp_offset = result_pmic_therm.physical -
+			iadc->die_temp;
+	if (die_temp_offset < 0)
+		die_temp_offset = -die_temp_offset;
+
+	if (die_temp_offset > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
+		iadc->die_temp = result_pmic_therm.physical;
+		if (!iadc->skip_auto_calibrations) {
+			rc = qpnp_iadc_calibrate_for_trim(iadc, true);
+			if (rc)
+				pr_err("IADC calibration failed rc = %d\n", rc);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * qpnp_iadc_read() - perform one current conversion on @channel.
+ * @iadc:    IADC chip handle.
+ * @channel: IADC channel (must exist in the DT channel table).
+ * @result:  filled with result_uv (microvolts across rsense) and
+ *           result_ua (compensated current, microamps).
+ *
+ * Validates the calibration, opportunistically recalibrates on die-temp
+ * drift, looks the channel up in the DT table for decimation and
+ * fast-average settings, converts, then scales the raw delta by the
+ * ideal gain and divides by rsense. The sign of the current is flipped
+ * before and after qpnp_iadc_comp_result() -- presumably the
+ * compensation expects the opposite charge/discharge sign convention;
+ * confirm before touching. A compensation failure is logged but does
+ * not fail the read (rc is reset to 0).
+ *
+ * Return: 0 on success, -EPROBE_DEFER if unregistered, negative error
+ * code otherwise.
+ */
+int32_t qpnp_iadc_read(struct qpnp_iadc_chip *iadc,
+				enum qpnp_iadc_channels channel,
+				struct qpnp_iadc_result *result)
+{
+	int32_t rc, rsense_n_ohms, sign = 0, num, mode_sel = 0;
+	int32_t rsense_u_ohms = 0;
+	int64_t result_current;
+	uint16_t raw_data;
+	int dt_index = 0;
+
+	if (qpnp_iadc_is_valid(iadc) < 0)
+		return -EPROBE_DEFER;
+
+	if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
+		pr_err("raw offset errors! run iadc calibration again\n");
+		return -EINVAL;
+	}
+
+	rc = qpnp_check_pmic_temp(iadc);
+	if (rc) {
+		pr_err("Error checking pmic therm temp\n");
+		return rc;
+	}
+
+	mutex_lock(&iadc->adc->adc_lock);
+
+	/* Linear scan of the DT channel table for this channel's config. */
+	while (((enum qpnp_iadc_channels)
+		iadc->adc->adc_channels[dt_index].channel_num
+		!= channel) && (dt_index < iadc->max_channels_available))
+		dt_index++;
+
+	if (dt_index >= iadc->max_channels_available) {
+		pr_err("not a valid IADC channel\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	iadc->adc->amux_prop->decimation =
+			iadc->adc->adc_channels[dt_index].adc_decimation;
+	iadc->adc->amux_prop->fast_avg_setup =
+			iadc->adc->adc_channels[dt_index].fast_avg_setup;
+
+	if (iadc->iadc_poll_eoc) {
+		pr_debug("acquiring iadc eoc wakelock\n");
+		pm_stay_awake(iadc->dev);
+	}
+
+	rc = qpnp_iadc_configure(iadc, channel, &raw_data, mode_sel);
+	if (rc < 0) {
+		pr_err("qpnp adc result read failed with %d\n", rc);
+		goto fail;
+	}
+
+	rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms);
+	pr_debug("current raw:0%x and rsense:%d\n",
+			raw_data, rsense_n_ohms);
+	rsense_u_ohms = rsense_n_ohms/1000;
+	num = raw_data - iadc->adc->calib.offset_raw;
+	if (num < 0) {
+		sign = 1;
+		num = -num;
+	}
+
+	result->result_uv = (num * QPNP_ADC_GAIN_NV)/
+		(iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw);
+	result_current = result->result_uv;
+	result_current *= QPNP_IADC_NANO_VOLTS_FACTOR;
+	/* Intentional fall through. Process the result w/o comp */
+	do_div(result_current, rsense_u_ohms);
+
+	if (sign) {
+		result->result_uv = -result->result_uv;
+		result_current = -result_current;
+	}
+	result_current *= -1;
+	rc = qpnp_iadc_comp_result(iadc, &result_current);
+	if (rc < 0)
+		pr_err("Error during compensating the IADC\n");
+	rc = 0;
+	result_current *= -1;
+
+	result->result_ua = (int32_t) result_current;
+fail:
+	if (iadc->iadc_poll_eoc) {
+		pr_debug("releasing iadc eoc wakelock\n");
+		pm_relax(iadc->dev);
+	}
+	mutex_unlock(&iadc->adc->adc_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_iadc_read);
+
+int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *iadc,
+					struct qpnp_iadc_calib *result)
+{
+	int rc;
+
+	if (qpnp_iadc_is_valid(iadc) < 0)
+		return -EPROBE_DEFER;
+
+	rc = qpnp_check_pmic_temp(iadc);
+	if (rc) {
+		pr_err("Error checking pmic therm temp\n");
+		return rc;
+	}
+
+	mutex_lock(&iadc->adc->adc_lock);
+	result->gain_raw = iadc->adc->calib.gain_raw;
+	result->ideal_gain_nv = QPNP_ADC_GAIN_NV;
+	result->gain_uv = iadc->adc->calib.gain_uv;
+	result->offset_raw = iadc->adc->calib.offset_raw;
+	result->ideal_offset_uv =
+				QPNP_OFFSET_CALIBRATION_SHORT_CADC_LEADS_IDEAL;
+	result->offset_uv = iadc->adc->calib.offset_uv;
+	pr_debug("raw gain:0%x, raw offset:0%x\n",
+			result->gain_raw, result->offset_raw);
+	pr_debug("gain_uv:%d offset_uv:%d\n",
+			result->gain_uv, result->offset_uv);
+	mutex_unlock(&iadc->adc->adc_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_get_gain_and_offset);
+
/*
 * qpnp_iadc_skip_calibration() - suppress automatic trim recalibration.
 *
 * Sets a flag checked by qpnp_check_pmic_temp() and the periodic
 * calibration work so that die-temperature drift no longer triggers a
 * recalibration. Always returns 0.
 */
int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *iadc)
{
	iadc->skip_auto_calibrations = true;
	return 0;
}
EXPORT_SYMBOL(qpnp_iadc_skip_calibration);
+
/*
 * qpnp_iadc_resume_calibration() - re-enable automatic trim
 * recalibration previously suppressed by qpnp_iadc_skip_calibration().
 * Always returns 0.
 */
int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *iadc)
{
	iadc->skip_auto_calibrations = false;
	return 0;
}
EXPORT_SYMBOL(qpnp_iadc_resume_calibration);
+
/*
 * qpnp_iadc_vadc_sync_read() - read an IADC current channel and a VADC
 * voltage channel as one synchronized measurement.
 *
 * The VADC conversion is armed first (qpnp_vadc_iadc_sync_request), the
 * IADC conversion is then run with iadc_mode_sel set, and finally the
 * VADC result is collected. The IADC raw code is scaled to result_uv /
 * result_ua exactly as in qpnp_iadc_read().
 *
 * NOTE(review): on the normal path execution falls through into
 * fail_release_vadc, so the final return value is whatever
 * qpnp_vadc_iadc_sync_complete_request() returns — any earlier rc
 * (including the rsense==0 bail-out, which leaves rc possibly 0) is
 * overwritten. This looks intentional for the release step but means a
 * zero-rsense failure can return success with a stale i_result; confirm
 * against callers before changing.
 *
 * Returns 0 on success, -EPROBE_DEFER if the chip is not registered, or
 * a negative errno.
 */
int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
{
	int rc = 0, mode_sel = 0, num = 0, rsense_n_ohms = 0, sign = 0;
	int dt_index = 0;
	uint16_t raw_data;
	int32_t rsense_u_ohms = 0;
	int64_t result_current;

	if (qpnp_iadc_is_valid(iadc) < 0)
		return -EPROBE_DEFER;

	/* gain == offset would make the scaling below divide by zero. */
	if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
		pr_err("raw offset errors! run iadc calibration again\n");
		return -EINVAL;
	}

	mutex_lock(&iadc->adc->adc_lock);

	/* In polling mode keep the device awake for the conversion. */
	if (iadc->iadc_poll_eoc) {
		pr_debug("acquiring iadc eoc wakelock\n");
		pm_stay_awake(iadc->dev);
	}

	/* Flag that the IADC owns this synchronized conversion. */
	iadc->iadc_mode_sel = true;

	rc = qpnp_vadc_iadc_sync_request(iadc->vadc_dev, v_channel);
	if (rc) {
		pr_err("Configuring VADC failed\n");
		goto fail;
	}

	/* Locate the requested channel in the DT-provided channel list. */
	while (((enum qpnp_iadc_channels)
		iadc->adc->adc_channels[dt_index].channel_num
		!= i_channel) && (dt_index < iadc->max_channels_available))
		dt_index++;

	if (dt_index >= iadc->max_channels_available) {
		pr_err("not a valid IADC channel\n");
		rc = -EINVAL;
		goto fail;
	}

	iadc->adc->amux_prop->decimation =
			iadc->adc->adc_channels[dt_index].adc_decimation;
	iadc->adc->amux_prop->fast_avg_setup =
			iadc->adc->adc_channels[dt_index].fast_avg_setup;

	rc = qpnp_iadc_configure(iadc, i_channel, &raw_data, mode_sel);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		goto fail_release_vadc;
	}

	rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms);
	pr_debug("current raw:0%x and rsense:%d\n",
			raw_data, rsense_n_ohms);
	rsense_u_ohms = rsense_n_ohms/1000;
	num = raw_data - iadc->adc->calib.offset_raw;
	if (num < 0) {
		sign = 1;
		num = -num;
	}

	i_result->result_uv = (num * QPNP_ADC_GAIN_NV)/
		(iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw);
	result_current = i_result->result_uv;
	result_current *= QPNP_IADC_NANO_VOLTS_FACTOR;
	/* Guard against a zero sense resistance before dividing. */
	if (!rsense_u_ohms) {
		pr_err("rsense error=%d\n", rsense_u_ohms);
		goto fail_release_vadc;
	}

	do_div(result_current, rsense_u_ohms);

	if (sign) {
		i_result->result_uv = -i_result->result_uv;
		result_current = -result_current;
	}
	/*
	 * Compensation operates on the negated value; sign is restored
	 * after. A compensation failure is logged but non-fatal.
	 */
	result_current *= -1;
	rc = qpnp_iadc_comp_result(iadc, &result_current);
	if (rc < 0)
		pr_err("Error during compensating the IADC\n");
	rc = 0;
	result_current *= -1;

	i_result->result_ua = (int32_t) result_current;

fail_release_vadc:
	/* Always release the armed VADC; its status becomes our rc. */
	rc = qpnp_vadc_iadc_sync_complete_request(iadc->vadc_dev, v_channel,
							v_result);
	if (rc)
		pr_err("Releasing VADC failed\n");
fail:
	iadc->iadc_mode_sel = false;

	if (iadc->iadc_poll_eoc) {
		pr_debug("releasing iadc eoc wakelock\n");
		pm_relax(iadc->dev);
	}
	mutex_unlock(&iadc->adc->adc_lock);

	return rc;
}
EXPORT_SYMBOL(qpnp_iadc_vadc_sync_read);
+
+static ssize_t qpnp_iadc_show(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct qpnp_iadc_chip *iadc = dev_get_drvdata(dev);
+	struct qpnp_iadc_result result;
+	int rc = -1;
+
+	rc = qpnp_iadc_read(iadc, attr->index, &result);
+
+	if (rc)
+		return 0;
+
+	return snprintf(buf, QPNP_ADC_HWMON_NAME_LENGTH,
+					"Result:%d\n", result.result_ua);
+}
+
/*
 * Template attribute: qpnp_iadc_init_hwmon() fills in the name and
 * channel index per DT child before copying it into sens_attr[].
 */
static struct sensor_device_attribute qpnp_adc_attr =
	SENSOR_ATTR(NULL, 0444, qpnp_iadc_show, NULL, 0);
+
+static int32_t qpnp_iadc_init_hwmon(struct qpnp_iadc_chip *iadc,
+						struct platform_device *pdev)
+{
+	struct device_node *child;
+	struct device_node *node = pdev->dev.of_node;
+	int rc = 0, i = 0, channel;
+
+	for_each_child_of_node(node, child) {
+		channel = iadc->adc->adc_channels[i].channel_num;
+		qpnp_adc_attr.index = iadc->adc->adc_channels[i].channel_num;
+		qpnp_adc_attr.dev_attr.attr.name =
+						iadc->adc->adc_channels[i].name;
+		memcpy(&iadc->sens_attr[i], &qpnp_adc_attr,
+						sizeof(qpnp_adc_attr));
+		sysfs_attr_init(&iadc->sens_attr[i].dev_attr.attr);
+		rc = device_create_file(&pdev->dev,
+				&iadc->sens_attr[i].dev_attr);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"device_create_file failed for dev %s\n",
+				iadc->adc->adc_channels[i].name);
+			goto hwmon_err_sens;
+		}
+		i++;
+	}
+
+	return 0;
+hwmon_err_sens:
+	pr_err("Init HWMON failed for qpnp_iadc with %d\n", rc);
+	return rc;
+}
+
+static int qpnp_iadc_probe(struct platform_device *pdev)
+{
+	struct qpnp_iadc_chip *iadc;
+	struct qpnp_adc_drv *adc_qpnp;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *child;
+	unsigned int base;
+	int rc, count_adc_channel_list = 0, i = 0;
+
+	for_each_child_of_node(node, child)
+		count_adc_channel_list++;
+
+	if (!count_adc_channel_list) {
+		pr_err("No channel listing\n");
+		return -EINVAL;
+	}
+
+	iadc = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_iadc_chip) +
+		(sizeof(struct sensor_device_attribute) *
+				count_adc_channel_list), GFP_KERNEL);
+	if (!iadc) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	adc_qpnp = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_adc_drv),
+			GFP_KERNEL);
+	if (!adc_qpnp)
+		return -ENOMEM;
+
+	adc_qpnp->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!adc_qpnp->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	iadc->dev = &(pdev->dev);
+	iadc->adc = adc_qpnp;
+
+	rc = qpnp_adc_get_devicetree_data(pdev, iadc->adc);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to read device tree\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "batt-id-trim-cnst-rds",
+				  &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find batt-id-trim-cnst-rds in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+	iadc->batt_id_trim_cnst_rds = base;
+	rc = of_property_read_u32(node, "qcom,use-default-rds-trim",
+			&iadc->rds_trim_default_type);
+	if (rc)
+		pr_debug("No trim workaround needed\n");
+	else {
+		pr_debug("Use internal RDS trim workaround\n");
+		iadc->rds_trim_default_check = true;
+	}
+
+	iadc->vadc_dev = qpnp_get_vadc(&pdev->dev, "iadc");
+	if (IS_ERR(iadc->vadc_dev)) {
+		rc = PTR_ERR(iadc->vadc_dev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("vadc property missing, rc=%d\n", rc);
+		return rc;
+	}
+
+	mutex_init(&iadc->adc->adc_lock);
+
+	rc = of_property_read_u32(node, "qcom,rsense",
+			&iadc->rsense);
+	if (rc)
+		pr_debug("Defaulting to internal rsense\n");
+	else {
+		pr_debug("Use external rsense\n");
+		iadc->external_rsense = true;
+	}
+
+	iadc->iadc_poll_eoc = of_property_read_bool(node,
+						"qcom,iadc-poll-eoc");
+	if (!iadc->iadc_poll_eoc) {
+		rc = devm_request_irq(&pdev->dev, iadc->adc->adc_irq_eoc,
+				qpnp_iadc_isr, IRQF_TRIGGER_RISING,
+				"qpnp_iadc_interrupt", iadc);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request adc irq\n");
+			return rc;
+		}
+		enable_irq_wake(iadc->adc->adc_irq_eoc);
+	}
+
+	rc = qpnp_iadc_init_hwmon(iadc, pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to initialize qpnp hwmon adc\n");
+		return rc;
+	}
+	iadc->iadc_hwmon = hwmon_device_register(&iadc->adc->pdev->dev);
+
+	rc = qpnp_iadc_version_check(iadc);
+	if (rc) {
+		dev_err(&pdev->dev, "IADC version not supported\n");
+		goto fail;
+	}
+
+	iadc->max_channels_available = count_adc_channel_list;
+	INIT_WORK(&iadc->trigger_completion_work, qpnp_iadc_trigger_completion);
+	INIT_DELAYED_WORK(&iadc->iadc_work, qpnp_iadc_work);
+	rc = qpnp_iadc_comp_info(iadc);
+	if (rc) {
+		dev_err(&pdev->dev, "abstracting IADC comp info failed!\n");
+		goto fail;
+	}
+
+	rc = qpnp_iadc_rds_trim_update_check(iadc);
+	if (rc) {
+		dev_err(&pdev->dev, "Rds trim update failed!\n");
+		goto fail;
+	}
+
+	dev_set_drvdata(&pdev->dev, iadc);
+	list_add(&iadc->list, &qpnp_iadc_device_list);
+	rc = qpnp_iadc_calibrate_for_trim(iadc, true);
+	if (rc)
+		dev_err(&pdev->dev, "failed to calibrate for USR trim\n");
+
+	if (iadc->iadc_poll_eoc)
+		device_init_wakeup(iadc->dev, 1);
+
+	schedule_delayed_work(&iadc->iadc_work,
+			round_jiffies_relative(msecs_to_jiffies
+					(QPNP_IADC_CALIB_SECONDS)));
+	return 0;
+fail:
+	for_each_child_of_node(node, child) {
+		device_remove_file(&pdev->dev, &iadc->sens_attr[i].dev_attr);
+		i++;
+	}
+	hwmon_device_unregister(iadc->iadc_hwmon);
+
+	return rc;
+}
+
+static int qpnp_iadc_remove(struct platform_device *pdev)
+{
+	struct qpnp_iadc_chip *iadc = dev_get_drvdata(&pdev->dev);
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *child;
+	int i = 0;
+
+	cancel_delayed_work(&iadc->iadc_work);
+	for_each_child_of_node(node, child) {
+		device_remove_file(&pdev->dev, &iadc->sens_attr[i].dev_attr);
+		i++;
+	}
+	hwmon_device_unregister(iadc->iadc_hwmon);
+	if (iadc->iadc_poll_eoc)
+		pm_relax(iadc->dev);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id qpnp_iadc_match_table[] = {
+	{	.compatible = "qcom,qpnp-iadc",
+	},
+	{}
+};
+
/* Platform driver glue for the QPNP IADC peripheral. */
static struct platform_driver qpnp_iadc_driver = {
	.driver		= {
		.name		= "qcom,qpnp-iadc",
		.of_match_table = qpnp_iadc_match_table,
	},
	.probe		= qpnp_iadc_probe,
	.remove		= qpnp_iadc_remove,
};
+
/* Register the IADC platform driver at module load. */
static int __init qpnp_iadc_init(void)
{
	return platform_driver_register(&qpnp_iadc_driver);
}
module_init(qpnp_iadc_init);
+
/* Unregister the IADC platform driver at module unload. */
static void __exit qpnp_iadc_exit(void)
{
	platform_driver_unregister(&qpnp_iadc_driver);
}
module_exit(qpnp_iadc_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC current ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
new file mode 100644
index 0000000..6cd63b2
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -0,0 +1,2908 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+
+/* QPNP VADC register definition */
+#define QPNP_VADC_REVISION1				0x0
+#define QPNP_VADC_REVISION2				0x1
+#define QPNP_VADC_REVISION3				0x2
+#define QPNP_VADC_REVISION4				0x3
+#define QPNP_VADC_PERPH_TYPE				0x4
+#define QPNP_VADC_PERH_SUBTYPE				0x5
+
+#define QPNP_VADC_SUPPORTED_REVISION2			1
+
+#define QPNP_VADC_STATUS1					0x8
+#define QPNP_VADC_STATUS1_OP_MODE				4
+#define QPNP_VADC_STATUS1_MEAS_INTERVAL_EN_STS			BIT(2)
+#define QPNP_VADC_STATUS1_REQ_STS				BIT(1)
+#define QPNP_VADC_STATUS1_EOC					BIT(0)
+#define QPNP_VADC_STATUS1_REQ_STS_EOC_MASK			0x3
+#define QPNP_VADC_STATUS1_OP_MODE_MASK				0x18
+#define QPNP_VADC_MEAS_INT_MODE					0x2
+#define QPNP_VADC_MEAS_INT_MODE_MASK				0x10
+
+#define QPNP_VADC_STATUS2					0x9
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE				6
+#define QPNP_VADC_STATUS2_FIFO_NOT_EMPTY_FLAG			BIT(1)
+#define QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS			BIT(0)
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT			4
+#define QPNP_VADC_CONV_TIMEOUT_ERR				2
+
+#define QPNP_VADC_MODE_CTL					0x40
+#define QPNP_VADC_OP_MODE_SHIFT					3
+#define QPNP_VADC_VREF_XO_THM_FORCE				BIT(2)
+#define QPNP_VADC_AMUX_TRIM_EN					BIT(1)
+#define QPNP_VADC_TRIM_EN					BIT(0)
+#define QPNP_VADC_EN_CTL1					0x46
+#define QPNP_VADC_EN						BIT(7)
+#define QPNP_VADC_CH_SEL_CTL					0x48
+#define QPNP_VADC_DIG_PARAM					0x50
+#define QPNP_VADC_DIG_DEC_RATIO_SEL_SHIFT			3
+#define QPNP_VADC_HW_SETTLE_DELAY				0x51
+#define QPNP_VADC_CONV_REQ					0x52
+#define QPNP_VADC_CONV_REQ_SET					BIT(7)
+#define QPNP_VADC_CONV_SEQ_CTL					0x54
+#define QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT				4
+#define QPNP_VADC_CONV_SEQ_TRIG_CTL				0x55
+#define QPNP_VADC_MEAS_INTERVAL_CTL				0x57
+#define QPNP_VADC_MEAS_INTERVAL_OP_CTL				0x59
+#define QPNP_VADC_MEAS_INTERVAL_OP_SET				BIT(7)
+
+#define QPNP_VADC_CONV_SEQ_FALLING_EDGE				0x0
+#define QPNP_VADC_CONV_SEQ_RISING_EDGE				0x1
+#define QPNP_VADC_CONV_SEQ_EDGE_SHIFT				7
+#define QPNP_VADC_FAST_AVG_CTL					0x5a
+
+#define QPNP_VADC_LOW_THR_LSB					0x5c
+#define QPNP_VADC_LOW_THR_MSB					0x5d
+#define QPNP_VADC_HIGH_THR_LSB					0x5e
+#define QPNP_VADC_HIGH_THR_MSB					0x5f
+#define QPNP_VADC_ACCESS					0xd0
+#define QPNP_VADC_ACCESS_DATA					0xa5
+#define QPNP_VADC_PERH_RESET_CTL3				0xda
+#define QPNP_FOLLOW_OTST2_RB					BIT(3)
+#define QPNP_FOLLOW_WARM_RB					BIT(2)
+#define QPNP_FOLLOW_SHUTDOWN1_RB				BIT(1)
+#define QPNP_FOLLOW_SHUTDOWN2_RB				BIT(0)
+
+#define QPNP_INT_TEST_VAL					0xE1
+
+#define QPNP_VADC_DATA0						0x60
+#define QPNP_VADC_DATA1						0x61
+#define QPNP_VADC_CONV_TIMEOUT_ERR				2
+#define QPNP_VADC_CONV_TIME_MIN					1000
+#define QPNP_VADC_CONV_TIME_MAX					1100
+#define QPNP_ADC_COMPLETION_TIMEOUT				HZ
+#define QPNP_VADC_ERR_COUNT					20
+#define QPNP_OP_MODE_SHIFT					3
+
+#define QPNP_VADC_THR_LSB_MASK(val)				(val & 0xff)
+#define QPNP_VADC_THR_MSB_MASK(val)			((val & 0xff00) >> 8)
+#define QPNP_MIN_TIME						2000
+#define QPNP_MAX_TIME						2000
+#define QPNP_RETRY						100
+#define QPNP_VADC_ABSOLUTE_RECALIB_OFFSET			8
+#define QPNP_VADC_RATIOMETRIC_RECALIB_OFFSET			12
+#define QPNP_VADC_RECALIB_MAXCNT				10
+#define QPNP_VADC_OFFSET_DUMP					8
+#define QPNP_VADC_REG_DUMP					14
+
+/* QPNP VADC refreshed register set */
+#define QPNP_VADC_HC1_STATUS1					0x8
+
+#define QPNP_VADC_HC1_DATA_HOLD_CTL				0x3f
+#define QPNP_VADC_HC1_DATA_HOLD_CTL_FIELD			BIT(1)
+
+#define QPNP_VADC_HC1_ADC_DIG_PARAM				0x42
+#define QPNP_VADC_HC1_CAL_VAL					BIT(6)
+#define QPNP_VADC_HC1_CAL_VAL_SHIFT				6
+#define QPNP_VADC_HC1_CAL_SEL_MASK				0x30
+#define QPNP_VADC_HC1_CAL_SEL_SHIFT				4
+#define QPNP_VADC_HC1_DEC_RATIO_SEL				0xc
+#define QPNP_VADC_HC1_DEC_RATIO_SHIFT				2
+#define QPNP_VADC_HC1_FAST_AVG_CTL				0x43
+#define QPNP_VADC_HC1_FAST_AVG_SAMPLES_MASK			0x7
+#define QPNP_VADC_HC1_ADC_CH_SEL_CTL				0x44
+#define QPNP_VADC_HC1_DELAY_CTL					0x45
+#define QPNP_VADC_HC1_DELAY_CTL_MASK				0xf
+#define QPNP_VADC_MC1_EN_CTL1					0x46
+#define QPNP_VADC_HC1_ADC_EN					BIT(7)
+#define QPNP_VADC_MC1_CONV_REQ					0x47
+#define QPNP_VADC_HC1_CONV_REQ_START				BIT(7)
+
+#define QPNP_VADC_HC1_VBAT_MIN_THR0				0x48
+#define QPNP_VADC_HC1_VBAT_MIN_THR1				0x49
+
+#define QPNP_VADC_HC1_DATA0					0x50
+#define QPNP_VADC_HC1_DATA1					0x51
+#define QPNP_VADC_HC1_DATA_CHECK_USR				0x8000
+
+#define QPNP_VADC_HC1_VBAT_MIN_DATA0				0x52
+#define QPNP_VADC_MC1_VBAT_MIN_DATA1				0x53
+
+/*
+ * Conversion time varies between 213uS to 6827uS based on the decimation,
+ * clock rate, fast average samples with no measurement in queue.
+ */
+#define QPNP_VADC_HC1_CONV_TIME_MIN_US				213
+#define QPNP_VADC_HC1_CONV_TIME_MAX_US				214
+#define QPNP_VADC_HC1_ERR_COUNT					1600
+
/*
 * Bookkeeping for VADC measurement-interval (recurring) mode: whether
 * the mode is active, whether a one-shot request is queued behind it,
 * and the BTM parameters / amux channel being monitored.
 */
struct qpnp_vadc_mode_state {
	bool				meas_int_mode;
	bool				meas_int_request_in_queue;
	bool				vadc_meas_int_enable;
	struct qpnp_adc_tm_btm_param	*param;
	struct qpnp_adc_amux		vadc_meas_amux;
};
+
/*
 * Per-channel thermal-zone registration data: maps a VADC channel to
 * the thermal zone device exposing it (when the DT marks the channel
 * as a thermal node).
 */
struct qpnp_vadc_thermal_data {
	bool thermal_node;
	int thermal_chan;
	enum qpnp_vadc_channels vadc_channel;
	struct thermal_zone_device *tz_dev;
	struct qpnp_vadc_chip *vadc_dev;
};
+
/*
 * Per-instance VADC driver state. Instances are linked into
 * qpnp_vadc_device_list; sens_attr[] is a flexible trailing array with
 * one hwmon attribute per DT channel (allocation sized at probe time).
 */
struct qpnp_vadc_chip {
	struct device			*dev;
	struct qpnp_adc_drv		*adc;
	struct list_head		list;
	struct device			*vadc_hwmon;
	bool				vadc_init_calib;	/* set after first calibration */
	int				max_channels_available;
	bool				vadc_iadc_sync_lock;
	u8				id;
	struct work_struct		trigger_completion_work;
	bool				vadc_poll_eoc;		/* poll STATUS1 instead of IRQ */
	bool				vadc_recalib_check;
	u8				revision_ana_minor;
	u8				revision_dig_major;
	struct work_struct		trigger_high_thr_work;
	struct work_struct		trigger_low_thr_work;
	struct qpnp_vadc_mode_state	*state_copy;
	struct qpnp_vadc_thermal_data	*vadc_therm_chan;
	struct power_supply		*vadc_chg_vote;
	bool				vadc_hc;		/* refreshed (HC) register set */
	int				vadc_debug_count;	/* limits register dumps to 3 */
	struct sensor_device_attribute	sens_attr[0];
};
+
/* All registered VADC instances; used by qpnp_vadc_is_valid(). */
LIST_HEAD(qpnp_vadc_device_list);

/* Raw-code -> physical-value scaling, indexed by the DT adc_scale_fn. */
static struct qpnp_vadc_scale_fn vadc_scale_fn[] = {
	[SCALE_DEFAULT] = {qpnp_adc_scale_default},
	[SCALE_BATT_THERM] = {qpnp_adc_scale_batt_therm},
	[SCALE_PMIC_THERM] = {qpnp_adc_scale_pmic_therm},
	[SCALE_XOTHERM] = {qpnp_adc_tdkntcg_therm},
	[SCALE_THERM_100K_PULLUP] = {qpnp_adc_scale_therm_pu2},
	[SCALE_THERM_150K_PULLUP] = {qpnp_adc_scale_therm_pu1},
	[SCALE_QRD_BATT_THERM] = {qpnp_adc_scale_qrd_batt_therm},
	[SCALE_QRD_SKUAA_BATT_THERM] = {qpnp_adc_scale_qrd_skuaa_batt_therm},
	[SCALE_SMB_BATT_THERM] = {qpnp_adc_scale_smb_batt_therm},
	[SCALE_QRD_SKUG_BATT_THERM] = {qpnp_adc_scale_qrd_skug_batt_therm},
	[SCALE_QRD_SKUH_BATT_THERM] = {qpnp_adc_scale_qrd_skuh_batt_therm},
	[SCALE_NCP_03WF683_THERM] = {qpnp_adc_scale_therm_ncp03},
	[SCALE_QRD_SKUT1_BATT_THERM] = {qpnp_adc_scale_qrd_skut1_batt_therm},
	[SCALE_PMI_CHG_TEMP] = {qpnp_adc_scale_pmi_chg_temp},
};

/* Reverse (physical -> raw threshold) scaling functions. */
static struct qpnp_vadc_rscale_fn adc_vadc_rscale_fn[] = {
	[SCALE_RVADC_ABSOLUTE] = {qpnp_vadc_absolute_rthr},
};
+
+static int32_t qpnp_vadc_calib_device(struct qpnp_vadc_chip *vadc);
+
+static int32_t qpnp_vadc_read_reg(struct qpnp_vadc_chip *vadc, int16_t reg,
+						u8 *data, int len)
+{
+	int rc;
+
+	rc = regmap_bulk_read(vadc->adc->regmap,
+		(vadc->adc->offset + reg), data, len);
+	if (rc < 0) {
+		pr_err("qpnp adc read reg %d failed with %d\n", reg, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_vadc_write_reg(struct qpnp_vadc_chip *vadc, int16_t reg,
+						u8 *buf, int len)
+{
+	int rc;
+
+	rc = regmap_bulk_write(vadc->adc->regmap,
+		(vadc->adc->offset + reg), buf, len);
+	if (rc < 0) {
+		pr_err("qpnp adc write reg %d failed with %d\n", reg, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qpnp_vadc_is_valid(struct qpnp_vadc_chip *vadc)
+{
+	struct qpnp_vadc_chip *vadc_chip = NULL;
+
+	list_for_each_entry(vadc_chip, &qpnp_vadc_device_list, list)
+		if (vadc == vadc_chip)
+			return 0;
+
+	return -EINVAL;
+}
+
/*
 * qpnp_vadc_warm_rst_configure() - make VADC trim state survive a warm
 * reset by setting QPNP_FOLLOW_WARM_RB in the peripheral reset control.
 *
 * PERH_RESET_CTL3 is a secured register: each access must be preceded by
 * writing the unlock code to the ACCESS register, which is why the
 * ACCESS write appears twice (once before the read, once before the
 * write). Do not "deduplicate" it.
 *
 * Returns 0 on success or a negative errno from the register ops.
 */
static int32_t qpnp_vadc_warm_rst_configure(struct qpnp_vadc_chip *vadc)
{
	int rc = 0;
	u8 data = 0, buf = 0;

	/* Unlock secured-register access before the read. */
	buf = QPNP_VADC_ACCESS_DATA;
	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_ACCESS, &buf, 1);
	if (rc < 0) {
		pr_err("VADC write access failed\n");
		return rc;
	}

	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_PERH_RESET_CTL3, &data, 1);
	if (rc < 0) {
		pr_err("VADC perh reset ctl3 read failed\n");
		return rc;
	}

	/* Unlock again: the access code is consumed by each operation. */
	buf = QPNP_VADC_ACCESS_DATA;
	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_ACCESS, &buf, 1);
	if (rc < 0) {
		pr_err("VADC write access failed\n");
		return rc;
	}

	data |= QPNP_FOLLOW_WARM_RB;

	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_PERH_RESET_CTL3, &data, 1);
	if (rc < 0) {
		pr_err("VADC perh reset ctl3 write failed\n");
		return rc;
	}

	return 0;
}
+
+static int32_t qpnp_vadc_mode_select(struct qpnp_vadc_chip *vadc, u8 mode_ctl)
+{
+	int rc;
+
+	mode_ctl |= (QPNP_VADC_TRIM_EN | QPNP_VADC_AMUX_TRIM_EN);
+
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MODE_CTL, &mode_ctl, 1);
+	if (rc < 0)
+		pr_err("vadc write mode selection err:%d\n", rc);
+
+	return rc;
+}
+
+static int32_t qpnp_vadc_enable(struct qpnp_vadc_chip *vadc, bool state)
+{
+	int rc = 0;
+	u8 data = 0;
+
+	data = QPNP_VADC_EN;
+	if (state) {
+		if (vadc->adc->hkadc_ldo && vadc->adc->hkadc_ldo_ok) {
+			rc = qpnp_adc_enable_voltage(vadc->adc);
+			if (rc) {
+				pr_err("failed enabling VADC LDO\n");
+				return rc;
+			}
+		}
+
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_EN_CTL1, &data, 1);
+		if (rc < 0) {
+			pr_err("VADC enable failed\n");
+			return rc;
+		}
+	} else {
+		data = (~data & QPNP_VADC_EN);
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_EN_CTL1, &data, 1);
+		if (rc < 0) {
+			pr_err("VADC disable failed\n");
+			return rc;
+		}
+
+		if (vadc->adc->hkadc_ldo && vadc->adc->hkadc_ldo_ok)
+			qpnp_adc_disable_voltage(vadc->adc);
+	}
+
+	return 0;
+}
+
/*
 * qpnp_vadc_status_debug() - dump the VADC register space after a
 * conversion failure, then disable the block.
 *
 * Dumps QPNP_VADC_REG_DUMP rows of 8 registers, but only for the first
 * three failures per instance (vadc_debug_count) to avoid log spam.
 * Returns 0 on success or a negative errno from the register ops.
 */
static int32_t qpnp_vadc_status_debug(struct qpnp_vadc_chip *vadc)
{
	int rc = 0, i = 0;
	u8 buf[8], offset = 0;

	if (vadc->vadc_debug_count < 3) {
		for (i = 0; i < QPNP_VADC_REG_DUMP; i++) {
			rc = qpnp_vadc_read_reg(vadc, offset, buf, 8);
			if (rc) {
				pr_err("debug register dump failed\n");
				return rc;
			}
			offset += QPNP_VADC_OFFSET_DUMP;
			pr_err("row%d: 0%x 0%x 0%x 0%x 0%x 0%x 0%x 0%x\n",
				i, buf[0], buf[1], buf[2], buf[3], buf[4],
				buf[5], buf[6], buf[7]);
		}
	} else
		pr_debug("VADC peripheral dumps got printed before\n");

	vadc->vadc_debug_count++;

	/* Leave the ADC disabled after a failed conversion. */
	rc = qpnp_vadc_enable(vadc, false);
	if (rc < 0) {
		pr_err("VADC disable failed with %d\n", rc);
		return rc;
	}

	return 0;
}
+
/*
 * qpnp_vadc_hc_check_conversion_status() - poll STATUS1 until the
 * end-of-conversion bit is set (refreshed/HC register set).
 *
 * Sleeps ~213us between polls (one minimum conversion time) and gives up
 * after QPNP_VADC_HC1_ERR_COUNT attempts, dumping registers via
 * qpnp_vadc_status_debug() before returning -EINVAL.
 */
static int qpnp_vadc_hc_check_conversion_status(struct qpnp_vadc_chip *vadc)
{
	int rc = 0, count = 0;
	u8 status1 = 0;

	while (status1 != QPNP_VADC_STATUS1_EOC) {
		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
		if (rc < 0)
			return rc;
		status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
		if (status1 == QPNP_VADC_STATUS1_EOC)
			break;
		usleep_range(QPNP_VADC_HC1_CONV_TIME_MIN_US,
				QPNP_VADC_HC1_CONV_TIME_MAX_US);
		count++;
		if (count > QPNP_VADC_HC1_ERR_COUNT) {
			pr_err("retry error exceeded\n");
			rc = qpnp_vadc_status_debug(vadc);
			if (rc < 0)
				pr_err("VADC disable failed with %d\n", rc);
			return -EINVAL;
		}
	}

	return rc;
}
+
/*
 * qpnp_vadc_hc_read_data() - latch and read the 16-bit conversion result
 * (refreshed/HC register set).
 *
 * Sequence matters: the hold bit is set so DATA0/DATA1 form a coherent
 * pair, both bytes are read, the ADC is disabled, and the hold bit is
 * released last. The sentinel value 0x8000 indicates invalid data and
 * is rejected with -EINVAL. Returns 0 on success.
 */
static int qpnp_vadc_hc_read_data(struct qpnp_vadc_chip *vadc, int *data)
{
	int rc = 0;
	u8 buf = 0, rslt_lsb = 0, rslt_msb = 0;

	/* Set hold bit */
	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_DATA_HOLD_CTL, &buf, 1);
	if (rc) {
		pr_err("debug register dump failed\n");
		return rc;
	}
	buf |= QPNP_VADC_HC1_DATA_HOLD_CTL_FIELD;
	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_DATA_HOLD_CTL, &buf, 1);
	if (rc) {
		pr_err("debug register dump failed\n");
		return rc;
	}

	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_DATA0, &rslt_lsb, 1);
	if (rc < 0) {
		pr_err("qpnp adc result read failed for data0\n");
		return rc;
	}

	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_DATA1, &rslt_msb, 1);
	if (rc < 0) {
		pr_err("qpnp adc result read failed for data1\n");
		return rc;
	}

	*data = (rslt_msb << 8) | rslt_lsb;

	/* 0x8000 is the hardware's invalid-result marker. */
	if (*data == QPNP_VADC_HC1_DATA_CHECK_USR) {
		pr_err("Invalid data :0x%x\n", *data);
		return -EINVAL;
	}

	rc = qpnp_vadc_enable(vadc, false);
	if (rc) {
		pr_err("VADC disable failed\n");
		return rc;
	}

	/* De-assert hold bit */
	buf &= ~QPNP_VADC_HC1_DATA_HOLD_CTL_FIELD;
	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_DATA_HOLD_CTL, &buf, 1);
	if (rc)
		pr_err("de-asserting hold bit failed\n");

	return rc;
}
+
+static void qpnp_vadc_hc_update_adc_dig_param(struct qpnp_vadc_chip *vadc,
+			struct qpnp_adc_amux_properties *amux_prop, u8 *data)
+{
+	/* Update CAL value */
+	*data &= ~QPNP_VADC_HC1_CAL_VAL;
+	*data |= (amux_prop->cal_val << QPNP_VADC_HC1_CAL_VAL_SHIFT);
+
+	/* Update CAL select */
+	*data &= ~QPNP_VADC_HC1_CAL_SEL_MASK;
+	*data |= (amux_prop->calib_type << QPNP_VADC_HC1_CAL_SEL_SHIFT);
+
+	/* Update Decimation ratio select */
+	*data &= ~QPNP_VADC_HC1_DEC_RATIO_SEL;
+	*data |= (amux_prop->decimation << QPNP_VADC_HC1_DEC_RATIO_SHIFT);
+
+	pr_debug("VADC_DIG_PARAM value:0x%x\n", *data);
+}
+
/*
 * qpnp_vadc_hc_configure() - program a conversion on the refreshed (HC)
 * register set and kick it off.
 *
 * Performs a read-modify-write of the six contiguous registers
 * 0x42-0x47 (dig param, fast-avg, channel select, hw-settle delay,
 * enable, conversion request) as one block, so buf[] indices map
 * directly onto that register layout. The completion is re-armed before
 * the final block write when running in interrupt mode. Returns 0 on
 * success or a negative errno.
 */
static int qpnp_vadc_hc_configure(struct qpnp_vadc_chip *vadc,
				struct qpnp_adc_amux_properties *amux_prop)
{
	int rc = 0;
	u8 buf[6];

	/* Read registers 0x42 through 0x46 */
	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
	if (rc < 0) {
		pr_err("qpnp adc configure block read failed\n");
		return rc;
	}

	/* ADC Digital param selection */
	qpnp_vadc_hc_update_adc_dig_param(vadc, amux_prop, &buf[0]);

	/* Update fast average sample value */
	buf[1] &= (u8) ~QPNP_VADC_HC1_FAST_AVG_SAMPLES_MASK;
	buf[1] |= amux_prop->fast_avg_setup;

	/* Select ADC channel */
	buf[2] = amux_prop->amux_channel;

	/* Select hw settle delay for the channel */
	buf[3] &= (u8) ~QPNP_VADC_HC1_DELAY_CTL_MASK;
	buf[3] |= amux_prop->hw_settle_time;

	/* Select ADC enable */
	buf[4] |= QPNP_VADC_HC1_ADC_EN;

	/* Select CONV request */
	buf[5] |= QPNP_VADC_HC1_CONV_REQ_START;

	/* Arm the EOC completion before the conversion can start. */
	if (!vadc->vadc_poll_eoc)
		reinit_completion(&vadc->adc->adc_rslt_completion);

	pr_debug("dig:0x%x, fast_avg:0x%x, channel:0x%x, hw_settle:0x%x\n",
		buf[0], buf[1], buf[2], buf[3]);

	/* Block register write from 0x42 through 0x46 */
	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
	if (rc < 0) {
		pr_err("qpnp adc block register configure failed\n");
		return rc;
	}

	return 0;
}
+
/*
 * qpnp_vadc_hc_read() - one-shot voltage read on the refreshed (HC)
 * register set.
 *
 * Looks up @channel in the DT channel table, performs first-use
 * calibration, validates the channel's calib type / prescaling / scale
 * function indices, runs the conversion (polling STATUS1 or waiting on
 * the EOC completion with a fall-back status poll on timeout), then
 * scales the raw code into @result->physical via vadc_scale_fn[].
 *
 * Returns 0 on success, -EPROBE_DEFER if the chip is not registered,
 * -EBADF for an out-of-range scale function, or another negative errno.
 */
int32_t qpnp_vadc_hc_read(struct qpnp_vadc_chip *vadc,
				enum qpnp_vadc_channels channel,
				struct qpnp_vadc_result *result)
{
	int rc = 0, scale_type, amux_prescaling, dt_index = 0, calib_type = 0;
	struct qpnp_adc_amux_properties amux_prop;

	if (qpnp_vadc_is_valid(vadc))
		return -EPROBE_DEFER;

	mutex_lock(&vadc->adc->adc_lock);

	/* Locate the requested channel in the DT-provided channel list. */
	while ((vadc->adc->adc_channels[dt_index].channel_num
		!= channel) && (dt_index < vadc->max_channels_available))
		dt_index++;

	if (dt_index >= vadc->max_channels_available) {
		pr_err("not a valid VADC channel:%d\n", channel);
		rc = -EINVAL;
		goto fail_unlock;
	}

	/* Lazy one-time calibration on the first successful read. */
	if (!vadc->vadc_init_calib) {
		rc = qpnp_vadc_calib_device(vadc);
		if (rc) {
			pr_err("Calibration failed\n");
			goto fail_unlock;
		} else {
			vadc->vadc_init_calib = true;
		}
	}

	calib_type = vadc->adc->adc_channels[dt_index].calib_type;
	if (calib_type >= ADC_HC_CAL_SEL_NONE) {
		pr_err("not a valid calib_type\n");
		rc = -EINVAL;
		goto fail_unlock;
	}

	amux_prop.decimation =
			vadc->adc->adc_channels[dt_index].adc_decimation;
	amux_prop.calib_type = vadc->adc->adc_channels[dt_index].calib_type;
	amux_prop.cal_val = vadc->adc->adc_channels[dt_index].cal_val;
	amux_prop.fast_avg_setup =
			vadc->adc->adc_channels[dt_index].fast_avg_setup;
	amux_prop.amux_channel = channel;
	amux_prop.hw_settle_time =
			vadc->adc->adc_channels[dt_index].hw_settle_time;

	rc = qpnp_vadc_hc_configure(vadc, &amux_prop);
	if (rc < 0) {
		pr_err("Configuring VADC channel failed with %d\n", rc);
		goto fail_unlock;
	}

	if (vadc->vadc_poll_eoc) {
		rc = qpnp_vadc_hc_check_conversion_status(vadc);
		if (rc < 0) {
			pr_err("polling mode conversion failed\n");
			goto fail_unlock;
		}
	} else {
		/* On IRQ timeout, fall back to polling the status bit. */
		rc = wait_for_completion_timeout(
					&vadc->adc->adc_rslt_completion,
					QPNP_ADC_COMPLETION_TIMEOUT);
		if (!rc) {
			rc = qpnp_vadc_hc_check_conversion_status(vadc);
			if (rc < 0) {
				pr_err("interrupt mode conversion failed\n");
				goto fail_unlock;
			}
			pr_debug("End of conversion status set\n");
		}
	}

	rc = qpnp_vadc_hc_read_data(vadc, &result->adc_code);
	if (rc) {
		pr_err("qpnp vadc read adc code failed with %d\n", rc);
		goto fail_unlock;
	}

	amux_prescaling =
		vadc->adc->adc_channels[dt_index].chan_path_prescaling;

	if (amux_prescaling >= PATH_SCALING_NONE) {
		rc = -EINVAL;
		goto fail_unlock;
	}

	vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
		qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
	vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
		 qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;

	scale_type = vadc->adc->adc_channels[dt_index].adc_scale_fn;
	if (scale_type >= SCALE_NONE) {
		rc = -EBADF;
		goto fail_unlock;
	}

	/* Note: Scaling functions for VADC_HC do not need offset/gain */
	vadc_scale_fn[scale_type].chan(vadc, result->adc_code,
		vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);

	pr_debug("channel=0x%x, adc_code=0x%x adc_result=%lld\n",
			channel, result->adc_code, result->physical);

fail_unlock:
	mutex_unlock(&vadc->adc->adc_lock);

	return rc;
}
EXPORT_SYMBOL(qpnp_vadc_hc_read);
+
/*
 * qpnp_vadc_configure() - program the VADC peripheral for one conversion.
 * @vadc:	VADC chip instance.
 * @chan_prop:	amux properties (channel, decimation, settle time, mode).
 *
 * Writes mode, channel-select, decimation and HW-settle registers, applies
 * the mode-specific setup (fast-average for normal mode, sequencer hold/
 * timeout and trigger for sequencer mode, interval-op for measurement-
 * interval mode), enables the peripheral and, unless the caller runs in
 * IADC-sync mode, requests the conversion.  Returns 0 or a negative errno.
 */
static int32_t qpnp_vadc_configure(struct qpnp_vadc_chip *vadc,
			struct qpnp_adc_amux_properties *chan_prop)
{
	u8 decimation = 0, conv_sequence = 0, conv_sequence_trig = 0;
	u8 mode_ctrl = 0, meas_int_op_ctl_data = 0, buf = 0;
	int rc = 0;

	/*
	 * Mode selection.
	 * NOTE(review): some callers already store mode_sel pre-shifted by
	 * QPNP_VADC_OP_MODE_SHIFT, so this shifts it a second time; harmless
	 * for ADC_OP_NORMAL_MODE only if that opcode is 0 -- confirm against
	 * the register map.
	 */
	mode_ctrl |= ((chan_prop->mode_sel << QPNP_VADC_OP_MODE_SHIFT) |
			(QPNP_VADC_TRIM_EN | QPNP_VADC_AMUX_TRIM_EN));
	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MODE_CTL, &mode_ctrl, 1);
	if (rc < 0) {
		pr_err("Mode configure write error\n");
		return rc;
	}

	/* Channel selection */
	buf = chan_prop->amux_channel;
	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CH_SEL_CTL, &buf, 1);
	if (rc < 0) {
		pr_err("Channel configure error\n");
		return rc;
	}

	/* Digital parameter setup */
	decimation = chan_prop->decimation <<
				QPNP_VADC_DIG_DEC_RATIO_SEL_SHIFT;
	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_DIG_PARAM, &decimation, 1);
	if (rc < 0) {
		pr_err("Digital parameter configure write error\n");
		return rc;
	}

	/* HW settling time delay */
	buf = chan_prop->hw_settle_time;
	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HW_SETTLE_DELAY, &buf, 1);
	if (rc < 0) {
		pr_err("HW settling time setup error\n");
		return rc;
	}

	pr_debug("mode:%d, channel:%d, decimation:%d, hw_settle:%d\n",
		mode_ctrl, chan_prop->amux_channel, decimation,
					chan_prop->hw_settle_time);

	if (chan_prop->mode_sel == (ADC_OP_NORMAL_MODE <<
					QPNP_VADC_OP_MODE_SHIFT)) {
		/* Normal measurement mode */
		buf = chan_prop->fast_avg_setup;
		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_FAST_AVG_CTL,
								&buf, 1);
		if (rc < 0) {
			pr_err("Fast averaging configure error\n");
			return rc;
		}
		/* Ensure MEAS_INTERVAL_OP_CTL is set to 0 */
		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MEAS_INTERVAL_OP_CTL,
						&meas_int_op_ctl_data, 1);
		if (rc < 0) {
			pr_err("Measurement interval OP configure error\n");
			return rc;
		}
	} else if (chan_prop->mode_sel == (ADC_OP_CONVERSION_SEQUENCER <<
					QPNP_VADC_OP_MODE_SHIFT)) {
		/* Conversion sequence mode */
		conv_sequence = ((ADC_SEQ_HOLD_100US <<
				QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT) |
				ADC_CONV_SEQ_TIMEOUT_5MS);
		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_SEQ_CTL,
							&conv_sequence, 1);
		if (rc < 0) {
			pr_err("Conversion sequence error\n");
			return rc;
		}

		/* Start the sequence on the rising edge of the HW trigger */
		conv_sequence_trig = ((QPNP_VADC_CONV_SEQ_RISING_EDGE <<
				QPNP_VADC_CONV_SEQ_EDGE_SHIFT) |
				chan_prop->trigger_channel);
		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_SEQ_TRIG_CTL,
							&conv_sequence_trig, 1);
		if (rc < 0) {
			pr_err("Conversion trigger error\n");
			return rc;
		}
	} else if (chan_prop->mode_sel == ADC_OP_MEASUREMENT_INTERVAL) {
		/* Periodic measurement-interval mode */
		buf = QPNP_VADC_MEAS_INTERVAL_OP_SET;
		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MEAS_INTERVAL_OP_CTL,
					&buf, 1);
		if (rc < 0) {
			pr_err("Measurement interval OP configure error\n");
			return rc;
		}
	}

	/* Interrupt-driven completion: arm the completion before enabling */
	if (!vadc->vadc_poll_eoc)
		reinit_completion(&vadc->adc->adc_rslt_completion);

	rc = qpnp_vadc_enable(vadc, true);
	if (rc)
		return rc;

	if (!vadc->vadc_iadc_sync_lock) {
		/* Request conversion */
		buf = QPNP_VADC_CONV_REQ_SET;
		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_REQ, &buf, 1);
		if (rc < 0) {
			pr_err("Request conversion failed\n");
			return rc;
		}
	}

	return 0;
}
+
+static int32_t qpnp_vadc_read_conversion_result(struct qpnp_vadc_chip *vadc,
+								int32_t *data)
+{
+	uint8_t rslt_lsb, rslt_msb;
+	int rc = 0, status = 0;
+
+	status = qpnp_vadc_read_reg(vadc, QPNP_VADC_DATA0, &rslt_lsb, 1);
+	if (status < 0) {
+		pr_err("qpnp adc result read failed for data0\n");
+		goto fail;
+	}
+
+	status = qpnp_vadc_read_reg(vadc, QPNP_VADC_DATA1, &rslt_msb, 1);
+	if (status < 0) {
+		pr_err("qpnp adc result read failed for data1\n");
+		goto fail;
+	}
+
+	*data = (rslt_msb << 8) | rslt_lsb;
+
+fail:
+	rc = qpnp_vadc_enable(vadc, false);
+	if (rc)
+		return rc;
+
+	return status;
+}
+
/*
 * qpnp_vadc_read_status() - check sequencer completion status.
 * @vadc:	VADC chip instance.
 * @mode_sel:	operating mode the conversion was started in.
 *
 * Only conversion-sequencer mode is checked; every other mode returns 0.
 * Returns 0 when the sequencer is idle, QPNP_VADC_CONV_TIMEOUT_ERR on a
 * sequencer timeout, -EINVAL when the sequencer is stuck in a non-idle
 * state, or a negative errno on a register read failure.
 */
static int32_t qpnp_vadc_read_status(struct qpnp_vadc_chip *vadc, int mode_sel)
{
	u8 status1, status2, status2_conv_seq_state;
	u8 status_err = QPNP_VADC_CONV_TIMEOUT_ERR;
	int rc;

	switch (mode_sel) {
	case (ADC_OP_CONVERSION_SEQUENCER << QPNP_VADC_OP_MODE_SHIFT):
		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
		if (rc) {
			pr_err("qpnp_vadc read mask interrupt failed\n");
			return rc;
		}

		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS2, &status2, 1);
		if (rc) {
			pr_err("qpnp_vadc read mask interrupt failed\n");
			return rc;
		}

		/*
		 * Timeout detection.
		 * NOTE(review): the status1 mask folds to a constant
		 * (~REQ_STS | EOC); it was possibly meant to be
		 * ~(REQ_STS | EOC) -- confirm against the intended
		 * STATUS1 bit semantics before relying on this check.
		 */
		if (!(status2 & ~QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS) &&
			(status1 & (~QPNP_VADC_STATUS1_REQ_STS |
						QPNP_VADC_STATUS1_EOC))) {
			rc = status_err;
			return rc;
		}

		/* The sequencer must be back in idle after the conversion. */
		status2_conv_seq_state = status2 >>
					QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT;
		if (status2_conv_seq_state != ADC_CONV_SEQ_IDLE) {
			pr_err("qpnp vadc seq error with status %d\n",
						status2);
			rc = -EINVAL;
			return rc;
		}
	}

	return 0;
}
+
+static void qpnp_vadc_work(struct work_struct *work)
+{
+	struct qpnp_vadc_chip *vadc = container_of(work,
+			struct qpnp_vadc_chip, trigger_completion_work);
+
+	if (qpnp_vadc_is_valid(vadc) < 0)
+		return;
+
+	complete(&vadc->adc->adc_rslt_completion);
+}
+
+static void qpnp_vadc_low_thr_fn(struct work_struct *work)
+{
+	struct qpnp_vadc_chip *vadc = container_of(work,
+			struct qpnp_vadc_chip, trigger_low_thr_work);
+
+	vadc->state_copy->meas_int_mode = false;
+	vadc->state_copy->meas_int_request_in_queue = false;
+	vadc->state_copy->param->threshold_notification(
+			ADC_TM_LOW_STATE,
+			vadc->state_copy->param->btm_ctx);
+}
+
+static void qpnp_vadc_high_thr_fn(struct work_struct *work)
+{
+	struct qpnp_vadc_chip *vadc = container_of(work,
+			struct qpnp_vadc_chip, trigger_high_thr_work);
+
+	vadc->state_copy->meas_int_mode = false;
+	vadc->state_copy->meas_int_request_in_queue = false;
+	vadc->state_copy->param->threshold_notification(
+			ADC_TM_HIGH_STATE,
+			vadc->state_copy->param->btm_ctx);
+}
+
+static irqreturn_t qpnp_vadc_isr(int irq, void *dev_id)
+{
+	struct qpnp_vadc_chip *vadc = dev_id;
+
+	schedule_work(&vadc->trigger_completion_work);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_vadc_low_thr_isr(int irq, void *data)
+{
+	struct qpnp_vadc_chip *vadc = data;
+	u8 mode_ctl = 0, mode = 0;
+	int rc = 0;
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_MODE_CTL, &mode, 1);
+	if (rc < 0) {
+		pr_err("mode ctl register read failed with %d\n", rc);
+		return rc;
+	}
+
+	if (!(mode & QPNP_VADC_MEAS_INT_MODE_MASK)) {
+		pr_debug("Spurious VADC threshold 0x%x\n", mode);
+		return IRQ_HANDLED;
+	}
+
+	mode_ctl = ADC_OP_NORMAL_MODE;
+	/* Set measurement in single measurement mode */
+	qpnp_vadc_mode_select(vadc, mode_ctl);
+	qpnp_vadc_enable(vadc, false);
+	schedule_work(&vadc->trigger_low_thr_work);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_vadc_high_thr_isr(int irq, void *data)
+{
+	struct qpnp_vadc_chip *vadc = data;
+	u8 mode_ctl = 0, mode = 0;
+	int rc = 0;
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_MODE_CTL, &mode, 1);
+	if (rc < 0) {
+		pr_err("mode ctl register read failed with %d\n", rc);
+		return rc;
+	}
+
+	if (!(mode & QPNP_VADC_MEAS_INT_MODE_MASK)) {
+		pr_debug("Spurious VADC threshold 0x%x\n", mode);
+		return IRQ_HANDLED;
+	}
+
+	mode_ctl = ADC_OP_NORMAL_MODE;
+	/* Set measurement in single measurement mode */
+	qpnp_vadc_mode_select(vadc, mode_ctl);
+	qpnp_vadc_enable(vadc, false);
+	schedule_work(&vadc->trigger_high_thr_work);
+
+	return IRQ_HANDLED;
+}
+
+static int32_t qpnp_vadc_version_check(struct qpnp_vadc_chip *dev)
+{
+	uint8_t revision;
+	int rc;
+
+	rc = qpnp_vadc_read_reg(dev, QPNP_VADC_REVISION2, &revision, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc result read failed with %d\n", rc);
+		return rc;
+	}
+
+	if (revision < QPNP_VADC_SUPPORTED_REVISION2) {
+		pr_err("VADC Version not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int32_t
+	qpnp_vadc_channel_post_scaling_calib_check(struct qpnp_vadc_chip *vadc,
+								int channel)
+{
+	int version, rc = 0;
+
+	version = qpnp_adc_get_revid_version(vadc->dev);
+
+	if (version == QPNP_REV_ID_PM8950_1_0) {
+		if ((channel == LR_MUX7_HW_ID) ||
+			(channel == P_MUX2_1_1) ||
+			(channel == LR_MUX3_XO_THERM) ||
+			(channel == LR_MUX3_BUF_XO_THERM_BUF) ||
+			(channel == P_MUX4_1_1)) {
+			vadc->adc->amux_prop->chan_prop->calib_type =
+								CALIB_ABSOLUTE;
+			return rc;
+		}
+	}
+
+	return -EINVAL;
+}
+
/*
 * Per-process / per-revision compensation coefficients and offsets used by
 * qpnp_ocv_comp() and qpnp_vbat_sns_comp() below.  Values are applied as
 * ppm-style slopes (scaled by QPNP_VBAT_COEFF_3 == 100000) or as raw uV
 * offsets; which coefficient applies depends on the PMIC revision
 * (qpnp_adc_get_revid_version()) and the foundry id (vadc->id).
 */
#define QPNP_VBAT_COEFF_1	3000
#define QPNP_VBAT_COEFF_2	45810000
#define QPNP_VBAT_COEFF_3	100000
#define QPNP_VBAT_COEFF_4	3500
#define QPNP_VBAT_COEFF_5	80000000
#define QPNP_VBAT_COEFF_6	4400
#define QPNP_VBAT_COEFF_7	32200000
#define QPNP_VBAT_COEFF_8	3880
#define QPNP_VBAT_COEFF_9	5770
#define QPNP_VBAT_COEFF_10	3660
#define QPNP_VBAT_COEFF_11	5320
#define QPNP_VBAT_COEFF_12	8060000
#define QPNP_VBAT_COEFF_13	102640000
#define QPNP_VBAT_COEFF_14	22220000
#define QPNP_VBAT_COEFF_15	83060000
#define QPNP_VBAT_COEFF_16	2810
#define QPNP_VBAT_COEFF_17	5260
#define QPNP_VBAT_COEFF_18	8027
#define QPNP_VBAT_COEFF_19	2347
#define QPNP_VBAT_COEFF_20	6043
#define QPNP_VBAT_COEFF_21	1914
#define QPNP_VBAT_OFFSET_SMIC	9446
#define QPNP_VBAT_OFFSET_GF	9441
#define QPNP_OCV_OFFSET_SMIC	4596
#define QPNP_OCV_OFFSET_GF	5896
#define QPNP_VBAT_COEFF_22	6800
#define QPNP_VBAT_COEFF_23	3500
#define QPNP_VBAT_COEFF_24	4360
#define QPNP_VBAT_COEFF_25	8060
#define QPNP_VBAT_COEFF_26	7895
#define QPNP_VBAT_COEFF_27	5658
#define QPNP_VBAT_COEFF_28	5760
#define QPNP_VBAT_COEFF_29	7900
#define QPNP_VBAT_COEFF_30	5660
#define QPNP_VBAT_COEFF_31	3620
#define QPNP_VBAT_COEFF_32	1230
#define QPNP_VBAT_COEFF_33	5760
#define QPNP_VBAT_COEFF_34	4080
#define QPNP_VBAT_COEFF_35	7000
#define QPNP_VBAT_COEFF_36	3040
#define QPNP_VBAT_COEFF_37	3850
#define QPNP_VBAT_COEFF_38	5000
#define QPNP_VBAT_COEFF_39	2610
#define QPNP_VBAT_COEFF_40	4190
#define QPNP_VBAT_COEFF_41	5800
#define QPNP_VBAT_COEFF_42	2620
#define QPNP_VBAT_COEFF_43	4030
#define QPNP_VBAT_COEFF_44	3230
#define QPNP_VBAT_COEFF_45	3450
#define QPNP_VBAT_COEFF_46	2120
#define QPNP_VBAT_COEFF_47	3560
#define QPNP_VBAT_COEFF_48	2190
#define QPNP_VBAT_COEFF_49	4180
#define QPNP_VBAT_COEFF_50	27800000
#define QPNP_VBAT_COEFF_51	5110
#define QPNP_VBAT_COEFF_52	34444000
+
/*
 * qpnp_ocv_comp() - die-temperature compensation for a PON OCV reading.
 * @result:	in/out OCV value; compensated in place.
 * @vadc:	VADC chip instance (vadc->id selects the foundry variant).
 * @die_temp:	die temperature in milli-degrees C.
 *
 * Selects a slope (and optionally a fixed offset) per PMIC revision and
 * foundry, then applies result = result * (1 + slope/1e6) - offset.
 * Unknown revisions are left uncompensated.  Always returns 0.
 */
static int32_t qpnp_ocv_comp(int64_t *result,
			struct qpnp_vadc_chip *vadc, int64_t die_temp)
{
	int64_t temp_var = 0, offset = 0;
	int64_t old = *result;
	int version;

	version = qpnp_adc_get_revid_version(vadc->dev);
	if (version == -EINVAL)
		return 0;

	/* 8026 v2.2 is only compensated below 25 C. */
	if (version == QPNP_REV_ID_8026_2_2) {
		if (die_temp > 25000)
			return 0;
	}

	switch (version) {
	case QPNP_REV_ID_8941_3_1:
		switch (vadc->id) {
		case COMP_ID_TSMC:
			 temp_var = ((die_temp - 25000) *
			(-QPNP_VBAT_COEFF_4));
			break;
		default:
		case COMP_ID_GF:
			temp_var = ((die_temp - 25000) *
			(-QPNP_VBAT_COEFF_1));
			break;
		}
		break;
	case QPNP_REV_ID_8026_1_0:
		switch (vadc->id) {
		case COMP_ID_TSMC:
			temp_var = (((die_temp *
			(-QPNP_VBAT_COEFF_10))
			- QPNP_VBAT_COEFF_14));
			break;
		default:
		case COMP_ID_GF:
			temp_var = (((die_temp *
			(-QPNP_VBAT_COEFF_8))
			+ QPNP_VBAT_COEFF_12));
			break;
		}
		break;
	case QPNP_REV_ID_8026_2_0:
	case QPNP_REV_ID_8026_2_1:
		switch (vadc->id) {
		case COMP_ID_TSMC:
			temp_var = ((die_temp - 25000) *
			(-QPNP_VBAT_COEFF_10));
			break;
		default:
		case COMP_ID_GF:
			temp_var = ((die_temp - 25000) *
			(-QPNP_VBAT_COEFF_8));
			break;
		}
		break;
	case QPNP_REV_ID_8026_2_2:
		switch (vadc->id) {
		case COMP_ID_TSMC:
			*result -= QPNP_VBAT_COEFF_22;
			temp_var = (die_temp - 25000) *
					QPNP_VBAT_COEFF_24;
			break;
		default:
		case COMP_ID_GF:
			*result -= QPNP_VBAT_COEFF_22;
			temp_var = (die_temp - 25000) *
					QPNP_VBAT_COEFF_25;
			break;
		}
		break;
	case QPNP_REV_ID_8110_2_0:
		/* Fixed offset plus a slope that differs above/below 25 C. */
		switch (vadc->id) {
		case COMP_ID_SMIC:
			*result -= QPNP_OCV_OFFSET_SMIC;
			if (die_temp < 25000)
				temp_var = QPNP_VBAT_COEFF_18;
			else
				temp_var = QPNP_VBAT_COEFF_19;
			temp_var = (die_temp - 25000) * temp_var;
			break;
		default:
		case COMP_ID_GF:
			*result -= QPNP_OCV_OFFSET_GF;
			if (die_temp < 25000)
				temp_var = QPNP_VBAT_COEFF_20;
			else
				temp_var = QPNP_VBAT_COEFF_21;
			temp_var = (die_temp - 25000) * temp_var;
			break;
		}
		break;
	case QPNP_REV_ID_8916_1_0:
		switch (vadc->id) {
		case COMP_ID_SMIC:
			if (die_temp < 25000)
				temp_var = QPNP_VBAT_COEFF_26;
			else
				temp_var = QPNP_VBAT_COEFF_27;
			temp_var = (die_temp - 25000) * temp_var;
			break;
		default:
		case COMP_ID_GF:
			offset = QPNP_OCV_OFFSET_GF;
			if (die_temp < 25000)
				temp_var = QPNP_VBAT_COEFF_26;
			else
				temp_var = QPNP_VBAT_COEFF_27;
			temp_var = (die_temp - 25000) * temp_var;
			break;
		}
		break;
	case QPNP_REV_ID_8916_1_1:
		switch (vadc->id) {
		/* FAB_ID is zero */
		case COMP_ID_GF:
			if (die_temp < 25000)
				temp_var = QPNP_VBAT_COEFF_29;
			else
				temp_var = QPNP_VBAT_COEFF_30;
			temp_var = (die_temp - 25000) * temp_var;
			break;
		/* FAB_ID is non-zero */
		default:
			if (die_temp < 25000)
				temp_var = QPNP_VBAT_COEFF_31;
			else
				temp_var = (-QPNP_VBAT_COEFF_32);
			temp_var = (die_temp - 25000) * temp_var;
			break;
		}
		break;
	case QPNP_REV_ID_8916_2_0:
		/* Piecewise slopes: flat in the middle band, sloped outside. */
		switch (vadc->id) {
		case COMP_ID_SMIC:
			offset = (-QPNP_VBAT_COEFF_38);
			if (die_temp < 0)
				temp_var = die_temp * QPNP_VBAT_COEFF_36;
			else if (die_temp > 40000)
				temp_var = ((die_temp - 40000) *
						(-QPNP_VBAT_COEFF_37));
			break;
		case COMP_ID_TSMC:
			if (die_temp < 10000)
				temp_var = ((die_temp - 10000) *
						QPNP_VBAT_COEFF_41);
			else if (die_temp > 50000)
				temp_var = ((die_temp - 50000) *
						(-QPNP_VBAT_COEFF_42));
			break;
		default:
		case COMP_ID_GF:
			if (die_temp < 20000)
				temp_var = ((die_temp - 20000) *
						QPNP_VBAT_COEFF_45);
			else if (die_temp > 40000)
				temp_var = ((die_temp - 40000) *
						(-QPNP_VBAT_COEFF_46));
			break;
		}
		break;
	case QPNP_REV_ID_8909_1_0:
		switch (vadc->id) {
		case COMP_ID_SMIC:
			temp_var = (-QPNP_VBAT_COEFF_50);
			break;
		}
		break;
	case QPNP_REV_ID_8909_1_1:
		switch (vadc->id) {
		case COMP_ID_SMIC:
			temp_var = (QPNP_VBAT_COEFF_52);
			break;
		}
		break;
	default:
		temp_var = 0;
		break;
	}

	/* Convert the slope to ppm and apply: result *= (1e6 + slope)/1e6. */
	temp_var = div64_s64(temp_var, QPNP_VBAT_COEFF_3);

	temp_var = 1000000 + temp_var;

	*result = *result * temp_var;

	if (offset)
		*result -= offset;

	*result = div64_s64(*result, 1000000);
	pr_debug("%lld compensated into %lld\n", old, *result);

	return 0;
}
+
/*
 * qpnp_vbat_sns_comp() - die-temperature compensation for a VBAT_SNS
 * reading.
 * @result:	in/out battery voltage value; compensated in place.
 * @vadc:	VADC chip instance (vadc->id selects the foundry variant).
 * @die_temp:	die temperature in milli-degrees C.
 *
 * Same scheme as qpnp_ocv_comp() but with the VBAT_SNS coefficient set;
 * result = result * (1 + slope/1e6) - offset.  Unknown revisions are
 * left uncompensated.  Always returns 0.
 */
static int32_t qpnp_vbat_sns_comp(int64_t *result,
			struct qpnp_vadc_chip *vadc, int64_t die_temp)
{
	int64_t temp_var = 0, offset = 0;
	int64_t old = *result;
	int version;

	version = qpnp_adc_get_revid_version(vadc->dev);
	if (version == -EINVAL)
		return 0;

	if (version != QPNP_REV_ID_8941_3_1) {
		/* min(die_temp_c, 60_degC) */
		if (die_temp > 60000)
			die_temp = 60000;
	}

	switch (version) {
	case QPNP_REV_ID_8941_3_1:
		switch (vadc->id) {
		case COMP_ID_TSMC:
			temp_var = ((die_temp - 25000) *
			(-QPNP_VBAT_COEFF_1));
			break;
		default:
		case COMP_ID_GF:
			/* min(die_temp_c, 60_degC) */
			if (die_temp > 60000)
				die_temp = 60000;
			temp_var = ((die_temp - 25000) *
			(-QPNP_VBAT_COEFF_1));
			break;
		}
		break;
	case QPNP_REV_ID_8026_1_0:
		switch (vadc->id) {
		case COMP_ID_TSMC:
			temp_var = (((die_temp *
			(-QPNP_VBAT_COEFF_11))
			+ QPNP_VBAT_COEFF_15));
			break;
		default:
		case COMP_ID_GF:
			temp_var = (((die_temp *
			(-QPNP_VBAT_COEFF_9))
			+ QPNP_VBAT_COEFF_13));
			break;
		}
		break;
	case QPNP_REV_ID_8026_2_0:
	case QPNP_REV_ID_8026_2_1:
		switch (vadc->id) {
		case COMP_ID_TSMC:
			temp_var = ((die_temp - 25000) *
			(-QPNP_VBAT_COEFF_11));
			break;
		default:
		case COMP_ID_GF:
			temp_var = ((die_temp - 25000) *
			(-QPNP_VBAT_COEFF_9));
			break;
		}
		break;
	case QPNP_REV_ID_8026_2_2:
		/* Fixed offset only; no temperature slope. */
		switch (vadc->id) {
		case COMP_ID_TSMC:
			*result -= QPNP_VBAT_COEFF_23;
			temp_var = 0;
			break;
		default:
		case COMP_ID_GF:
			*result -= QPNP_VBAT_COEFF_23;
			temp_var = 0;
			break;
		}
		break;
	case QPNP_REV_ID_8110_2_0:
		switch (vadc->id) {
		case COMP_ID_SMIC:
			*result -= QPNP_VBAT_OFFSET_SMIC;
			temp_var = ((die_temp - 25000) *
			(QPNP_VBAT_COEFF_17));
			break;
		default:
		case COMP_ID_GF:
			*result -= QPNP_VBAT_OFFSET_GF;
			temp_var = ((die_temp - 25000) *
			(QPNP_VBAT_COEFF_16));
			break;
		}
		break;
	case QPNP_REV_ID_8916_1_0:
		switch (vadc->id) {
		case COMP_ID_SMIC:
			temp_var = ((die_temp - 25000) *
			(QPNP_VBAT_COEFF_28));
			break;
		default:
		case COMP_ID_GF:
			temp_var = ((die_temp - 25000) *
			(QPNP_VBAT_COEFF_28));
			break;
		}
		break;
	case QPNP_REV_ID_8916_1_1:
		switch (vadc->id) {
		/* FAB_ID is zero */
		case COMP_ID_GF:
			temp_var = ((die_temp - 25000) *
			(QPNP_VBAT_COEFF_33));
			break;
		/* FAB_ID is non-zero */
		default:
			offset = QPNP_VBAT_COEFF_35;
			if (die_temp > 50000) {
				temp_var = ((die_temp - 25000) *
				(QPNP_VBAT_COEFF_34));
			}
			break;
		}
		break;
	case QPNP_REV_ID_8916_2_0:
		/* Piecewise slopes: flat in the middle band, sloped outside. */
		switch (vadc->id) {
		case COMP_ID_SMIC:
			if (die_temp < 0) {
				temp_var = (die_temp *
					QPNP_VBAT_COEFF_39);
			} else if (die_temp > 40000) {
				temp_var = ((die_temp - 40000) *
				(-QPNP_VBAT_COEFF_40));
			}
			break;
		case COMP_ID_TSMC:
			if (die_temp < 10000)
				temp_var = ((die_temp - 10000) *
					QPNP_VBAT_COEFF_43);
			else if (die_temp > 50000)
				temp_var = ((die_temp - 50000) *
						(-QPNP_VBAT_COEFF_44));
			break;
		default:
		case COMP_ID_GF:
			if (die_temp < 20000)
				temp_var = ((die_temp - 20000) *
					QPNP_VBAT_COEFF_47);
			else if (die_temp > 40000)
				temp_var = ((die_temp - 40000) *
						(-QPNP_VBAT_COEFF_48));
			break;
		}
		break;
	case QPNP_REV_ID_8909_1_0:
		switch (vadc->id) {
		case COMP_ID_SMIC:
			if (die_temp < 30000)
				temp_var = (-QPNP_VBAT_COEFF_50);
			else if (die_temp > 30000)
				temp_var = (((die_temp - 30000) *
					(-QPNP_VBAT_COEFF_49)) +
					(-QPNP_VBAT_COEFF_50));
			break;
		}
		break;
	case QPNP_REV_ID_8909_1_1:
		switch (vadc->id) {
		case COMP_ID_SMIC:
			if (die_temp < 30000)
				temp_var = (QPNP_VBAT_COEFF_52);
			else if (die_temp > 30000)
				temp_var = (((die_temp - 30000) *
					(-QPNP_VBAT_COEFF_51)) +
					(QPNP_VBAT_COEFF_52));
			break;
		}
		break;
	default:
		temp_var = 0;
		break;
	}

	/* Convert the slope to ppm and apply: result *= (1e6 + slope)/1e6. */
	temp_var = div64_s64(temp_var, QPNP_VBAT_COEFF_3);

	temp_var = 1000000 + temp_var;

	*result = *result * temp_var;

	if (offset)
		*result -= offset;

	*result = div64_s64(*result, 1000000);
	pr_debug("%lld compensated into %lld\n", old, *result);

	return 0;
}
+
+int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *vadc,
+					int64_t *result, bool is_pon_ocv)
+{
+	struct qpnp_vadc_result die_temp_result;
+	int rc = 0;
+
+	rc = qpnp_vadc_is_valid(vadc);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+			DIE_TEMP, &die_temp_result);
+	if (rc < 0) {
+		pr_err("Error reading die_temp\n");
+		return rc;
+	}
+
+	pr_debug("die-temp = %lld\n", die_temp_result.physical);
+
+	if (is_pon_ocv)
+		rc = qpnp_ocv_comp(result, vadc, die_temp_result.physical);
+	else
+		rc = qpnp_vbat_sns_comp(result, vadc,
+				die_temp_result.physical);
+
+	if (rc < 0)
+		pr_err("Error with vbat compensation\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vbat_sns_comp_result);
+
+static void qpnp_vadc_625mv_channel_sel(struct qpnp_vadc_chip *vadc,
+				uint32_t *ref_channel_sel)
+{
+	uint32_t dt_index = 0;
+
+	/* Check if the buffered 625mV channel exists */
+	while ((vadc->adc->adc_channels[dt_index].channel_num
+		!= SPARE1) && (dt_index < vadc->max_channels_available))
+		dt_index++;
+
+	if (dt_index >= vadc->max_channels_available) {
+		pr_debug("Use default 625mV ref channel\n");
+		*ref_channel_sel = REF_625MV;
+	} else {
+		pr_debug("Use buffered 625mV ref channel\n");
+		*ref_channel_sel = SPARE1;
+	}
+}
+
+int32_t qpnp_vadc_calib_vref(struct qpnp_vadc_chip *vadc,
+					enum qpnp_adc_calib_type calib_type,
+					int *calib_data)
+{
+	struct qpnp_adc_amux_properties conv;
+	int rc, count = 0, calib_read = 0;
+	u8 status1 = 0;
+
+	if (vadc->vadc_hc) {
+		if (calib_type == ADC_HC_ABS_CAL)
+			conv.amux_channel = VADC_CALIB_VREF_1P25;
+		else if (calib_type == CALIB_RATIOMETRIC)
+			conv.amux_channel = VADC_CALIB_VREF;
+	} else {
+		if (calib_type == CALIB_ABSOLUTE)
+			conv.amux_channel = REF_125V;
+		else if (calib_type == CALIB_RATIOMETRIC)
+			conv.amux_channel = VDD_VADC;
+	}
+
+	conv.decimation = DECIMATION_TYPE2;
+	conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+	conv.cal_val = calib_type;
+
+	if (vadc->vadc_hc) {
+		rc = qpnp_vadc_hc_configure(vadc, &conv);
+		if (rc) {
+			pr_err("qpnp_vadc configure failed with %d\n", rc);
+			goto calib_fail;
+		}
+	} else {
+		rc = qpnp_vadc_configure(vadc, &conv);
+		if (rc) {
+			pr_err("qpnp_vadc configure failed with %d\n", rc);
+			goto calib_fail;
+		}
+	}
+
+	while (status1 != QPNP_VADC_STATUS1_EOC) {
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+		if (rc < 0)
+			return rc;
+		status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+		usleep_range(QPNP_VADC_CONV_TIME_MIN,
+				QPNP_VADC_CONV_TIME_MAX);
+		count++;
+		if (count > QPNP_VADC_ERR_COUNT) {
+			rc = -ENODEV;
+			goto calib_fail;
+		}
+	}
+
+	if (vadc->vadc_hc) {
+		rc = qpnp_vadc_hc_read_data(vadc, &calib_read);
+		if (rc) {
+			pr_err("qpnp vadc read adc code failed with %d\n", rc);
+			goto calib_fail;
+		}
+	} else {
+		rc = qpnp_vadc_read_conversion_result(vadc, &calib_read);
+		if (rc) {
+			pr_err("qpnp adc read adc failed with %d\n", rc);
+			goto calib_fail;
+		}
+	}
+
+	*calib_data = calib_read;
+calib_fail:
+	return rc;
+}
+
+
+int32_t qpnp_vadc_calib_gnd(struct qpnp_vadc_chip *vadc,
+					enum qpnp_adc_calib_type calib_type,
+					int *calib_data)
+{
+	struct qpnp_adc_amux_properties conv;
+	int rc, count = 0, calib_read = 0;
+	u8 status1 = 0;
+	uint32_t ref_channel_sel = 0;
+
+	if (vadc->vadc_hc) {
+		conv.amux_channel = VADC_VREF_GND;
+	} else {
+		if (calib_type == CALIB_ABSOLUTE) {
+			qpnp_vadc_625mv_channel_sel(vadc, &ref_channel_sel);
+			conv.amux_channel = ref_channel_sel;
+		} else if (calib_type == CALIB_RATIOMETRIC)
+			conv.amux_channel = GND_REF;
+	}
+
+	conv.decimation = DECIMATION_TYPE2;
+	conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+	conv.cal_val = calib_type;
+
+	if (vadc->vadc_hc) {
+		rc = qpnp_vadc_hc_configure(vadc, &conv);
+		if (rc) {
+			pr_err("qpnp_vadc configure failed with %d\n", rc);
+			goto calib_fail;
+		}
+	} else {
+		rc = qpnp_vadc_configure(vadc, &conv);
+		if (rc) {
+			pr_err("qpnp_vadc configure failed with %d\n", rc);
+			goto calib_fail;
+		}
+	}
+
+	while (status1 != QPNP_VADC_STATUS1_EOC) {
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+		if (rc < 0)
+			return rc;
+		status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+		usleep_range(QPNP_VADC_CONV_TIME_MIN,
+				QPNP_VADC_CONV_TIME_MAX);
+		count++;
+		if (count > QPNP_VADC_ERR_COUNT) {
+			rc = -ENODEV;
+			goto calib_fail;
+		}
+	}
+
+	if (vadc->vadc_hc) {
+		rc = qpnp_vadc_hc_read_data(vadc, &calib_read);
+		if (rc) {
+			pr_err("qpnp vadc read adc code failed with %d\n", rc);
+			goto calib_fail;
+		}
+	} else {
+		rc = qpnp_vadc_read_conversion_result(vadc, &calib_read);
+		if (rc) {
+			pr_err("qpnp adc read adc failed with %d\n", rc);
+			goto calib_fail;
+		}
+	}
+	*calib_data = calib_read;
+calib_fail:
+	return rc;
+}
+
/*
 * qpnp_vadc_calib_device() - one-time measurement of the calibration
 * graphs.
 * @vadc:	VADC chip instance.
 *
 * Reads the vref and ground reference codes for the absolute (or
 * ADC_HC absolute) calibration and for the ratiometric calibration, and
 * stores the resulting (dy, dx, adc_vref, adc_gnd) linear-graph points in
 * amux_prop->chan_prop->adc_graph[].  Identical vref/gnd readings are
 * rejected as -EINVAL since they would make the slope degenerate.
 */
static int32_t qpnp_vadc_calib_device(struct qpnp_vadc_chip *vadc)
{
	int rc, calib_read_1 = 0, calib_read_2 = 0;
	enum qpnp_adc_calib_type calib_type;

	/* VADC_HC peripherals use a dedicated absolute-calibration type. */
	if (vadc->vadc_hc)
		calib_type = ADC_HC_ABS_CAL;
	else
		calib_type = CALIB_ABSOLUTE;

	rc = qpnp_vadc_calib_vref(vadc, calib_type, &calib_read_1);
	if (rc) {
		pr_err("qpnp adc absolute vref calib failed with %d\n", rc);
		goto calib_fail;
	}
	rc = qpnp_vadc_calib_gnd(vadc, calib_type, &calib_read_2);
	if (rc) {
		pr_err("qpnp adc absolute gnd calib failed with %d\n", rc);
		goto calib_fail;
	}
	pr_debug("absolute reference raw: 1.25V:0x%x, 625mV/GND:0x%x\n",
				calib_read_1, calib_read_2);

	/* Equal readings would give a zero slope (dy == 0). */
	if (calib_read_1 == calib_read_2) {
		pr_err("absolute reference raw: 1.25V:0x%x625mV:0x%x\n",
			calib_read_2, calib_read_1);
		rc = -EINVAL;
		goto calib_fail;
	}

	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dy =
				(calib_read_1 - calib_read_2);

	/* dx is the known reference span for the calibration type. */
	if (calib_type == CALIB_ABSOLUTE)
		vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dx
						= QPNP_ADC_625_UV;
	else if (calib_type == ADC_HC_ABS_CAL)
		vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dx
						= QPNP_ADC_1P25_UV;
	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_vref =
					calib_read_1;
	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_gnd =
					calib_read_2;

	/* Repeat the measurement for the ratiometric graph. */
	calib_read_1 = 0;
	calib_read_2 = 0;
	rc = qpnp_vadc_calib_vref(vadc, CALIB_RATIOMETRIC, &calib_read_1);
	if (rc) {
		pr_err("qpnp adc ratiometric vref calib failed with %d\n", rc);
		goto calib_fail;
	}
	rc = qpnp_vadc_calib_gnd(vadc, CALIB_RATIOMETRIC, &calib_read_2);
	if (rc) {
		pr_err("qpnp adc ratiometric gnd calib failed with %d\n", rc);
		goto calib_fail;
	}
	pr_debug("ratiometric reference raw: VDD:0x%x GND:0x%x\n",
				calib_read_1, calib_read_2);

	if (calib_read_1 == calib_read_2) {
		pr_err("ratiometric reference raw: VDD:0x%x GND:0x%x\n",
				calib_read_1, calib_read_2);
		rc = -EINVAL;
		goto calib_fail;
	}

	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy =
					(calib_read_1 - calib_read_2);
	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx =
					vadc->adc->adc_prop->adc_vdd_reference;
	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_vref
					= calib_read_1;
	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd
					= calib_read_2;

calib_fail:
	return rc;
}
+
+int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *vadc,
+				struct qpnp_vadc_linear_graph *param,
+				enum qpnp_adc_calib_type calib_type)
+{
+	int rc = 0;
+	struct qpnp_vadc_result result;
+
+	rc = qpnp_vadc_is_valid(vadc);
+	if (rc < 0)
+		return rc;
+
+	if (!vadc->vadc_init_calib) {
+		if (vadc->vadc_hc) {
+			rc = qpnp_vadc_hc_read(vadc, VADC_CALIB_VREF_1P25,
+								&result);
+			if (rc) {
+				pr_debug("vadc read failed with rc = %d\n", rc);
+				return rc;
+			}
+		} else {
+			rc = qpnp_vadc_read(vadc, REF_125V, &result);
+			if (rc) {
+				pr_debug("vadc read failed with rc = %d\n", rc);
+				return rc;
+			}
+		}
+	}
+
+	switch (calib_type) {
+	case CALIB_RATIOMETRIC:
+	param->dy =
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy;
+	param->dx =
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx;
+	param->adc_vref = vadc->adc->adc_prop->adc_vdd_reference;
+	param->adc_gnd =
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd;
+	break;
+	case CALIB_ABSOLUTE:
+	case ADC_HC_ABS_CAL:
+	param->dy =
+	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dy;
+	param->dx =
+	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dx;
+	param->adc_vref = vadc->adc->adc_prop->adc_vdd_reference;
+	param->adc_gnd =
+	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_gnd;
+	break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_get_vadc_gain_and_offset);
+
/*
 * qpnp_vadc_wait_for_req_sts_check() - wait for an in-flight conversion
 * to finish, then disable the peripheral.
 * @vadc:	VADC chip instance.
 *
 * Re-enables the bank, polls STATUS1.REQ_STS up to QPNP_RETRY times and
 * finally disables the bank so a new conversion can be programmed.
 * Returns the result of the final disable (or an earlier read error).
 */
static int32_t qpnp_vadc_wait_for_req_sts_check(struct qpnp_vadc_chip *vadc)
{
	u8 status1 = 0;
	int rc, count = 0;

	/* Re-enable the peripheral */
	rc = qpnp_vadc_enable(vadc, true);
	if (rc) {
		pr_err("vadc re-enable peripheral failed with %d\n", rc);
		return rc;
	}

	/* The VADC_TM bank needs to be disabled for new conversion request */
	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
	if (rc) {
		pr_err("vadc read status1 failed with %d\n", rc);
		return rc;
	}

	/* Disable the bank if a conversion is occurring */
	while ((status1 & QPNP_VADC_STATUS1_REQ_STS) && (count < QPNP_RETRY)) {
		/* Wait time is based on the optimum sampling rate
		 * and adding enough time buffer to account for ADC conversions
		 * occurring on different peripheral banks
		 */
		usleep_range(QPNP_MIN_TIME, QPNP_MAX_TIME);
		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
		if (rc < 0) {
			pr_err("vadc disable failed with %d\n", rc);
			return rc;
		}
		count++;
	}

	if (count >= QPNP_RETRY)
		pr_err("QPNP vadc status req bit did not fall low!!\n");

	/*
	 * NOTE(review): this read's result (rc and status1) is never
	 * used -- rc is overwritten by the disable below.  Presumably a
	 * leftover; confirm whether the read itself is required by the HW.
	 */
	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);

	/* Disable the peripheral */
	rc = qpnp_vadc_enable(vadc, false);
	if (rc < 0)
		pr_err("vadc peripheral disable failed with %d\n", rc);

	return rc;
}
+
+static int32_t qpnp_vadc_manage_meas_int_requests(struct qpnp_vadc_chip *chip)
+{
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(chip->dev);
+	int rc = 0, dt_index = 0;
+	u8 mode_ctl = 0;
+
+	pr_debug("meas_int_mode:0x%x, mode_ctl:%0x\n",
+		vadc->state_copy->meas_int_mode, mode_ctl);
+
+	if (vadc->state_copy->meas_int_mode) {
+		pr_debug("meas interval in progress. Procced to disable it\n");
+		/* measurement interval in progress. Proceed to disable it */
+		mode_ctl = ADC_OP_NORMAL_MODE;
+		rc = qpnp_vadc_mode_select(vadc, mode_ctl);
+		if (rc < 0) {
+			pr_err("NORM mode select failed with %d\n", rc);
+			return rc;
+		}
+
+		/* Disable bank */
+		rc = qpnp_vadc_enable(vadc, false);
+		if (rc) {
+			pr_err("Disable bank failed with %d\n", rc);
+			return rc;
+		}
+
+		/* Check if a conversion is in progress */
+		rc = qpnp_vadc_wait_for_req_sts_check(vadc);
+		if (rc < 0) {
+			pr_err("req_sts check failed with %d\n", rc);
+			return rc;
+		}
+
+		vadc->state_copy->meas_int_mode = false;
+		vadc->state_copy->meas_int_request_in_queue = true;
+	} else if (vadc->state_copy->meas_int_request_in_queue) {
+		/* put the meas interval back in queue */
+		pr_debug("put meas interval back in queue\n");
+		vadc->adc->amux_prop->amux_channel =
+				vadc->state_copy->vadc_meas_amux.channel_num;
+		while ((vadc->adc->adc_channels[dt_index].channel_num
+			!= vadc->adc->amux_prop->amux_channel) &&
+			(dt_index < vadc->max_channels_available))
+			dt_index++;
+		if (dt_index >= vadc->max_channels_available) {
+			pr_err("not a valid VADC channel\n");
+			rc = -EINVAL;
+			return rc;
+		}
+
+		vadc->adc->amux_prop->decimation =
+			vadc->adc->amux_prop->decimation;
+		vadc->adc->amux_prop->hw_settle_time =
+			vadc->adc->amux_prop->hw_settle_time;
+		vadc->adc->amux_prop->fast_avg_setup =
+			vadc->adc->amux_prop->fast_avg_setup;
+		vadc->adc->amux_prop->mode_sel = ADC_OP_MEASUREMENT_INTERVAL;
+		rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+		if (rc) {
+			pr_err("vadc configure failed with %d\n", rc);
+			return rc;
+		}
+
+		vadc->state_copy->meas_int_mode = true;
+		vadc->state_copy->meas_int_request_in_queue = false;
+	}
+	dev_set_drvdata(vadc->dev, vadc);
+
+	return 0;
+}
+
+struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev, const char *name)
+{
+	struct qpnp_vadc_chip *vadc;
+	struct device_node *node = NULL;
+	char prop_name[QPNP_MAX_PROP_NAME_LEN];
+
+	snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-vadc", name);
+
+	node = of_parse_phandle(dev->of_node, prop_name, 0);
+	if (node == NULL)
+		return ERR_PTR(-ENODEV);
+
+	list_for_each_entry(vadc, &qpnp_vadc_device_list, list)
+		if (vadc->adc->pdev->dev.of_node == node)
+			return vadc;
+	return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(qpnp_get_vadc);
+
+/*
+ * Perform a one-shot VADC conversion on @channel (via the conversion
+ * sequencer when a hardware trigger below ADC_SEQ_NONE is selected) and
+ * return the raw code and scaled physical value in @result.
+ *
+ * Holds the ADC lock for the whole conversion.  When the
+ * qcom,vadc-recalib-check property is set, the reference is re-sampled
+ * after the conversion and the measurement retried (bounded by
+ * QPNP_VADC_RECALIB_MAXCNT) if the reference drifted beyond the offset
+ * allowed for the channel's calibration type.
+ */
+int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *vadc,
+				enum qpnp_vadc_trigger trigger_channel,
+					enum qpnp_vadc_channels channel,
+					struct qpnp_vadc_result *result)
+{
+	int rc = 0, scale_type, amux_prescaling, dt_index = 0, calib_type = 0;
+	uint32_t ref_channel, count = 0, local_idx = 0;
+	int32_t vref_calib = 0, gnd_calib = 0, new_vref_calib = 0, offset = 0;
+	int32_t calib_offset = 0;
+	u8 status1 = 0;
+
+	if (qpnp_vadc_is_valid(vadc))
+		return -EPROBE_DEFER;
+
+	mutex_lock(&vadc->adc->adc_lock);
+
+	if (vadc->state_copy->vadc_meas_int_enable)
+		qpnp_vadc_manage_meas_int_requests(vadc);
+
+	if (channel == REF_625MV) {
+		qpnp_vadc_625mv_channel_sel(vadc, &ref_channel);
+		channel = ref_channel;
+	}
+
+	vadc->adc->amux_prop->amux_channel = channel;
+
+	/* Map the requested channel number to its device tree index. */
+	while ((vadc->adc->adc_channels[dt_index].channel_num
+		!= channel) && (dt_index < vadc->max_channels_available))
+		dt_index++;
+
+	if (dt_index >= vadc->max_channels_available) {
+		pr_err("not a valid VADC channel\n");
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	calib_type = vadc->adc->adc_channels[dt_index].calib_type;
+	if (calib_type >= CALIB_NONE) {
+		pr_err("not a valid calib_type\n");
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+	calib_offset = (calib_type == CALIB_ABSOLUTE) ?
+		QPNP_VADC_ABSOLUTE_RECALIB_OFFSET :
+		QPNP_VADC_RATIOMETRIC_RECALIB_OFFSET;
+	rc = qpnp_vadc_version_check(vadc);
+	if (rc)
+		goto fail_unlock;
+	if (vadc->vadc_recalib_check) {
+		rc = qpnp_vadc_calib_vref(vadc, calib_type, &vref_calib);
+		if (rc) {
+			pr_err("Calibration failed\n");
+			goto fail_unlock;
+		}
+	} else if (!vadc->vadc_init_calib) {
+		/* One-time full calibration on the first conversion. */
+		rc = qpnp_vadc_calib_device(vadc);
+		if (rc) {
+			pr_err("Calibration failed\n");
+			goto fail_unlock;
+		} else {
+			vadc->vadc_init_calib = true;
+		}
+	}
+
+recalibrate:
+	status1 = 0;
+	vadc->adc->amux_prop->decimation =
+			vadc->adc->adc_channels[dt_index].adc_decimation;
+	vadc->adc->amux_prop->hw_settle_time =
+			vadc->adc->adc_channels[dt_index].hw_settle_time;
+	vadc->adc->amux_prop->fast_avg_setup =
+			vadc->adc->adc_channels[dt_index].fast_avg_setup;
+
+	if (trigger_channel < ADC_SEQ_NONE)
+		vadc->adc->amux_prop->mode_sel = (ADC_OP_CONVERSION_SEQUENCER
+						<< QPNP_VADC_OP_MODE_SHIFT);
+	else if (trigger_channel == ADC_SEQ_NONE)
+		vadc->adc->amux_prop->mode_sel = (ADC_OP_NORMAL_MODE
+						<< QPNP_VADC_OP_MODE_SHIFT);
+	else {
+		pr_err("Invalid trigger channel:%d\n", trigger_channel);
+		/*
+		 * Fix: rc was still 0 here, so an invalid trigger channel
+		 * was silently reported as success to the caller.
+		 */
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	vadc->adc->amux_prop->trigger_channel = trigger_channel;
+
+	rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+	if (rc) {
+		pr_err("qpnp vadc configure failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	if (vadc->vadc_poll_eoc) {
+		/* Poll STATUS1 for end-of-conversion, bounded by retries. */
+		while (status1 != QPNP_VADC_STATUS1_EOC) {
+			rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1,
+							&status1, 1);
+			if (rc < 0)
+				goto fail_unlock;
+			status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+			if (status1 == QPNP_VADC_STATUS1_EOC)
+				break;
+			usleep_range(QPNP_VADC_CONV_TIME_MIN,
+					QPNP_VADC_CONV_TIME_MAX);
+			count++;
+			if (count > QPNP_VADC_ERR_COUNT) {
+				pr_err("retry error exceeded\n");
+				rc = qpnp_vadc_status_debug(vadc);
+				if (rc < 0)
+					pr_err("VADC disable failed\n");
+				rc = -EINVAL;
+				goto fail_unlock;
+			}
+		}
+	} else {
+		/* IRQ mode: wait for the EOC completion from the ISR. */
+		rc = wait_for_completion_timeout(
+					&vadc->adc->adc_rslt_completion,
+					QPNP_ADC_COMPLETION_TIMEOUT);
+		if (!rc) {
+			/* Timed out: EOC may still have been set late. */
+			rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1,
+							&status1, 1);
+			if (rc < 0)
+				goto fail_unlock;
+			status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+			if (status1 == QPNP_VADC_STATUS1_EOC)
+				pr_debug("End of conversion status set\n");
+			else {
+				rc = qpnp_vadc_status_debug(vadc);
+				if (rc < 0)
+					pr_err("VADC disable failed\n");
+				rc = -EINVAL;
+				goto fail_unlock;
+			}
+		}
+	}
+
+	if (trigger_channel < ADC_SEQ_NONE) {
+		rc = qpnp_vadc_read_status(vadc,
+					vadc->adc->amux_prop->mode_sel);
+		if (rc)
+			pr_debug("Conversion sequence timed out - %d\n", rc);
+	}
+
+	rc = qpnp_vadc_read_conversion_result(vadc, &result->adc_code);
+	if (rc) {
+		pr_err("qpnp vadc read adc code failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	if (vadc->vadc_recalib_check) {
+		/* Re-sample the references to detect reference drift. */
+		rc = qpnp_vadc_calib_gnd(vadc, calib_type, &gnd_calib);
+		if (rc) {
+			pr_err("Calibration failed\n");
+			goto fail_unlock;
+		}
+		rc = qpnp_vadc_calib_vref(vadc, calib_type, &new_vref_calib);
+		if (rc < 0) {
+			pr_err("qpnp vadc calib read failed with %d\n", rc);
+			goto fail_unlock;
+		}
+
+		if (local_idx >= QPNP_VADC_RECALIB_MAXCNT) {
+			pr_err("invalid recalib count=%d\n", local_idx);
+			rc = -EINVAL;
+			goto fail_unlock;
+		}
+		pr_debug(
+			"chan=%d, calib=%s, vref_calib=0x%x, gnd_calib=0x%x, new_vref_calib=0x%x\n",
+			channel,
+			((calib_type == CALIB_ABSOLUTE) ?
+			"ABSOLUTE" : "RATIOMETRIC"),
+			vref_calib, gnd_calib, new_vref_calib);
+
+		offset = (new_vref_calib - vref_calib);
+		if (offset < 0)
+			offset = -offset;
+		if (offset <= calib_offset) {
+			/* Drift within tolerance: update the graph and keep
+			 * the conversion result.
+			 */
+			pr_debug(
+				"qpnp vadc recalibration not required,offset:%d\n",
+								offset);
+			local_idx = 0;
+		vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dy =
+						(vref_calib - gnd_calib);
+		vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dx =
+			(calib_type == CALIB_ABSOLUTE) ? QPNP_ADC_625_UV :
+					vadc->adc->adc_prop->adc_vdd_reference;
+		vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_vref
+								= vref_calib;
+		vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_gnd
+								= gnd_calib;
+		} else {
+			/* Reference drifted: retry the conversion, bounded. */
+			vref_calib = new_vref_calib;
+			local_idx = local_idx + 1;
+			if (local_idx >= QPNP_VADC_RECALIB_MAXCNT) {
+				pr_err(
+				"qpnp_vadc recalibration failed, count=%d",
+								local_idx);
+			} else {
+				pr_debug(
+				"qpnp vadc recalibration requested,offset:%d\n",
+								offset);
+				offset = 0;
+				goto recalibrate;
+			}
+		}
+	}
+
+	amux_prescaling =
+		vadc->adc->adc_channels[dt_index].chan_path_prescaling;
+
+	if (amux_prescaling >= PATH_SCALING_NONE) {
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+		qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+	vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+		 qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+	vadc->adc->amux_prop->chan_prop->calib_type =
+		vadc->adc->adc_channels[dt_index].calib_type;
+
+	scale_type = vadc->adc->adc_channels[dt_index].adc_scale_fn;
+	if (scale_type >= SCALE_NONE) {
+		rc = -EBADF;
+		goto fail_unlock;
+	}
+
+	if ((qpnp_vadc_channel_post_scaling_calib_check(vadc, channel)) < 0)
+		pr_debug("Post scaling calib type not updated\n");
+
+	/* Convert the raw code into the channel's physical units. */
+	vadc_scale_fn[scale_type].chan(vadc, result->adc_code,
+		vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
+
+	pr_debug("channel=%d, adc_code=%d adc_result=%lld\n",
+			channel, result->adc_code, result->physical);
+
+fail_unlock:
+	if (vadc->state_copy->vadc_meas_int_enable)
+		qpnp_vadc_manage_meas_int_requests(vadc);
+
+	mutex_unlock(&vadc->adc->adc_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_conv_seq_request);
+
+/*
+ * Read @channel and return its scaled value in @result.
+ *
+ * Dispatches to the HC peripheral when present; otherwise special-cases:
+ * VBAT_SNS (compensated against die temperature) and SPARE2 (charger die
+ * temperature, read while holding a "force TLIM" vote on the battery
+ * power supply).  All other channels are plain one-shot conversions.
+ */
+int32_t qpnp_vadc_read(struct qpnp_vadc_chip *vadc,
+				enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result)
+{
+	struct qpnp_vadc_result die_temp_result;
+	int rc = 0;
+	enum power_supply_property prop;
+	union power_supply_propval ret = {0, };
+
+	if (vadc->vadc_hc) {
+		/* HC (refreshed) VADC peripheral has its own read path. */
+		rc = qpnp_vadc_hc_read(vadc, channel, result);
+		if (rc < 0) {
+			pr_err("Error reading vadc_hc channel %d\n", channel);
+			return rc;
+		}
+
+		return 0;
+	}
+
+	if (channel == VBAT_SNS) {
+		rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+				channel, result);
+		if (rc < 0) {
+			pr_err("Error reading vbatt\n");
+			return rc;
+		}
+
+		/* Die temperature is needed to compensate the vbatt value. */
+		rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+				DIE_TEMP, &die_temp_result);
+		if (rc < 0) {
+			pr_err("Error reading die_temp\n");
+			return rc;
+		}
+
+		/* Best-effort: uncompensated result is still returned. */
+		rc = qpnp_vbat_sns_comp(&result->physical, vadc,
+						die_temp_result.physical);
+		if (rc < 0)
+			pr_err("Error with vbat compensation\n");
+
+		return 0;
+	} else if (channel == SPARE2) {
+		/* chg temp channel */
+		if (!vadc->vadc_chg_vote) {
+			vadc->vadc_chg_vote =
+				power_supply_get_by_name("battery");
+			if (!vadc->vadc_chg_vote) {
+				pr_err("no vadc_chg_vote found\n");
+				return -EINVAL;
+			}
+		}
+
+		prop = POWER_SUPPLY_PROP_FORCE_TLIM;
+		ret.intval = 1;
+
+		/* Vote the charger into a state safe for the measurement. */
+		rc = power_supply_set_property(vadc->vadc_chg_vote,
+								prop, &ret);
+		if (rc) {
+			pr_err("error enabling the charger circuitry vote\n");
+			return rc;
+		}
+
+		/*
+		 * NOTE(review): a conversion failure here is only logged;
+		 * the vote is always released and 0 is returned — presumably
+		 * deliberate so the vote is never left set.  Confirm callers
+		 * tolerate a stale *result on this path.
+		 */
+		rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+				channel, result);
+		if (rc < 0)
+			pr_err("Error reading die_temp\n");
+
+		ret.intval = 0;
+		rc = power_supply_set_property(vadc->vadc_chg_vote,
+								prop, &ret);
+		if (rc) {
+			pr_err("error enabling the charger circuitry vote\n");
+			return rc;
+		}
+
+		return 0;
+	} else
+		return qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+				channel, result);
+}
+EXPORT_SYMBOL(qpnp_vadc_read);
+
+/* Acquire the ADC lock; released by qpnp_vadc_unlock(). */
+static void qpnp_vadc_lock(struct qpnp_vadc_chip *vadc)
+{
+	mutex_lock(&vadc->adc->adc_lock);
+}
+
+/* Release the ADC lock taken by qpnp_vadc_lock(). */
+static void qpnp_vadc_unlock(struct qpnp_vadc_chip *vadc)
+{
+	mutex_unlock(&vadc->adc->adc_lock);
+}
+
+/*
+ * Start a VADC conversion synchronized with an IADC measurement.
+ *
+ * On success the ADC lock is intentionally left HELD and
+ * vadc_iadc_sync_lock set; the caller must complete the pair with
+ * qpnp_vadc_iadc_sync_complete_request(), which releases the lock.
+ * On failure the lock is released here.
+ */
+int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *vadc,
+				enum qpnp_vadc_channels channel)
+{
+	int rc = 0, dt_index = 0, calib_type = 0;
+
+	if (qpnp_vadc_is_valid(vadc))
+		return -EPROBE_DEFER;
+
+	qpnp_vadc_lock(vadc);
+
+
+	vadc->adc->amux_prop->amux_channel = channel;
+
+	/* Map the requested channel number to its device tree index. */
+	while ((vadc->adc->adc_channels[dt_index].channel_num
+		!= channel) && (dt_index < vadc->max_channels_available))
+		dt_index++;
+
+	if (dt_index >= vadc->max_channels_available) {
+		pr_err("not a valid VADC channel\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	calib_type = vadc->adc->adc_channels[dt_index].calib_type;
+	if (!vadc->vadc_init_calib) {
+		/* One-time calibration on the first conversion. */
+		rc = qpnp_vadc_version_check(vadc);
+		if (rc)
+			goto fail;
+
+		rc = qpnp_vadc_calib_device(vadc);
+		if (rc) {
+			pr_err("Calibration failed\n");
+			goto fail;
+		} else
+			vadc->vadc_init_calib = true;
+	}
+
+	vadc->adc->amux_prop->decimation =
+			vadc->adc->adc_channels[dt_index].adc_decimation;
+	vadc->adc->amux_prop->hw_settle_time =
+			vadc->adc->adc_channels[dt_index].hw_settle_time;
+	vadc->adc->amux_prop->fast_avg_setup =
+			vadc->adc->adc_channels[dt_index].fast_avg_setup;
+	vadc->adc->amux_prop->mode_sel = (ADC_OP_NORMAL_MODE
+					<< QPNP_VADC_OP_MODE_SHIFT);
+	vadc->vadc_iadc_sync_lock = true;
+
+	rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+	if (rc) {
+		pr_err("qpnp vadc configure failed with %d\n", rc);
+		goto fail;
+	}
+
+	/* Success: return with the ADC lock still held (see above). */
+	return rc;
+fail:
+	vadc->vadc_iadc_sync_lock = false;
+	qpnp_vadc_unlock(vadc);
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_iadc_sync_request);
+
+/*
+ * Complete a conversion started by qpnp_vadc_iadc_sync_request(): read
+ * the raw result, apply prescaling and the channel's scaling function,
+ * and release the ADC lock that the request call left held.
+ */
+int32_t qpnp_vadc_iadc_sync_complete_request(struct qpnp_vadc_chip *vadc,
+					enum qpnp_vadc_channels channel,
+						struct qpnp_vadc_result *result)
+{
+	int rc = 0, scale_type, amux_prescaling, dt_index = 0;
+
+	vadc->adc->amux_prop->amux_channel = channel;
+
+	/* Map the requested channel number to its device tree index. */
+	while ((vadc->adc->adc_channels[dt_index].channel_num
+		!= channel) && (dt_index < vadc->max_channels_available))
+		dt_index++;
+
+	/*
+	 * Fix: bound-check the search result (as qpnp_vadc_iadc_sync_request
+	 * does) — an unknown channel previously read adc_channels[] out of
+	 * bounds below.
+	 */
+	if (dt_index >= vadc->max_channels_available) {
+		pr_err("not a valid VADC channel\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	rc = qpnp_vadc_read_conversion_result(vadc, &result->adc_code);
+	if (rc) {
+		pr_err("qpnp vadc read adc code failed with %d\n", rc);
+		goto fail;
+	}
+
+	amux_prescaling =
+		vadc->adc->adc_channels[dt_index].chan_path_prescaling;
+
+	if (amux_prescaling >= PATH_SCALING_NONE) {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+		qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+	vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+		 qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+
+	scale_type = vadc->adc->adc_channels[dt_index].adc_scale_fn;
+	if (scale_type >= SCALE_NONE) {
+		rc = -EBADF;
+		goto fail;
+	}
+
+	/* Convert the raw code into the channel's physical units. */
+	vadc_scale_fn[scale_type].chan(vadc, result->adc_code,
+		vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
+
+fail:
+	vadc->vadc_iadc_sync_lock = false;
+	qpnp_vadc_unlock(vadc);
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_iadc_sync_complete_request);
+
+/*
+ * Program the low/high threshold registers (LSB then MSB for each) used
+ * by the measurement-interval monitor.  Register write order is kept
+ * as-is; returns 0 on success or the failing write's error code.
+ */
+static int32_t qpnp_vadc_thr_update(struct qpnp_vadc_chip *vadc,
+					int32_t high_thr, int32_t low_thr)
+{
+	int rc = 0;
+	u8 buf = 0;
+
+	pr_debug("client requested high:%d and low:%d\n",
+		high_thr, low_thr);
+
+	buf = QPNP_VADC_THR_LSB_MASK(low_thr);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_LOW_THR_LSB, &buf, 1);
+	if (rc < 0) {
+		pr_err("low threshold lsb setting failed, err:%d\n", rc);
+		return rc;
+	}
+
+	buf = QPNP_VADC_THR_MSB_MASK(low_thr);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_LOW_THR_MSB, &buf, 1);
+	if (rc < 0) {
+		pr_err("low threshold msb setting failed, err:%d\n", rc);
+		return rc;
+	}
+
+	buf = QPNP_VADC_THR_LSB_MASK(high_thr);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HIGH_THR_LSB, &buf, 1);
+	if (rc < 0) {
+		pr_err("high threshold lsb setting failed, err:%d\n", rc);
+		return rc;
+	}
+
+	buf = QPNP_VADC_THR_MSB_MASK(high_thr);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HIGH_THR_MSB, &buf, 1);
+	if (rc < 0) {
+		pr_err("high threshold msb setting failed, err:%d\n", rc);
+		return rc;
+	}
+
+	/* NOTE(review): duplicates the pr_debug at function entry. */
+	pr_debug("client requested high:%d and low:%d\n", high_thr, low_thr);
+
+	return rc;
+}
+
+/*
+ * Configure recurring (measurement-interval) monitoring of @param->channel
+ * with high/low thresholds derived from the client parameters.  The
+ * client's threshold_notification callback is invoked from the threshold
+ * ISR work when a crossing fires.
+ *
+ * Requires the qcom,vadc-meas-int-mode property; returns -EINVAL when the
+ * channel, scale, prescaling or timer interval is out of range.
+ */
+int32_t qpnp_vadc_channel_monitor(struct qpnp_vadc_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{
+	uint32_t channel, scale_type = 0;
+	uint32_t low_thr = 0, high_thr = 0;
+	int rc = 0, idx = 0, amux_prescaling = 0;
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(chip->dev);
+	u8 buf = 0;
+
+	if (qpnp_vadc_is_valid(vadc))
+		return -EPROBE_DEFER;
+
+	if (!vadc->state_copy->vadc_meas_int_enable) {
+		pr_err("Recurring measurement interval not available\n");
+		return -EINVAL;
+	}
+
+	if (param->threshold_notification == NULL) {
+		pr_debug("No notification for high/low temp??\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&vadc->adc->adc_lock);
+
+	/* Map the requested channel number to its device tree index. */
+	channel = param->channel;
+	while (idx < vadc->max_channels_available) {
+		if (vadc->adc->adc_channels[idx].channel_num == channel)
+			break;
+		idx++;
+	}
+
+	if (idx >= vadc->max_channels_available)  {
+		pr_err("not a valid VADC channel\n");
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	scale_type = vadc->adc->adc_channels[idx].adc_scale_fn;
+	if (scale_type >= SCALE_RVADC_SCALE_NONE) {
+		rc = -EBADF;
+		goto fail_unlock;
+	}
+
+	amux_prescaling =
+		vadc->adc->adc_channels[idx].chan_path_prescaling;
+
+	if (amux_prescaling >= PATH_SCALING_NONE) {
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+		qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+	vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+		 qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+	vadc->adc->amux_prop->chan_prop->calib_type =
+		vadc->adc->adc_channels[idx].calib_type;
+
+	pr_debug("channel:%d, scale_type:%d, dt_idx:%d",
+					channel, scale_type, idx);
+	vadc->adc->amux_prop->amux_channel = channel;
+	vadc->adc->amux_prop->decimation =
+			vadc->adc->adc_channels[idx].adc_decimation;
+	vadc->adc->amux_prop->hw_settle_time =
+			vadc->adc->adc_channels[idx].hw_settle_time;
+	vadc->adc->amux_prop->fast_avg_setup =
+			vadc->adc->adc_channels[idx].fast_avg_setup;
+	vadc->adc->amux_prop->mode_sel = ADC_OP_MEASUREMENT_INTERVAL;
+	/* Reverse-scale the client's physical thresholds into ADC codes. */
+	adc_vadc_rscale_fn[scale_type].chan(vadc,
+			vadc->adc->amux_prop->chan_prop, param,
+			&low_thr, &high_thr);
+
+	if (param->timer_interval >= ADC_MEAS1_INTERVAL_NONE) {
+		pr_err("Invalid timer interval :%d\n", param->timer_interval);
+		/*
+		 * Fix: rc was still 0 here, so an invalid interval returned
+		 * success without configuring the monitor at all.
+		 */
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	buf = param->timer_interval;
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MEAS_INTERVAL_CTL, &buf, 1);
+	if (rc) {
+		pr_err("vadc meas timer failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	rc = qpnp_vadc_thr_update(vadc, high_thr, low_thr);
+	if (rc) {
+		pr_err("vadc thr update failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+	if (rc) {
+		pr_err("vadc configure failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	/* Remember the monitored channel so the ISRs can restore it. */
+	vadc->state_copy->meas_int_mode = true;
+	vadc->state_copy->param = param;
+	vadc->state_copy->vadc_meas_amux.channel_num = channel;
+	vadc->state_copy->vadc_meas_amux.adc_decimation =
+				vadc->adc->amux_prop->decimation;
+	vadc->state_copy->vadc_meas_amux.hw_settle_time =
+				vadc->adc->amux_prop->hw_settle_time;
+	vadc->state_copy->vadc_meas_amux.fast_avg_setup =
+				vadc->adc->amux_prop->fast_avg_setup;
+	vadc->state_copy->meas_int_request_in_queue = false;
+	dev_set_drvdata(vadc->dev, vadc);
+
+fail_unlock:
+	mutex_unlock(&vadc->adc->adc_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_channel_monitor);
+
+/*
+ * Stop the recurring measurement-interval monitor started by
+ * qpnp_vadc_channel_monitor(): clear the mode-state flags, put the VADC
+ * back into single-measurement (normal) mode and disable it.
+ */
+int32_t qpnp_vadc_end_channel_monitor(struct qpnp_vadc_chip *chip)
+{
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(chip->dev);
+	u8 mode_ctl = 0;
+
+	if (qpnp_vadc_is_valid(vadc))
+		return -EPROBE_DEFER;
+
+	if (!vadc->state_copy->vadc_meas_int_enable) {
+		pr_err("Recurring measurement interval not available\n");
+		return -EINVAL;
+	}
+
+	vadc->state_copy->meas_int_mode = false;
+	vadc->state_copy->meas_int_request_in_queue = false;
+	dev_set_drvdata(vadc->dev, vadc);
+	mode_ctl = ADC_OP_NORMAL_MODE;
+	/* Set measurement in single measurement mode */
+	qpnp_vadc_mode_select(vadc, mode_ctl);
+	qpnp_vadc_enable(vadc, false);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_vadc_end_channel_monitor);
+
+/*
+ * sysfs/hwmon show handler: read the channel stored in the sensor
+ * attribute's index and print "Result:<physical> Raw:<code>".
+ *
+ * NOTE(review): on a read failure this returns 0 (an empty read) rather
+ * than a negative errno — presumably intentional for hwmon consumers, but
+ * worth confirming.
+ */
+static ssize_t qpnp_adc_show(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(dev);
+	struct qpnp_vadc_result result;
+	int rc = -1;
+
+	rc = qpnp_vadc_read(vadc, attr->index, &result);
+
+	if (rc) {
+		pr_err("VADC read error with %d\n", rc);
+		return 0;
+	}
+
+	return snprintf(buf, QPNP_ADC_HWMON_NAME_LENGTH,
+		"Result:%lld Raw:%x\n", result.physical, result.adc_code);
+}
+
+/*
+ * Template attribute; qpnp_vadc_init_hwmon() fills in the per-channel
+ * name/index and copies it into each vadc->sens_attr[] slot (probe-time
+ * only, so the shared mutable template is not a concurrency issue).
+ */
+static struct sensor_device_attribute qpnp_adc_attr =
+	SENSOR_ATTR(NULL, 0444, qpnp_adc_show, NULL, 0);
+
+/*
+ * Create one sysfs sensor attribute per DT child (channel), backed by
+ * qpnp_adc_show().  On failure, files created so far are left for the
+ * caller (probe's err_setup path) to remove.
+ */
+static int32_t qpnp_vadc_init_hwmon(struct qpnp_vadc_chip *vadc,
+					struct platform_device *pdev)
+{
+	struct device_node *child;
+	struct device_node *node = pdev->dev.of_node;
+	int rc = 0, i = 0, channel;
+
+	for_each_child_of_node(node, child) {
+		/* Fill the shared template, then copy it per channel. */
+		channel = vadc->adc->adc_channels[i].channel_num;
+		qpnp_adc_attr.index = vadc->adc->adc_channels[i].channel_num;
+		qpnp_adc_attr.dev_attr.attr.name =
+						vadc->adc->adc_channels[i].name;
+		memcpy(&vadc->sens_attr[i], &qpnp_adc_attr,
+						sizeof(qpnp_adc_attr));
+		sysfs_attr_init(&vadc->sens_attr[i].dev_attr.attr);
+		rc = device_create_file(&pdev->dev,
+				&vadc->sens_attr[i].dev_attr);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"device_create_file failed for dev %s\n",
+				vadc->adc->adc_channels[i].name);
+			goto hwmon_err_sens;
+		}
+		i++;
+	}
+
+	return 0;
+hwmon_err_sens:
+	pr_err("Init HWMON failed for qpnp_adc with %d\n", rc);
+	return rc;
+}
+
+/*
+ * Thermal zone get_temp callback: read the zone's VADC channel and
+ * report its scaled physical value as the temperature.
+ */
+static int qpnp_vadc_get_temp(struct thermal_zone_device *thermal,
+			     int *temp)
+{
+	struct qpnp_vadc_thermal_data *therm_chan = thermal->devdata;
+	struct qpnp_vadc_result res;
+	int ret;
+
+	ret = qpnp_vadc_read(therm_chan->vadc_dev, therm_chan->vadc_channel,
+									&res);
+	if (ret) {
+		/* Probe deferral is expected early in boot; stay quiet. */
+		if (ret != -EPROBE_DEFER)
+			pr_err("VADC read error with %d\n", ret);
+		return ret;
+	}
+
+	*temp = res.physical;
+
+	return ret;
+}
+
+/* Thermal zone ops for channels registered via qpnp_vadc_init_thermal(). */
+static struct thermal_zone_device_ops qpnp_vadc_thermal_ops = {
+	.get_temp = qpnp_vadc_get_temp,
+};
+
+/*
+ * Register a thermal zone for every DT child channel marked with
+ * qcom,vadc-thermal-node.  Returns 0 on success or a negative errno.
+ */
+static int32_t qpnp_vadc_init_thermal(struct qpnp_vadc_chip *vadc,
+					struct platform_device *pdev)
+{
+	struct device_node *child;
+	struct device_node *node = pdev->dev.of_node;
+	int rc = 0, i = 0;
+	bool thermal_node = false;
+
+	if (node == NULL)
+		goto thermal_err_sens;
+	for_each_child_of_node(node, child) {
+		char name[QPNP_THERMALNODE_NAME_LENGTH];
+
+		vadc->vadc_therm_chan[i].vadc_channel =
+			vadc->adc->adc_channels[i].channel_num;
+		vadc->vadc_therm_chan[i].thermal_chan = i;
+		thermal_node = of_property_read_bool(child,
+					"qcom,vadc-thermal-node");
+		if (thermal_node) {
+			/* Register with the thermal zone */
+			vadc->vadc_therm_chan[i].thermal_node = true;
+			snprintf(name, sizeof(name), "%s",
+				vadc->adc->adc_channels[i].name);
+			vadc->vadc_therm_chan[i].vadc_dev = vadc;
+			vadc->vadc_therm_chan[i].tz_dev =
+				thermal_zone_device_register(name,
+				0, 0, &vadc->vadc_therm_chan[i],
+				&qpnp_vadc_thermal_ops, NULL, 0, 0);
+			if (IS_ERR(vadc->vadc_therm_chan[i].tz_dev)) {
+				pr_err("thermal device register failed.\n");
+				/*
+				 * Fix: rc stayed 0 here, so registration
+				 * failures were logged but reported as
+				 * success to the probe path.
+				 */
+				rc = PTR_ERR(vadc->vadc_therm_chan[i].tz_dev);
+				goto thermal_err_sens;
+			}
+		}
+		i++;
+		thermal_node = false;
+	}
+	return 0;
+thermal_err_sens:
+	pr_err("Init HWMON failed for qpnp_adc with %d\n", rc);
+	return rc;
+}
+
+/* DT match: legacy VADC and the refreshed "HC" peripheral (see probe). */
+static const struct of_device_id qpnp_vadc_match_table[] = {
+	{	.compatible = "qcom,qpnp-vadc",
+	},
+	{	.compatible = "qcom,qpnp-vadc-hc",
+	},
+	{}
+};
+
+/*
+ * Probe: allocate the chip (with trailing per-channel sysfs attribute
+ * storage), parse DT channel data, register hwmon and thermal zones,
+ * read fab/revision IDs, configure warm reset, then request the EOC
+ * IRQ (or enable polling) and the optional measurement-interval
+ * threshold IRQs before adding the device to the global list.
+ */
+static int qpnp_vadc_probe(struct platform_device *pdev)
+{
+	struct qpnp_vadc_chip *vadc;
+	struct qpnp_adc_drv *adc_qpnp;
+	struct qpnp_vadc_thermal_data *adc_thermal;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *child;
+	const struct of_device_id *id;
+	int rc, count_adc_channel_list = 0, i = 0;
+	u8 fab_id = 0;
+
+	for_each_child_of_node(node, child)
+		count_adc_channel_list++;
+
+	if (!count_adc_channel_list) {
+		pr_err("No channel listing\n");
+		return -EINVAL;
+	}
+
+	id = of_match_node(qpnp_vadc_match_table, node);
+	if (id == NULL) {
+		pr_err("qpnp_vadc_match of_node prop not present\n");
+		return -ENODEV;
+	}
+
+	/* One sensor attribute slot per channel, allocated inline. */
+	vadc = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_vadc_chip) +
+		(sizeof(struct sensor_device_attribute) *
+				count_adc_channel_list), GFP_KERNEL);
+	if (!vadc) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	vadc->dev = &(pdev->dev);
+	adc_qpnp = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_adc_drv),
+			GFP_KERNEL);
+	if (!adc_qpnp)
+		return -ENOMEM;
+
+	adc_qpnp->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!adc_qpnp->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	vadc->state_copy = devm_kzalloc(&pdev->dev,
+			sizeof(struct qpnp_vadc_mode_state), GFP_KERNEL);
+	if (!vadc->state_copy)
+		return -ENOMEM;
+
+	vadc->adc = adc_qpnp;
+	adc_thermal = devm_kzalloc(&pdev->dev,
+			(sizeof(struct qpnp_vadc_thermal_data) *
+				count_adc_channel_list), GFP_KERNEL);
+	if (!adc_thermal) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	vadc->vadc_therm_chan = adc_thermal;
+	if (!strcmp(id->compatible, "qcom,qpnp-vadc-hc")) {
+		vadc->vadc_hc = true;
+		vadc->adc->adc_hc = true;
+	}
+
+	rc = qpnp_adc_get_devicetree_data(pdev, vadc->adc);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to read device tree\n");
+		return rc;
+	}
+	mutex_init(&vadc->adc->adc_lock);
+
+	rc = qpnp_vadc_init_hwmon(vadc, pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to initialize qpnp hwmon adc\n");
+		return rc;
+	}
+	/* NOTE(review): return value not checked — may hold ERR_PTR. */
+	vadc->vadc_hwmon = hwmon_device_register(&vadc->adc->pdev->dev);
+	rc = qpnp_vadc_init_thermal(vadc, pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to initialize qpnp thermal adc\n");
+		return rc;
+	}
+	vadc->vadc_init_calib = false;
+	vadc->max_channels_available = count_adc_channel_list;
+	rc = qpnp_vadc_read_reg(vadc, QPNP_INT_TEST_VAL, &fab_id, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc comp id failed with %d\n", rc);
+		goto err_setup;
+	}
+	vadc->id = fab_id;
+	pr_debug("fab_id = %d\n", fab_id);
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_REVISION2,
+				&vadc->revision_dig_major, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc dig_major rev read failed with %d\n", rc);
+		goto err_setup;
+	}
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_REVISION3,
+				&vadc->revision_ana_minor, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc ana_minor rev read failed with %d\n", rc);
+		goto err_setup;
+	}
+
+	rc = qpnp_vadc_warm_rst_configure(vadc);
+	if (rc < 0) {
+		pr_err("Setting perp reset on warm reset failed %d\n", rc);
+		goto err_setup;
+	}
+
+	INIT_WORK(&vadc->trigger_completion_work, qpnp_vadc_work);
+
+	vadc->vadc_recalib_check = of_property_read_bool(node,
+						"qcom,vadc-recalib-check");
+
+	/* Polling mode avoids the EOC IRQ; otherwise request it wakeable. */
+	vadc->vadc_poll_eoc = of_property_read_bool(node,
+						"qcom,vadc-poll-eoc");
+	if (!vadc->vadc_poll_eoc) {
+		rc = devm_request_irq(&pdev->dev, vadc->adc->adc_irq_eoc,
+				qpnp_vadc_isr, IRQF_TRIGGER_RISING,
+				"qpnp_vadc_interrupt", vadc);
+		if (rc) {
+			dev_err(&pdev->dev,
+			"failed to request adc irq with error %d\n", rc);
+			goto err_setup;
+		} else {
+			enable_irq_wake(vadc->adc->adc_irq_eoc);
+		}
+	} else
+		device_init_wakeup(vadc->dev, 1);
+
+	/* Optional recurring-measurement mode needs both threshold IRQs. */
+	vadc->state_copy->vadc_meas_int_enable = of_property_read_bool(node,
+						"qcom,vadc-meas-int-mode");
+	if (vadc->state_copy->vadc_meas_int_enable) {
+		vadc->adc->adc_high_thr_irq = platform_get_irq_byname(pdev,
+								      "high-thr-en-set");
+		if (vadc->adc->adc_high_thr_irq < 0) {
+			pr_err("Invalid irq\n");
+			rc = -ENXIO;
+			goto err_setup;
+		}
+
+		vadc->adc->adc_low_thr_irq = platform_get_irq_byname(pdev,
+								     "low-thr-en-set");
+		if (vadc->adc->adc_low_thr_irq < 0) {
+			pr_err("Invalid irq\n");
+			rc = -ENXIO;
+			goto err_setup;
+		}
+
+		rc = devm_request_irq(&pdev->dev, vadc->adc->adc_high_thr_irq,
+					qpnp_vadc_high_thr_isr,
+			IRQF_TRIGGER_RISING, "qpnp_vadc_high_interrupt", vadc);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request adc irq\n");
+			goto err_setup;
+		} else {
+			enable_irq_wake(vadc->adc->adc_high_thr_irq);
+		}
+
+		rc = devm_request_irq(&pdev->dev, vadc->adc->adc_low_thr_irq,
+					qpnp_vadc_low_thr_isr,
+			IRQF_TRIGGER_RISING, "qpnp_vadc_low_interrupt", vadc);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request adc irq\n");
+			goto err_setup;
+		} else {
+			enable_irq_wake(vadc->adc->adc_low_thr_irq);
+		}
+		INIT_WORK(&vadc->trigger_high_thr_work,
+						qpnp_vadc_high_thr_fn);
+		INIT_WORK(&vadc->trigger_low_thr_work, qpnp_vadc_low_thr_fn);
+	}
+
+	vadc->vadc_iadc_sync_lock = false;
+	dev_set_drvdata(&pdev->dev, vadc);
+	list_add(&vadc->list, &qpnp_vadc_device_list);
+
+	return 0;
+
+err_setup:
+	/* Undo hwmon/thermal registration done above (mirrors remove()). */
+	for_each_child_of_node(node, child) {
+		device_remove_file(&pdev->dev, &vadc->sens_attr[i].dev_attr);
+		if (vadc->vadc_therm_chan[i].thermal_node)
+			thermal_zone_device_unregister(
+					vadc->vadc_therm_chan[i].tz_dev);
+		i++;
+	}
+	hwmon_device_unregister(vadc->vadc_hwmon);
+
+	return rc;
+}
+
+/*
+ * Remove: tear down per-channel sysfs files and thermal zones, the
+ * hwmon device, drop this chip from the global list and release the
+ * HKADC regulator if it was acquired.
+ */
+static int qpnp_vadc_remove(struct platform_device *pdev)
+{
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(&pdev->dev);
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *child;
+	int i = 0;
+
+	for_each_child_of_node(node, child) {
+		device_remove_file(&pdev->dev, &vadc->sens_attr[i].dev_attr);
+		if (vadc->vadc_therm_chan[i].thermal_node)
+			thermal_zone_device_unregister(
+					vadc->vadc_therm_chan[i].tz_dev);
+		i++;
+	}
+	hwmon_device_unregister(vadc->vadc_hwmon);
+	list_del(&vadc->list);
+	if (vadc->adc->hkadc_ldo && vadc->adc->hkadc_ldo_ok)
+		qpnp_adc_free_voltage_resource(vadc->adc);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+/*
+ * noirq suspend hook: allow suspend while the measurement-interval mode
+ * is running, but in poll-EOC mode refuse (-EBUSY) if a conversion is
+ * still pending, since no IRQ would complete it.
+ *
+ * NOTE(review): the STATUS1 read result is not checked; on a bus error
+ * `status` stays 0 and suspend proceeds — confirm that is acceptable.
+ */
+static int qpnp_vadc_suspend_noirq(struct device *dev)
+{
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(dev);
+	u8 status = 0;
+
+	qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status, 1);
+	if (((status & QPNP_VADC_STATUS1_OP_MODE_MASK) >>
+		QPNP_VADC_OP_MODE_SHIFT) == QPNP_VADC_MEAS_INT_MODE) {
+		pr_debug("Meas interval in progress\n");
+	} else if (vadc->vadc_poll_eoc) {
+		status &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+		pr_debug("vadc conversion status=%d\n", status);
+		if (status != QPNP_VADC_STATUS1_EOC) {
+			pr_err(
+				"Aborting suspend, adc conversion requested while suspending\n");
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+/* Only a noirq suspend hook is needed; resume requires no re-init. */
+static const struct dev_pm_ops qpnp_vadc_pm_ops = {
+	.suspend_noirq	= qpnp_vadc_suspend_noirq,
+};
+
+static struct platform_driver qpnp_vadc_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-vadc",
+		.of_match_table	= qpnp_vadc_match_table,
+		.pm		= &qpnp_vadc_pm_ops,
+	},
+	.probe		= qpnp_vadc_probe,
+	.remove		= qpnp_vadc_remove,
+};
+
+/* Module entry: register the platform driver. */
+static int __init qpnp_vadc_init(void)
+{
+	return platform_driver_register(&qpnp_vadc_driver);
+}
+module_init(qpnp_vadc_init);
+
+/* Module exit: unregister the platform driver. */
+static void __exit qpnp_vadc_exit(void)
+{
+	platform_driver_unregister(&qpnp_vadc_driver);
+}
+module_exit(qpnp_vadc_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC Voltage ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 2efd8d0..00d99e2 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -417,6 +417,21 @@
 	  die temperature, battery voltage, battery current, input voltage,
 	  input current, and OTG current.
 
+config QCOM_RRADC
+	tristate "Qualcomm Technologies Inc. PMIC Round robin ADC"
+	depends on SPMI
+	select REGMAP_SPMI
+	help
+	  This is the PMIC Round Robin ADC driver.
+
+	  The driver supports multiple channels read used for telemetry
+	  and supports clients to read batt_id, batt_therm, PMIC die
+	  temperature, USB_IN and DC_IN voltage and current.
+	  The RRADC is a 10-bit ADC.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called qcom-rradc.
+
 config ROCKCHIP_SARADC
 	tristate "Rockchip SARADC driver"
 	depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 1da80bd..8c7f822 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -40,6 +40,7 @@
 obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
 obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
 obj-$(CONFIG_QCOM_TADC) += qcom-tadc.o
+obj-$(CONFIG_QCOM_RRADC) += qcom-rradc.o
 obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
 obj-$(CONFIG_STX104) += stx104.o
 obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
new file mode 100644
index 0000000..302cf14
--- /dev/null
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -0,0 +1,998 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "RRADC: %s: " fmt, __func__
+
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/qpnp/qpnp-revid.h>
+
+#define FG_ADC_RR_EN_CTL			0x46
+#define FG_ADC_RR_SKIN_TEMP_LSB			0x50
+#define FG_ADC_RR_SKIN_TEMP_MSB			0x51
+#define FG_ADC_RR_RR_ADC_CTL			0x52
+#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK	0x8
+#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL	BIT(3)
+#define FG_ADC_RR_ADC_LOG			0x53
+#define FG_ADC_RR_ADC_LOG_CLR_CTRL		BIT(0)
+
+#define FG_ADC_RR_FAKE_BATT_LOW_LSB		0x58
+#define FG_ADC_RR_FAKE_BATT_LOW_MSB		0x59
+#define FG_ADC_RR_FAKE_BATT_HIGH_LSB		0x5A
+#define FG_ADC_RR_FAKE_BATT_HIGH_MSB		0x5B
+
+#define FG_ADC_RR_BATT_ID_CTRL			0x60
+#define FG_ADC_RR_BATT_ID_TRIGGER		0x61
+#define FG_ADC_RR_BATT_ID_TRIGGER_CTL		BIT(0)
+#define FG_ADC_RR_BATT_ID_STS			0x62
+#define FG_ADC_RR_BATT_ID_CFG			0x63
+#define FG_ADC_RR_BATT_ID_5_LSB			0x66
+#define FG_ADC_RR_BATT_ID_5_MSB			0x67
+#define FG_ADC_RR_BATT_ID_15_LSB		0x68
+#define FG_ADC_RR_BATT_ID_15_MSB		0x69
+#define FG_ADC_RR_BATT_ID_150_LSB		0x6A
+#define FG_ADC_RR_BATT_ID_150_MSB		0x6B
+
+#define FG_ADC_RR_BATT_THERM_CTRL		0x70
+#define FG_ADC_RR_BATT_THERM_TRIGGER		0x71
+#define FG_ADC_RR_BATT_THERM_STS		0x72
+#define FG_ADC_RR_BATT_THERM_CFG		0x73
+#define FG_ADC_RR_BATT_THERM_LSB		0x74
+#define FG_ADC_RR_BATT_THERM_MSB		0x75
+#define FG_ADC_RR_BATT_THERM_FREQ		0x76
+
+#define FG_ADC_RR_AUX_THERM_CTRL		0x80
+#define FG_ADC_RR_AUX_THERM_TRIGGER		0x81
+#define FG_ADC_RR_AUX_THERM_STS			0x82
+#define FG_ADC_RR_AUX_THERM_CFG			0x83
+#define FG_ADC_RR_AUX_THERM_LSB			0x84
+#define FG_ADC_RR_AUX_THERM_MSB			0x85
+
+#define FG_ADC_RR_SKIN_HOT			0x86
+#define FG_ADC_RR_SKIN_TOO_HOT			0x87
+
+#define FG_ADC_RR_AUX_THERM_C1			0x88
+#define FG_ADC_RR_AUX_THERM_C2			0x89
+#define FG_ADC_RR_AUX_THERM_C3			0x8A
+#define FG_ADC_RR_AUX_THERM_HALF_RANGE		0x8B
+
+#define FG_ADC_RR_USB_IN_V_CTRL			0x90
+#define FG_ADC_RR_USB_IN_V_TRIGGER		0x91
+#define FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK	0x80
+#define FG_ADC_RR_USB_IN_V_EVERY_CYCLE		BIT(7)
+#define FG_ADC_RR_USB_IN_V_STS			0x92
+#define FG_ADC_RR_USB_IN_V_LSB			0x94
+#define FG_ADC_RR_USB_IN_V_MSB			0x95
+#define FG_ADC_RR_USB_IN_I_CTRL			0x98
+#define FG_ADC_RR_USB_IN_I_TRIGGER		0x99
+#define FG_ADC_RR_USB_IN_I_STS			0x9A
+#define FG_ADC_RR_USB_IN_I_LSB			0x9C
+#define FG_ADC_RR_USB_IN_I_MSB			0x9D
+
+#define FG_ADC_RR_DC_IN_V_CTRL			0xA0
+#define FG_ADC_RR_DC_IN_V_TRIGGER		0xA1
+#define FG_ADC_RR_DC_IN_V_STS			0xA2
+#define FG_ADC_RR_DC_IN_V_LSB			0xA4
+#define FG_ADC_RR_DC_IN_V_MSB			0xA5
+#define FG_ADC_RR_DC_IN_I_CTRL			0xA8
+#define FG_ADC_RR_DC_IN_I_TRIGGER		0xA9
+#define FG_ADC_RR_DC_IN_I_STS			0xAA
+#define FG_ADC_RR_DC_IN_I_LSB			0xAC
+#define FG_ADC_RR_DC_IN_I_MSB			0xAD
+
+#define FG_ADC_RR_PMI_DIE_TEMP_CTRL		0xB0
+#define FG_ADC_RR_PMI_DIE_TEMP_TRIGGER		0xB1
+#define FG_ADC_RR_PMI_DIE_TEMP_STS		0xB2
+#define FG_ADC_RR_PMI_DIE_TEMP_CFG		0xB3
+#define FG_ADC_RR_PMI_DIE_TEMP_LSB		0xB4
+#define FG_ADC_RR_PMI_DIE_TEMP_MSB		0xB5
+
+#define FG_ADC_RR_CHARGER_TEMP_CTRL		0xB8
+#define FG_ADC_RR_CHARGER_TEMP_TRIGGER		0xB9
+#define FG_ADC_RR_CHARGER_TEMP_STS		0xBA
+#define FG_ADC_RR_CHARGER_TEMP_CFG		0xBB
+#define FG_ADC_RR_CHARGER_TEMP_LSB		0xBC
+#define FG_ADC_RR_CHARGER_TEMP_MSB		0xBD
+#define FG_ADC_RR_CHARGER_HOT			0xBE
+#define FG_ADC_RR_CHARGER_TOO_HOT		0xBF
+
+#define FG_ADC_RR_GPIO_CTRL			0xC0
+#define FG_ADC_RR_GPIO_TRIGGER			0xC1
+#define FG_ADC_RR_GPIO_STS			0xC2
+#define FG_ADC_RR_GPIO_LSB			0xC4
+#define FG_ADC_RR_GPIO_MSB			0xC5
+
+#define FG_ADC_RR_ATEST_CTRL			0xC8
+#define FG_ADC_RR_ATEST_TRIGGER			0xC9
+#define FG_ADC_RR_ATEST_STS			0xCA
+#define FG_ADC_RR_ATEST_LSB			0xCC
+#define FG_ADC_RR_ATEST_MSB			0xCD
+#define FG_ADC_RR_SEC_ACCESS			0xD0
+
+#define FG_ADC_RR_PERPH_RESET_CTL2		0xD9
+#define FG_ADC_RR_PERPH_RESET_CTL3		0xDA
+#define FG_ADC_RR_PERPH_RESET_CTL4		0xDB
+#define FG_ADC_RR_INT_TEST1			0xE0
+#define FG_ADC_RR_INT_TEST_VAL			0xE1
+
+#define FG_ADC_RR_TM_TRIGGER_CTRLS		0xE2
+#define FG_ADC_RR_TM_ADC_CTRLS			0xE3
+#define FG_ADC_RR_TM_CNL_CTRL			0xE4
+#define FG_ADC_RR_TM_BATT_ID_CTRL		0xE5
+#define FG_ADC_RR_TM_THERM_CTRL			0xE6
+#define FG_ADC_RR_TM_CONV_STS			0xE7
+#define FG_ADC_RR_TM_ADC_READ_LSB		0xE8
+#define FG_ADC_RR_TM_ADC_READ_MSB		0xE9
+#define FG_ADC_RR_TM_ATEST_MUX_1		0xEA
+#define FG_ADC_RR_TM_ATEST_MUX_2		0xEB
+#define FG_ADC_RR_TM_REFERENCES			0xED
+#define FG_ADC_RR_TM_MISC_CTL			0xEE
+#define FG_ADC_RR_TM_RR_CTRL			0xEF
+
+#define FG_ADC_RR_BATT_ID_5_MA			5
+#define FG_ADC_RR_BATT_ID_15_MA			15
+#define FG_ADC_RR_BATT_ID_150_MA		150
+#define FG_ADC_RR_BATT_ID_RANGE			820
+
+#define FG_ADC_BITS				10
+#define FG_MAX_ADC_READINGS			(1 << FG_ADC_BITS)
+#define FG_ADC_RR_FS_VOLTAGE_MV			2500
+
+/* BATT_THERM 0.25K/LSB */
+#define FG_ADC_RR_BATT_THERM_LSB_K		4
+
+#define FG_ADC_RR_TEMP_FS_VOLTAGE_NUM		5000000
+#define FG_ADC_RR_TEMP_FS_VOLTAGE_DEN		3
+#define FG_ADC_RR_DIE_TEMP_OFFSET		601400
+#define FG_ADC_RR_DIE_TEMP_SLOPE		2
+#define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC	25000
+
+#define FAB_ID_GF				0x30
+#define FAB_ID_SMIC				0x11
+#define FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV		1303168
+#define FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C	3784
+#define FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV	1338433
+#define FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C	3655
+#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC	25000
+#define FG_ADC_RR_CHG_THRESHOLD_SCALE		4
+
+#define FG_ADC_RR_VOLT_INPUT_FACTOR		8
+#define FG_ADC_RR_CURR_INPUT_FACTOR		2000
+#define FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL	1886
+#define FG_ADC_SCALE_MILLI_FACTOR		1000
+#define FG_ADC_KELVINMIL_CELSIUSMIL		273150
+
+#define FG_ADC_RR_GPIO_FS_RANGE			5000
+#define FG_RR_ADC_COHERENT_CHECK_RETRY		5
+#define FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN	16
+#define FG_RR_ADC_STS_CHANNEL_READING_MASK	0x3
+#define FG_RR_ADC_STS_CHANNEL_STS		0x2
+
+#define FG_RR_CONV_CONTINUOUS_TIME_MIN_US	50000
+#define FG_RR_CONV_CONTINUOUS_TIME_MAX_US	51000
+#define FG_RR_CONV_MAX_RETRY_CNT		50
+
+/*
+ * The channel number is not a physical index in hardware,
+ * rather it's a list of supported channels and an index to
+ * select the respective channel properties such as scaling
+ * the result. Add any new additional channels supported by
+ * the RR ADC before RR_ADC_MAX.
+ *
+ * NOTE: these values index both chip->chan_props[] and the
+ * rradc_chans[] table below; the two orderings must stay in sync.
+ */
+enum rradc_channel_id {
+	RR_ADC_BATT_ID = 0,
+	RR_ADC_BATT_THERM,
+	RR_ADC_SKIN_TEMP,
+	RR_ADC_USBIN_I,
+	RR_ADC_USBIN_V,
+	RR_ADC_DCIN_I,
+	RR_ADC_DCIN_V,
+	RR_ADC_DIE_TEMP,
+	RR_ADC_CHG_TEMP,
+	RR_ADC_GPIO,
+	RR_ADC_CHG_HOT_TEMP,
+	RR_ADC_CHG_TOO_HOT_TEMP,
+	RR_ADC_SKIN_HOT_TEMP,
+	RR_ADC_SKIN_TOO_HOT_TEMP,
+	RR_ADC_MAX
+};
+
+/*
+ * Per-device driver state, stored as iio_priv() of the IIO device.
+ * @lock serializes conversions in rradc_do_conversion().
+ * @base is the peripheral base address read from the "reg" DT property;
+ * all register offsets above are relative to it.
+ */
+struct rradc_chip {
+	struct device			*dev;
+	struct mutex			lock;
+	struct regmap			*regmap;
+	u16				base;
+	struct iio_chan_spec		*iio_chans;
+	unsigned int			nchannels;
+	struct rradc_chan_prop		*chan_props;
+	/* phandle target of "qcom,pmic-revid"; NULL if property absent */
+	struct device_node		*revid_dev_node;
+	/* fab-id data used by the charger-temp scaling functions */
+	struct pmic_revid_data		*pmic_fab_id;
+};
+
+/*
+ * Static per-channel description: data register pair (lsb/msb), status
+ * register, IIO channel type/mask, and the raw-code scaling callback.
+ */
+struct rradc_channels {
+	const char			*datasheet_name;
+	enum iio_chan_type		type;
+	long				info_mask;
+	u8				lsb;
+	u8				msb;
+	u8				sts;
+	int (*scale)(struct rradc_chip *chip, struct rradc_chan_prop *prop,
+					u16 adc_code, int *result);
+};
+
+/*
+ * Runtime per-channel state.  @channel_data is private channel data;
+ * for BATT_ID it records which bias current (5/15/150 mA) produced the
+ * reading so the scaler can convert the code to ohms.
+ */
+struct rradc_chan_prop {
+	enum rradc_channel_id		channel;
+	uint32_t			channel_data;
+	int (*scale)(struct rradc_chip *chip, struct rradc_chan_prop *prop,
+					u16 adc_code, int *result);
+};
+
+/*
+ * Read-modify-write the bits in @mask of the register at base + @offset
+ * to @val.  Returns 0 on success or the regmap error code.
+ * NOTE(review): the early return and the final return both yield rc;
+ * the if-body's return is redundant (kept for the error print only).
+ */
+static int rradc_masked_write(struct rradc_chip *rr_adc, u16 offset, u8 mask,
+						u8 val)
+{
+	int rc;
+
+	rc = regmap_update_bits(rr_adc->regmap, rr_adc->base + offset,
+								mask, val);
+	if (rc) {
+		pr_err("spmi write failed: addr=%03X, rc=%d\n", offset, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * Bulk-read @len bytes at base + @offset into @data, with a coherency
+ * check: the same registers are read twice and compared; on mismatch
+ * the pair of reads is retried up to FG_RR_ADC_COHERENT_CHECK_RETRY
+ * times.  Guards against the RR ADC updating a multi-byte result
+ * between the two register reads.
+ *
+ * NOTE(review): if all retries see a mismatch, only an error is logged
+ * and rc (0) is returned, so the caller receives the last -- possibly
+ * incoherent -- reading as success.  Confirm this best-effort behavior
+ * is intended.
+ */
+static int rradc_read(struct rradc_chip *rr_adc, u16 offset, u8 *data, int len)
+{
+	int rc = 0, retry_cnt = 0, i = 0;
+	u8 data_check[FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN];
+	bool coherent_err = false;
+
+	if (len > FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN) {
+		pr_err("Increase the buffer length\n");
+		return -EINVAL;
+	}
+
+	while (retry_cnt < FG_RR_ADC_COHERENT_CHECK_RETRY) {
+		rc = regmap_bulk_read(rr_adc->regmap, rr_adc->base + offset,
+							data, len);
+		if (rc < 0) {
+			pr_err("rr_adc reg 0x%x failed :%d\n", offset, rc);
+			return rc;
+		}
+
+		/* second read of the same range for the coherency compare */
+		rc = regmap_bulk_read(rr_adc->regmap, rr_adc->base + offset,
+							data_check, len);
+		if (rc < 0) {
+			pr_err("rr_adc reg 0x%x failed :%d\n", offset, rc);
+			return rc;
+		}
+
+		for (i = 0; i < len; i++) {
+			if (data[i] != data_check[i])
+				coherent_err = true;
+		}
+
+		if (coherent_err) {
+			retry_cnt++;
+			coherent_err = false;
+			pr_debug("retry_cnt:%d\n", retry_cnt);
+		} else {
+			break;
+		}
+	}
+
+	if (retry_cnt == FG_RR_ADC_COHERENT_CHECK_RETRY)
+		pr_err("Retry exceeded for coherrency check\n");
+
+	return rc;
+}
+
+/*
+ * Convert a BATT_ID code to ohms.
+ * V = code * 2.5V / 1024; R = V / I, where I (mA) is the bias current
+ * recorded in prop->channel_data by rradc_do_conversion().  The final
+ * multiply by 1000 converts mV/mA to ohms.
+ */
+static int rradc_post_process_batt_id(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_ohms)
+{
+	uint32_t current_value;
+	int64_t r_id;
+
+	current_value = prop->channel_data;
+	r_id = ((int64_t)adc_code * FG_ADC_RR_FS_VOLTAGE_MV);
+	r_id = div64_s64(r_id, (FG_MAX_ADC_READINGS * current_value));
+	*result_ohms = (r_id * FG_ADC_SCALE_MILLI_FACTOR);
+
+	return 0;
+}
+
+/*
+ * Convert a thermistor code (0.25 K/LSB) to milli-degrees Celsius:
+ * milliC = (code / 4) * 1000 - 273150.
+ */
+static int rradc_post_process_therm(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_millidegc)
+{
+	int64_t temp;
+
+	/* K = code/4 */
+	temp = div64_s64(adc_code, FG_ADC_RR_BATT_THERM_LSB_K);
+	temp *= FG_ADC_SCALE_MILLI_FACTOR;
+	*result_millidegc = temp - FG_ADC_KELVINMIL_CELSIUSMIL;
+
+	return 0;
+}
+
+/*
+ * Convert a voltage-channel code to microvolts at the pin:
+ * uV = code * 8 (attenuation) * 2500 mV * 1000 / 1024.
+ */
+static int rradc_post_process_volt(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_uv)
+{
+	int64_t uv = 0;
+
+	/* 8x input attenuation; 2.5V ADC full scale */
+	uv = ((int64_t)adc_code * FG_ADC_RR_VOLT_INPUT_FACTOR);
+	uv *= (FG_ADC_RR_FS_VOLTAGE_MV * FG_ADC_SCALE_MILLI_FACTOR);
+	uv = div64_s64(uv, FG_MAX_ADC_READINGS);
+	*result_uv = uv;
+
+	return 0;
+}
+
+/*
+ * Convert a current-channel code to microamps.  The scale factor
+ * (mA per volt of ADC reading) differs between USBIN (1886) and the
+ * other current channels (2000); the trailing /1000 cancels the
+ * milli-scaled factor.
+ */
+static int rradc_post_process_curr(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_ua)
+{
+	int64_t ua = 0, scale = 0;
+
+	if (!prop)
+		return -EINVAL;
+
+	if (prop->channel == RR_ADC_USBIN_I)
+		scale = FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL;
+	else
+		scale = FG_ADC_RR_CURR_INPUT_FACTOR;
+
+	/* scale * V/A; 2.5V ADC full scale */
+	ua = ((int64_t)adc_code * scale);
+	ua *= (FG_ADC_RR_FS_VOLTAGE_MV * FG_ADC_SCALE_MILLI_FACTOR);
+	ua = div64_s64(ua, (FG_MAX_ADC_READINGS * 1000));
+	*result_ua = ua;
+
+	return 0;
+}
+
+/*
+ * Convert a PMI die-temperature code to milli-degrees Celsius:
+ * uV = code * (5000000/3) / 1024, then
+ * milliC = (uV - offset) / slope + 25000.
+ */
+static int rradc_post_process_die_temp(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_millidegc)
+{
+	int64_t temp = 0;
+
+	temp = ((int64_t)adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
+	temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+					FG_MAX_ADC_READINGS));
+	temp -= FG_ADC_RR_DIE_TEMP_OFFSET;
+	temp = div64_s64(temp, FG_ADC_RR_DIE_TEMP_SLOPE);
+	temp += FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC;
+	*result_millidegc = temp;
+
+	return 0;
+}
+
+/*
+ * Convert a charger hot/too-hot threshold code to milli-degrees C.
+ * Same formula as rradc_post_process_chg_temp() except the 8-bit
+ * threshold code is first multiplied by FG_ADC_RR_CHG_THRESHOLD_SCALE
+ * (the threshold registers store the code / 4).  The offset/slope
+ * coefficients depend on the silicon fab (GF vs SMIC), taken from the
+ * PMIC revid data; fails with -EINVAL when no revid node was provided
+ * or the fab id is unknown.
+ * NOTE(review): the fab-id switch duplicates the one in
+ * rradc_post_process_chg_temp() -- consider a shared helper.
+ */
+static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_millidegc)
+{
+	int64_t uv = 0, offset = 0, slope = 0;
+
+	if (chip->revid_dev_node) {
+		switch (chip->pmic_fab_id->fab_id) {
+		case FAB_ID_GF:
+			offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+			slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+			break;
+		case FAB_ID_SMIC:
+			offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+			slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		pr_err("No temperature scaling coefficients\n");
+		return -EINVAL;
+	}
+
+	uv = (int64_t) adc_code * FG_ADC_RR_CHG_THRESHOLD_SCALE;
+	uv = uv * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
+	uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+					FG_MAX_ADC_READINGS));
+	uv = offset - uv;
+	uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+	uv = uv + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+	*result_millidegc = uv;
+
+	return 0;
+}
+
+/*
+ * Convert a skin hot/too-hot threshold code to milli-degrees C:
+ * milliC = (code / 2 - 30) * 1000.
+ */
+static int rradc_post_process_skin_temp_hot(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_millidegc)
+{
+	int64_t temp = 0;
+
+	temp = (int64_t) adc_code;
+	temp = div64_s64(temp, 2);
+	temp = temp - 30;
+	temp *= FG_ADC_SCALE_MILLI_FACTOR;
+	*result_millidegc = temp;
+
+	return 0;
+}
+
+/*
+ * Convert a charger-temperature code to milli-degrees Celsius:
+ * uV = code * (5000000/3) / 1024, then
+ * milliC = (offset - uV) * 1000 / slope + 25000, with fab-specific
+ * (GF vs SMIC) offset/slope from the PMIC revid data.  Fails with
+ * -EINVAL when no revid node was provided or the fab id is unknown.
+ * NOTE(review): the fab-id switch duplicates the one in
+ * rradc_post_process_chg_temp_hot() -- consider a shared helper.
+ */
+static int rradc_post_process_chg_temp(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_millidegc)
+{
+	int64_t uv = 0, offset = 0, slope = 0;
+
+	if (chip->revid_dev_node) {
+		switch (chip->pmic_fab_id->fab_id) {
+		case FAB_ID_GF:
+			offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+			slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+			break;
+		case FAB_ID_SMIC:
+			offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+			slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		pr_err("No temperature scaling coefficients\n");
+		return -EINVAL;
+	}
+
+	uv = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
+	uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+					FG_MAX_ADC_READINGS));
+	uv = offset - uv;
+	uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+	uv += FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+	*result_millidegc = uv;
+
+	return 0;
+}
+
+/*
+ * Convert a GPIO-channel code to millivolts:
+ * mV = code * 5000 / 1024 (5V full scale, 10-bit ADC).
+ */
+static int rradc_post_process_gpio(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_mv)
+{
+	int64_t mv = 0;
+
+	/* 5V ADC full scale, 10 bit */
+	mv = ((int64_t)adc_code * FG_ADC_RR_GPIO_FS_RANGE);
+	mv = div64_s64(mv, FG_MAX_ADC_READINGS);
+	*result_mv = mv;
+
+	return 0;
+}
+
+/*
+ * Table-entry builder macros for rradc_chans[].  RR_ADC_CHAN emits a
+ * struct rradc_channels initializer (note the trailing comma inside
+ * the macro); the *_TEMP/_VOLT/_CURRENT/_RESISTANCE wrappers fix the
+ * IIO channel type and, except for TEMP, the info mask.
+ */
+#define RR_ADC_CHAN(_dname, _type, _mask, _scale, _lsb, _msb, _sts)	\
+	{								\
+		.datasheet_name = (_dname),				\
+		.type = _type,						\
+		.info_mask = _mask,					\
+		.scale = _scale,					\
+		.lsb = _lsb,						\
+		.msb = _msb,						\
+		.sts = _sts,						\
+	},								\
+
+#define RR_ADC_CHAN_TEMP(_dname, _scale, mask, _lsb, _msb, _sts)	\
+	RR_ADC_CHAN(_dname, IIO_TEMP,					\
+		mask,							\
+		_scale, _lsb, _msb, _sts)				\
+
+#define RR_ADC_CHAN_VOLT(_dname, _scale, _lsb, _msb, _sts)		\
+	RR_ADC_CHAN(_dname, IIO_VOLTAGE,				\
+		  BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
+		  _scale, _lsb, _msb, _sts)				\
+
+#define RR_ADC_CHAN_CURRENT(_dname, _scale, _lsb, _msb, _sts)		\
+	RR_ADC_CHAN(_dname, IIO_CURRENT,				\
+		  BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
+		  _scale, _lsb, _msb, _sts)				\
+
+#define RR_ADC_CHAN_RESISTANCE(_dname, _scale, _lsb, _msb, _sts)	\
+	RR_ADC_CHAN(_dname, IIO_RESISTANCE,				\
+		  BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
+		  _scale, _lsb, _msb, _sts)				\
+
+/*
+ * Channel table, indexed by enum rradc_channel_id -- entry order must
+ * match the enum exactly.
+ * NOTE(review): the batt_id entry passes the scale function without
+ * '&' while all others use '&fn'; both are equivalent in C, but one
+ * style should be used consistently.
+ */
+static const struct rradc_channels rradc_chans[] = {
+	RR_ADC_CHAN_RESISTANCE("batt_id", rradc_post_process_batt_id,
+			FG_ADC_RR_BATT_ID_5_LSB, FG_ADC_RR_BATT_ID_5_MSB,
+			FG_ADC_RR_BATT_ID_STS)
+	RR_ADC_CHAN_TEMP("batt_therm", &rradc_post_process_therm,
+			BIT(IIO_CHAN_INFO_RAW),
+			FG_ADC_RR_BATT_THERM_LSB, FG_ADC_RR_BATT_THERM_MSB,
+			FG_ADC_RR_BATT_THERM_STS)
+	RR_ADC_CHAN_TEMP("skin_temp", &rradc_post_process_therm,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_SKIN_TEMP_LSB, FG_ADC_RR_SKIN_TEMP_MSB,
+			FG_ADC_RR_AUX_THERM_STS)
+	RR_ADC_CHAN_CURRENT("usbin_i", &rradc_post_process_curr,
+			FG_ADC_RR_USB_IN_I_LSB, FG_ADC_RR_USB_IN_I_MSB,
+			FG_ADC_RR_USB_IN_I_STS)
+	RR_ADC_CHAN_VOLT("usbin_v", &rradc_post_process_volt,
+			FG_ADC_RR_USB_IN_V_LSB, FG_ADC_RR_USB_IN_V_MSB,
+			FG_ADC_RR_USB_IN_V_STS)
+	RR_ADC_CHAN_CURRENT("dcin_i", &rradc_post_process_curr,
+			FG_ADC_RR_DC_IN_I_LSB, FG_ADC_RR_DC_IN_I_MSB,
+			FG_ADC_RR_DC_IN_I_STS)
+	RR_ADC_CHAN_VOLT("dcin_v", &rradc_post_process_volt,
+			FG_ADC_RR_DC_IN_V_LSB, FG_ADC_RR_DC_IN_V_MSB,
+			FG_ADC_RR_DC_IN_V_STS)
+	RR_ADC_CHAN_TEMP("die_temp", &rradc_post_process_die_temp,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_PMI_DIE_TEMP_LSB, FG_ADC_RR_PMI_DIE_TEMP_MSB,
+			FG_ADC_RR_PMI_DIE_TEMP_STS)
+	RR_ADC_CHAN_TEMP("chg_temp", &rradc_post_process_chg_temp,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_CHARGER_TEMP_LSB, FG_ADC_RR_CHARGER_TEMP_MSB,
+			FG_ADC_RR_CHARGER_TEMP_STS)
+	RR_ADC_CHAN_VOLT("gpio", &rradc_post_process_gpio,
+			FG_ADC_RR_GPIO_LSB, FG_ADC_RR_GPIO_MSB,
+			FG_ADC_RR_GPIO_STS)
+	RR_ADC_CHAN_TEMP("chg_temp_hot", &rradc_post_process_chg_temp_hot,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_CHARGER_HOT, FG_ADC_RR_CHARGER_HOT,
+			FG_ADC_RR_CHARGER_TEMP_STS)
+	RR_ADC_CHAN_TEMP("chg_temp_too_hot", &rradc_post_process_chg_temp_hot,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_CHARGER_TOO_HOT, FG_ADC_RR_CHARGER_TOO_HOT,
+			FG_ADC_RR_CHARGER_TEMP_STS)
+	RR_ADC_CHAN_TEMP("skin_temp_hot", &rradc_post_process_skin_temp_hot,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_SKIN_HOT, FG_ADC_RR_SKIN_HOT,
+			FG_ADC_RR_AUX_THERM_STS)
+	RR_ADC_CHAN_TEMP("skin_temp_too_hot", &rradc_post_process_skin_temp_hot,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_SKIN_TOO_HOT, FG_ADC_RR_SKIN_TOO_HOT,
+			FG_ADC_RR_AUX_THERM_STS)
+};
+
+/*
+ * Switch the RR ADC into continuous conversion mode: pulse the
+ * channel-log clear bit (set, then clear), then set the continuous
+ * select bit.  Counterpart of rradc_disable_continuous_mode().
+ */
+static int rradc_enable_continuous_mode(struct rradc_chip *chip)
+{
+	int rc = 0;
+
+	/* Clear channel log */
+	rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
+			FG_ADC_RR_ADC_LOG_CLR_CTRL,
+			FG_ADC_RR_ADC_LOG_CLR_CTRL);
+	if (rc < 0) {
+		pr_err("log ctrl update to clear failed:%d\n", rc);
+		return rc;
+	}
+
+	rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
+		FG_ADC_RR_ADC_LOG_CLR_CTRL, 0);
+	if (rc < 0) {
+		pr_err("log ctrl update to not clear failed:%d\n", rc);
+		return rc;
+	}
+
+	/* Switch to continuous mode */
+	rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
+		FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK,
+		FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL);
+	if (rc < 0) {
+		pr_err("Update to continuous mode failed:%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/* Clear the continuous-select bit, returning the ADC to one-shot mode. */
+static int rradc_disable_continuous_mode(struct rradc_chip *chip)
+{
+	int rc = 0;
+
+	/* Switch to non continuous mode */
+	rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
+			FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK, 0);
+	if (rc < 0) {
+		pr_err("Update to non-continuous mode failed:%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * Poll the channel's status register (@status) until the ready bits
+ * are set, sleeping ~50ms between polls, up to FG_RR_CONV_MAX_RETRY_CNT
+ * attempts.  buf[0] must contain an initial status read on entry and
+ * holds the last status value on return.  BATT_ID uses only the
+ * CHANNEL_STS bit because its STS bit does not get set initially.
+ * Returns -ENODATA when the retries are exhausted.
+ */
+static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
+		struct rradc_chan_prop *prop, u8 *buf, u16 status)
+{
+	int rc = 0, retry_cnt = 0, mask = 0;
+
+	switch (prop->channel) {
+	case RR_ADC_BATT_ID:
+		/* BATT_ID STS bit does not get set initially */
+		mask = FG_RR_ADC_STS_CHANNEL_STS;
+		break;
+	default:
+		mask = FG_RR_ADC_STS_CHANNEL_READING_MASK;
+		break;
+	}
+
+	while (((buf[0] & mask) != mask) &&
+			(retry_cnt < FG_RR_CONV_MAX_RETRY_CNT)) {
+		pr_debug("%s is not ready; nothing to read:0x%x\n",
+			rradc_chans[prop->channel].datasheet_name, buf[0]);
+		usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN_US,
+				FG_RR_CONV_CONTINUOUS_TIME_MAX_US);
+		retry_cnt++;
+		rc = rradc_read(chip, status, buf, 1);
+		if (rc < 0) {
+			pr_err("status read failed:%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (retry_cnt >= FG_RR_CONV_MAX_RETRY_CNT)
+		rc = -ENODATA;
+
+	return rc;
+}
+
+/*
+ * Enable continuous mode, wait for the channel to report ready (last
+ * status value returned in buf[0]), then restore one-shot mode.
+ * NOTE(review): the error returns after enabling continuous mode leave
+ * the ADC in continuous mode -- consider disabling it on those paths
+ * before returning.
+ */
+static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u8 *buf)
+{
+	int rc = 0;
+	u16 status = 0;
+
+	rc = rradc_enable_continuous_mode(chip);
+	if (rc < 0) {
+		pr_err("Failed to switch to continuous mode\n");
+		return rc;
+	}
+
+	status = rradc_chans[prop->channel].sts;
+	rc = rradc_read(chip, status, buf, 1);
+	if (rc < 0) {
+		pr_err("status read failed:%d\n", rc);
+		return rc;
+	}
+
+	rc = rradc_check_status_ready_with_retry(chip, prop,
+						buf, status);
+	if (rc < 0) {
+		pr_err("Status read failed:%d\n", rc);
+		return rc;
+	}
+
+	rc = rradc_disable_continuous_mode(chip);
+	if (rc < 0) {
+		pr_err("Failed to switch to non continuous mode\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * Perform one conversion for @prop's channel and return the raw code
+ * in @data.  Serialized by chip->lock.
+ *
+ * Channel-specific handling:
+ *  - BATT_ID: trigger the conversion, read all three bias readings
+ *    (5/15/150 mA, 6 bytes), pick the highest bias current whose code
+ *    is within FG_ADC_RR_BATT_ID_RANGE, and record that current in
+ *    prop->channel_data for the ohms scaler.
+ *  - USBIN_V: temporarily force a conversion every cycle.
+ *  - threshold channels (*_HOT/_TOO_HOT): single data byte, no status
+ *    polling.
+ *  - everything else: poll status, then read the 2-byte LSB/MSB pair.
+ *
+ * NOTE(review): on the BATT_ID and USBIN_V error paths the trigger /
+ * every-cycle bits set above are not restored before jumping to fail.
+ */
+static int rradc_do_conversion(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 *data)
+{
+	int rc = 0, bytes_to_read = 0;
+	u8 buf[6];
+	u16 offset = 0, batt_id_5 = 0, batt_id_15 = 0, batt_id_150 = 0;
+	u16 status = 0;
+
+	mutex_lock(&chip->lock);
+
+	switch (prop->channel) {
+	case RR_ADC_BATT_ID:
+		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+				FG_ADC_RR_BATT_ID_TRIGGER_CTL,
+				FG_ADC_RR_BATT_ID_TRIGGER_CTL);
+		if (rc < 0) {
+			pr_err("BATT_ID trigger set failed:%d\n", rc);
+			goto fail;
+		}
+
+		rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
+		if (rc < 0) {
+			pr_err("Error reading in continuous mode:%d\n", rc);
+			goto fail;
+		}
+
+		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+				FG_ADC_RR_BATT_ID_TRIGGER_CTL, 0);
+		if (rc < 0) {
+			pr_err("BATT_ID trigger re-set failed:%d\n", rc);
+			goto fail;
+		}
+		break;
+	case RR_ADC_USBIN_V:
+		/* Force conversion every cycle */
+		rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
+				FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK,
+				FG_ADC_RR_USB_IN_V_EVERY_CYCLE);
+		if (rc < 0) {
+			pr_err("Force every cycle update failed:%d\n", rc);
+			goto fail;
+		}
+
+		rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
+		if (rc < 0) {
+			pr_err("Error reading in continuous mode:%d\n", rc);
+			goto fail;
+		}
+
+		/* Restore usb_in trigger */
+		rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
+				FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK, 0);
+		if (rc < 0) {
+			pr_err("Restore every cycle update failed:%d\n", rc);
+			goto fail;
+		}
+		break;
+	case RR_ADC_CHG_HOT_TEMP:
+	case RR_ADC_CHG_TOO_HOT_TEMP:
+	case RR_ADC_SKIN_HOT_TEMP:
+	case RR_ADC_SKIN_TOO_HOT_TEMP:
+		pr_debug("Read only the data registers\n");
+		break;
+	default:
+		status = rradc_chans[prop->channel].sts;
+		rc = rradc_read(chip, status, buf, 1);
+		if (rc < 0) {
+			pr_err("status read failed:%d\n", rc);
+			goto fail;
+		}
+
+		rc = rradc_check_status_ready_with_retry(chip, prop,
+						buf, status);
+		if (rc < 0) {
+			pr_debug("Status read failed:%d\n", rc);
+			rc = -ENODATA;
+			goto fail;
+		}
+		break;
+	}
+
+	offset = rradc_chans[prop->channel].lsb;
+	if (prop->channel == RR_ADC_BATT_ID)
+		bytes_to_read = 6;
+	else if ((prop->channel == RR_ADC_CHG_HOT_TEMP) ||
+		(prop->channel == RR_ADC_CHG_TOO_HOT_TEMP) ||
+		(prop->channel == RR_ADC_SKIN_HOT_TEMP) ||
+		(prop->channel == RR_ADC_SKIN_TOO_HOT_TEMP))
+		bytes_to_read = 1;
+	else
+		bytes_to_read = 2;
+
+	buf[0] = 0;
+	rc = rradc_read(chip, offset, buf, bytes_to_read);
+	if (rc) {
+		pr_err("read data failed\n");
+		goto fail;
+	}
+
+	if (prop->channel == RR_ADC_BATT_ID) {
+		/* 6 bytes = three little-endian 16-bit bias readings */
+		batt_id_150 = (buf[5] << 8) | buf[4];
+		batt_id_15 = (buf[3] << 8) | buf[2];
+		batt_id_5 = (buf[1] << 8) | buf[0];
+		if ((!batt_id_150) && (!batt_id_15) && (!batt_id_5)) {
+			pr_err("Invalid batt_id values with all zeros\n");
+			rc = -EINVAL;
+			goto fail;
+		}
+
+		if (batt_id_150 <= FG_ADC_RR_BATT_ID_RANGE) {
+			pr_debug("Batt_id_150 is chosen\n");
+			*data = batt_id_150;
+			prop->channel_data = FG_ADC_RR_BATT_ID_150_MA;
+		} else if (batt_id_15 <= FG_ADC_RR_BATT_ID_RANGE) {
+			pr_debug("Batt_id_15 is chosen\n");
+			*data = batt_id_15;
+			prop->channel_data = FG_ADC_RR_BATT_ID_15_MA;
+		} else {
+			pr_debug("Batt_id_5 is chosen\n");
+			*data = batt_id_5;
+			prop->channel_data = FG_ADC_RR_BATT_ID_5_MA;
+		}
+	} else if ((prop->channel == RR_ADC_CHG_HOT_TEMP) ||
+		(prop->channel == RR_ADC_CHG_TOO_HOT_TEMP) ||
+		(prop->channel == RR_ADC_SKIN_HOT_TEMP) ||
+		(prop->channel == RR_ADC_SKIN_TOO_HOT_TEMP)) {
+		*data = buf[0];
+	} else {
+		*data = (buf[1] << 8) | buf[0];
+	}
+fail:
+	mutex_unlock(&chip->lock);
+
+	return rc;
+}
+
+/*
+ * IIO read_raw callback.  RAW returns the unscaled ADC code; PROCESSED
+ * runs the channel's scale() callback on the code.
+ * NOTE(review): the return value of prop->scale() is ignored, so a
+ * scaling failure (e.g. -EINVAL from the chg_temp fab-id lookup) is
+ * reported as success with an uninitialized *val.
+ * NOTE(review): chan->address is unsigned long; %lu would be the
+ * matching format specifier rather than %ld.
+ */
+static int rradc_read_raw(struct iio_dev *indio_dev,
+			 struct iio_chan_spec const *chan, int *val, int *val2,
+			 long mask)
+{
+	struct rradc_chip *chip = iio_priv(indio_dev);
+	struct rradc_chan_prop *prop;
+	u16 adc_code;
+	int rc = 0;
+
+	if (chan->address >= RR_ADC_MAX) {
+		pr_err("Invalid channel index:%ld\n", chan->address);
+		return -EINVAL;
+	}
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED:
+		prop = &chip->chan_props[chan->address];
+		rc = rradc_do_conversion(chip, prop, &adc_code);
+		if (rc)
+			break;
+
+		prop->scale(chip, prop, adc_code, val);
+
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_RAW:
+		prop = &chip->chan_props[chan->address];
+		rc = rradc_do_conversion(chip, prop, &adc_code);
+		if (rc)
+			break;
+
+		*val = (int) adc_code;
+
+		return IIO_VAL_INT;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/* IIO callbacks: this driver only supports read_raw. */
+static const struct iio_info rradc_info = {
+	.read_raw	= &rradc_read_raw,
+	.driver_module	= THIS_MODULE,
+};
+
+/*
+ * Parse DT and build the channel tables: allocate iio_chans/chan_props
+ * for all RR_ADC_MAX channels, read the peripheral base from "reg",
+ * and optionally resolve "qcom,pmic-revid" for the fab-id data used by
+ * the charger-temperature scalers.
+ * NOTE(review): of_parse_phandle() takes a reference on the returned
+ * node that is never dropped with of_node_put() -- refcount leak.
+ * NOTE(review): the fab_id == -EINVAL comparison relies on the revid
+ * driver storing -EINVAL in fab_id on read failure -- confirm.
+ */
+static int rradc_get_dt_data(struct rradc_chip *chip, struct device_node *node)
+{
+	const struct rradc_channels *rradc_chan;
+	struct iio_chan_spec *iio_chan;
+	unsigned int i = 0, base;
+	int rc = 0;
+	struct rradc_chan_prop prop;
+
+	chip->nchannels = RR_ADC_MAX;
+	chip->iio_chans = devm_kcalloc(chip->dev, chip->nchannels,
+				       sizeof(*chip->iio_chans), GFP_KERNEL);
+	if (!chip->iio_chans)
+		return -ENOMEM;
+
+	chip->chan_props = devm_kcalloc(chip->dev, chip->nchannels,
+				       sizeof(*chip->chan_props), GFP_KERNEL);
+	if (!chip->chan_props)
+		return -ENOMEM;
+
+	/* Get the peripheral address */
+	rc = of_property_read_u32(node, "reg", &base);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			node->name, rc);
+		return rc;
+	}
+
+	chip->base = base;
+	chip->revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (chip->revid_dev_node) {
+		chip->pmic_fab_id = get_revid_data(chip->revid_dev_node);
+		if (IS_ERR(chip->pmic_fab_id)) {
+			rc = PTR_ERR(chip->pmic_fab_id);
+			if (rc != -EPROBE_DEFER)
+				pr_err("Unable to get pmic_revid rc=%d\n", rc);
+			return rc;
+		}
+
+		if (!chip->pmic_fab_id)
+			return -EINVAL;
+
+		if (chip->pmic_fab_id->fab_id == -EINVAL) {
+			rc = chip->pmic_fab_id->fab_id;
+			pr_debug("Unable to read fabid rc=%d\n", rc);
+		}
+	}
+
+	iio_chan = chip->iio_chans;
+
+	for (i = 0; i < RR_ADC_MAX; i++) {
+		prop.channel = i;
+		prop.scale = rradc_chans[i].scale;
+		/* Private channel data used for selecting batt_id */
+		prop.channel_data = 0;
+		chip->chan_props[i] = prop;
+
+		rradc_chan = &rradc_chans[i];
+
+		iio_chan->channel = prop.channel;
+		iio_chan->datasheet_name = rradc_chan->datasheet_name;
+		iio_chan->extend_name = rradc_chan->datasheet_name;
+		iio_chan->info_mask_separate = rradc_chan->info_mask;
+		iio_chan->type = rradc_chan->type;
+		iio_chan->address = i;
+		iio_chan++;
+	}
+
+	return 0;
+}
+
+/*
+ * Probe: allocate the IIO device (rradc_chip is its private data),
+ * borrow the parent SPMI device's regmap, parse DT, and register the
+ * IIO device.  All resources are devm-managed, so no remove() is
+ * needed.
+ */
+static int rradc_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct iio_dev *indio_dev;
+	struct rradc_chip *chip;
+	int rc = 0;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	chip = iio_priv(indio_dev);
+	chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chip->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	chip->dev = dev;
+	mutex_init(&chip->lock);
+
+	rc = rradc_get_dt_data(chip, node);
+	if (rc)
+		return rc;
+
+	indio_dev->dev.parent = dev;
+	indio_dev->dev.of_node = node;
+	indio_dev->name = pdev->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &rradc_info;
+	indio_dev->channels = chip->iio_chans;
+	indio_dev->num_channels = chip->nchannels;
+
+	return devm_iio_device_register(dev, indio_dev);
+}
+
+/* DT match table: binds to "qcom,rradc" nodes. */
+static const struct of_device_id rradc_match_table[] = {
+	{ .compatible = "qcom,rradc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, rradc_match_table);
+
+static struct platform_driver rradc_driver = {
+	.driver		= {
+		.name		= "qcom-rradc",
+		.of_match_table	= rradc_match_table,
+	},
+	.probe = rradc_probe,
+};
+module_platform_driver(rradc_driver);
+
+MODULE_DESCRIPTION("QPNP PMIC RR ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 1749037..92fd916 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -54,7 +54,7 @@
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-14nm.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-v3.o
-obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qrbtc-msmskunk.o
+obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qrbtc-sdm845.o
 obj-$(CONFIG_PHY_TUSB1210)		+= phy-tusb1210.o
 obj-$(CONFIG_PHY_BRCM_SATA)		+= phy-brcm-sata.o
 obj-$(CONFIG_PHY_PISTACHIO_USB)		+= phy-pistachio-usb.o
diff --git a/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.c b/drivers/phy/phy-qcom-ufs-qrbtc-sdm845.c
similarity index 65%
rename from drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.c
rename to drivers/phy/phy-qcom-ufs-qrbtc-sdm845.c
index 61f1232..6834f6a 100644
--- a/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.c
+++ b/drivers/phy/phy-qcom-ufs-qrbtc-sdm845.c
@@ -12,12 +12,12 @@
  *
  */
 
-#include "phy-qcom-ufs-qrbtc-msmskunk.h"
+#include "phy-qcom-ufs-qrbtc-sdm845.h"
 
-#define UFS_PHY_NAME "ufs_phy_qrbtc_msmskunk"
+#define UFS_PHY_NAME "ufs_phy_qrbtc_sdm845"
 
 static
-int ufs_qcom_phy_qrbtc_msmskunk_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+int ufs_qcom_phy_qrbtc_sdm845_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 					bool is_rate_B)
 {
 	int err;
@@ -44,7 +44,7 @@
 }
 
 static int
-ufs_qcom_phy_qrbtc_msmskunk_is_pcs_ready(struct ufs_qcom_phy *phy_common)
+ufs_qcom_phy_qrbtc_sdm845_is_pcs_ready(struct ufs_qcom_phy *phy_common)
 {
 	int err = 0;
 	u32 val;
@@ -68,7 +68,7 @@
 	return err;
 }
 
-static void ufs_qcom_phy_qrbtc_msmskunk_start_serdes(struct ufs_qcom_phy *phy)
+static void ufs_qcom_phy_qrbtc_sdm845_start_serdes(struct ufs_qcom_phy *phy)
 {
 	u32 temp;
 
@@ -82,29 +82,29 @@
 	mb();
 }
 
-static int ufs_qcom_phy_qrbtc_msmskunk_init(struct phy *generic_phy)
+static int ufs_qcom_phy_qrbtc_sdm845_init(struct phy *generic_phy)
 {
 	return 0;
 }
 
-struct phy_ops ufs_qcom_phy_qrbtc_msmskunk_phy_ops = {
-	.init		= ufs_qcom_phy_qrbtc_msmskunk_init,
+struct phy_ops ufs_qcom_phy_qrbtc_sdm845_phy_ops = {
+	.init		= ufs_qcom_phy_qrbtc_sdm845_init,
 	.exit		= ufs_qcom_phy_exit,
 	.owner		= THIS_MODULE,
 };
 
-struct ufs_qcom_phy_specific_ops phy_qrbtc_msmskunk_ops = {
-	.calibrate_phy		= ufs_qcom_phy_qrbtc_msmskunk_phy_calibrate,
-	.start_serdes		= ufs_qcom_phy_qrbtc_msmskunk_start_serdes,
+struct ufs_qcom_phy_specific_ops phy_qrbtc_sdm845_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qrbtc_sdm845_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qrbtc_sdm845_start_serdes,
 	.is_physical_coding_sublayer_ready =
-				ufs_qcom_phy_qrbtc_msmskunk_is_pcs_ready,
+				ufs_qcom_phy_qrbtc_sdm845_is_pcs_ready,
 };
 
-static int ufs_qcom_phy_qrbtc_msmskunk_probe(struct platform_device *pdev)
+static int ufs_qcom_phy_qrbtc_sdm845_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct phy *generic_phy;
-	struct ufs_qcom_phy_qrbtc_msmskunk *phy;
+	struct ufs_qcom_phy_qrbtc_sdm845 *phy;
 	int err = 0;
 
 	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -114,7 +114,7 @@
 	}
 
 	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
-		&ufs_qcom_phy_qrbtc_msmskunk_phy_ops, &phy_qrbtc_msmskunk_ops);
+		&ufs_qcom_phy_qrbtc_sdm845_phy_ops, &phy_qrbtc_sdm845_ops);
 
 	if (!generic_phy) {
 		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
@@ -132,7 +132,7 @@
 	return err;
 }
 
-static int ufs_qcom_phy_qrbtc_msmskunk_remove(struct platform_device *pdev)
+static int ufs_qcom_phy_qrbtc_sdm845_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct phy *generic_phy = to_phy(dev);
@@ -147,23 +147,23 @@
 	return err;
 }
 
-static const struct of_device_id ufs_qcom_phy_qrbtc_msmskunk_of_match[] = {
-	{.compatible = "qcom,ufs-phy-qrbtc-msmskunk"},
+static const struct of_device_id ufs_qcom_phy_qrbtc_sdm845_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qrbtc-sdm845"},
 	{},
 };
-MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qrbtc_msmskunk_of_match);
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qrbtc_sdm845_of_match);
 
-static struct platform_driver ufs_qcom_phy_qrbtc_msmskunk_driver = {
-	.probe = ufs_qcom_phy_qrbtc_msmskunk_probe,
-	.remove = ufs_qcom_phy_qrbtc_msmskunk_remove,
+static struct platform_driver ufs_qcom_phy_qrbtc_sdm845_driver = {
+	.probe = ufs_qcom_phy_qrbtc_sdm845_probe,
+	.remove = ufs_qcom_phy_qrbtc_sdm845_remove,
 	.driver = {
-		.of_match_table = ufs_qcom_phy_qrbtc_msmskunk_of_match,
-		.name = "ufs_qcom_phy_qrbtc_msmskunk",
+		.of_match_table = ufs_qcom_phy_qrbtc_sdm845_of_match,
+		.name = "ufs_qcom_phy_qrbtc_sdm845",
 		.owner = THIS_MODULE,
 	},
 };
 
-module_platform_driver(ufs_qcom_phy_qrbtc_msmskunk_driver);
+module_platform_driver(ufs_qcom_phy_qrbtc_sdm845_driver);
 
-MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QRBTC MSMSKUNK");
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QRBTC SDM845");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.h b/drivers/phy/phy-qcom-ufs-qrbtc-sdm845.h
similarity index 97%
rename from drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.h
rename to drivers/phy/phy-qcom-ufs-qrbtc-sdm845.h
index 2597576..ddcf4192 100644
--- a/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.h
+++ b/drivers/phy/phy-qcom-ufs-qrbtc-sdm845.h
@@ -12,8 +12,8 @@
  *
  */
 
-#ifndef UFS_QCOM_PHY_QRBTC_MSMSKUNK_H_
-#define UFS_QCOM_PHY_QRBTC_MSMSKUNK_H_
+#ifndef UFS_QCOM_PHY_QRBTC_SDM845_H_
+#define UFS_QCOM_PHY_QRBTC_SDM845_H_
 
 #include "phy-qcom-ufs-i.h"
 
@@ -166,14 +166,14 @@
 
 
 /*
- * This structure represents the qrbtc-msmskunk specific phy.
+ * This structure represents the qrbtc-sdm845 specific phy.
  * common_cfg MUST remain the first field in this structure
  * in case extra fields are added. This way, when calling
  * get_ufs_qcom_phy() of generic phy, we can extract the
  * common phy structure (struct ufs_qcom_phy) out of it
  * regardless of the relevant specific phy.
  */
-struct ufs_qcom_phy_qrbtc_msmskunk {
+struct ufs_qcom_phy_qrbtc_sdm845 {
 	struct ufs_qcom_phy common_cfg;
 };
 
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 1058e5e..5222936 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -79,23 +79,23 @@
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm TLMM block found on the Qualcomm 8916 platform.
 
-config PINCTRL_MSMSKUNK
-	tristate "Qualcomm Technologies Inc MSMSKUNK pin controller driver"
+config PINCTRL_SDM845
+	tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
 	depends on GPIOLIB && OF
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
-	  Technologies Inc MSMSKUNK platform.
+	  Technologies Inc SDM845 platform.
 
-config PINCTRL_SDMBAT
-	tristate "Qualcomm Technologies Inc SDMBAT pin controller driver"
+config PINCTRL_SDM830
+	tristate "Qualcomm Technologies Inc SDM830 pin controller driver"
 	depends on GPIOLIB && OF
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
-	  Technologies Inc SDMBAT platform.
+	  Technologies Inc SDM830 platform.
 
 
 config PINCTRL_MSM8996
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index fd52c43..c66ee3c 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -15,5 +15,5 @@
 obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
-obj-$(CONFIG_PINCTRL_MSMSKUNK) += pinctrl-msmskunk.o
-obj-$(CONFIG_PINCTRL_SDMBAT) += pinctrl-sdmbat.o
+obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
+obj-$(CONFIG_PINCTRL_SDM830) += pinctrl-sdm830.o
diff --git a/drivers/pinctrl/qcom/pinctrl-sdmbat.c b/drivers/pinctrl/qcom/pinctrl-sdm830.c
similarity index 97%
rename from drivers/pinctrl/qcom/pinctrl-sdmbat.c
rename to drivers/pinctrl/qcom/pinctrl-sdm830.c
index 3e4fdda..fc3d0ad 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdmbat.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm830.c
@@ -118,7 +118,7 @@
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
 	}
-static const struct pinctrl_pin_desc sdmbat_pins[] = {
+static const struct pinctrl_pin_desc sdm830_pins[] = {
 	PINCTRL_PIN(0, "GPIO_0"),
 	PINCTRL_PIN(1, "GPIO_1"),
 	PINCTRL_PIN(2, "GPIO_2"),
@@ -403,7 +403,7 @@
 static const unsigned int sdc2_cmd_pins[] = { 151 };
 static const unsigned int sdc2_data_pins[] = { 152 };
 
-enum sdmbat_functions {
+enum sdm830_functions {
 	msm_mux_qup0,
 	msm_mux_gpio,
 	msm_mux_reserved0,
@@ -1680,7 +1680,7 @@
 	"gpio123",
 };
 
-static const struct msm_function sdmbat_functions[] = {
+static const struct msm_function sdm830_functions[] = {
 	FUNCTION(qup0),
 	FUNCTION(gpio),
 	FUNCTION(reserved0),
@@ -1996,7 +1996,7 @@
 	FUNCTION(reserved123),
 };
 
-static const struct msm_pingroup sdmbat_groups[] = {
+static const struct msm_pingroup sdm830_groups[] = {
 	PINGROUP(0, SOUTH, qup0, NA, reserved0, NA, NA, NA, NA, NA, NA),
 	PINGROUP(1, SOUTH, qup0, NA, reserved1, NA, NA, NA, NA, NA, NA),
 	PINGROUP(2, SOUTH, qup0, NA, reserved2, NA, NA, NA, NA, NA, NA),
@@ -2236,48 +2236,48 @@
 	SDC_QDSD_PINGROUP(sdc2_data, 0x59a000, 9, 0),
 };
 
-static const struct msm_pinctrl_soc_data sdmbat_pinctrl = {
-	.pins = sdmbat_pins,
-	.npins = ARRAY_SIZE(sdmbat_pins),
-	.functions = sdmbat_functions,
-	.nfunctions = ARRAY_SIZE(sdmbat_functions),
-	.groups = sdmbat_groups,
-	.ngroups = ARRAY_SIZE(sdmbat_groups),
+static const struct msm_pinctrl_soc_data sdm830_pinctrl = {
+	.pins = sdm830_pins,
+	.npins = ARRAY_SIZE(sdm830_pins),
+	.functions = sdm830_functions,
+	.nfunctions = ARRAY_SIZE(sdm830_functions),
+	.groups = sdm830_groups,
+	.ngroups = ARRAY_SIZE(sdm830_groups),
 	.ngpios = 136,
 };
 
-static int sdmbat_pinctrl_probe(struct platform_device *pdev)
+static int sdm830_pinctrl_probe(struct platform_device *pdev)
 {
-	return msm_pinctrl_probe(pdev, &sdmbat_pinctrl);
+	return msm_pinctrl_probe(pdev, &sdm830_pinctrl);
 }
 
-static const struct of_device_id sdmbat_pinctrl_of_match[] = {
-	{ .compatible = "qcom,sdmbat-pinctrl", },
+static const struct of_device_id sdm830_pinctrl_of_match[] = {
+	{ .compatible = "qcom,sdm830-pinctrl", },
 	{ },
 };
 
-static struct platform_driver sdmbat_pinctrl_driver = {
+static struct platform_driver sdm830_pinctrl_driver = {
 	.driver = {
-		.name = "sdmbat-pinctrl",
+		.name = "sdm830-pinctrl",
 		.owner = THIS_MODULE,
-		.of_match_table = sdmbat_pinctrl_of_match,
+		.of_match_table = sdm830_pinctrl_of_match,
 	},
-	.probe = sdmbat_pinctrl_probe,
+	.probe = sdm830_pinctrl_probe,
 	.remove = msm_pinctrl_remove,
 };
 
-static int __init sdmbat_pinctrl_init(void)
+static int __init sdm830_pinctrl_init(void)
 {
-	return platform_driver_register(&sdmbat_pinctrl_driver);
+	return platform_driver_register(&sdm830_pinctrl_driver);
 }
-arch_initcall(sdmbat_pinctrl_init);
+arch_initcall(sdm830_pinctrl_init);
 
-static void __exit sdmbat_pinctrl_exit(void)
+static void __exit sdm830_pinctrl_exit(void)
 {
-	platform_driver_unregister(&sdmbat_pinctrl_driver);
+	platform_driver_unregister(&sdm830_pinctrl_driver);
 }
-module_exit(sdmbat_pinctrl_exit);
+module_exit(sdm830_pinctrl_exit);
 
-MODULE_DESCRIPTION("QTI sdmbat pinctrl driver");
+MODULE_DESCRIPTION("QTI sdm830 pinctrl driver");
 MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, sdmbat_pinctrl_of_match);
+MODULE_DEVICE_TABLE(of, sdm830_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-msmskunk.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
similarity index 97%
rename from drivers/pinctrl/qcom/pinctrl-msmskunk.c
rename to drivers/pinctrl/qcom/pinctrl-sdm845.c
index e203b2d..b237a6d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msmskunk.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -116,7 +116,7 @@
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
 	}
-static const struct pinctrl_pin_desc msmskunk_pins[] = {
+static const struct pinctrl_pin_desc sdm845_pins[] = {
 	PINCTRL_PIN(0, "GPIO_0"),
 	PINCTRL_PIN(1, "GPIO_1"),
 	PINCTRL_PIN(2, "GPIO_2"),
@@ -429,7 +429,7 @@
 static const unsigned int sdc2_cmd_pins[] = { 151 };
 static const unsigned int sdc2_data_pins[] = { 152 };
 
-enum msmskunk_functions {
+enum sdm845_functions {
 	msm_mux_gpio,
 	msm_mux_qup0,
 	msm_mux_reserved0,
@@ -1815,7 +1815,7 @@
 	"gpio96",
 };
 
-static const struct msm_function msmskunk_functions[] = {
+static const struct msm_function sdm845_functions[] = {
 	FUNCTION(gpio),
 	FUNCTION(qup0),
 	FUNCTION(reserved0),
@@ -2158,7 +2158,7 @@
 	FUNCTION(sdc40),
 };
 
-static const struct msm_pingroup msmskunk_groups[] = {
+static const struct msm_pingroup sdm845_groups[] = {
 	PINGROUP(0, NORTH, qup0, NA, reserved0, NA, NA, NA, NA, NA, NA),
 	PINGROUP(1, NORTH, qup0, NA, reserved1, NA, NA, NA, NA, NA, NA),
 	PINGROUP(2, NORTH, qup0, NA, reserved2, NA, NA, NA, NA, NA, NA),
@@ -2421,48 +2421,48 @@
 	SDC_QDSD_PINGROUP(sdc2_data, 0x59a000, 9, 0),
 };
 
-static const struct msm_pinctrl_soc_data msmskunk_pinctrl = {
-	.pins = msmskunk_pins,
-	.npins = ARRAY_SIZE(msmskunk_pins),
-	.functions = msmskunk_functions,
-	.nfunctions = ARRAY_SIZE(msmskunk_functions),
-	.groups = msmskunk_groups,
-	.ngroups = ARRAY_SIZE(msmskunk_groups),
+static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
+	.pins = sdm845_pins,
+	.npins = ARRAY_SIZE(sdm845_pins),
+	.functions = sdm845_functions,
+	.nfunctions = ARRAY_SIZE(sdm845_functions),
+	.groups = sdm845_groups,
+	.ngroups = ARRAY_SIZE(sdm845_groups),
 	.ngpios = 150,
 };
 
-static int msmskunk_pinctrl_probe(struct platform_device *pdev)
+static int sdm845_pinctrl_probe(struct platform_device *pdev)
 {
-	return msm_pinctrl_probe(pdev, &msmskunk_pinctrl);
+	return msm_pinctrl_probe(pdev, &sdm845_pinctrl);
 }
 
-static const struct of_device_id msmskunk_pinctrl_of_match[] = {
-	{ .compatible = "qcom,msmskunk-pinctrl", },
+static const struct of_device_id sdm845_pinctrl_of_match[] = {
+	{ .compatible = "qcom,sdm845-pinctrl", },
 	{ },
 };
 
-static struct platform_driver msmskunk_pinctrl_driver = {
+static struct platform_driver sdm845_pinctrl_driver = {
 	.driver = {
-		.name = "msmskunk-pinctrl",
+		.name = "sdm845-pinctrl",
 		.owner = THIS_MODULE,
-		.of_match_table = msmskunk_pinctrl_of_match,
+		.of_match_table = sdm845_pinctrl_of_match,
 	},
-	.probe = msmskunk_pinctrl_probe,
+	.probe = sdm845_pinctrl_probe,
 	.remove = msm_pinctrl_remove,
 };
 
-static int __init msmskunk_pinctrl_init(void)
+static int __init sdm845_pinctrl_init(void)
 {
-	return platform_driver_register(&msmskunk_pinctrl_driver);
+	return platform_driver_register(&sdm845_pinctrl_driver);
 }
-arch_initcall(msmskunk_pinctrl_init);
+arch_initcall(sdm845_pinctrl_init);
 
-static void __exit msmskunk_pinctrl_exit(void)
+static void __exit sdm845_pinctrl_exit(void)
 {
-	platform_driver_unregister(&msmskunk_pinctrl_driver);
+	platform_driver_unregister(&sdm845_pinctrl_driver);
 }
-module_exit(msmskunk_pinctrl_exit);
+module_exit(sdm845_pinctrl_exit);
 
-MODULE_DESCRIPTION("QTI msmskunk pinctrl driver");
+MODULE_DESCRIPTION("QTI sdm845 pinctrl driver");
 MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, msmskunk_pinctrl_of_match);
+MODULE_DEVICE_TABLE(of, sdm845_pinctrl_of_match);
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 7018007..2457df7 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -113,4 +113,13 @@
 	  numbers in the kernel log along with the PMIC option status. The PMIC
 	  type is mapped to a QTI chip part number and logged as well.
 
+config MSM_MHI_DEV
+	tristate "Modem Device Interface Driver"
+	depends on EP_PCIE && IPA
+	help
+	  This kernel module is used to interact with PCIe Root complex
+	  supporting MHI protocol. MHI is a data transmission protocol
+	  involving communication between a host and a device over shared
+	  memory. MHI interacts with the IPA for supporting transfers
+	  on the HW accelerated channels between Host and device.
 endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 85c4673..e2fdfb6 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -5,7 +5,6 @@
 obj-$(CONFIG_IPA) += ipa/
 obj-$(CONFIG_IPA3) += ipa/
 obj-$(CONFIG_SPS) += sps/
-
 obj-$(CONFIG_QPNP_COINCELL) += qpnp-coincell.o
 obj-$(CONFIG_QPNP_REVID) += qpnp-revid.o
-
+obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index d68e15d..298f8c1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -3613,6 +3613,7 @@
 
 static void ipa3_sps_release_resource(struct work_struct *work)
 {
+	mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
 	/* check whether still need to decrease client usage */
 	if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
 		if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
@@ -3624,6 +3625,7 @@
 		}
 	}
 	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
+	mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
 }
 
 int ipa3_create_apps_resource(void)
@@ -4403,6 +4405,8 @@
 		goto fail_create_transport_wq;
 	}
 
+	/* Initialize the SPS PM lock. */
+	mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
 	spin_lock_init(&ipa3_ctx->transport_pm.lock);
 	ipa3_ctx->transport_pm.res_granted = false;
 	ipa3_ctx->transport_pm.res_rel_in_prog = false;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 4f6bf55..bec0b27 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -979,6 +979,7 @@
  * @lock: lock for ensuring atomic operations
  * @res_granted: true if SPS requested IPA resource and IPA granted it
  * @res_rel_in_prog: true if releasing IPA resource is in progress
+ * @transport_pm_mutex: Mutex to protect the transport_pm functionality.
  */
 struct ipa3_transport_pm {
 	spinlock_t lock;
@@ -986,6 +987,7 @@
 	bool res_rel_in_prog;
 	atomic_t dec_clients;
 	atomic_t eot_activity;
+	struct mutex transport_pm_mutex;
 };
 
 /**
diff --git a/drivers/platform/msm/mhi_dev/Makefile b/drivers/platform/msm/mhi_dev/Makefile
new file mode 100644
index 0000000..c1969e2
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/Makefile
@@ -0,0 +1,6 @@
+# Makefile for MHI driver
+obj-y += mhi_mmio.o
+obj-y += mhi.o
+obj-y += mhi_ring.o
+obj-y += mhi_uci.o
+obj-y += mhi_sm.o
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
new file mode 100644
index 0000000..7179fcd
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -0,0 +1,1952 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/msm_ep_pcie.h>
+#include <linux/ipa_mhi.h>
+#include <linux/vmalloc.h>
+
+#include "mhi.h"
+#include "mhi_hwio.h"
+#include "mhi_sm.h"
+
+/* Wait time on the device for Host to set M0 state */
+#define MHI_M0_WAIT_MIN_USLEEP		20000000
+#define MHI_M0_WAIT_MAX_USLEEP		25000000
+#define MHI_DEV_M0_MAX_CNT		30
+/* Wait time before suspend/resume is complete */
+#define MHI_SUSPEND_WAIT_MIN		3100
+#define MHI_SUSPEND_WAIT_MAX		3200
+#define MHI_SUSPEND_WAIT_TIMEOUT	500
+#define MHI_MASK_CH_EV_LEN		32
+#define MHI_RING_CMD_ID			0
+#define MHI_RING_PRIMARY_EVT_ID		1
+#define MHI_1K_SIZE			0x1000
+/* Per updated specification, HW accel event rings span NER - 2 (start) to NER - 1 (end) */
+#define MHI_HW_ACC_EVT_RING_START	2
+#define MHI_HW_ACC_EVT_RING_END		1
+
+#define MHI_HOST_REGION_NUM             2
+
+#define MHI_MMIO_CTRL_INT_STATUS_A7_MSK	0x1
+#define MHI_MMIO_CTRL_CRDB_STATUS_MSK	0x2
+
+#define HOST_ADDR(lsb, msb)		((lsb) | ((uint64_t)(msb) << 32))
+#define HOST_ADDR_LSB(addr)		(addr & 0xFFFFFFFF)
+#define HOST_ADDR_MSB(addr)		((addr >> 32) & 0xFFFFFFFF)
+
+#define MHI_IPC_LOG_PAGES		(100)
+enum mhi_msg_level mhi_msg_lvl = MHI_MSG_ERROR;
+enum mhi_msg_level mhi_ipc_msg_lvl = MHI_MSG_VERBOSE;
+void *mhi_ipc_log;
+
+static struct mhi_dev *mhi_ctx;
+static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
+	unsigned long data);
+static void mhi_ring_init_cb(void *user_data);
+
+void mhi_dev_read_from_host(struct mhi_addr *host, dma_addr_t dev, size_t size)
+{
+	int rc = 0;
+	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
+	host_addr_pa = ((u64) host->host_pa) | bit_40;
+
+	mhi_log(MHI_MSG_ERROR, "device 0x%x <<-- host 0x%llx, size %d\n",
+		dev, host_addr_pa, size);
+
+	rc = ipa_dma_sync_memcpy((u64) dev, host_addr_pa, (int) size);
+	if (rc)
+		pr_err("error while reading from host:%d\n", rc);
+}
+EXPORT_SYMBOL(mhi_dev_read_from_host);
+
+void mhi_dev_write_to_host(struct mhi_addr *host, void *dev, size_t size,
+						struct mhi_dev *mhi)
+{
+	int rc = 0;
+	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
+	if (!mhi) {
+		pr_err("invalid MHI ctx\n");
+		return;
+	}
+
+	host_addr_pa = ((u64) host->host_pa) | bit_40;
+	/* Copy the device content to a local device physical address */
+	memcpy(mhi->dma_cache, dev, size);
+	mhi_log(MHI_MSG_ERROR, "device 0x%llx --> host 0x%llx, size %d\n",
+		(uint64_t) mhi->cache_dma_handle, host_addr_pa, (int) size);
+
+	rc = ipa_dma_sync_memcpy(host_addr_pa, (u64) mhi->cache_dma_handle,
+								(int) size);
+	if (rc)
+		pr_err("error while reading from host:%d\n", rc);
+}
+EXPORT_SYMBOL(mhi_dev_write_to_host);
+
+int mhi_transfer_host_to_device(void *dev, uint64_t host_pa, uint32_t len,
+							struct mhi_dev *mhi)
+{
+	int rc = 0;
+	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
+	if (!mhi) {
+		pr_err("Invalid mhi device\n");
+		return -EINVAL;
+	}
+
+	if (!dev) {
+		pr_err("Invalid virt device\n");
+		return -EINVAL;
+	}
+
+	if (!host_pa) {
+		pr_err("Invalid host pa device\n");
+		return -EINVAL;
+	}
+
+	host_addr_pa = host_pa | bit_40;
+	mhi_log(MHI_MSG_ERROR, "device 0x%llx <-- host 0x%llx, size %d\n",
+		(uint64_t) mhi->read_dma_handle, host_addr_pa, (int) len);
+	rc = ipa_dma_sync_memcpy((u64) mhi->read_dma_handle,
+			host_addr_pa, (int) len);
+	if (rc) {
+		pr_err("error while reading from host:%d\n", rc);
+		return rc;
+	}
+
+	memcpy(dev, mhi->read_handle, len);
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_transfer_host_to_device);
+
+int mhi_transfer_device_to_host(uint64_t host_addr, void *dev, uint32_t len,
+						struct mhi_dev *mhi)
+{
+	int rc = 0;
+	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
+	if (!mhi || !dev || !host_addr) {
+		pr_err("%sInvalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	host_addr_pa = host_addr | bit_40;
+	memcpy(mhi->write_handle, dev, len);
+
+	mhi_log(MHI_MSG_ERROR, "device 0x%llx ---> host 0x%llx, size %d\n",
+		(uint64_t) mhi->write_dma_handle, host_addr_pa, (int) len);
+	rc = ipa_dma_sync_memcpy(host_addr_pa,
+			(u64) mhi->write_dma_handle,
+			(int) len);
+	if (rc)
+		pr_err("error while reading from host:%d\n", rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_transfer_device_to_host);
+
+int mhi_dev_is_list_empty(void)
+{
+
+	if (list_empty(&mhi_ctx->event_ring_list) &&
+			list_empty(&mhi_ctx->process_ring_list))
+		return 0;
+	else
+		return 1;
+}
+EXPORT_SYMBOL(mhi_dev_is_list_empty);
+
+static void mhi_dev_get_erdb_db_cfg(struct mhi_dev *mhi,
+				struct ep_pcie_db_config *erdb_cfg)
+{
+	switch (mhi->cfg.event_rings) {
+	case NUM_CHANNELS:
+		erdb_cfg->base = HW_CHANNEL_BASE;
+		erdb_cfg->end = HW_CHANNEL_END;
+		break;
+	default:
+		erdb_cfg->base = mhi->cfg.event_rings -
+					MHI_HW_ACC_EVT_RING_START;
+		erdb_cfg->end =  mhi->cfg.event_rings -
+					MHI_HW_ACC_EVT_RING_END;
+		break;
+	}
+}
+
+int mhi_pcie_config_db_routing(struct mhi_dev *mhi)
+{
+	int rc = 0;
+	struct ep_pcie_db_config chdb_cfg, erdb_cfg;
+
+	if (!mhi) {
+		pr_err("Invalid MHI context\n");
+		return -EINVAL;
+	}
+
+	/* Configure Doorbell routing */
+	chdb_cfg.base = HW_CHANNEL_BASE;
+	chdb_cfg.end = HW_CHANNEL_END;
+	chdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_crdb;
+
+	mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);
+
+	mhi_log(MHI_MSG_ERROR,
+		"Event rings 0x%x => er_base 0x%x, er_end %d\n",
+		mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);
+	erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
+	ep_pcie_config_db_routing(mhi_ctx->phandle, chdb_cfg, erdb_cfg);
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_pcie_config_db_routing);
+
+static int mhi_hwc_init(struct mhi_dev *mhi)
+{
+	int rc = 0;
+	struct ep_pcie_msi_config cfg;
+	struct ipa_mhi_init_params ipa_init_params;
+	struct ep_pcie_db_config erdb_cfg;
+
+	/* Call IPA HW_ACC Init with MSI Address and db routing info */
+	rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
+	if (rc) {
+		pr_err("Error retrieving pcie msi logic\n");
+		return rc;
+	}
+
+	rc = mhi_pcie_config_db_routing(mhi);
+	if (rc) {
+		pr_err("Error configuring DB routing\n");
+		return rc;
+	}
+
+	mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);
+	mhi_log(MHI_MSG_ERROR,
+		"Event rings 0x%x => er_base 0x%x, er_end %d\n",
+		mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);
+
+	erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
+	memset(&ipa_init_params, 0, sizeof(ipa_init_params));
+	ipa_init_params.msi.addr_hi = cfg.upper;
+	ipa_init_params.msi.addr_low = cfg.lower;
+	ipa_init_params.msi.data = cfg.data;
+	ipa_init_params.msi.mask = ((1 << cfg.msg_num) - 1);
+	ipa_init_params.first_er_idx = erdb_cfg.base;
+	ipa_init_params.first_ch_idx = HW_CHANNEL_BASE;
+	ipa_init_params.mmio_addr = ((uint32_t) mhi_ctx->mmio_base_pa_addr);
+	ipa_init_params.assert_bit40 = true;
+
+	mhi_log(MHI_MSG_ERROR,
+		"MMIO Addr 0x%x, MSI config: U:0x%x L: 0x%x D: 0x%x\n",
+		ipa_init_params.mmio_addr, cfg.upper, cfg.lower, cfg.data);
+	ipa_init_params.notify = mhi_hwc_cb;
+	ipa_init_params.priv = mhi;
+
+	rc = ipa_mhi_init(&ipa_init_params);
+	if (rc) {
+		pr_err("Error initializing IPA\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+static int mhi_hwc_start(struct mhi_dev *mhi)
+{
+	int rc = 0;
+	struct ipa_mhi_start_params ipa_start_params;
+
+	memset(&ipa_start_params, 0, sizeof(ipa_start_params));
+
+	ipa_start_params.channel_context_array_addr =
+				mhi->ch_ctx_shadow.host_pa;
+	ipa_start_params.event_context_array_addr =
+				mhi->ev_ctx_shadow.host_pa;
+
+	rc = ipa_mhi_start(&ipa_start_params);
+	if (rc)
+		pr_err("Error starting IPA (rc = 0x%X)\n", rc);
+
+	return rc;
+}
+
+static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
+	unsigned long data)
+{
+	int rc = 0;
+
+	switch (event) {
+	case IPA_MHI_EVENT_READY:
+		mhi_log(MHI_MSG_ERROR,
+			"HW Channel uC is ready event=0x%X\n", event);
+		rc = mhi_hwc_start(mhi_ctx);
+		if (rc) {
+			pr_err("hwc_init start failed with %d\n", rc);
+			return;
+		}
+
+		rc = mhi_dev_mmio_enable_chdb_interrupts(mhi_ctx);
+		if (rc) {
+			pr_err("Failed to enable channel db\n");
+			return;
+		}
+
+		rc = mhi_dev_mmio_enable_ctrl_interrupt(mhi_ctx);
+		if (rc) {
+			pr_err("Failed to enable control interrupt\n");
+			return;
+		}
+
+		rc = mhi_dev_mmio_enable_cmdb_interrupt(mhi_ctx);
+
+		if (rc) {
+			pr_err("Failed to enable command db\n");
+			return;
+		}
+		break;
+	case IPA_MHI_EVENT_DATA_AVAILABLE:
+		rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
+		if (rc) {
+			pr_err("Event HW_ACC_WAKEUP failed with %d\n", rc);
+			return;
+		}
+		break;
+	default:
+		pr_err("HW Channel uC unknown event 0x%X\n", event);
+		break;
+	}
+}
+
+static int mhi_hwc_chcmd(struct mhi_dev *mhi, uint chid,
+				enum mhi_dev_ring_element_type_id type)
+{
+	int rc = 0;
+	struct ipa_mhi_connect_params connect_params;
+
+	memset(&connect_params, 0, sizeof(connect_params));
+
+	switch (type) {
+	case MHI_DEV_RING_EL_STOP:
+		rc = ipa_mhi_disconnect_pipe(
+			mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]);
+		if (rc)
+			pr_err("Stopping HW Channel%d failed 0x%X\n",
+							chid, rc);
+		break;
+	case MHI_DEV_RING_EL_START:
+		connect_params.channel_id = chid;
+		connect_params.sys.skip_ep_cfg = true;
+		if ((chid % 2) == 0x0)
+			connect_params.sys.client = IPA_CLIENT_MHI_PROD;
+		else
+			connect_params.sys.client = IPA_CLIENT_MHI_CONS;
+
+		rc = ipa_mhi_connect_pipe(&connect_params,
+			&mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]);
+		if (rc)
+			pr_err("HW Channel%d start failed 0x%X\n",
+							chid, rc);
+		break;
+	case MHI_DEV_RING_EL_INVALID:
+	default:
+		pr_err("Invalid Ring Element type = 0x%X\n", type);
+		break;
+	}
+
+	return rc;
+}
+
+static void mhi_dev_core_ack_ctrl_interrupts(struct mhi_dev *dev,
+							uint32_t *int_value)
+{
+	int rc = 0;
+
+	rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, int_value);
+	if (rc) {
+		pr_err("Failed to read A7 status\n");
+		return;
+	}
+
+	mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7, *int_value);
+	if (rc) {
+		pr_err("Failed to clear A7 status\n");
+		return;
+	}
+}
+
+static void mhi_dev_fetch_ch_ctx(struct mhi_dev *mhi, uint32_t ch_id)
+{
+	struct mhi_addr addr;
+
+	addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+					sizeof(struct mhi_dev_ch_ctx) * ch_id;
+	addr.size  = sizeof(struct mhi_dev_ch_ctx);
+	/* Fetch the channel ctx (*dst, *src, size) */
+	mhi_dev_read_from_host(&addr, mhi->ch_ctx_cache_dma_handle +
+					(sizeof(struct mhi_dev_ch_ctx) * ch_id),
+				sizeof(struct mhi_dev_ch_ctx));
+}
+
+int mhi_dev_syserr(struct mhi_dev *mhi)
+{
+
+	if (!mhi) {
+		pr_err("%s: Invalid MHI ctx\n", __func__);
+		return -EINVAL;
+	}
+
+	mhi_dev_dump_mmio(mhi);
+	pr_err("MHI dev sys error\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_syserr);
+
+int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring,
+					union mhi_dev_ring_element_type *el)
+{
+	int rc = 0;
+	uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring;
+	struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx];
+	union mhi_dev_ring_ctx *ctx;
+	struct ep_pcie_msi_config cfg;
+	struct mhi_addr msi_addr;
+	uint32_t msi = 0;
+	struct mhi_addr host_rp_addr;
+
+	rc = ep_pcie_get_msi_config(mhi->phandle,
+			&cfg);
+		if (rc) {
+			pr_err("Error retrieving pcie msi logic\n");
+			return rc;
+		}
+
+	if (evnt_ring_idx > mhi->cfg.event_rings) {
+		pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx);
+		return -EINVAL;
+	}
+
+	if (mhi_ring_get_state(ring) == RING_STATE_UINT) {
+		ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring];
+		rc = mhi_ring_start(ring, ctx, mhi);
+		if (rc) {
+			mhi_log(MHI_MSG_ERROR,
+				"error starting event ring %d\n", evnt_ring);
+			return rc;
+		}
+	}
+
+	mutex_lock(&mhi->mhi_event_lock);
+	/* add the ring element */
+	mhi_dev_add_element(ring, el);
+
+	ring->ring_ctx_shadow->ev.rp =  (ring->rd_offset *
+				sizeof(union mhi_dev_ring_element_type)) +
+				ring->ring_ctx->generic.rbase;
+
+	mhi_log(MHI_MSG_ERROR, "ev.rp = %llx for %lld\n",
+				ring->ring_ctx_shadow->ev.rp, evnt_ring_idx);
+
+	host_rp_addr.host_pa = (mhi->ev_ctx_shadow.host_pa +
+			sizeof(struct mhi_dev_ev_ctx) *
+			evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp -
+			(uint32_t) ring->ring_ctx;
+	mhi_dev_write_to_host(&host_rp_addr, &ring->ring_ctx_shadow->ev.rp,
+						sizeof(uint64_t),
+						mhi);
+
+	/*
+	 * rp update in host memory should be flushed
+	 * before sending a MSI to the host
+	 */
+	wmb();
+
+	mutex_unlock(&mhi->mhi_event_lock);
+	mhi_log(MHI_MSG_ERROR, "event sent:\n");
+	mhi_log(MHI_MSG_ERROR, "evnt ptr : 0x%llx\n", el->evt_tr_comp.ptr);
+	mhi_log(MHI_MSG_ERROR, "evnt len : 0x%x\n", el->evt_tr_comp.len);
+	mhi_log(MHI_MSG_ERROR, "evnt code :0x%x\n", el->evt_tr_comp.code);
+	mhi_log(MHI_MSG_ERROR, "evnt type :0x%x\n", el->evt_tr_comp.type);
+	mhi_log(MHI_MSG_ERROR, "evnt chid :0x%x\n", el->evt_tr_comp.chid);
+
+	msi_addr.host_pa = (uint64_t)((uint64_t)cfg.upper << 32) |
+						(uint64_t)cfg.lower;
+	msi = cfg.data + mhi_ctx->mhi_ep_msi_num;
+	mhi_log(MHI_MSG_ERROR, "Sending MSI %d to 0x%llx as data = 0x%x\n",
+			mhi_ctx->mhi_ep_msi_num, msi_addr.host_pa, msi);
+	mhi_dev_write_to_host(&msi_addr, &msi, 4, mhi);
+
+	return rc;
+}
+
+static int mhi_dev_send_completion_event(struct mhi_dev_channel *ch,
+			uint32_t rd_ofst, uint32_t len,
+			enum mhi_dev_cmd_completion_code code)
+{
+	int rc = 0;
+	union mhi_dev_ring_element_type compl_event;
+	struct mhi_dev *mhi = ch->ring->mhi_dev;
+
+	compl_event.evt_tr_comp.chid = ch->ch_id;
+	compl_event.evt_tr_comp.type =
+				MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
+	compl_event.evt_tr_comp.len = len;
+	compl_event.evt_tr_comp.code = code;
+	compl_event.evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
+			rd_ofst * sizeof(struct mhi_dev_transfer_ring_element);
+
+	rc = mhi_dev_send_event(mhi,
+			mhi->ch_ctx_cache[ch->ch_id].err_indx, &compl_event);
+
+	return rc;
+}
+
+int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
+						enum mhi_dev_state state)
+{
+	union mhi_dev_ring_element_type event;
+	int rc = 0;
+
+	event.evt_state_change.type = MHI_DEV_RING_EL_MHI_STATE_CHG;
+	event.evt_state_change.mhistate = state;
+
+	rc = mhi_dev_send_event(mhi, 0, &event);
+	if (rc) {
+		pr_err("Sending state change event failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_send_state_change_event);
+
+int mhi_dev_send_ee_event(struct mhi_dev *mhi, enum mhi_dev_execenv exec_env)
+{
+	union mhi_dev_ring_element_type event;
+	int rc = 0;
+
+	event.evt_ee_state.type = MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY;
+	event.evt_ee_state.execenv = exec_env;
+
+	rc = mhi_dev_send_event(mhi, 0, &event);
+	if (rc) {
+		pr_err("Sending EE change event failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_send_ee_event);
+
+int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi)
+{
+	int rc = 0;
+
+	/*
+	 * Expected usage is when there is HW ACC traffic, IPA uC notifies
+	 * Q6 -> IPA A7 -> MHI core -> MHI SM
+	 */
+	rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
+	if (rc) {
+		pr_err("error sending SM event\n");
+		return rc;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_trigger_hw_acc_wakeup);
+
+/*
+ * mhi_dev_send_cmd_comp_event() - Send a command completion event with
+ * SUCCESS code to the host, pointing at the command ring element
+ * currently at the read offset.
+ *
+ * Fixes: the failure message claimed "channel start command" although
+ * this helper completes stop/reset commands too; the ring pointer was
+ * logged through a (uint32_t) cast with %d, truncating 64-bit values.
+ */
+static int mhi_dev_send_cmd_comp_event(struct mhi_dev *mhi)
+{
+	int rc = 0;
+	union mhi_dev_ring_element_type event;
+
+	/* send the command completion event to the host */
+	event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase
+			+ (mhi->ring[MHI_RING_CMD_ID].rd_offset *
+			(sizeof(union mhi_dev_ring_element_type)));
+	mhi_log(MHI_MSG_ERROR, "evt cmd comp ptr :0x%llx\n",
+			event.evt_cmd_comp.ptr);
+	event.evt_cmd_comp.type = MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
+	event.evt_cmd_comp.code = MHI_CMD_COMPL_CODE_SUCCESS;
+
+	rc = mhi_dev_send_event(mhi, 0, &event);
+	if (rc)
+		pr_err("Sending command completion event failed\n");
+
+	return rc;
+}
+
+/*
+ * mhi_dev_process_stop_cmd() - Handle a STOP command for a software
+ * channel.
+ *
+ * Defers the stop (returns 0 without changing state) if the outbound
+ * ring still holds unprocessed elements, or if an inbound write request
+ * is active. Otherwise marks the channel context STOP, mirrors the new
+ * state to the host and sends the command completion event.
+ */
+static int mhi_dev_process_stop_cmd(struct mhi_dev_ring *ring, uint32_t ch_id,
+							struct mhi_dev *mhi)
+{
+	int rc = 0;
+	struct mhi_addr host_addr;
+
+	/* Defer: outbound channel still has pending ring elements */
+	if (ring->rd_offset != ring->wr_offset &&
+		mhi->ch_ctx_cache[ch_id].ch_type ==
+				MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL) {
+		mhi_log(MHI_MSG_INFO, "Pending transaction to be processed\n");
+		return 0;
+	} else if (mhi->ch_ctx_cache[ch_id].ch_type ==
+			MHI_DEV_CH_TYPE_INBOUND_CHANNEL &&
+			mhi->ch[ch_id].wr_request_active) {
+		/* Defer: inbound channel has a write in flight */
+		return 0;
+	}
+
+	/* set the channel to stop */
+	mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP;
+
+	host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+				sizeof(struct mhi_dev_ch_ctx) * ch_id;
+	/* update the channel state in the host */
+	mhi_dev_write_to_host(&host_addr, &mhi->ch_ctx_cache[ch_id].ch_state,
+				sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+	/* send the completion event to the host */
+	rc = mhi_dev_send_cmd_comp_event(mhi);
+	if (rc)
+		pr_err("Error sending command completion event\n");
+
+	return rc;
+}
+
+/*
+ * mhi_dev_process_cmd_ring() - Command ring element callback.
+ *
+ * Dispatches host START/STOP/RESET channel commands. Hardware
+ * accelerated channels (id >= HW_CHANNEL_BASE) are forwarded to the
+ * IPA HW channel handler; software channels are set up/torn down
+ * locally and their context state is mirrored back to the host.
+ *
+ * Fix: ch_id was logged before it was read from the command element,
+ * so the trace always printed channel 0; also "recived" typo.
+ */
+static void mhi_dev_process_cmd_ring(struct mhi_dev *mhi,
+			union mhi_dev_ring_element_type *el, void *ctx)
+{
+	int rc = 0;
+	uint32_t ch_id = 0;
+	union mhi_dev_ring_element_type event;
+	struct mhi_addr host_addr;
+
+	ch_id = el->generic.chid;
+	mhi_log(MHI_MSG_ERROR, "for channel:%d and cmd:%d\n",
+		ch_id, el->generic.type);
+
+	switch (el->generic.type) {
+	case MHI_DEV_RING_EL_START:
+		mhi_log(MHI_MSG_ERROR, "received start cmd for channel %d\n",
+								ch_id);
+		if (ch_id >= (HW_CHANNEL_BASE)) {
+			rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
+			if (rc) {
+				pr_err("Error with HW channel cmd :%d\n", rc);
+				return;
+			}
+			goto send_start_completion_event;
+		}
+
+		/* fetch the channel context from host */
+		mhi_dev_fetch_ch_ctx(mhi, ch_id);
+
+		/* Initialize and configure the corresponding channel ring */
+		rc = mhi_ring_start(&mhi->ring[mhi->ch_ring_start + ch_id],
+			(union mhi_dev_ring_ctx *)&mhi->ch_ctx_cache[ch_id],
+			mhi);
+		if (rc) {
+			mhi_log(MHI_MSG_ERROR,
+				"start ring failed for ch %d\n", ch_id);
+			return;
+		}
+
+		mhi->ring[mhi->ch_ring_start + ch_id].state =
+						RING_STATE_PENDING;
+
+		/* set the channel to running */
+		mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
+		mhi->ch[ch_id].ch_id = ch_id;
+		mhi->ch[ch_id].ring = &mhi->ring[mhi->ch_ring_start + ch_id];
+		mhi->ch[ch_id].ch_type = mhi->ch_ctx_cache[ch_id].ch_type;
+
+		/* enable DB for event ring */
+		rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch_id);
+		if (rc) {
+			pr_err("Failed to enable channel db\n");
+			return;
+		}
+
+		/* mirror the RUNNING state into the host context array */
+		host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+					sizeof(struct mhi_dev_ch_ctx) * ch_id;
+		mhi_dev_write_to_host(&host_addr,
+					&mhi->ch_ctx_cache[ch_id].ch_state,
+					sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+send_start_completion_event:
+		rc = mhi_dev_send_cmd_comp_event(mhi);
+		if (rc)
+			pr_err("Error sending command completion event\n");
+
+		break;
+	case MHI_DEV_RING_EL_STOP:
+		if (ch_id >= HW_CHANNEL_BASE) {
+			rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
+			if (rc) {
+				mhi_log(MHI_MSG_ERROR,
+					"send channel stop cmd event failed\n");
+				return;
+			}
+
+			/* send the completion event to the host */
+			event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase +
+				(mhi->ring[MHI_RING_CMD_ID].rd_offset *
+				(sizeof(union mhi_dev_ring_element_type)));
+			event.evt_cmd_comp.type =
+					MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
+			if (rc == 0)
+				event.evt_cmd_comp.code =
+					MHI_CMD_COMPL_CODE_SUCCESS;
+			else
+				event.evt_cmd_comp.code =
+					MHI_CMD_COMPL_CODE_UNDEFINED;
+
+			rc = mhi_dev_send_event(mhi, 0, &event);
+			if (rc) {
+				pr_err("stop event send failed\n");
+				return;
+			}
+		} else {
+			/*
+			 * Check if there are any pending transactions for the
+			 * ring associated with the channel. If no, proceed to
+			 * write disable the channel state else send stop
+			 * channel command to check if one can suspend the
+			 * command.
+			 */
+			mhi->ch[ch_id].state = MHI_DEV_CH_PENDING_STOP;
+			rc = mhi_dev_process_stop_cmd(
+				&mhi->ring[mhi->ch_ring_start + ch_id],
+				ch_id, mhi);
+			if (rc) {
+				pr_err("stop event send failed\n");
+				return;
+			}
+		}
+		break;
+	case MHI_DEV_RING_EL_RESET:
+		/* hard stop and set the channel to stop */
+		mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP;
+		host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+				sizeof(struct mhi_dev_ch_ctx) * ch_id;
+
+		/* update the channel state in the host */
+		mhi_dev_write_to_host(&host_addr,
+				&mhi->ch_ctx_cache[ch_id].ch_state,
+				sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+		/* send the completion event to the host */
+		rc = mhi_dev_send_cmd_comp_event(mhi);
+		if (rc)
+			pr_err("Error sending command completion event\n");
+		break;
+	default:
+		pr_err("%s: Invalid command:%d\n", __func__, el->generic.type);
+		break;
+	}
+}
+
+/*
+ * mhi_dev_process_tre_ring() - Channel (TRE) ring element callback.
+ *
+ * Maps the ring back to its channel and notifies the active client
+ * that a TRE is available; the client fetches the data itself later.
+ */
+static void mhi_dev_process_tre_ring(struct mhi_dev *mhi,
+			union mhi_dev_ring_element_type *el, void *ctx)
+{
+	struct mhi_dev_ring *ring = (struct mhi_dev_ring *)ctx;
+	struct mhi_dev_channel *ch;
+	struct mhi_dev_client_cb_reason reason;
+
+	/* only channel rings (id >= ch_ring_start) carry TREs */
+	if (ring->id < mhi->ch_ring_start) {
+		mhi_log(MHI_MSG_ERROR,
+			"invalid channel ring id (%d), should be < %d\n",
+			ring->id, mhi->ch_ring_start);
+		return;
+	}
+
+	ch = &mhi->ch[ring->id - mhi->ch_ring_start];
+	reason.ch_id = ch->ch_id;
+	reason.reason = MHI_DEV_TRE_AVAILABLE;
+
+	/* Invoke a callback to let the client know its data is ready.
+	 * Copy this event to the clients context so that it can be
+	 * sent out once the client has fetch the data. Update the rp
+	 * before sending the data as part of the event completion
+	 */
+	if (ch->active_client && ch->active_client->event_trigger != NULL)
+		ch->active_client->event_trigger(&reason);
+}
+
+/*
+ * mhi_dev_process_ring_pending() - Work item that drains the command
+ * ring and every channel ring queued on process_ring_list, then
+ * re-enables the channel doorbell interrupts that were masked when
+ * the rings were queued.
+ *
+ * Runs under mhi_lock; rings are removed from the list before being
+ * processed. NOTE(review): uses mhi_ctx->mhi_lock while operating on
+ * the container_of() mhi — presumably the same object; confirm.
+ */
+static void mhi_dev_process_ring_pending(struct work_struct *work)
+{
+	struct mhi_dev *mhi = container_of(work,
+				struct mhi_dev, pending_work);
+	struct list_head *cp, *q;
+	struct mhi_dev_ring *ring;
+	struct mhi_dev_channel *ch;
+	int rc = 0;
+
+	mutex_lock(&mhi_ctx->mhi_lock);
+	/* always service the command ring first */
+	rc = mhi_dev_process_ring(&mhi->ring[mhi->cmd_ring_idx]);
+	if (rc) {
+		mhi_log(MHI_MSG_ERROR, "error processing command ring\n");
+		goto exit;
+	}
+
+	list_for_each_safe(cp, q, &mhi->process_ring_list) {
+		ring = list_entry(cp, struct mhi_dev_ring, list);
+		list_del(cp);
+		mhi_log(MHI_MSG_ERROR, "processing ring %d\n", ring->id);
+		rc = mhi_dev_process_ring(ring);
+		if (rc) {
+			mhi_log(MHI_MSG_ERROR,
+				"error processing ring %d\n", ring->id);
+			goto exit;
+		}
+
+		if (ring->id < mhi->ch_ring_start) {
+			mhi_log(MHI_MSG_ERROR,
+				"ring (%d) is not a channel ring\n", ring->id);
+			goto exit;
+		}
+
+		/* re-arm the doorbell masked in mhi_dev_queue_channel_db() */
+		ch = &mhi->ch[ring->id - mhi->ch_ring_start];
+		rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch->ch_id);
+		if (rc) {
+			mhi_log(MHI_MSG_ERROR,
+			"error enabling chdb interrupt for %d\n", ch->ch_id);
+			goto exit;
+		}
+	}
+
+exit:
+	mutex_unlock(&mhi_ctx->mhi_lock);
+}
+
+/*
+ * mhi_dev_get_event_notify() - Translate an MHI M-state into the
+ * corresponding state-machine event.
+ *
+ * Returns 0 and writes *event on success, -EINVAL for any state
+ * without an event mapping.
+ */
+static int mhi_dev_get_event_notify(enum mhi_dev_state state,
+						enum mhi_dev_event *event)
+{
+	switch (state) {
+	case MHI_DEV_M0_STATE:
+		*event = MHI_DEV_EVENT_M0_STATE;
+		return 0;
+	case MHI_DEV_M1_STATE:
+		*event = MHI_DEV_EVENT_M1_STATE;
+		return 0;
+	case MHI_DEV_M2_STATE:
+		*event = MHI_DEV_EVENT_M2_STATE;
+		return 0;
+	case MHI_DEV_M3_STATE:
+		*event = MHI_DEV_EVENT_M3_STATE;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * mhi_dev_queue_channel_db() - Queue every channel flagged in
+ * @chintr_value (a doorbell status bitmask starting at channel
+ * @ch_num) onto the process_ring_list and kick the pending work.
+ *
+ * Each queued channel's doorbell is masked until the work item has
+ * processed its ring.
+ */
+static void mhi_dev_queue_channel_db(struct mhi_dev *mhi,
+					uint32_t chintr_value, uint32_t ch_num)
+{
+	struct mhi_dev_ring *ring;
+	int rc = 0;
+
+	/* walk the bitmask, one channel per bit */
+	for (; chintr_value; ch_num++, chintr_value >>= 1) {
+		if (chintr_value & 1) {
+			ring = &mhi->ring[ch_num + mhi->ch_ring_start];
+			if (ring->state == RING_STATE_UINT) {
+				pr_err("Channel not opened for %d\n", ch_num);
+				break;
+			}
+			mhi_ring_set_state(ring, RING_STATE_PENDING);
+			list_add(&ring->list, &mhi->process_ring_list);
+			/* mask until the pending work drains this ring */
+			rc = mhi_dev_mmio_disable_chdb_a7(mhi, ch_num);
+			if (rc) {
+				pr_err("Error disabling chdb\n");
+				return;
+			}
+			queue_work(mhi->pending_ring_wq, &mhi->pending_work);
+		}
+	}
+}
+
+/*
+ * mhi_dev_check_channel_interrupt() - Read the channel doorbell status
+ * registers, queue every rung channel for processing and clear the
+ * serviced interrupt bits.
+ */
+static void mhi_dev_check_channel_interrupt(struct mhi_dev *mhi)
+{
+	int i, rc = 0;
+	uint32_t chintr_value = 0, ch_num = 0;
+
+	rc = mhi_dev_mmio_read_chdb_status_interrupts(mhi);
+	if (rc) {
+		pr_err("Read channel db\n");
+		return;
+	}
+
+	/* one status word per row of MHI_MASK_CH_EV_LEN channels */
+	for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+		ch_num = i * MHI_MASK_CH_EV_LEN;
+		chintr_value = mhi->chdb[i].status;
+		if (chintr_value) {
+			mhi_log(MHI_MSG_ERROR,
+				"processing id: %d, ch interrupt 0x%x\n",
+							i, chintr_value);
+			mhi_dev_queue_channel_db(mhi, chintr_value, ch_num);
+			/* ack only the bits we just queued */
+			rc = mhi_dev_mmio_write(mhi, MHI_CHDB_INT_CLEAR_A7_n(i),
+							mhi->chdb[i].status);
+			if (rc) {
+				pr_err("Error writing interrupt clear for A7\n");
+				return;
+			}
+		}
+	}
+}
+
+/*
+ * mhi_dev_scheduler() - Bottom half for the A7 MHI interrupt.
+ *
+ * Acks the control interrupts, forwards MHI state changes to the state
+ * machine, queues the command ring on a command doorbell, and scans
+ * the per-channel doorbells.
+ *
+ * NOTE(review): ep_pcie_mask_irq_event() at the end is skipped on the
+ * early error returns — confirm that is intentional.
+ */
+static void mhi_dev_scheduler(struct work_struct *work)
+{
+	struct mhi_dev *mhi = container_of(work,
+				struct mhi_dev, chdb_ctrl_work);
+	int rc = 0;
+	uint32_t int_value = 0;
+	struct mhi_dev_ring *ring;
+	enum mhi_dev_state state;
+	enum mhi_dev_event event = 0;
+
+	mutex_lock(&mhi_ctx->mhi_lock);
+	/* Check for interrupts */
+	mhi_dev_core_ack_ctrl_interrupts(mhi, &int_value);
+
+	/* MHI state change: translate to an SM event and deliver it */
+	if (int_value & MHI_MMIO_CTRL_INT_STATUS_A7_MSK) {
+		mhi_log(MHI_MSG_ERROR,
+			"processing ctrl interrupt with %d\n", int_value);
+		rc = mhi_dev_mmio_get_mhi_state(mhi, &state);
+		if (rc) {
+			pr_err("%s: get mhi state failed\n", __func__);
+			mutex_unlock(&mhi_ctx->mhi_lock);
+			return;
+		}
+
+		rc = mhi_dev_get_event_notify(state, &event);
+		if (rc) {
+			pr_err("unsupported state :%d\n", state);
+			mutex_unlock(&mhi_ctx->mhi_lock);
+			return;
+		}
+
+		rc = mhi_dev_notify_sm_event(event);
+		if (rc) {
+			pr_err("error sending SM event\n");
+			mutex_unlock(&mhi_ctx->mhi_lock);
+			return;
+		}
+	}
+
+	/* command ring doorbell: defer to the pending-ring work */
+	if (int_value & MHI_MMIO_CTRL_CRDB_STATUS_MSK) {
+		mhi_log(MHI_MSG_ERROR,
+			"processing cmd db interrupt with %d\n", int_value);
+		ring = &mhi->ring[MHI_RING_CMD_ID];
+		ring->state = RING_STATE_PENDING;
+		queue_work(mhi->pending_ring_wq, &mhi->pending_work);
+	}
+
+	/* get the specific channel interrupts */
+	mhi_dev_check_channel_interrupt(mhi);
+
+	mutex_unlock(&mhi_ctx->mhi_lock);
+	ep_pcie_mask_irq_event(mhi->phandle,
+				EP_PCIE_INT_EVT_MHI_A7, true);
+}
+
+/*
+ * mhi_dev_notify_a7_event() - Top-half hook for the A7 MHI interrupt;
+ * all real work is deferred to the mhi_dev_scheduler() work item.
+ */
+void mhi_dev_notify_a7_event(struct mhi_dev *mhi)
+{
+	schedule_work(&mhi->chdb_ctrl_work);
+	mhi_log(MHI_MSG_ERROR, "mhi irq triggered\n");
+}
+EXPORT_SYMBOL(mhi_dev_notify_a7_event);
+
+/*
+ * mhi_dev_config_outbound_iatu() - Configure the outbound iATU.
+ * Currently a stub: no iATU programming is done and 0 is returned.
+ */
+int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi)
+{
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_config_outbound_iatu);
+
+/*
+ * mhi_dev_cache_host_cfg() - Read the host-supplied control/data
+ * region layout, cache the command, event and channel context arrays
+ * in local coherent memory, then start the command ring.
+ *
+ * Fixes: use @mhi consistently (the original mixed mhi and the global
+ * mhi_ctx); zero and report allocation failure for the event/channel
+ * caches like the command cache; free earlier coherent allocations on
+ * failure instead of leaking them.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi)
+{
+	int rc = 0;
+	struct platform_device *pdev;
+	uint64_t addr1 = 0;
+
+	pdev = mhi->pdev;
+
+	/* Get host memory region configuration */
+	mhi_dev_get_mhi_addr(mhi);
+
+	mhi->ctrl_base.host_pa  = HOST_ADDR(mhi->host_addr.ctrl_base_lsb,
+						mhi->host_addr.ctrl_base_msb);
+	mhi->data_base.host_pa  = HOST_ADDR(mhi->host_addr.data_base_lsb,
+						mhi->host_addr.data_base_msb);
+
+	/* region sizes are limit - base */
+	addr1 = HOST_ADDR(mhi->host_addr.ctrl_limit_lsb,
+					mhi->host_addr.ctrl_limit_msb);
+	mhi->ctrl_base.size = addr1 - mhi->ctrl_base.host_pa;
+	addr1 = HOST_ADDR(mhi->host_addr.data_limit_lsb,
+					mhi->host_addr.data_limit_msb);
+	mhi->data_base.size = addr1 - mhi->data_base.host_pa;
+
+	/* Get Channel, event and command context base pointer */
+	rc = mhi_dev_mmio_get_chc_base(mhi);
+	if (rc) {
+		pr_err("Fetching channel context failed\n");
+		return rc;
+	}
+
+	rc = mhi_dev_mmio_get_erc_base(mhi);
+	if (rc) {
+		pr_err("Fetching event ring context failed\n");
+		return rc;
+	}
+
+	rc = mhi_dev_mmio_get_crc_base(mhi);
+	if (rc) {
+		pr_err("Fetching command ring context failed\n");
+		return rc;
+	}
+
+	rc = mhi_dev_update_ner(mhi);
+	if (rc) {
+		pr_err("Fetching NER failed\n");
+		return rc;
+	}
+
+	mhi->cmd_ctx_shadow.size = sizeof(struct mhi_dev_cmd_ctx);
+	mhi->ev_ctx_shadow.size = sizeof(struct mhi_dev_ev_ctx) *
+					mhi->cfg.event_rings;
+	mhi->ch_ctx_shadow.size = sizeof(struct mhi_dev_ch_ctx) *
+					mhi->cfg.channels;
+
+	mhi->cmd_ctx_cache = dma_alloc_coherent(&pdev->dev,
+				sizeof(struct mhi_dev_cmd_ctx),
+				&mhi->cmd_ctx_cache_dma_handle,
+				GFP_KERNEL);
+	if (!mhi->cmd_ctx_cache) {
+		pr_err("no memory while allocating cmd ctx\n");
+		return -ENOMEM;
+	}
+	memset(mhi->cmd_ctx_cache, 0, sizeof(struct mhi_dev_cmd_ctx));
+
+	mhi->ev_ctx_cache = dma_alloc_coherent(&pdev->dev,
+				sizeof(struct mhi_dev_ev_ctx) *
+				mhi->cfg.event_rings,
+				&mhi->ev_ctx_cache_dma_handle,
+				GFP_KERNEL);
+	if (!mhi->ev_ctx_cache) {
+		pr_err("no memory while allocating event ctx\n");
+		rc = -ENOMEM;
+		goto free_cmd_ctx;
+	}
+	memset(mhi->ev_ctx_cache, 0, mhi->ev_ctx_shadow.size);
+
+	mhi->ch_ctx_cache = dma_alloc_coherent(&pdev->dev,
+				sizeof(struct mhi_dev_ch_ctx) *
+				mhi->cfg.channels,
+				&mhi->ch_ctx_cache_dma_handle,
+				GFP_KERNEL);
+	if (!mhi->ch_ctx_cache) {
+		pr_err("no memory while allocating channel ctx\n");
+		rc = -ENOMEM;
+		goto free_ev_ctx;
+	}
+	memset(mhi->ch_ctx_cache, 0, mhi->ch_ctx_shadow.size);
+
+	/* Cache the command and event context */
+	mhi_dev_read_from_host(&mhi->cmd_ctx_shadow,
+				mhi->cmd_ctx_cache_dma_handle,
+				mhi->cmd_ctx_shadow.size);
+
+	mhi_dev_read_from_host(&mhi->ev_ctx_shadow,
+				mhi->ev_ctx_cache_dma_handle,
+				mhi->ev_ctx_shadow.size);
+
+	mhi_log(MHI_MSG_ERROR,
+			"cmd ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
+					mhi->cmd_ctx_cache->rbase,
+					mhi->cmd_ctx_cache->rp,
+					mhi->cmd_ctx_cache->wp);
+	mhi_log(MHI_MSG_ERROR,
+			"ev ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
+					mhi->ev_ctx_cache->rbase,
+					mhi->ev_ctx_cache->rp,
+					mhi->ev_ctx_cache->wp);
+
+	rc = mhi_ring_start(&mhi->ring[0],
+			(union mhi_dev_ring_ctx *)mhi->cmd_ctx_cache, mhi);
+	if (rc) {
+		pr_err("error in ring start\n");
+		goto free_ch_ctx;
+	}
+
+	return 0;
+
+free_ch_ctx:
+	dma_free_coherent(&pdev->dev, mhi->ch_ctx_shadow.size,
+			mhi->ch_ctx_cache, mhi->ch_ctx_cache_dma_handle);
+free_ev_ctx:
+	dma_free_coherent(&pdev->dev, mhi->ev_ctx_shadow.size,
+			mhi->ev_ctx_cache, mhi->ev_ctx_cache_dma_handle);
+free_cmd_ctx:
+	dma_free_coherent(&pdev->dev, sizeof(struct mhi_dev_cmd_ctx),
+			mhi->cmd_ctx_cache, mhi->cmd_ctx_cache_dma_handle);
+	return rc;
+}
+
+/*
+ * mhi_dev_suspend() - Move every RUNNING channel to SUSPENDED, mirror
+ * the new state to the host, then disable IPA DMA.
+ *
+ * Sets is_suspended first so writers (mhi_dev_write_channel) observe
+ * the transition; serialized against writers via mhi_write_test.
+ */
+int mhi_dev_suspend(struct mhi_dev *mhi)
+{
+	int ch_id = 0, rc = 0;
+	struct mhi_addr host_addr;
+
+	mutex_lock(&mhi_ctx->mhi_write_test);
+	atomic_set(&mhi->is_suspended, 1);
+
+	for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
+		if (mhi->ch_ctx_cache[ch_id].ch_state !=
+						MHI_DEV_CH_STATE_RUNNING)
+			continue;
+
+		mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_SUSPENDED;
+
+		host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+				sizeof(struct mhi_dev_ch_ctx) * ch_id;
+
+		/* update the channel state in the host */
+		mhi_dev_write_to_host(&host_addr,
+			&mhi->ch_ctx_cache[ch_id].ch_state,
+			sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+	}
+
+	rc = ipa_dma_disable();
+	if (rc)
+		pr_err("Disable IPA failed\n");
+
+	mutex_unlock(&mhi_ctx->mhi_write_test);
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_suspend);
+
+/*
+ * mhi_dev_resume() - Re-enable IPA DMA and move every SUSPENDED
+ * channel back to RUNNING, mirroring the state to the host.
+ *
+ * NOTE(review): unlike mhi_dev_suspend() this does not take
+ * mhi_write_test — confirm callers serialize against writers.
+ */
+int mhi_dev_resume(struct mhi_dev *mhi)
+{
+	int ch_id = 0, rc = 0;
+	struct mhi_addr host_addr;
+
+	rc = ipa_dma_enable();
+	if (rc) {
+		pr_err("IPA enable failed\n");
+		return rc;
+	}
+
+	for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
+		if (mhi->ch_ctx_cache[ch_id].ch_state !=
+				MHI_DEV_CH_STATE_SUSPENDED)
+			continue;
+
+		mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
+		host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+				sizeof(struct mhi_dev_ch_ctx) * ch_id;
+
+		/* update the channel state in the host */
+		mhi_dev_write_to_host(&host_addr,
+				&mhi->ch_ctx_cache[ch_id].ch_state,
+				sizeof(enum mhi_dev_ch_ctx_state), mhi);
+	}
+
+	/* clear last so writers keep waiting until the channels are up */
+	atomic_set(&mhi->is_suspended, 0);
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_resume);
+
+/*
+ * mhi_dev_ring_init() - Lay out the ring array (index 0 = command
+ * ring, then the event rings, then the channel rings) and initialize
+ * each ring with its type, id and processing callback.
+ *
+ * Always returns 0.
+ */
+static int mhi_dev_ring_init(struct mhi_dev *dev)
+{
+	int i;
+
+	mhi_log(MHI_MSG_INFO, "initializing all rings");
+
+	dev->cmd_ring_idx = 0;
+	dev->ev_ring_start = 1;
+	dev->ch_ring_start = dev->ev_ring_start + dev->cfg.event_rings;
+
+	/* Command ring gets the command processing callback */
+	mhi_ring_init(&dev->ring[dev->cmd_ring_idx],
+				RING_TYPE_CMD, dev->cmd_ring_idx);
+	mhi_ring_set_cb(&dev->ring[dev->cmd_ring_idx],
+				mhi_dev_process_cmd_ring);
+
+	/* Event rings need no callback */
+	for (i = dev->ev_ring_start;
+			i < dev->ev_ring_start + dev->cfg.event_rings; i++)
+		mhi_ring_init(&dev->ring[i], RING_TYPE_ER, i);
+
+	/* Channel rings get the TRE processing callback */
+	for (i = dev->ch_ring_start;
+			i < dev->ch_ring_start + dev->cfg.channels; i++) {
+		mhi_ring_init(&dev->ring[i], RING_TYPE_CH, i);
+		mhi_ring_set_cb(&dev->ring[i], mhi_dev_process_tre_ring);
+	}
+
+	return 0;
+}
+
+/*
+ * mhi_dev_open_channel() - Associate a client with channel @chan_id.
+ * @chan_id: channel index into mhi_ctx->ch
+ * @handle_client: out; newly allocated client handle (owned by the
+ *		caller, freed by mhi_dev_close_channel())
+ * @mhi_dev_client_cb_reason: callback invoked when data/events arrive
+ *
+ * Returns 0 on success, -EINVAL if the channel is already taken,
+ * -ENOMEM on allocation failure.
+ */
+int mhi_dev_open_channel(uint32_t chan_id,
+			struct mhi_dev_client **handle_client,
+			void (*mhi_dev_client_cb_reason)
+			(struct mhi_dev_client_cb_reason *cb))
+{
+	int rc = 0;
+	struct mhi_dev_channel *ch;
+	struct platform_device *pdev;
+
+	pdev = mhi_ctx->pdev;
+	ch = &mhi_ctx->ch[chan_id];
+
+	mutex_lock(&ch->ch_lock);
+
+	/* only one client per channel */
+	if (ch->active_client) {
+		mhi_log(MHI_MSG_ERROR,
+			"Channel (%d) already opened by client\n", chan_id);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* Initialize the channel, client and state information */
+	*handle_client = kzalloc(sizeof(struct mhi_dev_client), GFP_KERNEL);
+	if (!(*handle_client)) {
+		dev_err(&pdev->dev, "can not allocate mhi_dev memory\n");
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	ch->active_client = (*handle_client);
+	(*handle_client)->channel = ch;
+	(*handle_client)->event_trigger = mhi_dev_client_cb_reason;
+
+	/* advance the channel state machine based on its history */
+	if (ch->state == MHI_DEV_CH_UNINT) {
+		ch->ring = &mhi_ctx->ring[chan_id + mhi_ctx->ch_ring_start];
+		ch->state = MHI_DEV_CH_PENDING_START;
+	} else if (ch->state == MHI_DEV_CH_CLOSED)
+		ch->state = MHI_DEV_CH_STARTED;
+	else if (ch->state == MHI_DEV_CH_STOPPED)
+		ch->state = MHI_DEV_CH_PENDING_START;
+
+exit:
+	mutex_unlock(&ch->ch_lock);
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_open_channel);
+
+/*
+ * mhi_dev_channel_isempty() - Report whether the ring backing
+ * @handle's channel has nothing pending (read offset has caught up
+ * with the write offset). Returns 1 when empty, 0 otherwise.
+ */
+int mhi_dev_channel_isempty(struct mhi_dev_client *handle)
+{
+	struct mhi_dev_ring *ring = handle->channel->ring;
+
+	return ring->rd_offset == ring->wr_offset;
+}
+EXPORT_SYMBOL(mhi_dev_channel_isempty);
+
+/*
+ * mhi_dev_close_channel() - Close the channel behind @handle.
+ *
+ * Fails with -EAGAIN if the outbound ring still has unread data or a
+ * TRE is in flight; on success the client handle is freed.
+ *
+ * Fix: the error paths unlocked ch_lock explicitly and then jumped to
+ * the exit label, which unlocked the same mutex a second time.
+ */
+int mhi_dev_close_channel(struct mhi_dev_client *handle)
+{
+	struct mhi_dev_channel *ch;
+	int rc = 0;
+
+	ch = handle->channel;
+
+	mutex_lock(&ch->ch_lock);
+	if (ch->state != MHI_DEV_CH_PENDING_START) {
+		if (ch->ch_type == MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL &&
+					!mhi_dev_channel_isempty(handle)) {
+			mhi_log(MHI_MSG_ERROR,
+				"Trying to close an active channel (%d)\n",
+				ch->ch_id);
+			rc = -EAGAIN;
+			goto exit;
+		} else if (ch->tre_loc) {
+			mhi_log(MHI_MSG_ERROR,
+				"Trying to close channel (%d) when a TRE is active",
+				ch->ch_id);
+			rc = -EAGAIN;
+			goto exit;
+		}
+	}
+
+	ch->state = MHI_DEV_CH_CLOSED;
+	ch->active_client = NULL;
+	kfree(handle);
+exit:
+	mutex_unlock(&ch->ch_lock);
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_close_channel);
+
+/*
+ * mhi_dev_check_tre_bytes_left() - After consuming from a TRE, detect
+ * TRE/TD boundaries, emit EOB/EOT completion events as requested by
+ * the TRE flags, and advance the ring read offset.
+ *
+ * Returns 1 when a TD boundary was reached (non-chained TRE fully
+ * consumed), 0 otherwise; *chain reflects whether we remain inside a
+ * chained TD.
+ *
+ * Fix: misleading indentation — *chain/td_done updates looked guarded
+ * by the ieob/ieot checks but are unconditional; braces now make the
+ * actual (unchanged) behavior explicit.
+ */
+static int mhi_dev_check_tre_bytes_left(struct mhi_dev_channel *ch,
+		struct mhi_dev_ring *ring, union mhi_dev_ring_element_type *el,
+		uint32_t *chain)
+{
+	uint32_t td_done = 0;
+
+	/*
+	 * A full TRE worth of data was consumed.
+	 * Check if we are at a TD boundary.
+	 */
+	if (ch->tre_bytes_left == 0) {
+		if (el->tre.chain) {
+			/* still inside a chained TD; EOB only if requested */
+			if (el->tre.ieob)
+				mhi_dev_send_completion_event(ch,
+					ring->rd_offset, el->tre.len,
+					MHI_CMD_COMPL_CODE_EOB);
+			*chain = 1;
+		} else {
+			/* TD boundary; EOT only if requested */
+			if (el->tre.ieot)
+				mhi_dev_send_completion_event(
+					ch, ring->rd_offset, el->tre.len,
+					MHI_CMD_COMPL_CODE_EOT);
+			td_done = 1;
+			*chain = 0;
+		}
+		mhi_dev_ring_inc_index(ring, ring->rd_offset);
+		ch->tre_bytes_left = 0;
+		ch->tre_loc = 0;
+	}
+
+	return td_done;
+}
+
+/*
+ * mhi_dev_read_channel() - Copy data from host TREs of an outbound
+ * channel into @buf (up to @buf_size bytes), consuming TREs until the
+ * buffer is full or a TD boundary is hit. *chain is set when the data
+ * spans a chained TD.
+ *
+ * Returns the number of bytes read, 0 when nothing is pending, or -1
+ * on error (stopped channel / failed stop processing).
+ *
+ * Fix: the destination address was computed in a uint32_t, truncating
+ * the kernel virtual address of @buf on 64-bit targets; use uintptr_t.
+ */
+int mhi_dev_read_channel(struct mhi_dev_client *handle_client,
+				void *buf, uint32_t buf_size, uint32_t *chain)
+{
+	struct mhi_dev_channel *ch;
+	struct mhi_dev_ring *ring;
+	union mhi_dev_ring_element_type *el;
+	uint32_t ch_id;
+	size_t bytes_to_read, addr_offset;
+	uint64_t read_from_loc;
+	ssize_t bytes_read = 0;
+	uintptr_t write_to_loc = 0;
+	size_t usr_buf_remaining = buf_size;
+	int td_done = 0, rc = 0;
+
+	if (!handle_client) {
+		mhi_log(MHI_MSG_ERROR, "invalid client handle\n");
+		return -ENXIO;
+	}
+
+	ch = handle_client->channel;
+	ring = ch->ring;
+	ch_id = ch->ch_id;
+	*chain = 0;
+
+	mutex_lock(&ch->ch_lock);
+
+	do {
+		el = &ring->ring_cache[ring->rd_offset];
+		if (ch->tre_loc) {
+			/* resume a partially consumed TRE */
+			bytes_to_read = min(usr_buf_remaining,
+						ch->tre_bytes_left);
+			*chain = 1;
+			mhi_log(MHI_MSG_ERROR,
+				"remaining buffered data size %d\n",
+				(int) ch->tre_bytes_left);
+		} else {
+			if (ring->rd_offset == ring->wr_offset) {
+				mhi_log(MHI_MSG_ERROR,
+					"nothing to read, returning\n");
+				bytes_read = 0;
+				goto exit;
+			}
+
+			if (ch->state == MHI_DEV_CH_STOPPED) {
+				mhi_log(MHI_MSG_ERROR,
+					"channel (%d) already stopped\n",
+					ch_id);
+				bytes_read = -1;
+				goto exit;
+			}
+
+			/* start consuming a fresh TRE */
+			ch->tre_loc = el->tre.data_buf_ptr;
+			ch->tre_size = el->tre.len;
+			ch->tre_bytes_left = ch->tre_size;
+
+			mhi_log(MHI_MSG_ERROR,
+			"user_buf_remaining %d, ch->tre_size %d\n",
+			usr_buf_remaining, ch->tre_size);
+			bytes_to_read = min(usr_buf_remaining, ch->tre_size);
+		}
+
+		addr_offset = ch->tre_size - ch->tre_bytes_left;
+		read_from_loc = ch->tre_loc + addr_offset;
+		write_to_loc = (uintptr_t) buf + (buf_size - usr_buf_remaining);
+
+		mhi_log(MHI_MSG_ERROR, "reading %d bytes from chan %d\n",
+				bytes_to_read, ch_id);
+
+		mhi_transfer_host_to_device((void *) write_to_loc,
+			read_from_loc, bytes_to_read, mhi_ctx);
+
+		bytes_read += bytes_to_read;
+		ch->tre_bytes_left -= bytes_to_read;
+		usr_buf_remaining -= bytes_to_read;
+		td_done = mhi_dev_check_tre_bytes_left(ch, ring, el, chain);
+	} while (usr_buf_remaining  && !td_done);
+
+	/* a deferred stop can complete once the TD is fully drained */
+	if (td_done && ch->state == MHI_DEV_CH_PENDING_STOP) {
+		ch->state = MHI_DEV_CH_STOPPED;
+		rc = mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx);
+		if (rc) {
+			mhi_log(MHI_MSG_ERROR,
+				"Error while stopping channel (%d)\n", ch_id);
+			bytes_read = -1;
+		}
+	}
+exit:
+	mutex_unlock(&ch->ch_lock);
+	return bytes_read;
+}
+EXPORT_SYMBOL(mhi_dev_read_channel);
+
+/*
+ * skip_to_next_td() - Advance the channel's ring read offset past the
+ * remainder of the current transfer descriptor (up to and including
+ * the first non-chained TRE).
+ *
+ * If the ring runs dry before the TD boundary, ch->skip_td stays set
+ * so the skip resumes on the next write.
+ */
+static void skip_to_next_td(struct mhi_dev_channel *ch)
+{
+	struct mhi_dev_ring *ring = ch->ring;
+	union mhi_dev_ring_element_type *el;
+	uint32_t td_boundary_reached = 0;
+
+	ch->skip_td = 1;
+	el = &ring->ring_cache[ring->rd_offset];
+	while (ring->rd_offset != ring->wr_offset) {
+		if (td_boundary_reached) {
+			/* boundary consumed on the previous iteration */
+			ch->skip_td = 0;
+			break;
+		}
+		if (!el->tre.chain)
+			td_boundary_reached = 1;
+		mhi_dev_ring_inc_index(ring, ring->rd_offset);
+		el = &ring->ring_cache[ring->rd_offset];
+	}
+}
+
+/*
+ * mhi_dev_write_channel() - Copy @buf_size bytes from @buf into the
+ * host TREs of an inbound channel, sending a completion event
+ * (EOB/EOT/OVERFLOW) per TRE written.
+ *
+ * Wakes the state machine if the core is suspended and waits (bounded)
+ * for the suspend to clear before writing.
+ *
+ * Returns the number of bytes written, or -1 on error.
+ *
+ * Fixes: the source address was computed in a uint32_t, truncating
+ * the kernel virtual address of @buf on 64-bit targets (now
+ * uintptr_t); bytes_written was a size_t assigned -1 on error paths
+ * (now ssize_t).
+ */
+int mhi_dev_write_channel(struct mhi_dev_client *handle_client,
+						void *buf, size_t buf_size)
+{
+	struct mhi_dev_channel *ch;
+	struct mhi_dev_ring *ring;
+	union mhi_dev_ring_element_type *el;
+	enum mhi_dev_cmd_completion_code code = MHI_CMD_COMPL_CODE_INVALID;
+	int rc = 0;
+	uint64_t ch_id, skip_tres = 0, write_to_loc;
+	uintptr_t read_from_loc;
+	size_t usr_buf_remaining = buf_size;
+	size_t usr_buf_offset = 0;
+	size_t bytes_to_write = 0;
+	ssize_t bytes_written = 0;
+	uint32_t tre_len = 0, suspend_wait_timeout = 0;
+
+	if (!handle_client) {
+		pr_err("%s: invalid client handle\n", __func__);
+		return -ENXIO;
+	}
+
+	if (!buf) {
+		pr_err("%s: invalid buffer to write data\n", __func__);
+		return -ENXIO;
+	}
+
+	mutex_lock(&mhi_ctx->mhi_write_test);
+
+	if (atomic_read(&mhi_ctx->is_suspended)) {
+		/*
+		 * Expected usage is when there is a write
+		 * to the MHI core -> notify SM.
+		 */
+		rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_CORE_WAKEUP);
+		if (rc) {
+			pr_err("error sending core wakeup event\n");
+			mutex_unlock(&mhi_ctx->mhi_write_test);
+			return rc;
+		}
+	}
+
+	atomic_inc(&mhi_ctx->write_active);
+	while (atomic_read(&mhi_ctx->is_suspended) &&
+			suspend_wait_timeout < MHI_SUSPEND_WAIT_TIMEOUT) {
+		/* wait for the suspend to finish */
+		usleep_range(MHI_SUSPEND_WAIT_MIN, MHI_SUSPEND_WAIT_MAX);
+		suspend_wait_timeout++;
+	}
+
+	ch = handle_client->channel;
+	ch->wr_request_active = true;
+
+	ring = ch->ring;
+	ch_id = ch->ch_id;
+
+	mutex_lock(&ch->ch_lock);
+
+	if (ch->state == MHI_DEV_CH_STOPPED) {
+		mhi_log(MHI_MSG_ERROR,
+			"channel (%lld) already stopped\n", ch_id);
+		bytes_written = -1;
+		goto exit;
+	}
+
+	if (ch->state == MHI_DEV_CH_PENDING_STOP) {
+		if (mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx) < 0)
+			bytes_written = -1;
+		goto exit;
+	}
+
+	/* finish skipping a TD left over from a previous overflow */
+	if (ch->skip_td)
+		skip_to_next_td(ch);
+
+	do {
+		if (ring->rd_offset == ring->wr_offset) {
+			mhi_log(MHI_MSG_INFO, "No TREs available\n");
+			break;
+		}
+
+		el = &ring->ring_cache[ring->rd_offset];
+		tre_len = el->tre.len;
+
+		bytes_to_write = min(usr_buf_remaining, tre_len);
+		usr_buf_offset = buf_size - bytes_to_write;
+		read_from_loc = (uintptr_t) buf + usr_buf_offset;
+		write_to_loc = el->tre.data_buf_ptr;
+		mhi_transfer_device_to_host(write_to_loc,
+						(void *) read_from_loc,
+						bytes_to_write, mhi_ctx);
+		bytes_written += bytes_to_write;
+		usr_buf_remaining -= bytes_to_write;
+
+		/* pick the completion code the host expects for this TRE */
+		if (usr_buf_remaining) {
+			if (!el->tre.chain)
+				code = MHI_CMD_COMPL_CODE_OVERFLOW;
+			else if (el->tre.ieob)
+				code = MHI_CMD_COMPL_CODE_EOB;
+		} else {
+			if (el->tre.chain)
+				skip_tres = 1;
+			code = MHI_CMD_COMPL_CODE_EOT;
+		}
+
+		if (mhi_dev_send_completion_event(ch,
+				ring->rd_offset, bytes_to_write, code) < 0) {
+			mhi_log(MHI_MSG_ERROR,
+				"error sending completion event ch_id:%lld\n",
+				ch_id);
+		}
+
+		if (ch->state == MHI_DEV_CH_PENDING_STOP)
+			break;
+
+		mhi_dev_ring_inc_index(ring, ring->rd_offset);
+	} while (!skip_tres && usr_buf_remaining);
+
+	if (skip_tres)
+		skip_to_next_td(ch);
+
+	if (ch->state == MHI_DEV_CH_PENDING_STOP) {
+		rc = mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx);
+		if (rc) {
+			mhi_log(MHI_MSG_ERROR,
+				"channel (%lld) stop failed\n", ch_id);
+		}
+	}
+exit:
+	mutex_unlock(&ch->ch_lock);
+	atomic_dec(&mhi_ctx->write_active);
+	mutex_unlock(&mhi_ctx->mhi_write_test);
+	return bytes_written;
+}
+EXPORT_SYMBOL(mhi_dev_write_channel);
+
+/*
+ * mhi_dev_enable() - Work item (queued from the IPA-ready callback)
+ * that brings the MHI device up: IPA DMA, rings, state machine, env,
+ * ready bit, then waits for the host to move the device to M0 before
+ * caching the host configuration and initializing HW channels.
+ *
+ * NOTE(review): ipa_dma_init()/ipa_dma_enable() are not unwound on
+ * later failures — confirm whether teardown is required here.
+ */
+static void mhi_dev_enable(struct work_struct *work)
+{
+	int rc = 0;
+	struct ep_pcie_msi_config msi_cfg;
+	struct mhi_dev *mhi = container_of(work,
+				struct mhi_dev, ring_init_cb_work);
+
+	enum mhi_dev_state state;
+	uint32_t max_cnt = 0;
+
+
+	rc = ipa_dma_init();
+	if (rc) {
+		pr_err("ipa dma init failed\n");
+		return;
+	}
+
+	rc = ipa_dma_enable();
+	if (rc) {
+		pr_err("ipa enable failed\n");
+		return;
+	}
+
+	rc = mhi_dev_ring_init(mhi);
+	if (rc) {
+		pr_err("MHI dev ring init failed\n");
+		return;
+	}
+
+	/* Invoke MHI SM when device is in RESET state */
+	mhi_dev_sm_init(mhi);
+
+	/* set the env before setting the ready bit */
+	rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE);
+	if (rc) {
+		pr_err("%s: env setting failed\n", __func__);
+		return;
+	}
+	mhi_uci_init();
+
+	/* All set...let's notify the host */
+	mhi_dev_sm_set_ready();
+
+	/* failure to read MSI config is non-fatal; warn and continue */
+	rc = ep_pcie_get_msi_config(mhi->phandle, &msi_cfg);
+	if (rc)
+		pr_warn("MHI: error geting msi configs\n");
+
+	rc = mhi_dev_mmio_get_mhi_state(mhi, &state);
+	if (rc) {
+		pr_err("%s: get mhi state failed\n", __func__);
+		return;
+	}
+
+	/* Poll (bounded by MHI_DEV_M0_MAX_CNT) for the host to set M0 */
+	while (state != MHI_DEV_M0_STATE && max_cnt < MHI_DEV_M0_MAX_CNT) {
+		/* Wait for Host to set the M0 state */
+		usleep_range(MHI_M0_WAIT_MIN_USLEEP, MHI_M0_WAIT_MAX_USLEEP);
+		rc = mhi_dev_mmio_get_mhi_state(mhi, &state);
+		if (rc) {
+			pr_err("%s: get mhi state failed\n", __func__);
+			return;
+		}
+		max_cnt++;
+	}
+
+	mhi_log(MHI_MSG_INFO, "state:%d\n", state);
+
+	if (state == MHI_DEV_M0_STATE) {
+		rc = mhi_dev_cache_host_cfg(mhi);
+		if (rc) {
+			pr_err("Failed to cache the host config\n");
+			return;
+		}
+
+		rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE);
+		if (rc) {
+			pr_err("%s: env setting failed\n", __func__);
+			return;
+		}
+	} else {
+		pr_err("MHI device failed to enter M0\n");
+		return;
+	}
+
+	rc = mhi_hwc_init(mhi_ctx);
+	if (rc) {
+		pr_err("error during hwc_init\n");
+		return;
+	}
+}
+
+/*
+ * mhi_ring_init_cb() - IPA ready callback; defers MHI bring-up to the
+ * ring_init workqueue (mhi_dev_enable()).
+ */
+static void mhi_ring_init_cb(void *data)
+{
+	struct mhi_dev *mhi = data;
+
+	if (mhi)
+		queue_work(mhi->ring_init_wq, &mhi->ring_init_cb_work);
+	else
+		pr_err("Invalid MHI ctx\n");
+}
+
+/*
+ * get_device_tree_data() - Allocate the mhi_dev context, map the MMIO
+ * region, fetch the IPA uC mailbox doorbell addresses and read the
+ * required DT properties. Publishes the context in mhi_ctx.
+ *
+ * Fix: the ioremapped MMIO region was leaked on every error path
+ * after the mapping succeeded; unmap it on failure.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int get_device_tree_data(struct platform_device *pdev)
+{
+	struct mhi_dev *mhi;
+	int rc = 0;
+	struct resource *res_mem = NULL;
+
+	mhi = devm_kzalloc(&pdev->dev,
+			sizeof(struct mhi_dev), GFP_KERNEL);
+	if (!mhi)
+		return -ENOMEM;
+
+	mhi->pdev = pdev;
+	mhi->dev = &pdev->dev;
+	res_mem = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "mhi_mmio_base");
+	if (!res_mem) {
+		pr_err("Request MHI MMIO physical memory region failed\n");
+		return -EINVAL;
+	}
+
+	mhi->mmio_base_pa_addr = res_mem->start;
+	mhi->mmio_base_addr = ioremap_nocache(res_mem->start, MHI_1K_SIZE);
+	if (!mhi->mmio_base_addr) {
+		pr_err("Failed to IO map MMIO registers.\n");
+		return -EINVAL;
+	}
+
+	res_mem = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "ipa_uc_mbox_crdb");
+	if (!res_mem) {
+		rc = -EINVAL;
+		pr_err("Request IPA_UC_MBOX CRDB physical region failed\n");
+		goto err_unmap;
+	}
+
+	mhi->ipa_uc_mbox_crdb = res_mem->start;
+
+	res_mem = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "ipa_uc_mbox_erdb");
+	if (!res_mem) {
+		rc = -EINVAL;
+		pr_err("Request IPA_UC_MBOX ERDB physical region failed\n");
+		goto err_unmap;
+	}
+
+	mhi->ipa_uc_mbox_erdb = res_mem->start;
+
+	mhi_ctx = mhi;
+
+	rc = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,mhi-ifc-id",
+				&mhi_ctx->ifc_id);
+	if (rc) {
+		pr_err("qcom,mhi-ifc-id does not exist.\n");
+		goto err_unmap;
+	}
+
+	rc = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,mhi-ep-msi",
+				&mhi_ctx->mhi_ep_msi_num);
+	if (rc) {
+		pr_err("qcom,mhi-ep-msi does not exist.\n");
+		goto err_unmap;
+	}
+
+	rc = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,mhi-version",
+				&mhi_ctx->mhi_version);
+	if (rc) {
+		pr_err("qcom,mhi-version does not exist.\n");
+		goto err_unmap;
+	}
+
+	return 0;
+
+err_unmap:
+	iounmap(mhi->mmio_base_addr);
+	return rc;
+}
+
+/*
+ * mhi_init() - Initialize MMIO, allocate the ring and channel arrays
+ * (sized from the MMIO-derived cfg), the MMIO backup buffer and the
+ * IPC logging context. All allocations are devm-managed.
+ *
+ * Returns 0 on success or a negative error code; a failed IPC log
+ * context is non-fatal.
+ */
+static int mhi_init(struct mhi_dev *mhi)
+{
+	int rc = 0, i = 0;
+	struct platform_device *pdev = mhi->pdev;
+
+
+	rc = mhi_dev_mmio_init(mhi);
+	if (rc) {
+		pr_err("Failed to update the MMIO init\n");
+		return rc;
+	}
+
+
+	/* one ring per channel and event ring, plus the command ring */
+	mhi->ring = devm_kzalloc(&pdev->dev,
+			(sizeof(struct mhi_dev_ring) *
+			(mhi->cfg.channels + mhi->cfg.event_rings + 1)),
+			GFP_KERNEL);
+	if (!mhi->ring)
+		return -ENOMEM;
+
+	mhi->ch = devm_kzalloc(&pdev->dev,
+			(sizeof(struct mhi_dev_channel) *
+			(mhi->cfg.channels)), GFP_KERNEL);
+	if (!mhi->ch)
+		return -ENOMEM;
+
+	for (i = 0; i < mhi->cfg.channels; i++)
+		mutex_init(&mhi->ch[i].ch_lock);
+
+	mhi->mmio_backup = devm_kzalloc(&pdev->dev, MHI_DEV_MMIO_RANGE,
+								GFP_KERNEL);
+	if (!mhi->mmio_backup)
+		return -ENOMEM;
+
+	mhi_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES, "mhi", 0);
+	if (mhi_ipc_log == NULL) {
+		dev_err(&pdev->dev,
+				"Failed to create IPC logging context\n");
+	}
+
+	return 0;
+}
+
+/*
+ * mhi_dev_probe() - Platform probe: read DT, verify the EP PCIe link,
+ * set up workqueues/locks, allocate DMA bounce buffers, program the
+ * MHI version register, register for PCIe events and arm the IPA
+ * ready callback that triggers mhi_dev_enable().
+ *
+ * Defers probing until the EP PCIe driver is up and the link enabled.
+ *
+ * NOTE(review): pending_ring_wq/ring_init_wq and the coherent buffers
+ * are not released on later error paths — TODO add cleanup.
+ */
+static int mhi_dev_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	if (pdev->dev.of_node) {
+		rc = get_device_tree_data(pdev);
+		if (rc) {
+			pr_err("Error reading MHI Dev DT\n");
+			return rc;
+		}
+	}
+
+	mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id);
+	if (!mhi_ctx->phandle) {
+		pr_err("PCIe driver is not ready yet.\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (ep_pcie_get_linkstatus(mhi_ctx->phandle) != EP_PCIE_LINK_ENABLED) {
+		pr_err("PCIe link is not ready to use.\n");
+		return -EPROBE_DEFER;
+	}
+
+	INIT_WORK(&mhi_ctx->chdb_ctrl_work, mhi_dev_scheduler);
+
+	mhi_ctx->pending_ring_wq = alloc_workqueue("mhi_pending_wq",
+							WQ_HIGHPRI, 0);
+	if (!mhi_ctx->pending_ring_wq) {
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	INIT_WORK(&mhi_ctx->pending_work, mhi_dev_process_ring_pending);
+
+	INIT_WORK(&mhi_ctx->ring_init_cb_work, mhi_dev_enable);
+
+	mhi_ctx->ring_init_wq = alloc_workqueue("mhi_ring_init_cb_wq",
+							WQ_HIGHPRI, 0);
+	if (!mhi_ctx->ring_init_wq) {
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	INIT_LIST_HEAD(&mhi_ctx->event_ring_list);
+	INIT_LIST_HEAD(&mhi_ctx->process_ring_list);
+	mutex_init(&mhi_ctx->mhi_lock);
+	mutex_init(&mhi_ctx->mhi_event_lock);
+	mutex_init(&mhi_ctx->mhi_write_test);
+
+	rc = mhi_init(mhi_ctx);
+	if (rc)
+		return rc;
+
+	/* coherent bounce buffers for host<->device transfers */
+	mhi_ctx->dma_cache = dma_alloc_coherent(&pdev->dev,
+			(TRB_MAX_DATA_SIZE * 4),
+			&mhi_ctx->cache_dma_handle, GFP_KERNEL);
+	if (!mhi_ctx->dma_cache)
+		return -ENOMEM;
+
+	mhi_ctx->read_handle = dma_alloc_coherent(&pdev->dev,
+			(TRB_MAX_DATA_SIZE * 4),
+			&mhi_ctx->read_dma_handle,
+			GFP_KERNEL);
+	if (!mhi_ctx->read_handle)
+		return -ENOMEM;
+
+	mhi_ctx->write_handle = dma_alloc_coherent(&pdev->dev,
+			(TRB_MAX_DATA_SIZE * 24),
+			&mhi_ctx->write_dma_handle,
+			GFP_KERNEL);
+	if (!mhi_ctx->write_handle)
+		return -ENOMEM;
+
+	rc = mhi_dev_mmio_write(mhi_ctx, MHIVER, mhi_ctx->mhi_version);
+	if (rc) {
+		pr_err("Failed to update the MHI version\n");
+		return rc;
+	}
+
+	/* subscribe to the PCIe power/link events the SM cares about */
+	mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT |
+		EP_PCIE_EVENT_PM_D3_COLD |
+		EP_PCIE_EVENT_PM_D0 |
+		EP_PCIE_EVENT_PM_RST_DEAST |
+		EP_PCIE_EVENT_MHI_A7 |
+		EP_PCIE_EVENT_LINKDOWN;
+	mhi_ctx->event_reg.user = mhi_ctx;
+	mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
+	mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler;
+
+	rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg);
+	if (rc) {
+		pr_err("Failed to register for events from PCIe\n");
+		return rc;
+	}
+
+	pr_err("Registering with IPA\n");
+
+	/* -EEXIST means IPA is already up: run the callback inline */
+	rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx);
+	if (rc < 0) {
+		if (rc == -EEXIST) {
+			mhi_ring_init_cb(mhi_ctx);
+		} else {
+			pr_err("Error calling IPA cb with %d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mhi_dev_remove() - Platform remove: only clears the driver data;
+ * other resources are devm-managed or intentionally left in place.
+ */
+static int mhi_dev_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/* Device-tree match: binds to "qcom,msm-mhi-dev" nodes */
+static const struct of_device_id mhi_dev_match_table[] = {
+	{	.compatible = "qcom,msm-mhi-dev" },
+	{}
+};
+
+/* Platform driver glue for the MHI device */
+static struct platform_driver mhi_dev_driver = {
+	.driver		= {
+		.name	= "qcom,msm-mhi-dev",
+		.of_match_table = mhi_dev_match_table,
+	},
+	.probe		= mhi_dev_probe,
+	.remove		= mhi_dev_remove,
+};
+
+/* Runtime-tunable log verbosity knobs (0644: root-writable via sysfs) */
+module_param(mhi_msg_lvl, uint, 0644);
+module_param(mhi_ipc_msg_lvl, uint, 0644);
+
+MODULE_PARM_DESC(mhi_msg_lvl, "mhi msg lvl");
+MODULE_PARM_DESC(mhi_ipc_msg_lvl, "mhi ipc msg lvl");
+
+/* Register the MHI device platform driver at module load. */
+static int __init mhi_dev_init(void)
+{
+	return platform_driver_register(&mhi_dev_driver);
+}
+module_init(mhi_dev_init);
+
+/* Unregister the platform driver at module unload. */
+static void __exit mhi_dev_exit(void)
+{
+	platform_driver_unregister(&mhi_dev_driver);
+}
+module_exit(mhi_dev_exit);
+
+MODULE_DESCRIPTION("MHI device driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h
new file mode 100644
index 0000000..1a73d92
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi.h
@@ -0,0 +1,1126 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MHI_H
+#define __MHI_H
+
+#include <linux/msm_ep_pcie.h>
+#include <linux/types.h>
+#include <linux/ipc_logging.h>
+#include <linux/dma-mapping.h>
+
+/**
+ * MHI control data structures allotted by the host, including
+ * channel context array, event context array, command context and rings.
+ */
+
+/* Channel context state */
+enum mhi_dev_ch_ctx_state {
+	MHI_DEV_CH_STATE_DISABLED,
+	MHI_DEV_CH_STATE_ENABLED,
+	MHI_DEV_CH_STATE_RUNNING,
+	MHI_DEV_CH_STATE_SUSPENDED,
+	MHI_DEV_CH_STATE_STOP,
+	MHI_DEV_CH_STATE_ERROR,
+	MHI_DEV_CH_STATE_RESERVED,
+	/* Sentinel; presumably forces a 32-bit representation of the enum */
+	MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
+};
+
+/* Channel type */
+enum mhi_dev_ch_ctx_type {
+	MHI_DEV_CH_TYPE_NONE,
+	MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
+	MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
+	MHI_DEV_CH_RESERVED
+};
+
+/* Channel context type.
+ * NOTE(review): enum-typed members inside a __packed struct have
+ * implementation-defined size -- confirm the compiler emits them as
+ * 32 bits so the layout matches the host-defined channel context.
+ */
+struct mhi_dev_ch_ctx {
+	enum mhi_dev_ch_ctx_state	ch_state;
+	enum mhi_dev_ch_ctx_type	ch_type;
+	uint32_t			err_indx;
+	uint64_t			rbase;
+	uint64_t			rlen;
+	uint64_t			rp;
+	uint64_t			wp;
+} __packed;
+
+/* Ring element identifiers as carried in the element "type" field */
+enum mhi_dev_ring_element_type_id {
+	MHI_DEV_RING_EL_INVALID = 0,
+	MHI_DEV_RING_EL_NOOP = 1,
+	MHI_DEV_RING_EL_TRANSFER = 2,
+	MHI_DEV_RING_EL_RESET = 16,
+	MHI_DEV_RING_EL_STOP = 17,
+	MHI_DEV_RING_EL_START = 18,
+	MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
+	MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
+	MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
+	MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
+	MHI_DEV_RING_EL_UNDEF
+};
+
+/* Internal processing state of a ring (see mhi_ring_set_state()) */
+enum mhi_dev_ring_state {
+	RING_STATE_UINT = 0,	/* presumably "uninitialized" */
+	RING_STATE_IDLE,
+	RING_STATE_PENDING,
+};
+
+/* Ring categories: command, event and channel (transfer) rings */
+enum mhi_dev_ring_type {
+	RING_TYPE_CMD = 0,
+	RING_TYPE_ER,
+	RING_TYPE_CH,
+	RING_TYPE_INVAL
+};
+
+/* Event context interrupt moderation */
+enum mhi_dev_evt_ctx_int_mod_timer {
+	MHI_DEV_EVT_INT_MODERATION_DISABLED
+};
+
+/* Event ring type */
+enum mhi_dev_evt_ctx_event_ring_type {
+	MHI_DEV_EVT_TYPE_DEFAULT,
+	MHI_DEV_EVT_TYPE_VALID,
+	MHI_DEV_EVT_RESERVED
+};
+
+/* Event ring context type.
+ * NOTE(review): intmodt/ertype are enum-typed bitfields in a __packed
+ * struct; their width and packing are implementation-defined -- confirm
+ * the resulting layout matches the host's event context format.
+ */
+struct mhi_dev_ev_ctx {
+	uint32_t				res1:16;
+	enum mhi_dev_evt_ctx_int_mod_timer	intmodt:16;
+	enum mhi_dev_evt_ctx_event_ring_type	ertype;
+	uint32_t				msivec;
+	uint64_t				rbase;
+	uint64_t				rlen;
+	uint64_t				rp;
+	uint64_t				wp;
+} __packed;
+
+/* Command context */
+struct mhi_dev_cmd_ctx {
+	uint32_t				res1;
+	uint32_t				res2;
+	uint32_t				res3;
+	uint64_t				rbase;
+	uint64_t				rlen;
+	uint64_t				rp;
+	uint64_t				wp;
+} __packed;
+
+/* generic context: common rbase/rlen/rp/wp tail shared by all contexts */
+struct mhi_dev_gen_ctx {
+	uint32_t				res1;
+	uint32_t				res2;
+	uint32_t				res3;
+	uint64_t				rbase;
+	uint64_t				rlen;
+	uint64_t				rp;
+	uint64_t				wp;
+} __packed;
+
+/* Transfer ring element (TRE).
+ * Flag bits chain/ieob/ieot/bei -- presumably "chained TD", "interrupt
+ * on end of block", "interrupt on end of transfer" and "block event
+ * inhibit"; confirm against the MHI specification.
+ */
+struct mhi_dev_transfer_ring_element {
+	uint64_t				data_buf_ptr;
+	uint32_t				len:16;
+	uint32_t				res1:16;
+	uint32_t				chain:1;
+	uint32_t				res2:7;
+	uint32_t				ieob:1;
+	uint32_t				ieot:1;
+	uint32_t				bei:1;
+	uint32_t				res3:5;
+	enum mhi_dev_ring_element_type_id	type:8;
+	uint32_t				res4:8;
+} __packed;
+
+/* Command ring element */
+/* Command ring No op command.
+ * The no-op/reset/stop commands below share an identical layout and
+ * differ only in the value carried in "type".
+ */
+struct mhi_dev_cmd_ring_op {
+	uint64_t				res1;
+	uint32_t				res2;
+	uint32_t				res3:16;
+	enum mhi_dev_ring_element_type_id	type:8;
+	uint32_t				chid:8;
+} __packed;
+
+/* Command ring reset channel command */
+struct mhi_dev_cmd_ring_reset_channel_cmd {
+	uint64_t				res1;
+	uint32_t				res2;
+	uint32_t				res3:16;
+	enum mhi_dev_ring_element_type_id	type:8;
+	uint32_t				chid:8;
+} __packed;
+
+/* Command ring stop channel command */
+struct mhi_dev_cmd_ring_stop_channel_cmd {
+	uint64_t				res1;
+	uint32_t				res2;
+	uint32_t				res3:16;
+	enum mhi_dev_ring_element_type_id	type:8;
+	uint32_t				chid:8;
+} __packed;
+
+/* Command ring start channel command */
+struct mhi_dev_cmd_ring_start_channel_cmd {
+	uint64_t				res1;
+	uint32_t				seqnum;
+	uint32_t				reliable:1;
+	uint32_t				res2:15;
+	enum mhi_dev_ring_element_type_id	type:8;
+	uint32_t				chid:8;
+} __packed;
+
+/* Completion codes carried in transfer- and command-completion events */
+enum mhi_dev_cmd_completion_code {
+	MHI_CMD_COMPL_CODE_INVALID = 0,
+	MHI_CMD_COMPL_CODE_SUCCESS = 1,
+	MHI_CMD_COMPL_CODE_EOT = 2,
+	MHI_CMD_COMPL_CODE_OVERFLOW = 3,
+	MHI_CMD_COMPL_CODE_EOB = 4,
+	MHI_CMD_COMPL_CODE_UNDEFINED = 16,
+	MHI_CMD_COMPL_CODE_RING_EL = 17,
+	MHI_CMD_COMPL_CODE_RES
+};
+
+/* Event ring elements */
+/* Transfer completion event */
+struct mhi_dev_event_ring_transfer_completion {
+	uint64_t				ptr;
+	uint32_t				len:16;
+	uint32_t				res1:8;
+	enum mhi_dev_cmd_completion_code	code:8;
+	uint32_t				res2:16;
+	enum mhi_dev_ring_element_type_id	type:8;
+	uint32_t				chid:8;
+} __packed;
+
+/* Command completion event */
+struct mhi_dev_event_ring_cmd_completion {
+	uint64_t				ptr;
+	uint32_t				res1:24;
+	enum mhi_dev_cmd_completion_code	code:8;
+	uint32_t				res2:16;
+	enum mhi_dev_ring_element_type_id	type:8;
+	uint32_t				res3:8;
+} __packed;
+
+/* MHI operating states as reported to / requested by the host */
+enum mhi_dev_state {
+	MHI_DEV_RESET_STATE = 0,
+	MHI_DEV_READY_STATE,
+	MHI_DEV_M0_STATE,
+	MHI_DEV_M1_STATE,
+	MHI_DEV_M2_STATE,
+	MHI_DEV_M3_STATE,
+	MHI_DEV_MAX_STATE,
+	MHI_DEV_SYSERR_STATE = 0xff
+};
+
+/* MHI state change event */
+struct mhi_dev_event_ring_state_change {
+	uint64_t				ptr;
+	uint32_t				res1:24;
+	enum mhi_dev_state			mhistate:8;
+	uint32_t				res2:16;
+	enum mhi_dev_ring_element_type_id	type:8;
+	uint32_t				res3:8;
+} __packed;
+
+/* Execution environments reported via EE state change events */
+enum mhi_dev_execenv {
+	MHI_DEV_SBL_EE = 1,
+	MHI_DEV_AMSS_EE = 2,
+	MHI_DEV_UNRESERVED
+};
+
+/* EE state change event */
+struct mhi_dev_event_ring_ee_state_change {
+	uint64_t				ptr;
+	uint32_t				res1:24;
+	enum mhi_dev_execenv			execenv:8;
+	uint32_t				res2:16;
+	enum mhi_dev_ring_element_type_id	type:8;
+	uint32_t				res3:8;
+} __packed;
+
+/* Generic cmd to parse common details like type and channel id */
+struct mhi_dev_ring_generic {
+	uint64_t				ptr;
+	uint32_t				res1:24;
+	enum mhi_dev_state			mhistate:8;
+	uint32_t				res2:16;
+	enum mhi_dev_ring_element_type_id	type:8;
+	uint32_t				chid:8;
+} __packed;
+
+/* Capability/configuration values read from MMIO (see mhi_dev_mmio_init) */
+struct mhi_config {
+	uint32_t	mhi_reg_len;
+	uint32_t	version;
+	uint32_t	event_rings;
+	uint32_t	channels;
+	uint32_t	chdb_offset;
+	uint32_t	erdb_offset;
+};
+
+#define NUM_CHANNELS			128
+#define HW_CHANNEL_BASE			100
+#define HW_CHANNEL_END			107
+/* 2 == MHI_DEV_AMSS_EE; value written via mhi_dev_mmio_set_env() (presumed) */
+#define MHI_ENV_VALUE			2
+#define MHI_MASK_ROWS_CH_EV_DB		4
+#define TRB_MAX_DATA_SIZE		4096
+
+/* Possible ring element types */
+union mhi_dev_ring_element_type {
+	struct mhi_dev_cmd_ring_op			cmd_no_op;
+	struct mhi_dev_cmd_ring_reset_channel_cmd	cmd_reset;
+	struct mhi_dev_cmd_ring_stop_channel_cmd	cmd_stop;
+	struct mhi_dev_cmd_ring_start_channel_cmd	cmd_start;
+	struct mhi_dev_transfer_ring_element		tre;
+	struct mhi_dev_event_ring_transfer_completion	evt_tr_comp;
+	struct mhi_dev_event_ring_cmd_completion	evt_cmd_comp;
+	struct mhi_dev_event_ring_state_change		evt_state_change;
+	struct mhi_dev_event_ring_ee_state_change	evt_ee_state;
+	struct mhi_dev_ring_generic			generic;
+};
+
+/* Transfer ring element type */
+union mhi_dev_ring_ctx {
+	struct mhi_dev_cmd_ctx		cmd;
+	struct mhi_dev_ev_ctx		ev;
+	struct mhi_dev_ch_ctx		ch;
+	struct mhi_dev_gen_ctx		generic;
+};
+
+/* MHI host Control and data address region (split into 32-bit halves) */
+struct mhi_host_addr {
+	uint32_t	ctrl_base_lsb;
+	uint32_t	ctrl_base_msb;
+	uint32_t	ctrl_limit_lsb;
+	uint32_t	ctrl_limit_msb;
+	uint32_t	data_base_lsb;
+	uint32_t	data_base_msb;
+	uint32_t	data_limit_lsb;
+	uint32_t	data_limit_msb;
+};
+
+/* MHI physical and virtual address region */
+struct mhi_meminfo {
+	struct device	*dev;
+	uintptr_t	pa_aligned;
+	uintptr_t	pa_unaligned;
+	uintptr_t	va_aligned;
+	uintptr_t	va_unaligned;
+	uintptr_t	size;
+};
+
+/* Host physical address paired with its device-side mapping */
+struct mhi_addr {
+	uint64_t	host_pa;
+	uintptr_t	device_pa;
+	uintptr_t	device_va;
+	uint32_t	size;
+};
+
+/* Snapshot of a doorbell interrupt bank's mask and status registers */
+struct mhi_interrupt_state {
+	uint32_t	mask;
+	uint32_t	status;
+};
+
+/* Device-side lifecycle of a channel */
+enum mhi_dev_channel_state {
+	MHI_DEV_CH_UNINT,
+	MHI_DEV_CH_STARTED,
+	MHI_DEV_CH_PENDING_START,
+	MHI_DEV_CH_PENDING_STOP,
+	MHI_DEV_CH_STOPPED,
+	MHI_DEV_CH_CLOSED,
+};
+
+/* Operations a client can request on a channel */
+enum mhi_dev_ch_operation {
+	MHI_DEV_OPEN_CH,
+	MHI_DEV_CLOSE_CH,
+	MHI_DEV_READ_CH,
+	MHI_DEV_READ_WR,
+	MHI_DEV_POLL,
+};
+
+struct mhi_dev_channel;
+
+/* Per-ring bookkeeping shared by command, event and channel rings */
+struct mhi_dev_ring {
+	struct list_head			list;
+	struct mhi_dev				*mhi_dev;
+
+	uint32_t				id;
+	uint32_t				rd_offset;
+	uint32_t				wr_offset;
+	uint32_t				ring_size;
+
+	enum mhi_dev_ring_type			type;
+	enum mhi_dev_ring_state			state;
+
+	/* device virtual address location of the cached host ring ctx data */
+	union mhi_dev_ring_element_type		*ring_cache;
+	/* Physical address of the cached ring copy on the device side */
+	dma_addr_t				ring_cache_dma_handle;
+	/* Physical address of the host where we will write/read to/from */
+	struct mhi_addr				ring_shadow;
+	/* Ring type - cmd, event, transfer ring and its rp/wp... */
+	union mhi_dev_ring_ctx			*ring_ctx;
+	/* ring_ctx_shadow -> tracking ring_ctx in the host */
+	union mhi_dev_ring_ctx			*ring_ctx_shadow;
+	/* Invoked per processed element; see mhi_ring_set_cb() */
+	void (*ring_cb)(struct mhi_dev *dev,
+			union mhi_dev_ring_element_type *el,
+			void *ctx);
+};
+
+/* Advance the ring read index by one, wrapping at ring_size.
+ * NOTE(review): the rd_offset parameter is never used -- the function
+ * unconditionally increments ring->rd_offset. Confirm whether any
+ * caller expects the argument to matter; consider dropping it.
+ */
+static inline void mhi_dev_ring_inc_index(struct mhi_dev_ring *ring,
+						uint32_t rd_offset)
+{
+	ring->rd_offset++;
+	if (ring->rd_offset == ring->ring_size)
+		ring->rd_offset = 0;
+}
+
+/* trace information planned to use for read/write */
+#define TRACE_DATA_MAX				128
+#define MHI_DEV_DATA_MAX			512
+
+#define MHI_DEV_MMIO_RANGE			0xc80
+
+/* Reason passed to a client's event_trigger callback */
+enum cb_reason {
+	MHI_DEV_TRE_AVAILABLE = 0,
+};
+
+struct mhi_dev_client_cb_reason {
+	uint32_t		ch_id;
+	enum cb_reason		reason;
+};
+
+/* Per-client state for a software channel user */
+struct mhi_dev_client {
+	struct list_head		list;
+	struct mhi_dev_channel		*channel;
+	void (*event_trigger)(struct mhi_dev_client_cb_reason *cb);
+
+	/* mhi_dev calls are fully synchronous -- only one call may be
+	 * active per client at a time for now.
+	 */
+	struct mutex			write_lock;
+	wait_queue_head_t		wait;
+
+	/* trace logs */
+	spinlock_t			tr_lock;
+	unsigned int			tr_head;
+	unsigned int			tr_tail;
+	struct mhi_dev_trace		*tr_log;
+
+	/* client buffers */
+	struct mhi_dev_iov		*iov;
+	uint32_t			nr_iov;
+};
+
+/* Per-channel state tying the ring to its clients */
+struct mhi_dev_channel {
+	struct list_head		list;
+	struct list_head		clients;
+	/* synchronization for changing channel state,
+	 * adding/removing clients, mhi_dev callbacks, etc
+	 */
+	spinlock_t			lock;
+
+	struct mhi_dev_ring		*ring;
+
+	enum mhi_dev_channel_state	state;
+	uint32_t			ch_id;
+	enum mhi_dev_ch_ctx_type	ch_type;
+	struct mutex			ch_lock;
+	/* client which the current inbound/outbound message is for */
+	struct mhi_dev_client		*active_client;
+
+	/* current TRE being processed */
+	uint64_t			tre_loc;
+	/* current TRE size */
+	uint32_t			tre_size;
+	/* tre bytes left to read/write */
+	uint32_t			tre_bytes_left;
+	/* td size being read/written from/to so far */
+	uint32_t			td_size;
+	bool				wr_request_active;
+	bool				skip_td;
+};
+
+/* Structure device for mhi dev: top-level driver context */
+struct mhi_dev {
+	struct platform_device		*pdev;
+	struct device			*dev;
+	/* MHI MMIO related members */
+	phys_addr_t			mmio_base_pa_addr;
+	void				*mmio_base_addr;
+	phys_addr_t			ipa_uc_mbox_crdb;
+	phys_addr_t			ipa_uc_mbox_erdb;
+
+	uint32_t			*mmio_backup;
+	struct mhi_config		cfg;
+	bool				mmio_initialized;
+
+	/* Host control base information */
+	struct mhi_host_addr		host_addr;
+	struct mhi_addr			ctrl_base;
+	struct mhi_addr			data_base;
+	struct mhi_addr			ch_ctx_shadow;
+	struct mhi_dev_ch_ctx		*ch_ctx_cache;
+	dma_addr_t			ch_ctx_cache_dma_handle;
+	/* NOTE(review): ev_ctx_cache and cmd_ctx_cache are declared as
+	 * struct mhi_dev_ch_ctx * -- likely intended to be
+	 * struct mhi_dev_ev_ctx * / struct mhi_dev_cmd_ctx *. The three
+	 * packed contexts appear to share a size, so this may work by
+	 * accident; confirm and fix the types.
+	 */
+	struct mhi_addr			ev_ctx_shadow;
+	struct mhi_dev_ch_ctx		*ev_ctx_cache;
+	dma_addr_t			ev_ctx_cache_dma_handle;
+
+	struct mhi_addr			cmd_ctx_shadow;
+	struct mhi_dev_ch_ctx		*cmd_ctx_cache;
+	dma_addr_t			cmd_ctx_cache_dma_handle;
+	struct mhi_dev_ring		*ring;
+	struct mhi_dev_channel		*ch;
+
+	int				ctrl_int;
+	int				cmd_int;
+	/* CHDB and EVDB device interrupt state */
+	/* 4 presumably == MHI_MASK_ROWS_CH_EV_DB; consider using the macro */
+	struct mhi_interrupt_state	chdb[4];
+	struct mhi_interrupt_state	evdb[4];
+
+	/* Scheduler work */
+	struct work_struct		chdb_ctrl_work;
+	struct mutex			mhi_lock;
+	struct mutex			mhi_event_lock;
+
+	/* process a ring element */
+	struct workqueue_struct		*pending_ring_wq;
+	struct work_struct		pending_work;
+
+	struct list_head		event_ring_list;
+	struct list_head		process_ring_list;
+
+	uint32_t			cmd_ring_idx;
+	uint32_t			ev_ring_start;
+	uint32_t			ch_ring_start;
+
+	/* IPA Handles */
+	u32				ipa_clnt_hndl[4];
+	struct workqueue_struct		*ring_init_wq;
+	struct work_struct		ring_init_cb_work;
+
+	/* EP PCIe registration */
+	struct ep_pcie_register_event	event_reg;
+	u32                             ifc_id;
+	struct ep_pcie_hw               *phandle;
+
+	atomic_t			write_active;
+	atomic_t			is_suspended;
+	struct mutex			mhi_write_test;
+	u32				mhi_ep_msi_num;
+	u32				mhi_version;
+	void				*dma_cache;
+	void				*read_handle;
+	void				*write_handle;
+	/* Physical scratch buffer for writing control data to the host */
+	dma_addr_t			cache_dma_handle;
+	/*
+	 * Physical scratch buffer address used when picking host data
+	 * from the host used in mhi_read()
+	 */
+	dma_addr_t			read_dma_handle;
+	/*
+	 * Physical scratch buffer address used when writing to the host
+	 * region from device used in mhi_write()
+	 */
+	dma_addr_t			write_dma_handle;
+};
+
+/* Log severity levels; compared against mhi_msg_lvl / mhi_ipc_msg_lvl */
+enum mhi_msg_level {
+	MHI_MSG_VERBOSE = 0x0,
+	MHI_MSG_INFO = 0x1,
+	MHI_MSG_DBG = 0x2,
+	MHI_MSG_WARNING = 0x3,
+	MHI_MSG_ERROR = 0x4,
+	MHI_MSG_CRITICAL = 0x5,
+	MHI_MSG_reserved = 0x80000000
+};
+
+extern enum mhi_msg_level mhi_msg_lvl;
+extern enum mhi_msg_level mhi_ipc_msg_lvl;
+extern void *mhi_ipc_log;
+
+/* Log helper: messages at or above mhi_msg_lvl go to the kernel log and
+ * messages at or above mhi_ipc_msg_lvl go to the IPC log buffer.
+ * NOTE(review): the kernel-log path uses pr_err() for every severity,
+ * so even verbose messages print at error level -- confirm intended.
+ */
+#define mhi_log(_msg_lvl, _msg, ...) do { \
+	if (_msg_lvl >= mhi_msg_lvl) { \
+		pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \
+	} \
+	if (mhi_ipc_log && (_msg_lvl >= mhi_ipc_msg_lvl)) { \
+		ipc_log_string(mhi_ipc_log,                     \
+			"[%s] " _msg, __func__, ##__VA_ARGS__);     \
+	} \
+} while (0)
+
+/* SW channel client list */
+enum mhi_client_channel {
+	MHI_CLIENT_LOOPBACK_OUT = 0,
+	MHI_CLIENT_LOOPBACK_IN = 1,
+	MHI_CLIENT_SAHARA_OUT = 2,
+	MHI_CLIENT_SAHARA_IN = 3,
+	MHI_CLIENT_DIAG_OUT = 4,
+	MHI_CLIENT_DIAG_IN = 5,
+	MHI_CLIENT_SSR_OUT = 6,
+	MHI_CLIENT_SSR_IN = 7,
+	MHI_CLIENT_QDSS_OUT = 8,
+	MHI_CLIENT_QDSS_IN = 9,
+	MHI_CLIENT_EFS_OUT = 10,
+	MHI_CLIENT_EFS_IN = 11,
+	MHI_CLIENT_MBIM_OUT = 12,
+	MHI_CLIENT_MBIM_IN = 13,
+	MHI_CLIENT_QMI_OUT = 14,
+	MHI_CLIENT_QMI_IN = 15,
+	MHI_CLIENT_IP_CTRL_0_OUT = 16,
+	MHI_CLIENT_IP_CTRL_0_IN = 17,
+	MHI_CLIENT_IP_CTRL_1_OUT = 18,
+	MHI_CLIENT_IP_CTRL_1_IN = 19,
+	MHI_CLIENT_DCI_OUT = 20,
+	MHI_CLIENT_DCI_IN = 21,
+	MHI_CLIENT_IP_CTRL_3_OUT = 22,
+	MHI_CLIENT_IP_CTRL_3_IN = 23,
+	MHI_CLIENT_IP_CTRL_4_OUT = 24,
+	MHI_CLIENT_IP_CTRL_4_IN = 25,
+	MHI_CLIENT_IP_CTRL_5_OUT = 26,
+	MHI_CLIENT_IP_CTRL_5_IN = 27,
+	MHI_CLIENT_IP_CTRL_6_OUT = 28,
+	MHI_CLIENT_IP_CTRL_6_IN = 29,
+	MHI_CLIENT_IP_CTRL_7_OUT = 30,
+	MHI_CLIENT_IP_CTRL_7_IN = 31,
+	MHI_CLIENT_DUN_OUT = 32,
+	MHI_CLIENT_DUN_IN = 33,
+	MHI_CLIENT_IP_SW_0_OUT = 34,
+	MHI_CLIENT_IP_SW_0_IN = 35,
+	MHI_CLIENT_IP_SW_1_OUT = 36,
+	MHI_CLIENT_IP_SW_1_IN = 37,
+	MHI_CLIENT_IP_SW_2_OUT = 38,
+	MHI_CLIENT_IP_SW_2_IN = 39,
+	MHI_CLIENT_IP_SW_3_OUT = 40,
+	MHI_CLIENT_IP_SW_3_IN = 41,
+	MHI_CLIENT_CSVT_OUT = 42,
+	MHI_CLIENT_CSVT_IN = 43,
+	MHI_CLIENT_SMCT_OUT = 44,
+	MHI_CLIENT_SMCT_IN = 45,
+	MHI_MAX_SOFTWARE_CHANNELS = 46,
+	MHI_CLIENT_TEST_OUT = 60,
+	MHI_CLIENT_TEST_IN = 61,
+	MHI_CLIENT_RESERVED_1_LOWER = 62,
+	MHI_CLIENT_RESERVED_1_UPPER = 99,
+	MHI_CLIENT_IP_HW_0_OUT = 100,
+	MHI_CLIENT_IP_HW_0_IN = 101,
+	MHI_CLIENT_RESERVED_2_LOWER = 102,
+	MHI_CLIENT_RESERVED_2_UPPER = 127,
+	/* NOTE(review): 102 is below RESERVED_2_UPPER (127) and NUM_CHANNELS
+	 * (128) -- confirm this cap is intentional.
+	 */
+	MHI_MAX_CHANNELS = 102,
+};
+
+/* One client I/O vector: buffer address and its size */
+struct mhi_dev_iov {
+	void		*addr;
+	uint32_t	buf_size;
+};
+
+/**
+ * mhi_dev_open_channel() - Channel open for a given client done prior
+ *		to read/write.
+ * @chan_id:	Software Channel ID for the assigned client.
+ * @handle_client: Structure device for client handle.
+ * @notifier: Client issued callback notification.
+ */
+int mhi_dev_open_channel(uint32_t chan_id,
+		struct mhi_dev_client **handle_client,
+		void (*event_trigger)(struct mhi_dev_client_cb_reason *cb));
+/**
+ * mhi_dev_close_channel() - Channel close for a given client.
+ */
+int mhi_dev_close_channel(struct mhi_dev_client *handle_client);
+
+/**
+ * mhi_dev_read_channel() - Channel read for a given client
+ * @handle_client:	Client Handle issued during mhi_dev_open_channel
+ * @buf: Pointer to the buffer used by the MHI core to copy the data received
+ *	 from the Host.
+ * @buf_size: Size of the buffer pointer.
+ * @chain : Indicate if the received data is part of chained packet.
+ */
+int mhi_dev_read_channel(struct mhi_dev_client *handle_client,
+				void *buf, uint32_t buf_size, uint32_t *chain);
+
+/**
+ * mhi_dev_write_channel() - Channel write for a given software client.
+ * @handle_client:	Client Handle issued during mhi_dev_open_channel
+ * @buf: Pointer to the buffer used by the MHI core to copy the data from the
+ *	 device to the host.
+ * @buf_size: Size of the buffer pointer.
+ */
+int mhi_dev_write_channel(struct mhi_dev_client *handle_client, void *buf,
+							uint32_t buf_size);
+
+/**
+ * mhi_dev_channel_isempty() - Checks if there is any pending TRE's to process.
+ * @handle_client:	Client Handle issued during mhi_dev_open_channel
+ */
+int mhi_dev_channel_isempty(struct mhi_dev_client *handle);
+
+/* One trace record: timestamp plus a fixed payload of TRACE_DATA_MAX words */
+struct mhi_dev_trace {
+	unsigned int timestamp;
+	uint32_t data[TRACE_DATA_MAX];
+};
+
+/* MHI Ring related functions */
+
+/**
+ * mhi_ring_init() - Initializes the Ring id to the default un-initialized
+ *		state. Once a start command is received, the respective ring
+ *		is then prepared by fetching the context and updating the
+ *		offset.
+ * @ring:	Ring for the respective context - Channel/Event/Command.
+ * @type:	Command/Event or Channel transfer ring.
+ * @id:		Index to the ring id. For command its usually 1, Event rings
+ *		may vary from 1 to 128. Channels vary from 1 to 256.
+ */
+void mhi_ring_init(struct mhi_dev_ring *ring,
+			enum mhi_dev_ring_type type, int id);
+
+/**
+ * mhi_ring_start() - Fetches the respective transfer ring's context from
+ *		the host and updates the write offset.
+ * @ring:	Ring for the respective context - Channel/Event/Command.
+ * @ctx:	Transfer ring of type mhi_dev_ring_ctx.
+ * @dev:	MHI device structure.
+ */
+int mhi_ring_start(struct mhi_dev_ring *ring,
+			union mhi_dev_ring_ctx *ctx, struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_cache_ring() - Cache the data for the corresponding ring locally.
+ * @ring:	Ring for the respective context - Channel/Event/Command.
+ * @wr_offset:	Cache the TRE's upto the write offset value.
+ */
+int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset);
+
+/**
+ * mhi_dev_update_wr_offset() - Check for any updates in the write offset.
+ * @ring:	Ring for the respective context - Channel/Event/Command.
+ */
+int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring);
+
+/**
+ * mhi_dev_process_ring() - Update the Write pointer, fetch the ring elements
+ *			    and invoke the clients callback.
+ * @ring:	Ring for the respective context - Channel/Event/Command.
+ */
+int mhi_dev_process_ring(struct mhi_dev_ring *ring);
+
+/**
+ * mhi_dev_process_ring_element() - Fetch the ring elements and invoke the
+ *			    clients callback.
+ * @ring:	Ring for the respective context - Channel/Event/Command.
+ * @offset:	Offset index into the respective ring's cache element.
+ */
+int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset);
+
+/**
+ * mhi_dev_add_element() - Copy the element to the respective transfer rings
+ *			read pointer and increment the index.
+ * @ring:	Ring for the respective context - Channel/Event/Command.
+ * @element:	Transfer ring element to be copied to the host memory.
+ */
+int mhi_dev_add_element(struct mhi_dev_ring *ring,
+				union mhi_dev_ring_element_type *element);
+
+/**
+ * mhi_transfer_device_to_host() - memcpy equivalent API to transfer data
+ *		from device to the host.
+ * @dst_pa:	Physical destination address.
+ * @src:	Source virtual address.
+ * @len:	Number of bytes to be transferred.
+ * @mhi:	MHI dev structure.
+ */
+int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len,
+				struct mhi_dev *mhi);
+
+/**
+ * mhi_transfer_host_to_device() - memcpy equivalent API to transfer data
+ *		from host to the device.
+ * @device:	Destination virtual address on the device.
+ * @src_pa:	Source physical address on the host.
+ * @len:	Number of bytes to be transferred.
+ * @mhi:	MHI dev structure.
+ */
+int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len,
+				struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_write_to_host() - memcpy equivalent API to transfer data
+ *		from device to host.
+ * @host:	Host and device address details.
+ * @buf:	Data buffer that needs to be written to the host.
+ * @size:	Data buffer size.
+ */
+void mhi_dev_write_to_host(struct mhi_addr *host, void *buf, size_t size,
+				struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_read_from_host() - memcpy equivalent API to transfer data
+ *		from host to device.
+ * @host:	Host and device address details.
+ * @buf:	Data buffer that needs to be read from the host.
+ * @size:	Data buffer size.
+ */
+void mhi_dev_read_from_host(struct mhi_addr *dst, dma_addr_t buf, size_t size);
+
+/**
+ * mhi_ring_set_cb() - Register a callback to be invoked when an
+ *		element of the given ring is processed.
+ * @ring:	Ring for the respective context - Channel/Event/Command.
+ * @ring_cb:	Callback receiving the mhi_dev, the ring element and a
+ *		caller context pointer.
+ */
+void mhi_ring_set_cb(struct mhi_dev_ring *ring,
+			void (*ring_cb)(struct mhi_dev *dev,
+			union mhi_dev_ring_element_type *el, void *ctx));
+
+/**
+ * mhi_ring_set_state() - Sets internal state of the ring for tracking whether
+ *		a ring is being processed, idle or uninitialized.
+ * @ring:	Ring for the respective context - Channel/Event/Command.
+ * @state:	state of type mhi_dev_ring_state.
+ */
+void mhi_ring_set_state(struct mhi_dev_ring *ring,
+			enum mhi_dev_ring_state state);
+
+/**
+ * mhi_ring_get_state() - Obtains the internal state of the ring.
+ * @ring:	Ring for the respective context - Channel/Event/Command.
+ */
+enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring);
+
+/* MMIO related functions */
+
+/**
+ * mhi_dev_mmio_read() - Generic MHI MMIO register read API.
+ * @dev:	MHI device structure.
+ * @offset:	MHI address offset from base.
+ * @reg_val:	Pointer the register value is stored to.
+ */
+int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset,
+			uint32_t *reg_value);
+
+/**
+ * mhi_dev_mmio_write() - Generic MHI MMIO register write API.
+ * @dev:	MHI device structure.
+ * @offset:	MHI address offset from base.
+ * @val:	Value to be written to the register offset.
+ */
+int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset,
+				uint32_t val);
+
+/**
+ * mhi_dev_mmio_masked_write() - Generic MHI MMIO register write masked API.
+ * @dev:	MHI device structure.
+ * @offset:	MHI address offset from base.
+ * @mask:	Register field mask.
+ * @shift:	Register field mask shift value.
+ * @val:	Value to be written to the register offset.
+ */
+int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset,
+						uint32_t mask, uint32_t shift,
+						uint32_t val);
+/**
+ * mhi_dev_mmio_masked_read() - Generic MHI MMIO register read masked API.
+ * @dev:	MHI device structure.
+ * @offset:	MHI address offset from base.
+ * @mask:	Register field mask.
+ * @shift:	Register field mask shift value.
+ * @reg_val:	Pointer the register value is stored to.
+ */
+int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
+						uint32_t mask, uint32_t shift,
+						uint32_t *reg_val);
+/**
+ * mhi_dev_mmio_enable_ctrl_interrupt() - Enable Control interrupt.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_disable_ctrl_interrupt() - Disable Control interrupt.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_ctrl_status_interrupt() - Read Control interrupt status.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_enable_cmdb_interrupt() - Enable Command doorbell interrupt.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_disable_cmdb_interrupt() - Disable Command doorbell interrupt.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_cmdb_status_interrupt() - Read Command doorbell status.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_enable_chdb_a7() - Enable Channel doorbell for a given
+ *		channel id.
+ * @dev:	MHI device structure.
+ * @chdb_id:	Channel id number.
+ */
+int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);
+/**
+ * mhi_dev_mmio_disable_chdb_a7() - Disable Channel doorbell for a given
+ *		channel id.
+ * @dev:	MHI device structure.
+ * @chdb_id:	Channel id number.
+ */
+int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);
+
+/**
+ * mhi_dev_mmio_enable_erdb_a7() - Enable Event ring doorbell for a given
+ *		event ring id.
+ * @dev:	MHI device structure.
+ * @erdb_id:	Event ring id number.
+ */
+int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);
+
+/**
+ * mhi_dev_mmio_disable_erdb_a7() - Disable Event ring doorbell for a given
+ *		event ring id.
+ * @dev:	MHI device structure.
+ * @erdb_id:	Event ring id number.
+ */
+int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);
+
+/**
+ * mhi_dev_mmio_enable_chdb_interrupts() - Enable all Channel doorbell
+ *		interrupts.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_mask_chdb_interrupts() - Mask all Channel doorbell
+ *		interrupts.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_chdb_status_interrupts() - Read all Channel doorbell
+ *		interrupts.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_enable_erdb_interrupts() - Enable all Event doorbell
+ *		interrupts.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_mask_erdb_interrupts() - Mask all Event doorbell
+ *		interrupts.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_erdb_status_interrupts() - Read all Event doorbell
+ *		interrupts.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_clear_interrupts() - Clear all doorbell interrupts.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_chc_base() - Fetch the Channel ring context base address.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_erc_base() - Fetch the Event ring context base address.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_crc_base() - Fetch the Command ring context base address.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_ch_db() - Fetch the Write offset of the Channel ring ID.
+ * @ring:	Ring for the respective context.
+ * @wr_offset:	Pointer of the write offset to be written to.
+ */
+int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);
+
+/**
+ * mhi_dev_mmio_get_erc_db() - Fetch the Write offset of the Event ring ID.
+ * @ring:	Ring for the respective context.
+ * @wr_offset:	Pointer of the write offset to be written to.
+ */
+int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);
+
+/**
+ * mhi_dev_mmio_get_cmd_db() - Fetch the Write offset of the Command ring ID.
+ * @ring:	Ring for the respective context.
+ * @wr_offset:	Pointer of the write offset to be written to.
+ */
+int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);
+
+/**
+ * mhi_dev_mmio_set_env() - Write the Execution Environment.
+ * @dev:	MHI device structure.
+ * @value:	Value of the Execution Environment.
+ */
+int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value);
+
+/**
+ * mhi_dev_mmio_reset() - Reset the MMIO done as part of initialization.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_reset(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_get_mhi_addr() - Fetches the Data and Control region from the Host.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_get_mhi_addr(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_mhi_state() - Fetches the MHI state such as M0/M1/M2/M3.
+ * @dev:	MHI device structure.
+ * @state:	Pointer of type mhi_dev_state
+ */
+int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state);
+
+/**
+ * mhi_dev_mmio_init() - Initializes the MMIO and reads the Number of event
+ *		rings, support number of channels, and offsets to the Channel
+ *		and Event doorbell from the host.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_init(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_update_ner() - Update the number of event rings (NER) programmed by
+ *		the host.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_update_ner(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_restore_mmio() - Restores the MMIO when MHI device comes out of M3.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_restore_mmio(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_backup_mmio() - Backup MMIO before a MHI transition to M3.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_backup_mmio(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_dump_mmio() - Memory dump of the MMIO region for debug.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_dump_mmio(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_config_outbound_iatu() - Configure Outbound Address translation
+ *		unit between device and host to map the Data and Control
+ *		information.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_send_state_change_event() - Send state change event to the host
+ *		such as M0/M1/M2/M3.
+ * @dev:	MHI device structure.
+ * @state:	MHI state of type mhi_dev_state
+ */
+int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
+					enum mhi_dev_state state);
+/**
+ * mhi_dev_send_ee_event() - Send Execution environment state change
+ *		event to the host.
+ * @dev:	MHI device structure.
+ * @state:	MHI state of type mhi_dev_execenv
+ */
+int mhi_dev_send_ee_event(struct mhi_dev *mhi,
+					enum mhi_dev_execenv exec_env);
+/**
+ * mhi_dev_syserr() - System error when unexpected events are received.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_syserr(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_suspend() - MHI device suspend to stop channel processing at the
+ *		Transfer ring boundary, update the channel state to suspended.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_suspend(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_resume() - MHI device resume to update the channel state to running.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_resume(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_trigger_hw_acc_wakeup() - Notify State machine there is HW
+ *		accelerated data to be send and prevent MHI suspend.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi);
+
+/**
+ * mhi_pcie_config_db_routing() - Configure Doorbell for Event and Channel
+ *		context with IPA when performing a MHI resume.
+ * @dev:	MHI device structure.
+ */
+int mhi_pcie_config_db_routing(struct mhi_dev *mhi);
+
+/**
+ * mhi_uci_init() - Initializes the User control interface (UCI) which
+ *		exposes device nodes for the supported MHI software
+ *		channels.
+ */
+int mhi_uci_init(void);
+
+void mhi_dev_notify_a7_event(struct mhi_dev *mhi);
+
+#endif /* __MHI_H */
diff --git a/drivers/platform/msm/mhi_dev/mhi_hwio.h b/drivers/platform/msm/mhi_dev/mhi_hwio.h
new file mode 100644
index 0000000..197713b
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_hwio.h
@@ -0,0 +1,191 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MHI_HWIO_
+#define _MHI_HWIO_
+
+/* MHI register definition */
+
+/* A7 (applications processor) interrupt status registers */
+#define MHI_CTRL_INT_STATUS_A7				(0x0004)
+#define MHI_CTRL_INT_STATUS_A7_STATUS_MASK		0xffffffff
+#define MHI_CTRL_INT_STATUS_A7_STATUS_SHIFT		0x0
+
+#define MHI_CHDB_INT_STATUS_A7_n(n)			(0x0028 + 0x4 * (n))
+#define MHI_CHDB_INT_STATUS_A7_n_STATUS_MASK		0xffffffff
+#define MHI_CHDB_INT_STATUS_A7_n_STATUS_SHIFT		0x0
+
+#define MHI_ERDB_INT_STATUS_A7_n(n)			(0x0038 + 0x4 * (n))
+#define MHI_ERDB_INT_STATUS_A7_n_STATUS_MASK		0xffffffff
+#define MHI_ERDB_INT_STATUS_A7_n_STATUS_SHIFT		0x0
+
+/* Interrupt clear registers */
+#define MHI_CTRL_INT_CLEAR_A7				(0x004C)
+#define MHI_CTRL_INT_CLEAR_A7_CLEAR_MASK		0xffffffff
+#define MHI_CTRL_INT_CLEAR_A7_CLEAR_SHIFT		0x0
+#define MHI_CTRL_INT_CRDB_CLEAR				BIT(1)
+#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR			BIT(0)
+
+#define MHI_CHDB_INT_CLEAR_A7_n(n)			(0x0070 + 0x4 * (n))
+#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK		0xffffffff
+#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_SHIFT		0x0
+
+#define MHI_ERDB_INT_CLEAR_A7_n(n)			(0x0080 + 0x4 * (n))
+#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK		0xffffffff
+#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_SHIFT		0x0
+
+/* Interrupt mask (enable) registers */
+#define MHI_CTRL_INT_MASK_A7				(0x0094)
+#define MHI_CTRL_INT_MASK_A7_MASK_MASK			0x3
+#define MHI_CTRL_INT_MASK_A7_MASK_SHIFT			0x0
+#define MHI_CTRL_MHICTRL_MASK				BIT(0)
+#define MHI_CTRL_MHICTRL_SHFT				0
+#define MHI_CTRL_CRDB_MASK				BIT(1)
+#define MHI_CTRL_CRDB_SHFT				1
+
+#define MHI_CHDB_INT_MASK_A7_n(n)			(0x00B8 + 0x4 * (n))
+#define MHI_CHDB_INT_MASK_A7_n_MASK_MASK		0xffffffff
+#define MHI_CHDB_INT_MASK_A7_n_MASK_SHIFT		0x0
+
+#define MHI_ERDB_INT_MASK_A7_n(n)			(0x00C8 + 0x4 * (n))
+#define MHI_ERDB_INT_MASK_A7_n_MASK_MASK		0xffffffff
+#define MHI_ERDB_INT_MASK_A7_n_MASK_SHIFT		0x0
+
+/* MHI MMIO capability and control registers */
+#define MHIREGLEN					(0x0100)
+#define MHIREGLEN_MHIREGLEN_MASK			0xffffffff
+#define MHIREGLEN_MHIREGLEN_SHIFT			0x0
+
+#define MHIVER						(0x0108)
+#define MHIVER_MHIVER_MASK				0xffffffff
+#define MHIVER_MHIVER_SHIFT				0x0
+
+#define MHICFG						(0x0110)
+#define MHICFG_RESERVED_BITS31_24_MASK			0xff000000
+#define MHICFG_RESERVED_BITS31_24_SHIFT			0x18
+#define MHICFG_NER_MASK					0xff0000
+#define MHICFG_NER_SHIFT				0x10
+#define MHICFG_RESERVED_BITS15_8_MASK			0xff00
+#define MHICFG_RESERVED_BITS15_8_SHIFT			0x8
+#define MHICFG_NCH_MASK					0xff
+#define MHICFG_NCH_SHIFT				0x0
+
+#define CHDBOFF						(0x0118)
+#define CHDBOFF_CHDBOFF_MASK				0xffffffff
+#define CHDBOFF_CHDBOFF_SHIFT				0x0
+
+#define ERDBOFF						(0x0120)
+#define ERDBOFF_ERDBOFF_MASK				0xffffffff
+#define ERDBOFF_ERDBOFF_SHIFT				0x0
+
+#define BHIOFF						(0x0128)
+#define BHIOFF_BHIOFF_MASK				0xffffffff
+#define BHIOFF_BHIOFF_SHIFT				0x0
+
+#define DEBUGOFF					(0x0130)
+#define DEBUGOFF_DEBUGOFF_MASK				0xffffffff
+#define DEBUGOFF_DEBUGOFF_SHIFT				0x0
+
+#define MHICTRL						(0x0138)
+#define MHICTRL_MHISTATE_MASK				0x0000FF00
+#define MHICTRL_MHISTATE_SHIFT				0x8
+#define MHICTRL_RESET_MASK				0x2
+#define MHICTRL_RESET_SHIFT				0x1
+
+#define MHISTATUS					(0x0148)
+#define MHISTATUS_MHISTATE_MASK				0x0000ff00
+#define MHISTATUS_MHISTATE_SHIFT			0x8
+#define MHISTATUS_SYSERR_MASK				0x4
+#define MHISTATUS_SYSERR_SHIFT				0x2
+#define MHISTATUS_READY_MASK				0x1
+#define MHISTATUS_READY_SHIFT				0x0
+
+/* Context array base address pointers (64-bit, split LOWER/HIGHER) */
+#define CCABAP_LOWER					(0x0158)
+#define CCABAP_LOWER_CCABAP_LOWER_MASK			0xffffffff
+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT			0x0
+
+#define CCABAP_HIGHER					(0x015C)
+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK		0xffffffff
+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT		0x0
+
+#define ECABAP_LOWER					(0x0160)
+#define ECABAP_LOWER_ECABAP_LOWER_MASK			0xffffffff
+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT			0x0
+
+#define ECABAP_HIGHER					(0x0164)
+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK		0xffffffff
+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT		0x0
+
+#define CRCBAP_LOWER					(0x0168)
+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK			0xffffffff
+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT			0x0
+
+#define CRCBAP_HIGHER					(0x016C)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK		0xffffffff
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT		0x0
+
+/* Command ring doorbell */
+#define CRDB_LOWER					(0x0170)
+#define CRDB_LOWER_CRDB_LOWER_MASK			0xffffffff
+#define CRDB_LOWER_CRDB_LOWER_SHIFT			0x0
+
+#define CRDB_HIGHER					(0x0174)
+#define CRDB_HIGHER_CRDB_HIGHER_MASK			0xffffffff
+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT			0x0
+
+/* Host control/data address space windows */
+#define MHICTRLBASE_LOWER				(0x0180)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK	0xffffffff
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT	0x0
+
+#define MHICTRLBASE_HIGHER				(0x0184)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK	0xffffffff
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT	0x0
+
+#define MHICTRLLIMIT_LOWER				(0x0188)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK	0xffffffff
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT	0x0
+
+#define MHICTRLLIMIT_HIGHER				(0x018C)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK	0xffffffff
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT	0x0
+
+#define MHIDATABASE_LOWER				(0x0198)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK	0xffffffff
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT	0x0
+
+#define MHIDATABASE_HIGHER				(0x019C)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK	0xffffffff
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT	0x0
+
+#define MHIDATALIMIT_LOWER				(0x01A0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK	0xffffffff
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT	0x0
+
+#define MHIDATALIMIT_HIGHER				(0x01A4)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK	0xffffffff
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT	0x0
+
+/* Per-channel and per-event-ring doorbell registers (64-bit pairs) */
+#define CHDB_LOWER_n(n)					(0x0400 + 0x8 * (n))
+#define CHDB_LOWER_n_CHDB_LOWER_MASK			0xffffffff
+#define CHDB_LOWER_n_CHDB_LOWER_SHIFT			0x0
+
+#define CHDB_HIGHER_n(n)				(0x0404 + 0x8 * (n))
+#define CHDB_HIGHER_n_CHDB_HIGHER_MASK			0xffffffff
+#define CHDB_HIGHER_n_CHDB_HIGHER_SHIFT			0x0
+
+#define ERDB_LOWER_n(n)					(0x0800 + 0x8 * (n))
+#define ERDB_LOWER_n_ERDB_LOWER_MASK			0xffffffff
+#define ERDB_LOWER_n_ERDB_LOWER_SHIFT			0x0
+
+#define ERDB_HIGHER_n(n)				(0x0804 + 0x8 * (n))
+#define ERDB_HIGHER_n_ERDB_HIGHER_MASK			0xffffffff
+#define ERDB_HIGHER_n_ERDB_HIGHER_SHIFT			0x0
+
+/* BHI (Boot Host Interface) execution environment register */
+#define BHI_EXECENV					(0x228)
+#define BHI_EXECENV_MASK				0xFFFFFFFF
+#define BHI_EXECENV_SHIFT				0
+
+#endif
diff --git a/drivers/platform/msm/mhi_dev/mhi_mmio.c b/drivers/platform/msm/mhi_dev/mhi_mmio.c
new file mode 100644
index 0000000..4043e0b
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_mmio.c
@@ -0,0 +1,999 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+
+#include "mhi.h"
+#include "mhi_hwio.h"
+
+/* Read one 32-bit MMIO register at @offset into @reg_value. */
+int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset,
+			uint32_t *reg_value)
+{
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	/* Relaxed read: no ordering guarantee versus other MMIO accesses */
+	*reg_value = readl_relaxed(dev->mmio_base_addr + offset);
+
+	pr_debug("reg read:0x%x with value 0x%x\n", offset, *reg_value);
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read);
+
+/* Write one 32-bit MMIO register at @offset with @val. */
+int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset,
+				uint32_t val)
+{
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	/* Relaxed write: caller issues any barrier it requires */
+	writel_relaxed(val, dev->mmio_base_addr + offset);
+
+	pr_debug("reg write:0x%x with value 0x%x\n", offset, val);
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_write);
+
+/* Read-modify-write of the field selected by @mask/@shift at @offset. */
+int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset,
+						uint32_t mask, uint32_t shift,
+						uint32_t val)
+{
+	uint32_t tmp;
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_read(dev, offset, &tmp);
+	if (rc) {
+		pr_err("Read error failed for offset:0x%x\n", offset);
+		return rc;
+	}
+
+	/* Clear the field, then merge in the shifted value */
+	tmp = (tmp & ~mask) | ((val << shift) & mask);
+
+	rc = mhi_dev_mmio_write(dev, offset, tmp);
+	if (rc) {
+		pr_err("Write error failed for offset:0x%x\n", offset);
+		return rc;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_masked_write);
+
+/* Extract the field selected by @mask/@shift at @offset into @reg_val. */
+int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
+						uint32_t mask, uint32_t shift,
+						uint32_t *reg_val)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_read(dev, offset, reg_val);
+	if (rc) {
+		pr_err("Read error failed for offset:0x%x\n", offset);
+		return rc;
+	}
+
+	*reg_val = (*reg_val & mask) >> shift;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_masked_read);
+
+/* Set or clear the A7 mask bit for a single channel doorbell interrupt. */
+static int mhi_dev_mmio_mask_set_chdb_int_a7(struct mhi_dev *dev,
+						uint32_t chdb_id, bool enable)
+{
+	uint32_t shift = chdb_id % 32;
+	uint32_t row = chdb_id / 32;
+	int rc;
+
+	rc = mhi_dev_mmio_masked_write(dev, MHI_CHDB_INT_MASK_A7_n(row),
+					1 << shift, shift, enable ? 1 : 0);
+	if (rc)
+		pr_err("Write on channel db interrupt failed\n");
+
+	return rc;
+}
+
+/* Enable the doorbell interrupt for channel @chdb_id. */
+int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, true);
+	if (rc)
+		pr_err("Setting channel DB failed for ch_id:%d\n", chdb_id);
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_a7);
+
+/* Disable the doorbell interrupt for channel @chdb_id. */
+int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, false);
+	if (rc)
+		pr_err("Disabling channel DB failed for ch_id:%d\n", chdb_id);
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_chdb_a7);
+
+/* Set or clear the A7 mask bit for a single event ring doorbell. */
+static int mhi_dev_mmio_set_erdb_int_a7(struct mhi_dev *dev,
+					uint32_t erdb_ch_id, bool enable)
+{
+	uint32_t shift = erdb_ch_id % 32;
+	uint32_t row = erdb_ch_id / 32;
+	int rc;
+
+	rc = mhi_dev_mmio_masked_write(dev,
+			MHI_ERDB_INT_MASK_A7_n(row),
+			1 << shift, shift, enable ? 1 : 0);
+	if (rc)
+		pr_err("Error setting event ring db for %d\n", erdb_ch_id);
+
+	return rc;
+}
+
+/* Enable the doorbell interrupt for event ring @erdb_id. */
+int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, true);
+	if (rc)
+		pr_err("Error setting event ring db for %d\n", erdb_id);
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_a7);
+
+/* Disable the doorbell interrupt for event ring @erdb_id. */
+int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, false);
+	if (rc)
+		pr_err("Error disabling event ring db for %d\n", erdb_id);
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_erdb_a7);
+
+/**
+ * mhi_dev_mmio_get_mhi_state() - Read the current MHISTATE field from
+ *		the MHICTRL register.
+ * @dev:	MHI device structure.
+ * @state:	Filled with the MHI state on success.
+ */
+int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state)
+{
+	uint32_t reg_value = 0;
+	int rc = 0;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Fix: use the MHICTRL field macros (not the MHISTATUS ones, which
+	 * only coincidentally share the same layout) when reading MHICTRL,
+	 * and read into a uint32_t scratch variable so an incompatible
+	 * 'enum mhi_dev_state *' is not passed where 'uint32_t *' is
+	 * expected.
+	 */
+	rc = mhi_dev_mmio_masked_read(dev, MHICTRL,
+		MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT, &reg_value);
+	if (rc)
+		return rc;
+	*state = (enum mhi_dev_state)reg_value;
+
+	rc = mhi_dev_mmio_read(dev, MHICTRL, &reg_value);
+	if (rc)
+		return rc;
+
+	pr_debug("MHICTRL is 0x%x\n", reg_value);
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_mhi_state);
+
+/* Write all-ones (enable) or all-zeros (mask) to every CHDB mask row. */
+static int mhi_dev_mmio_set_chdb_interrupts(struct mhi_dev *dev, bool enable)
+{
+	uint32_t val = enable ? MHI_CHDB_INT_MASK_A7_n_MASK_MASK : 0;
+	uint32_t row;
+	int rc = 0;
+
+	for (row = 0; row < MHI_MASK_ROWS_CH_EV_DB; row++) {
+		rc = mhi_dev_mmio_write(dev,
+				MHI_CHDB_INT_MASK_A7_n(row), val);
+		if (rc) {
+			pr_err("Set channel db on row:%d failed\n", row);
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/* Enable every channel doorbell interrupt. */
+int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_set_chdb_interrupts(dev, true);
+	if (rc)
+		pr_err("Error setting channel db interrupts\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_interrupts);
+
+/* Mask (disable) every channel doorbell interrupt. */
+int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_set_chdb_interrupts(dev, false);
+	if (rc)
+		pr_err("Error masking channel db interrupts\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_mask_chdb_interrupts);
+
+/* Latch the pending channel doorbell bits for every status row. */
+int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev)
+{
+	uint32_t row;
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	for (row = 0; row < MHI_MASK_ROWS_CH_EV_DB; row++) {
+		rc = mhi_dev_mmio_read(dev,
+			MHI_CHDB_INT_STATUS_A7_n(row), &dev->chdb[row].status);
+		if (rc) {
+			pr_err("Error reading chdb status for row:%d\n", row);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_chdb_status_interrupts);
+
+/* Write all-ones (enable) or all-zeros (mask) to every ERDB mask row. */
+static int mhi_dev_mmio_set_erdb_interrupts(struct mhi_dev *dev, bool enable)
+{
+	uint32_t val = enable ? MHI_ERDB_INT_MASK_A7_n_MASK_MASK : 0;
+	uint32_t row;
+	int rc = 0;
+
+	for (row = 0; row < MHI_MASK_ROWS_CH_EV_DB; row++) {
+		rc = mhi_dev_mmio_write(dev,
+				MHI_ERDB_INT_MASK_A7_n(row), val);
+		if (rc) {
+			pr_err("Error setting erdb status for row:%d\n", row);
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/* Enable every event ring doorbell interrupt. */
+int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_set_erdb_interrupts(dev, true);
+	if (rc)
+		pr_err("Error enabling all erdb interrupts\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_interrupts);
+
+/* Mask (disable) every event ring doorbell interrupt. */
+int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_set_erdb_interrupts(dev, false);
+	if (rc)
+		pr_err("Error masking all event db interrupt\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_mask_erdb_interrupts);
+
+/**
+ * mhi_dev_mmio_read_erdb_status_interrupts() - Latch the pending event
+ *		ring doorbell status rows into dev->evdb[].status.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev)
+{
+	uint32_t i;
+	int rc = 0;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+		rc = mhi_dev_mmio_read(dev, MHI_ERDB_INT_STATUS_A7_n(i),
+						&dev->evdb[i].status);
+		if (rc) {
+			/* Fixed log text: this path reads, it does not set */
+			pr_err("Error reading erdb status for row:%d\n", i);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_erdb_status_interrupts);
+
+/* Unmask the MHICTRL bit in the A7 control interrupt mask register. */
+int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+			MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 1);
+	if (rc)
+		pr_err("Error enabling control interrupt\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_ctrl_interrupt);
+
+/* Mask the MHICTRL bit in the A7 control interrupt mask register. */
+int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+			MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 0);
+	if (rc)
+		pr_err("Error disabling control interrupt\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_ctrl_interrupt);
+
+/* Read the A7 control interrupt status and keep only the CTRL bit. */
+int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev)
+{
+	int rc = 0;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->ctrl_int);
+	if (rc) {
+		pr_err("Error reading control status interrupt\n");
+		return rc;
+	}
+
+	/* Bit 0 of the status register is treated as the MHI CTRL event. */
+	dev->ctrl_int &= 0x1;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_ctrl_status_interrupt);
+
+/* Read the A7 control interrupt status and keep only the CMD DB bit. */
+int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev)
+{
+	int rc = 0;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->cmd_int);
+	if (rc) {
+		pr_err("Error reading cmd status register\n");
+		return rc;
+	}
+
+	/*
+	 * NOTE(review): bit 4 (0x10) is treated as the command doorbell
+	 * status here, while the mask register uses BIT(1) for CRDB —
+	 * confirm against the HW register spec that the status layout
+	 * really differs from the mask layout.
+	 */
+	dev->cmd_int &= 0x10;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_cmdb_status_interrupt);
+
+/* Unmask the command ring doorbell interrupt bit. */
+int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev)
+{
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	return mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+			MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 1);
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_cmdb_interrupt);
+
+/* Mask the command ring doorbell interrupt bit. */
+int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev)
+{
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	return mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+			MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 0);
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_cmdb_interrupt);
+
+/* Disable/mask every MHI interrupt source: ctrl, cmd db, ch db, ev db. */
+static void mhi_dev_mmio_mask_interrupts(struct mhi_dev *dev)
+{
+	if (mhi_dev_mmio_disable_ctrl_interrupt(dev)) {
+		pr_err("Error disabling control interrupt\n");
+		return;
+	}
+
+	if (mhi_dev_mmio_disable_cmdb_interrupt(dev)) {
+		pr_err("Error disabling command db interrupt\n");
+		return;
+	}
+
+	if (mhi_dev_mmio_mask_chdb_interrupts(dev)) {
+		pr_err("Error masking all channel db interrupts\n");
+		return;
+	}
+
+	if (mhi_dev_mmio_mask_erdb_interrupts(dev))
+		pr_err("Error masking all erdb interrupts\n");
+}
+
+/* Clear all pending channel, event ring and command doorbell interrupts. */
+int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev)
+{
+	uint32_t i = 0;
+	int rc = 0;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	/* Write-one-to-clear every pending channel doorbell row */
+	for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+		rc = mhi_dev_mmio_write(dev, MHI_CHDB_INT_CLEAR_A7_n(i),
+				MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK);
+		if (rc)
+			return rc;
+	}
+
+	/* Write-one-to-clear every pending event ring doorbell row */
+	for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+		rc = mhi_dev_mmio_write(dev, MHI_ERDB_INT_CLEAR_A7_n(i),
+				MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK);
+		if (rc)
+			return rc;
+	}
+
+	/*
+	 * NOTE(review): only the command ring doorbell bit (BIT(1)) is
+	 * cleared in the control clear register; a pending MHICTRL event
+	 * (MHI_CTRL_INT_CRDB_MHICTRL_CLEAR, BIT(0)) is left as-is —
+	 * confirm this is intentional.
+	 */
+	rc = mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7,
+					MHI_CTRL_INT_CRDB_CLEAR);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_clear_interrupts);
+
+/*
+ * Read a 64-bit host address from a HIGHER/LOWER MMIO register pair.
+ * Factored out of the three context base accessors below, which were
+ * three byte-identical copies of the same read sequence.
+ */
+static int mhi_dev_mmio_read_pair(struct mhi_dev *dev, uint32_t higher,
+				uint32_t lower, uint64_t *out)
+{
+	uint32_t val = 0;
+	int rc;
+
+	rc = mhi_dev_mmio_read(dev, higher, &val);
+	if (rc)
+		return rc;
+	*out = (uint64_t)val << 32;
+
+	rc = mhi_dev_mmio_read(dev, lower, &val);
+	if (rc)
+		return rc;
+	*out |= val;
+
+	return 0;
+}
+
+/**
+ * mhi_dev_mmio_get_chc_base() - Fetch the channel context array base
+ *		(CCABAP) and derive the device-visible shadow addresses
+ *		relative to the control region base.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev)
+{
+	uint64_t base = 0;
+	uint32_t offset = 0;
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_read_pair(dev, CCABAP_HIGHER, CCABAP_LOWER, &base);
+	if (rc)
+		return rc;
+	dev->ch_ctx_shadow.host_pa = base;
+
+	offset = (uint32_t)(dev->ch_ctx_shadow.host_pa -
+					dev->ctrl_base.host_pa);
+
+	dev->ch_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
+	dev->ch_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_chc_base);
+
+/**
+ * mhi_dev_mmio_get_erc_base() - Fetch the event ring context array base
+ *		(ECABAP) and derive the device-visible shadow addresses.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev)
+{
+	uint64_t base = 0;
+	uint32_t offset = 0;
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_read_pair(dev, ECABAP_HIGHER, ECABAP_LOWER, &base);
+	if (rc)
+		return rc;
+	dev->ev_ctx_shadow.host_pa = base;
+
+	offset = (uint32_t)(dev->ev_ctx_shadow.host_pa -
+					dev->ctrl_base.host_pa);
+
+	dev->ev_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
+	dev->ev_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_erc_base);
+
+/**
+ * mhi_dev_mmio_get_crc_base() - Fetch the command ring context base
+ *		(CRCBAP) and derive the device-visible shadow addresses.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev)
+{
+	uint64_t base = 0;
+	uint32_t offset = 0;
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_read_pair(dev, CRCBAP_HIGHER, CRCBAP_LOWER, &base);
+	if (rc)
+		return rc;
+	dev->cmd_ctx_shadow.host_pa = base;
+
+	offset = (uint32_t)(dev->cmd_ctx_shadow.host_pa -
+					dev->ctrl_base.host_pa);
+
+	dev->cmd_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
+	dev->cmd_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_crc_base);
+
+/* Read the 64-bit channel doorbell write offset for @ring. */
+int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
+{
+	uint32_t val = 0, idx;
+	int rc;
+
+	if (!ring) {
+		pr_err("Invalid ring context\n");
+		return -EINVAL;
+	}
+
+	/* Channel doorbell registers are indexed from the first channel */
+	idx = ring->id - ring->mhi_dev->ch_ring_start;
+
+	rc = mhi_dev_mmio_read(ring->mhi_dev, CHDB_HIGHER_n(idx), &val);
+	if (rc)
+		return rc;
+
+	*wr_offset = (uint64_t)val << 32;
+
+	rc = mhi_dev_mmio_read(ring->mhi_dev, CHDB_LOWER_n(idx), &val);
+	if (rc)
+		return rc;
+
+	*wr_offset |= val;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_ch_db);
+
+/* Read the 64-bit event ring doorbell write offset for @ring. */
+int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
+{
+	uint32_t val = 0, idx;
+	int rc;
+
+	if (!ring) {
+		pr_err("Invalid ring context\n");
+		return -EINVAL;
+	}
+
+	/* Event ring doorbells are indexed from the first event ring */
+	idx = ring->id - ring->mhi_dev->ev_ring_start;
+
+	rc = mhi_dev_mmio_read(ring->mhi_dev, ERDB_HIGHER_n(idx), &val);
+	if (rc)
+		return rc;
+
+	*wr_offset = (uint64_t)val << 32;
+
+	rc = mhi_dev_mmio_read(ring->mhi_dev, ERDB_LOWER_n(idx), &val);
+	if (rc)
+		return rc;
+
+	*wr_offset |= val;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_erc_db);
+
+/* Read the 64-bit command ring doorbell write offset. */
+int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
+{
+	uint32_t val = 0;
+	int rc;
+
+	if (!ring) {
+		pr_err("Invalid ring context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_read(ring->mhi_dev, CRDB_HIGHER, &val);
+	if (rc)
+		return rc;
+
+	*wr_offset = (uint64_t)val << 32;
+
+	rc = mhi_dev_mmio_read(ring->mhi_dev, CRDB_LOWER, &val);
+	if (rc)
+		return rc;
+
+	*wr_offset |= val;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_cmd_db);
+
+/**
+ * mhi_dev_mmio_set_env() - Write the BHI execution environment register.
+ * @dev:	MHI device structure.
+ * @value:	Execution environment value to publish.
+ */
+int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value)
+{
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	/* Propagate the write status instead of silently discarding it */
+	return mhi_dev_mmio_write(dev, BHI_EXECENV, value);
+}
+EXPORT_SYMBOL(mhi_dev_mmio_set_env);
+
+/**
+ * mhi_dev_mmio_reset() - Reset MHICTRL/MHISTATUS and clear all pending
+ *		doorbell interrupts.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_reset(struct mhi_dev *dev)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	/* Propagate failures; previously every return code was ignored */
+	rc = mhi_dev_mmio_write(dev, MHICTRL, 0);
+	if (rc)
+		return rc;
+
+	rc = mhi_dev_mmio_write(dev, MHISTATUS, 0);
+	if (rc)
+		return rc;
+
+	return mhi_dev_mmio_clear_interrupts(dev);
+}
+EXPORT_SYMBOL(mhi_dev_mmio_reset);
+
+/**
+ * mhi_dev_restore_mmio() - Restore the MMIO register range from the
+ *		backup taken by mhi_dev_backup_mmio(), then re-arm the
+ *		control interrupt.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_restore_mmio(struct mhi_dev *dev)
+{
+	uint32_t i;
+	/* Fix: MMIO cookies must carry the __iomem annotation, not void * */
+	void __iomem *reg_cntl_addr;
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	mhi_dev_mmio_mask_interrupts(dev);
+
+	for (i = 0; i < (MHI_DEV_MMIO_RANGE/4); i++) {
+		reg_cntl_addr = dev->mmio_base_addr + (i * 4);
+		writel_relaxed(dev->mmio_backup[i], reg_cntl_addr);
+	}
+
+	/* Propagate failures; previously the return codes were ignored */
+	rc = mhi_dev_mmio_clear_interrupts(dev);
+	if (rc)
+		return rc;
+
+	rc = mhi_dev_mmio_enable_ctrl_interrupt(dev);
+	if (rc)
+		return rc;
+
+	/* Ensure all restore writes are posted before continuing */
+	mb();
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_restore_mmio);
+
+/* Snapshot the whole MMIO range into dev->mmio_backup, word by word. */
+int mhi_dev_backup_mmio(struct mhi_dev *dev)
+{
+	uint32_t word;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	for (word = 0; word < MHI_DEV_MMIO_RANGE/4; word++)
+		dev->mmio_backup[word] =
+				readl_relaxed(dev->mmio_base_addr + (word * 4));
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_backup_mmio);
+
+/**
+ * mhi_dev_get_mhi_addr() - Cache the host control/data address windows
+ *		(MHICTRLBASE/LIMIT, MHIDATABASE/LIMIT) into dev->host_addr.
+ * @dev:	MHI device structure.
+ *
+ * Fix: every register read was previously unchecked; a failed read now
+ * aborts instead of caching a stale value.
+ */
+int mhi_dev_get_mhi_addr(struct mhi_dev *dev)
+{
+	uint32_t data_value = 0;
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_read(dev, MHICTRLBASE_LOWER, &data_value);
+	if (rc)
+		return rc;
+	dev->host_addr.ctrl_base_lsb = data_value;
+
+	rc = mhi_dev_mmio_read(dev, MHICTRLBASE_HIGHER, &data_value);
+	if (rc)
+		return rc;
+	dev->host_addr.ctrl_base_msb = data_value;
+
+	rc = mhi_dev_mmio_read(dev, MHICTRLLIMIT_LOWER, &data_value);
+	if (rc)
+		return rc;
+	dev->host_addr.ctrl_limit_lsb = data_value;
+
+	rc = mhi_dev_mmio_read(dev, MHICTRLLIMIT_HIGHER, &data_value);
+	if (rc)
+		return rc;
+	dev->host_addr.ctrl_limit_msb = data_value;
+
+	rc = mhi_dev_mmio_read(dev, MHIDATABASE_LOWER, &data_value);
+	if (rc)
+		return rc;
+	dev->host_addr.data_base_lsb = data_value;
+
+	rc = mhi_dev_mmio_read(dev, MHIDATABASE_HIGHER, &data_value);
+	if (rc)
+		return rc;
+	dev->host_addr.data_base_msb = data_value;
+
+	rc = mhi_dev_mmio_read(dev, MHIDATALIMIT_LOWER, &data_value);
+	if (rc)
+		return rc;
+	dev->host_addr.data_limit_lsb = data_value;
+
+	rc = mhi_dev_mmio_read(dev, MHIDATALIMIT_HIGHER, &data_value);
+	if (rc)
+		return rc;
+	dev->host_addr.data_limit_msb = data_value;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_get_mhi_addr);
+
+/* Cache the static MMIO layout and reset the region on first init. */
+int mhi_dev_mmio_init(struct mhi_dev *dev)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_mmio_read(dev, MHIREGLEN, &dev->cfg.mhi_reg_len);
+	if (rc)
+		return rc;
+
+	rc = mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NER_MASK,
+				MHICFG_NER_SHIFT, &dev->cfg.event_rings);
+	if (rc)
+		return rc;
+
+	rc = mhi_dev_mmio_read(dev, CHDBOFF, &dev->cfg.chdb_offset);
+	if (rc)
+		return rc;
+
+	rc = mhi_dev_mmio_read(dev, ERDBOFF, &dev->cfg.erdb_offset);
+	if (rc)
+		return rc;
+
+	dev->cfg.channels = NUM_CHANNELS;
+
+	/* Reset the MMIO region only on first-time initialization */
+	if (dev->mmio_initialized)
+		return 0;
+
+	rc = mhi_dev_mmio_reset(dev);
+	if (rc)
+		pr_err("Error resetting MMIO\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_init);
+
+/* Refresh the cached number of event rings (NER) from MHICFG. */
+int mhi_dev_update_ner(struct mhi_dev *dev)
+{
+	int rc;
+
+	rc = mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NER_MASK,
+				  MHICFG_NER_SHIFT, &dev->cfg.event_rings);
+	if (rc) {
+		pr_err("Error update NER\n");
+	} else {
+		pr_debug("NER in HW :%d\n", dev->cfg.event_rings);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_update_ner);
+
+/**
+ * mhi_dev_dump_mmio() - Dump the MMIO register range to the debug log,
+ *		four 32-bit words per line.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_dump_mmio(struct mhi_dev *dev)
+{
+	uint32_t r1, r2, r3, r4, i, offset = 0;
+	int rc = 0;
+
+	/* Consistency fix: NULL guard, matching every other accessor */
+	if (!dev) {
+		pr_err("Invalid MHI dev context\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i += 4) {
+		rc = mhi_dev_mmio_read(dev, offset, &r1);
+		if (rc)
+			return rc;
+
+		rc = mhi_dev_mmio_read(dev, offset+4, &r2);
+		if (rc)
+			return rc;
+
+		rc = mhi_dev_mmio_read(dev, offset+8, &r3);
+		if (rc)
+			return rc;
+
+		rc = mhi_dev_mmio_read(dev, offset+0xC, &r4);
+		if (rc)
+			return rc;
+
+		/*
+		 * Bug fix: log the offset the words were read from;
+		 * previously the offset was advanced before being
+		 * printed, so every line was labelled 0x10 too high.
+		 */
+		pr_debug("0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				offset, r1, r2, r3, r4);
+		offset += 0x10;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_dev_dump_mmio);
diff --git a/drivers/platform/msm/mhi_dev/mhi_ring.c b/drivers/platform/msm/mhi_dev/mhi_ring.c
new file mode 100644
index 0000000..3007b5a
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_ring.c
@@ -0,0 +1,438 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+
+#include "mhi.h"
+
+/*
+ * Convert an absolute host ring pointer 'p' into an element index
+ * relative to the ring base (rbase).  Assumes 'p' lies within the
+ * ring - no bounds checking is done here.
+ */
+static uint32_t mhi_dev_ring_addr2ofst(struct mhi_dev_ring *ring, uint64_t p)
+{
+	uint64_t rbase;
+
+	rbase = ring->ring_ctx->generic.rbase;
+
+	return (p - rbase)/sizeof(union mhi_dev_ring_element_type);
+}
+
+/* Number of elements in the ring, derived from the context's byte
+ * length (rlen) divided by the element size.
+ */
+static uint32_t mhi_dev_ring_num_elems(struct mhi_dev_ring *ring)
+{
+	return ring->ring_ctx->generic.rlen/
+			sizeof(union mhi_dev_ring_element_type);
+}
+
+/* fetch ring elements from start->end, take care of wrap-around case */
+int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring,
+					uint32_t start, uint32_t end)
+{
+	struct mhi_addr host_addr;
+
+	host_addr.device_pa = ring->ring_shadow.device_pa
+			+ sizeof(union mhi_dev_ring_element_type) * start;
+	host_addr.device_va = ring->ring_shadow.device_va
+			+ sizeof(union mhi_dev_ring_element_type) * start;
+	host_addr.host_pa = ring->ring_shadow.host_pa
+			+ sizeof(union mhi_dev_ring_element_type) * start;
+	if (start < end) {
+		mhi_dev_read_from_host(&host_addr,
+			(ring->ring_cache_dma_handle +
+			sizeof(union mhi_dev_ring_element_type) * start),
+			(end-start) *
+			sizeof(union mhi_dev_ring_element_type));
+	} else if (start > end) {
+		/* copy from 'start' to ring end, then ring start to 'end'*/
+		mhi_dev_read_from_host(&host_addr,
+			(ring->ring_cache_dma_handle +
+			sizeof(union mhi_dev_ring_element_type) * start),
+			(ring->ring_size-start) *
+			sizeof(union mhi_dev_ring_element_type));
+		if (end) {
+			/* wrapped around */
+			host_addr.device_pa = ring->ring_shadow.device_pa;
+			host_addr.device_va = ring->ring_shadow.device_va;
+			host_addr.host_pa = ring->ring_shadow.host_pa;
+			/*
+			 * The wrapped elements occupy indices [0, end) of
+			 * the ring, so they must land at the beginning of
+			 * the local cache.  The previous code offset the
+			 * cache destination by 'start' as well, writing
+			 * the wrapped elements past the wrap point and
+			 * leaving cache entries [0, end) stale.
+			 */
+			mhi_dev_read_from_host(&host_addr,
+				ring->ring_cache_dma_handle,
+				end * sizeof(union mhi_dev_ring_element_type));
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mhi_dev_cache_ring - cache ring elements between the local write
+ * offset and the new host write offset 'wr_offset'.  Event rings are
+ * skipped; only command/channel rings are cached.
+ *
+ * Return: 0 on success (or nothing to do), negative error otherwise.
+ * Note: the caller updates ring->wr_offset; this function does not.
+ */
+int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset)
+{
+	uint32_t old_offset = 0;
+	struct mhi_dev *mhi_ctx;
+
+	if (!ring) {
+		pr_err("%s: Invalid ring context\n", __func__);
+		return -EINVAL;
+	}
+
+	mhi_ctx = ring->mhi_dev;
+
+	if (ring->wr_offset == wr_offset) {
+		mhi_log(MHI_MSG_INFO,
+			"nothing to cache for ring %d, local wr_ofst %d\n",
+			ring->id, ring->wr_offset);
+		mhi_log(MHI_MSG_INFO,
+			"new wr_offset %d\n", wr_offset);
+		return 0;
+	}
+
+	old_offset = ring->wr_offset;
+
+	/*
+	 * Log the requested offset as the new one.  The previous code
+	 * passed ring->wr_offset here, which still holds the old value
+	 * at this point, so the "new ofst" field was always wrong.
+	 */
+	mhi_log(MHI_MSG_ERROR,
+			"caching - rng size :%d local ofst:%d new ofst: %d\n",
+			(uint32_t) ring->ring_size, old_offset,
+			wr_offset);
+
+	/*
+	 * copy the elements starting from old_offset to wr_offset
+	 * take in to account wrap around case event rings are not
+	 * cached, not required
+	 */
+	if (ring->id >= mhi_ctx->ev_ring_start &&
+		ring->id < (mhi_ctx->ev_ring_start +
+				mhi_ctx->cfg.event_rings)) {
+		mhi_log(MHI_MSG_ERROR,
+				"not caching event ring %d\n", ring->id);
+		return 0;
+	}
+
+	mhi_log(MHI_MSG_ERROR, "caching ring %d, start %d, end %d\n",
+			ring->id, old_offset, wr_offset);
+
+	if (mhi_dev_fetch_ring_elements(ring, old_offset, wr_offset)) {
+		mhi_log(MHI_MSG_ERROR,
+		"failed to fetch elements for ring %d, start %d, end %d\n",
+		ring->id, old_offset, wr_offset);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_cache_ring);
+
+/*
+ * mhi_dev_update_wr_offset - read the ring's doorbell register,
+ * convert the host write pointer to a local element offset, cache the
+ * new elements, and update ring->wr_offset.
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
+{
+	uint64_t wr_offset = 0;
+	uint32_t new_wr_offset = 0;
+	int32_t rc = 0;
+
+	if (!ring) {
+		pr_err("%s: Invalid ring context\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (ring->type) {
+	case RING_TYPE_CMD:
+		rc = mhi_dev_mmio_get_cmd_db(ring, &wr_offset);
+		if (rc) {
+			pr_err("%s: CMD DB read failed\n", __func__);
+			return rc;
+		}
+		mhi_log(MHI_MSG_ERROR,
+			"ring %d wr_offset from db 0x%x\n",
+			ring->id, (uint32_t) wr_offset);
+		break;
+	case RING_TYPE_ER:
+		rc = mhi_dev_mmio_get_erc_db(ring, &wr_offset);
+		if (rc) {
+			pr_err("%s: EVT DB read failed\n", __func__);
+			return rc;
+		}
+		break;
+	case RING_TYPE_CH:
+		rc = mhi_dev_mmio_get_ch_db(ring, &wr_offset);
+		if (rc) {
+			pr_err("%s: CH DB read failed\n", __func__);
+			return rc;
+		}
+		mhi_log(MHI_MSG_ERROR,
+			"ring %d wr_offset from db 0x%x\n",
+			ring->id, (uint32_t) wr_offset);
+		break;
+	default:
+		mhi_log(MHI_MSG_ERROR, "invalid ring type\n");
+		return -EINVAL;
+	}
+
+	new_wr_offset = mhi_dev_ring_addr2ofst(ring, wr_offset);
+
+	/* Propagate caching failures instead of silently advancing
+	 * wr_offset past elements that were never cached (previously
+	 * the return value was ignored).
+	 */
+	rc = mhi_dev_cache_ring(ring, new_wr_offset);
+	if (rc)
+		return rc;
+
+	ring->wr_offset = new_wr_offset;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_update_wr_offset);
+
+/*
+ * mhi_dev_process_ring_element - hand the cached element at 'offset'
+ * to the ring's registered callback, if any.
+ *
+ * Return: 0 always, unless the ring context is NULL.
+ */
+int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset)
+{
+	union mhi_dev_ring_element_type *element;
+
+	if (!ring) {
+		pr_err("%s: Invalid ring context\n", __func__);
+		return -EINVAL;
+	}
+
+	element = &ring->ring_cache[offset];
+
+	/* A missing callback is not an error - just note it. */
+	if (!ring->ring_cb) {
+		mhi_log(MHI_MSG_INFO, "No callback registered for ring %d\n",
+				ring->id);
+		return 0;
+	}
+
+	ring->ring_cb(ring->mhi_dev, element, (void *)ring);
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_process_ring_element);
+
+/*
+ * mhi_dev_process_ring - refresh the ring's write offset from the
+ * doorbell and process pending elements.  Channel rings get a single
+ * client notification; other ring types are drained element by
+ * element until rd_offset catches up with wr_offset.
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+int mhi_dev_process_ring(struct mhi_dev_ring *ring)
+{
+	int rc = 0;
+
+	if (!ring) {
+		pr_err("%s: Invalid ring context\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_update_wr_offset(ring);
+	if (rc) {
+		mhi_log(MHI_MSG_ERROR,
+				"Error updating write-offset for ring %d\n",
+				ring->id);
+		return rc;
+	}
+
+	if (ring->type == RING_TYPE_CH) {
+		/* notify the clients that there are elements in the ring */
+		rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
+		if (rc)
+			pr_err("Error fetching elements\n");
+		return rc;
+	}
+
+	while (ring->rd_offset != ring->wr_offset) {
+		rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
+		if (rc) {
+			mhi_log(MHI_MSG_ERROR,
+				"Error processing ring (%d) element (%d)\n",
+				ring->id, ring->rd_offset);
+			return rc;
+		}
+
+		mhi_log(MHI_MSG_ERROR,
+			"Processing ring (%d) rd_offset:%d, wr_offset:%d\n",
+			ring->id, ring->rd_offset, ring->wr_offset);
+
+		mhi_dev_ring_inc_index(ring, ring->rd_offset);
+	}
+
+	/*
+	 * The loop above can only terminate once rd_offset equals
+	 * wr_offset, so the old post-loop inequality check was dead
+	 * code and has been removed.
+	 */
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_process_ring);
+
+/*
+ * mhi_dev_add_element - append one element to the ring at the current
+ * read offset, advance the offset and write the element (and updated
+ * ring-context read pointer) back to the host.
+ *
+ * Return: 0 on success, -EINVAL if the ring is full or the arguments
+ * are invalid, or the error from the doorbell refresh.
+ */
+int mhi_dev_add_element(struct mhi_dev_ring *ring,
+				union mhi_dev_ring_element_type *element)
+{
+	uint32_t old_offset = 0;
+	struct mhi_addr host_addr;
+	int rc;
+
+	if (!ring || !element) {
+		pr_err("%s: Invalid context\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Previously the return value was ignored, so a doorbell read
+	 * failure could lead to the full-ring check below running
+	 * against a stale wr_offset.
+	 */
+	rc = mhi_dev_update_wr_offset(ring);
+	if (rc) {
+		mhi_log(MHI_MSG_ERROR,
+			"Error updating write-offset for ring %d\n",
+			ring->id);
+		return rc;
+	}
+
+	if ((ring->rd_offset + 1) % ring->ring_size == ring->wr_offset) {
+		mhi_log(MHI_MSG_INFO, "ring full to insert element\n");
+		return -EINVAL;
+	}
+
+	old_offset = ring->rd_offset;
+
+	mhi_dev_ring_inc_index(ring, ring->rd_offset);
+
+	ring->ring_ctx->generic.rp = (ring->rd_offset *
+				sizeof(union mhi_dev_ring_element_type)) +
+				ring->ring_ctx->generic.rbase;
+	/*
+	 * Write the element, ring_base has to be the
+	 * iomap of the ring_base for memcpy
+	 */
+	host_addr.host_pa = ring->ring_shadow.host_pa +
+			sizeof(union mhi_dev_ring_element_type) * old_offset;
+	host_addr.device_va = ring->ring_shadow.device_va +
+			sizeof(union mhi_dev_ring_element_type) * old_offset;
+
+	mhi_log(MHI_MSG_ERROR, "adding element to ring (%d)\n", ring->id);
+	mhi_log(MHI_MSG_ERROR, "rd_ofset %d\n", ring->rd_offset);
+	mhi_log(MHI_MSG_ERROR, "type %d\n", element->generic.type);
+
+	mhi_dev_write_to_host(&host_addr, element,
+			sizeof(union mhi_dev_ring_element_type), ring->mhi_dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_add_element);
+
+/*
+ * mhi_ring_start - initialize a ring from its host-provided context:
+ * compute sizes/offsets, allocate the local DMA element cache, set up
+ * shadow addresses and pre-cache the ring contents (except for event
+ * rings, which are not cached).
+ *
+ * Return: 0 on success, -ENOMEM/-EINVAL/negative error on failure.
+ */
+int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
+							struct mhi_dev *mhi)
+{
+	int rc = 0;
+	uint32_t wr_offset = 0;
+	uint32_t offset = 0;
+
+	if (!ring || !ctx || !mhi) {
+		pr_err("%s: Invalid context\n", __func__);
+		return -EINVAL;
+	}
+
+	ring->ring_ctx = ctx;
+	ring->ring_size = mhi_dev_ring_num_elems(ring);
+	ring->rd_offset = mhi_dev_ring_addr2ofst(ring,
+					ring->ring_ctx->generic.rp);
+	ring->wr_offset = mhi_dev_ring_addr2ofst(ring,
+					ring->ring_ctx->generic.rp);
+	ring->mhi_dev = mhi;
+
+	mhi_ring_set_state(ring, RING_STATE_IDLE);
+
+	wr_offset = mhi_dev_ring_addr2ofst(ring,
+					ring->ring_ctx->generic.wp);
+
+	ring->ring_cache = dma_alloc_coherent(mhi->dev,
+			ring->ring_size *
+			sizeof(union mhi_dev_ring_element_type),
+			&ring->ring_cache_dma_handle,
+			GFP_KERNEL);
+	if (!ring->ring_cache)
+		return -ENOMEM;
+
+	offset = (uint32_t)(ring->ring_ctx->generic.rbase -
+					mhi->ctrl_base.host_pa);
+
+	ring->ring_shadow.device_pa = mhi->ctrl_base.device_pa + offset;
+	ring->ring_shadow.device_va = mhi->ctrl_base.device_va + offset;
+	ring->ring_shadow.host_pa = mhi->ctrl_base.host_pa + offset;
+
+	if (ring->type == RING_TYPE_ER)
+		ring->ring_ctx_shadow =
+		(union mhi_dev_ring_ctx *) (mhi->ev_ctx_shadow.device_va +
+			(ring->id - mhi->ev_ring_start) *
+			sizeof(union mhi_dev_ring_ctx));
+	else if (ring->type == RING_TYPE_CMD)
+		ring->ring_ctx_shadow =
+		(union mhi_dev_ring_ctx *) mhi->cmd_ctx_shadow.device_va;
+	else if (ring->type == RING_TYPE_CH)
+		ring->ring_ctx_shadow =
+		(union mhi_dev_ring_ctx *) (mhi->ch_ctx_shadow.device_va +
+		(ring->id - mhi->ch_ring_start)*sizeof(union mhi_dev_ring_ctx));
+
+	/*
+	 * NOTE(review): this unconditionally overwrites the per-type
+	 * shadow pointer computed just above, making that if/else chain
+	 * dead.  Left as-is to preserve behavior - confirm which shadow
+	 * context is actually intended before removing either piece.
+	 */
+	ring->ring_ctx_shadow = ring->ring_ctx;
+
+	if (ring->type != RING_TYPE_ER) {
+		rc = mhi_dev_cache_ring(ring, wr_offset);
+		if (rc) {
+			/* Don't leak the DMA cache allocated above
+			 * (previously returned without freeing it).
+			 */
+			dma_free_coherent(mhi->dev,
+				ring->ring_size *
+				sizeof(union mhi_dev_ring_element_type),
+				ring->ring_cache,
+				ring->ring_cache_dma_handle);
+			ring->ring_cache = NULL;
+			return rc;
+		}
+	}
+
+	mhi_log(MHI_MSG_ERROR, "ctx ring_base:0x%x, rp:0x%x, wp:0x%x\n",
+			(uint32_t)ring->ring_ctx->generic.rbase,
+			(uint32_t)ring->ring_ctx->generic.rp,
+			(uint32_t)ring->ring_ctx->generic.wp);
+	ring->wr_offset = wr_offset;
+
+	return rc;
+}
+EXPORT_SYMBOL(mhi_ring_start);
+
+/*
+ * mhi_ring_init - put a ring object into a known, un-started state:
+ * record its id and type, clear the callback, and mark it
+ * uninitialized.  mhi_ring_start() completes the setup later.
+ */
+void mhi_ring_init(struct mhi_dev_ring *ring, enum mhi_dev_ring_type type,
+								int id)
+{
+	if (!ring) {
+		pr_err("%s: Invalid ring context\n", __func__);
+		return;
+	}
+
+	ring->type = type;
+	ring->id = id;
+	ring->ring_cb = NULL;
+	ring->state = RING_STATE_UINT;
+}
+EXPORT_SYMBOL(mhi_ring_init);
+
+/*
+ * Register the element-processing callback for a ring; it is invoked
+ * from mhi_dev_process_ring_element() for each cached element.  A
+ * NULL callback is rejected (use mhi_ring_init() to clear it).
+ */
+void mhi_ring_set_cb(struct mhi_dev_ring *ring,
+			void (*ring_cb)(struct mhi_dev *dev,
+			union mhi_dev_ring_element_type *el, void *ctx))
+{
+	if (!ring || !ring_cb) {
+		pr_err("%s: Invalid context\n", __func__);
+		return;
+	}
+
+	ring->ring_cb = ring_cb;
+}
+EXPORT_SYMBOL(mhi_ring_set_cb);
+
+/*
+ * Set the ring's lifecycle state.  Values beyond RING_STATE_PENDING
+ * are rejected; invalid input is logged and ignored (no return code).
+ */
+void mhi_ring_set_state(struct mhi_dev_ring *ring,
+				enum mhi_dev_ring_state state)
+{
+	if (!ring) {
+		pr_err("%s: Invalid ring context\n", __func__);
+		return;
+	}
+
+	if (state > RING_STATE_PENDING) {
+		pr_err("%s: Invalid ring state\n", __func__);
+		return;
+	}
+
+	ring->state = state;
+}
+EXPORT_SYMBOL(mhi_ring_set_state);
+
+/*
+ * Return the ring's current lifecycle state, or -EINVAL (smuggled
+ * through the enum return type) when the ring pointer is NULL -
+ * callers must treat negative values as errors.
+ */
+enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring)
+{
+	if (!ring) {
+		pr_err("%s: Invalid ring context\n", __func__);
+		return -EINVAL;
+	}
+
+	return ring->state;
+}
+EXPORT_SYMBOL(mhi_ring_get_state);
diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.c b/drivers/platform/msm/mhi_dev/mhi_sm.c
new file mode 100644
index 0000000..8179fad
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_sm.c
@@ -0,0 +1,1318 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include <linux/ipa_mhi.h>
+#include "mhi_hwio.h"
+#include "mhi_sm.h"
+
+#define MHI_SM_DBG(fmt, args...) \
+	mhi_log(MHI_MSG_DBG, fmt, ##args)
+
+#define MHI_SM_ERR(fmt, args...) \
+	mhi_log(MHI_MSG_ERROR, fmt, ##args)
+
+#define MHI_SM_FUNC_ENTRY() MHI_SM_DBG("ENTRY\n")
+#define MHI_SM_FUNC_EXIT() MHI_SM_DBG("EXIT\n")
+
+
+/* Map an MHI core event to its printable name for logging. */
+static inline const char *mhi_sm_dev_event_str(enum mhi_dev_event state)
+{
+	switch (state) {
+	case MHI_DEV_EVENT_CTRL_TRIG:
+		return "MHI_DEV_EVENT_CTRL_TRIG";
+	case MHI_DEV_EVENT_M0_STATE:
+		return "MHI_DEV_EVENT_M0_STATE";
+	case MHI_DEV_EVENT_M1_STATE:
+		return "MHI_DEV_EVENT_M1_STATE";
+	case MHI_DEV_EVENT_M2_STATE:
+		return "MHI_DEV_EVENT_M2_STATE";
+	case MHI_DEV_EVENT_M3_STATE:
+		return "MHI_DEV_EVENT_M3_STATE";
+	case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+		return "MHI_DEV_EVENT_HW_ACC_WAKEUP";
+	case MHI_DEV_EVENT_CORE_WAKEUP:
+		return "MHI_DEV_EVENT_CORE_WAKEUP";
+	default:
+		return "INVALID MHI_DEV_EVENT";
+	}
+}
+
+/* Map an MHI M-state to its printable name for logging. */
+static inline const char *mhi_sm_mstate_str(enum mhi_dev_state state)
+{
+	switch (state) {
+	case MHI_DEV_RESET_STATE:
+		return "RESET";
+	case MHI_DEV_READY_STATE:
+		return "READY";
+	case MHI_DEV_M0_STATE:
+		return "M0";
+	case MHI_DEV_M1_STATE:
+		return "M1";
+	case MHI_DEV_M2_STATE:
+		return "M2";
+	case MHI_DEV_M3_STATE:
+		return "M3";
+	case MHI_DEV_SYSERR_STATE:
+		return "SYSTEM ERROR";
+	default:
+		return "INVALID";
+	}
+}
+
+/* EP-PCIe link D-state as tracked by the MHI state machine. */
+enum mhi_sm_ep_pcie_state {
+	MHI_SM_EP_PCIE_LINK_DISABLE,
+	MHI_SM_EP_PCIE_D0_STATE,
+	MHI_SM_EP_PCIE_D3_HOT_STATE,
+	MHI_SM_EP_PCIE_D3_COLD_STATE,
+};
+
+/* Map an EP-PCIe D-state to its printable name for logging. */
+static inline const char *mhi_sm_dstate_str(enum mhi_sm_ep_pcie_state state)
+{
+	switch (state) {
+	case MHI_SM_EP_PCIE_LINK_DISABLE:
+		return "EP_PCIE_LINK_DISABLE";
+	case MHI_SM_EP_PCIE_D0_STATE:
+		return "D0_STATE";
+	case MHI_SM_EP_PCIE_D3_HOT_STATE:
+		return "D3_HOT_STATE";
+	case MHI_SM_EP_PCIE_D3_COLD_STATE:
+		return "D3_COLD_STATE";
+	default:
+		return "INVALID D-STATE";
+	}
+}
+
+/* Map an EP-PCIe event to its printable name for logging. */
+static inline const char *mhi_sm_pcie_event_str(enum ep_pcie_event event)
+{
+	switch (event) {
+	case EP_PCIE_EVENT_LINKDOWN:
+		return "EP_PCIE_LINKDOWN_EVENT";
+	case EP_PCIE_EVENT_LINKUP:
+		return "EP_PCIE_LINKUP_EVENT";
+	case EP_PCIE_EVENT_PM_D3_HOT:
+		return "EP_PCIE_PM_D3_HOT_EVENT";
+	case EP_PCIE_EVENT_PM_D3_COLD:
+		return "EP_PCIE_PM_D3_COLD_EVENT";
+	case EP_PCIE_EVENT_PM_RST_DEAST:
+		return "EP_PCIE_PM_RST_DEAST_EVENT";
+	case EP_PCIE_EVENT_PM_D0:
+		return "EP_PCIE_PM_D0_EVENT";
+	case EP_PCIE_EVENT_MHI_A7:
+		return "EP_PCIE_MHI_A7";
+	default:
+		return "INVALID_PCIE_EVENT";
+	}
+}
+
+/**
+ * struct mhi_sm_device_event - mhi-core event work
+ * @event: mhi core state change event
+ * @work: work struct
+ *
+ * used to add work for mhi state change event to mhi_sm_wq
+ */
+struct mhi_sm_device_event {
+	enum mhi_dev_event event;
+	struct work_struct work;
+};
+
+/**
+ * struct mhi_sm_ep_pcie_event - ep-pcie event work
+ * @event: ep-pcie link state change event
+ * @work: work struct
+ *
+ * used to add work for ep-pcie link state change event to mhi_sm_wq
+ */
+struct mhi_sm_ep_pcie_event {
+	enum ep_pcie_event event;
+	struct work_struct work;
+};
+
+/**
+ * struct mhi_sm_stats - MHI state machine statistics, viewable using debugfs
+ * @m0_event_cnt: total number of MHI_DEV_EVENT_M0_STATE events
+ * @m3_event_cnt: total number of MHI_DEV_EVENT_M3_STATE events
+ * @hw_acc_wakeup_event_cnt: total number of MHI_DEV_EVENT_HW_ACC_WAKEUP events
+ * @mhi_core_wakeup_event_cnt: total number of MHI_DEV_EVENT_CORE_WAKEUP events
+ * @linkup_event_cnt: total number of EP_PCIE_EVENT_LINKUP events
+ * @rst_deast_event_cnt: total number of EP_PCIE_EVENT_PM_RST_DEAST events
+ * @d3_hot_event_cnt: total number of EP_PCIE_EVENT_PM_D3_HOT events
+ * @d3_cold_event_cnt: total number of EP_PCIE_EVENT_PM_D3_COLD events
+ * @d0_event_cnt: total number of EP_PCIE_EVENT_PM_D0 events
+ * @linkdown_event_cnt: total number of EP_PCIE_EVENT_LINKDOWN events
+ */
+struct mhi_sm_stats {
+	int m0_event_cnt;
+	int m3_event_cnt;
+	int hw_acc_wakeup_event_cnt;
+	int mhi_core_wakeup_event_cnt;
+	int linkup_event_cnt;
+	int rst_deast_event_cnt;
+	int d3_hot_event_cnt;
+	int d3_cold_event_cnt;
+	int d0_event_cnt;
+	int linkdown_event_cnt;
+};
+
+/**
+ * struct mhi_sm_dev - MHI state manager context information
+ * @mhi_state: MHI M state of the MHI device
+ * @d_state: EP-PCIe D state of the MHI device
+ * @mhi_dev: MHI device struct pointer
+ * @mhi_state_lock: mutex for mhi_state
+ * @syserr_occurred:flag to indicate if a syserr condition has occurred.
+ * @mhi_sm_wq: workqueue for state change events
+ * @pending_device_events: number of pending mhi state change events in sm_wq
+ * @pending_pcie_events: number of pending mhi state change events in sm_wq
+ * @stats: stats on the handled and pending events
+ */
+struct mhi_sm_dev {
+	enum mhi_dev_state mhi_state;
+	enum mhi_sm_ep_pcie_state d_state;
+	struct mhi_dev *mhi_dev;
+	struct mutex mhi_state_lock;
+	bool syserr_occurred;
+	struct workqueue_struct *mhi_sm_wq;
+	atomic_t pending_device_events;
+	atomic_t pending_pcie_events;
+	struct mhi_sm_stats stats;
+};
+static struct mhi_sm_dev *mhi_sm_ctx;
+
+
+#ifdef CONFIG_DEBUG_FS
+#define MHI_SM_MAX_MSG_LEN 1024
+static char dbg_buff[MHI_SM_MAX_MSG_LEN];
+static struct dentry *dent;
+static struct dentry *dfile_stats;
+
+static ssize_t mhi_sm_debugfs_read(struct file *file, char __user *ubuf,
+				size_t count, loff_t *ppos);
+static ssize_t mhi_sm_debugfs_write(struct file *file,
+			const char __user *ubuf, size_t count, loff_t *ppos);
+
+const struct file_operations mhi_sm_stats_ops = {
+	.read = mhi_sm_debugfs_read,
+	.write = mhi_sm_debugfs_write,
+};
+
+/* Create the mhi_sm debugfs directory and its "stats" file. */
+static void mhi_sm_debugfs_init(void)
+{
+	const mode_t read_write_mode = 0666;
+
+	dent = debugfs_create_dir("mhi_sm", 0);
+	/*
+	 * debugfs_create_dir() can return NULL (e.g. allocation failure)
+	 * as well as an ERR_PTR; the previous IS_ERR()-only check missed
+	 * the NULL case and would have passed a NULL parent below.
+	 */
+	if (!dent || IS_ERR(dent)) {
+		MHI_SM_ERR("fail to create folder mhi_sm\n");
+		return;
+	}
+
+	dfile_stats =
+		debugfs_create_file("stats", read_write_mode, dent,
+				0, &mhi_sm_stats_ops);
+	if (!dfile_stats || IS_ERR(dfile_stats)) {
+		MHI_SM_ERR("fail to create file stats\n");
+		goto fail;
+	}
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+/* Tear down the mhi_sm debugfs directory and everything under it. */
+static void mhi_sm_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+#else
+static inline void mhi_sm_debugfs_init(void) {}
+static inline void mhi_sm_debugfs_destroy(void) {}
+#endif /*CONFIG_DEBUG_FS*/
+
+
+/*
+ * mhi_sm_mmio_set_mhistatus - reflect 'state' in the MHISTATUS MMIO
+ * register and, on success, in mhi_sm_ctx->mhi_state.
+ *
+ * READY and SYSERR additionally set their dedicated status bit before
+ * writing the MHISTATE field.  M1/M2 are rejected (unsupported), and
+ * an unknown state leaves both register and cached state untouched.
+ * Caller is expected to hold mhi_state_lock - confirm at call sites.
+ */
+static void mhi_sm_mmio_set_mhistatus(enum mhi_dev_state state)
+{
+	struct mhi_dev *dev = mhi_sm_ctx->mhi_dev;
+
+	MHI_SM_FUNC_ENTRY();
+
+	switch (state) {
+	case MHI_DEV_READY_STATE:
+		MHI_SM_DBG("set MHISTATUS to READY mode\n");
+		mhi_dev_mmio_masked_write(dev, MHISTATUS,
+				MHISTATUS_READY_MASK,
+				MHISTATUS_READY_SHIFT, 1);
+
+		mhi_dev_mmio_masked_write(dev, MHISTATUS,
+				MHISTATUS_MHISTATE_MASK,
+				MHISTATUS_MHISTATE_SHIFT, state);
+		break;
+	case MHI_DEV_SYSERR_STATE:
+		MHI_SM_DBG("set MHISTATUS to SYSTEM ERROR mode\n");
+		mhi_dev_mmio_masked_write(dev, MHISTATUS,
+				MHISTATUS_SYSERR_MASK,
+				MHISTATUS_SYSERR_SHIFT, 1);
+
+		mhi_dev_mmio_masked_write(dev, MHISTATUS,
+				MHISTATUS_MHISTATE_MASK,
+				MHISTATUS_MHISTATE_SHIFT, state);
+		break;
+	case MHI_DEV_M1_STATE:
+	case MHI_DEV_M2_STATE:
+		MHI_SM_ERR("Not supported state, can't set MHISTATUS to %s\n",
+			mhi_sm_mstate_str(state));
+		goto exit;
+	case MHI_DEV_M0_STATE:
+	case MHI_DEV_M3_STATE:
+		MHI_SM_DBG("set MHISTATUS.MHISTATE to %s state\n",
+			mhi_sm_mstate_str(state));
+		mhi_dev_mmio_masked_write(dev, MHISTATUS,
+				MHISTATUS_MHISTATE_MASK,
+				MHISTATUS_MHISTATE_SHIFT, state);
+		break;
+	default:
+		MHI_SM_ERR("Invalid mhi state: 0x%x state", state);
+		goto exit;
+	}
+
+	/* Cache the new state only after the register write succeeded. */
+	mhi_sm_ctx->mhi_state = state;
+
+exit:
+	MHI_SM_FUNC_EXIT();
+}
+
+/**
+ * mhi_sm_is_legal_event_on_state() - Determine if MHI state transition is valid
+ * @curr_state: current MHI state
+ * @event: MHI state change event
+ *
+ * Determine according to MHI state management if the state change event
+ * is valid on the current mhi state.
+ * Note: The decision doesn't take into account M1 and M2 states.
+ *
+ * Return:	true: transition is valid
+ *		false: transition is not valid
+ */
+static bool mhi_sm_is_legal_event_on_state(enum mhi_dev_state curr_state,
+	enum mhi_dev_event event)
+{
+	bool res;
+
+	switch (event) {
+	case MHI_DEV_EVENT_M0_STATE:
+		/* M0 legality also depends on the global EP-PCIe D-state,
+		 * not only on curr_state.
+		 */
+		res = (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_D0_STATE &&
+			curr_state != MHI_DEV_RESET_STATE);
+		break;
+	case MHI_DEV_EVENT_M3_STATE:
+	case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+	case MHI_DEV_EVENT_CORE_WAKEUP:
+		res = (curr_state == MHI_DEV_M3_STATE ||
+			curr_state == MHI_DEV_M0_STATE);
+		break;
+	default:
+		MHI_SM_ERR("Received invalid event: %s\n",
+			mhi_sm_dev_event_str(event));
+		res = false;
+		break;
+	}
+
+	return res;
+}
+
+/**
+ * mhi_sm_is_legal_pcie_event_on_state() - Determine if EP-PCIe link state
+ * transition is valid on the current system state.
+ * @curr_mstate: current MHI state
+ * @curr_dstate: current ep-pcie link, d, state
+ * @event: ep-pcie link state change event
+ *
+ * Return:	true: transition is valid
+ *		false: transition is not valid
+ */
+static bool mhi_sm_is_legal_pcie_event_on_state(enum mhi_dev_state curr_mstate,
+	enum mhi_sm_ep_pcie_state curr_dstate, enum ep_pcie_event event)
+{
+	switch (event) {
+	case EP_PCIE_EVENT_LINKUP:
+	case EP_PCIE_EVENT_LINKDOWN:
+	case EP_PCIE_EVENT_MHI_A7:
+		/* Always legal, on any MHI/D state. */
+		return true;
+	case EP_PCIE_EVENT_PM_D3_HOT:
+		return (curr_mstate == MHI_DEV_M3_STATE &&
+			curr_dstate != MHI_SM_EP_PCIE_LINK_DISABLE);
+	case EP_PCIE_EVENT_PM_D3_COLD:
+		return (curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE ||
+			curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE);
+	case EP_PCIE_EVENT_PM_RST_DEAST:
+		return (curr_dstate == MHI_SM_EP_PCIE_D0_STATE ||
+			curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE);
+	case EP_PCIE_EVENT_PM_D0:
+		return (curr_dstate == MHI_SM_EP_PCIE_D0_STATE ||
+			curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE);
+	default:
+		MHI_SM_ERR("Invalid ep_pcie event, received: %s\n",
+			mhi_sm_pcie_event_str(event));
+		return false;
+	}
+}
+
+/**
+ * mhi_sm_change_to_M0() - switch to M0 state.
+ *
+ * Switch MHI-device state to M0, if possible according to MHI state machine.
+ * Notify the MHI-host on the transition, in case MHI is suspended- resume MHI.
+ *
+ * Return:	0: success
+ *		negative: failure
+ */
+static int mhi_sm_change_to_M0(void)
+{
+	enum mhi_dev_state old_state;
+	struct ep_pcie_msi_config cfg;
+	int res;
+
+	MHI_SM_FUNC_ENTRY();
+
+	old_state = mhi_sm_ctx->mhi_state;
+
+	if (old_state == MHI_DEV_M0_STATE) {
+		MHI_SM_DBG("Nothing to do, already in M0 state\n");
+		res = 0;
+		goto exit;
+	} else if (old_state == MHI_DEV_M3_STATE ||
+				old_state == MHI_DEV_READY_STATE) {
+		/*  Retrieve MHI configuration*/
+		res = mhi_dev_config_outbound_iatu(mhi_sm_ctx->mhi_dev);
+		if (res) {
+			MHI_SM_ERR("Fail to configure iATU, returned %d\n",
+			res);
+			goto exit;
+		}
+		res = ep_pcie_get_msi_config(mhi_sm_ctx->mhi_dev->phandle,
+			&cfg);
+		if (res) {
+			MHI_SM_ERR("Error retrieving pcie msi logic\n");
+			goto exit;
+		}
+		res = mhi_pcie_config_db_routing(mhi_sm_ctx->mhi_dev);
+		if (res) {
+			MHI_SM_ERR("Error configuring db routing\n");
+			goto exit;
+
+		}
+	} else {
+		MHI_SM_ERR("unexpected old_state: %s\n",
+			mhi_sm_mstate_str(old_state));
+		/* Previously 'res' was returned uninitialized on this
+		 * path - fail explicitly instead.
+		 */
+		res = -EINVAL;
+		goto exit;
+	}
+	mhi_sm_mmio_set_mhistatus(MHI_DEV_M0_STATE);
+
+	/* Tell the host, device move to M0 */
+	res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev,
+				MHI_DEV_M0_STATE);
+	if (res) {
+		MHI_SM_ERR("Failed to send event %s to host, returned %d\n",
+			mhi_sm_dev_event_str(MHI_DEV_EVENT_M0_STATE), res);
+		goto exit;
+	}
+
+	if (old_state == MHI_DEV_READY_STATE) {
+		/* Tell the host the EE */
+		res = mhi_dev_send_ee_event(mhi_sm_ctx->mhi_dev, 2);
+		if (res) {
+			MHI_SM_ERR("failed sending EE event to host\n");
+			goto exit;
+		}
+	} else if (old_state == MHI_DEV_M3_STATE) {
+		/*Resuming MHI operation*/
+		res = mhi_dev_resume(mhi_sm_ctx->mhi_dev);
+		if (res) {
+			MHI_SM_ERR("Failed resuming mhi core, returned %d",
+				res);
+			goto exit;
+		}
+		res = ipa_mhi_resume();
+		if (res) {
+			MHI_SM_ERR("Failed resuming ipa_mhi, returned %d",
+				res);
+			goto exit;
+		}
+	}
+	res  = 0;
+
+exit:
+	MHI_SM_FUNC_EXIT();
+	return res;
+}
+
+/**
+ * mhi_sm_change_to_M3() - switch to M3 state
+ *
+ * Switch MHI-device state to M3, if possible according to MHI state machine.
+ * Suspend MHI traffic and notify the host on the transition.
+ *
+ * Return:	0: success
+ *		negative: failure
+ */
+static int mhi_sm_change_to_M3(void)
+{
+	enum mhi_dev_state old_state;
+	int res = 0;
+
+	MHI_SM_FUNC_ENTRY();
+
+	old_state = mhi_sm_ctx->mhi_state;
+	if (old_state == MHI_DEV_M3_STATE) {
+		MHI_SM_DBG("Nothing to do, already in M3 state\n");
+		res = 0;
+		goto exit;
+	}
+	/* Suspending MHI operation*/
+	res = mhi_dev_suspend(mhi_sm_ctx->mhi_dev);
+	if (res) {
+		MHI_SM_ERR("Failed to suspend mhi_core, returned %d\n", res);
+		goto exit;
+	}
+	res = ipa_mhi_suspend(true);
+	if (res) {
+		MHI_SM_ERR("Failed to suspend ipa_mhi, returned %d\n", res);
+		goto exit;
+	}
+	mhi_sm_mmio_set_mhistatus(MHI_DEV_M3_STATE);
+
+	 /* tell the host, device move to M3 */
+	res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev,
+				MHI_DEV_M3_STATE);
+	if (res) {
+		/* log typo fixed: "sendind" -> "sending" */
+		MHI_SM_ERR("Failed sending event: %s to mhi_host\n",
+			mhi_sm_dev_event_str(MHI_DEV_EVENT_M3_STATE));
+		goto exit;
+	}
+
+exit:
+	MHI_SM_FUNC_EXIT();
+	return res;
+}
+
+/**
+ * mhi_sm_wakeup_host() - wakeup MHI-host
+ * @event: MHI state change event
+ *
+ * Sends a wakeup event to the MHI-host via EP-PCIe, in case MHI is in
+ * M3 state.
+ *
+ * Return:	0:success
+ *		negative: failure
+ */
+static int mhi_sm_wakeup_host(enum mhi_dev_event event)
+{
+	int res = 0;
+
+	MHI_SM_FUNC_ENTRY();
+
+	if (mhi_sm_ctx->mhi_state != MHI_DEV_M3_STATE) {
+		MHI_SM_DBG("Nothing to do, Host is already awake\n");
+		goto exit;
+	}
+
+	/*
+	 * ep_pcie driver is responsible to send the right wakeup
+	 * event, assert WAKE#, according to Link state
+	 */
+	res = ep_pcie_wakeup_host(mhi_sm_ctx->mhi_dev->phandle);
+	if (res)
+		MHI_SM_ERR("Failed to wakeup MHI host, returned %d\n",
+			res);
+
+exit:
+	MHI_SM_FUNC_EXIT();
+	return res;
+}
+
+/**
+ * mhi_sm_handle_syserr() - switch to system error state.
+ *
+ * Called on system error condition.
+ * Switch MHI to SYSERR state, notify MHI-host and ASSERT on the device.
+ * Synchronic function.
+ *
+ * Return:	0: success
+ *		negative: failure
+ */
+static int mhi_sm_handle_syserr(void)
+{
+	int res;
+	enum ep_pcie_link_status link_status;
+	bool link_enabled = false;
+
+	MHI_SM_FUNC_ENTRY();
+
+	MHI_SM_ERR("Start handling SYSERR, MHI state: %s and %s",
+		mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+		mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+
+	if (mhi_sm_ctx->mhi_state == MHI_DEV_SYSERR_STATE) {
+		MHI_SM_DBG("Nothing to do, already in SYSERR state\n");
+		return 0;
+	}
+
+	mhi_sm_ctx->syserr_occurred = true;
+	link_status = ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle);
+	if (link_status == EP_PCIE_LINK_DISABLED) {
+		/* try to power on ep-pcie, restore mmio, and wakup host */
+		res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+			EP_PCIE_OPT_POWER_ON);
+		if (res) {
+			MHI_SM_ERR("Failed to power on ep-pcie, returned %d\n",
+				res);
+			goto exit;
+		}
+		mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev);
+		res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+			EP_PCIE_OPT_AST_WAKE | EP_PCIE_OPT_ENUM);
+		if (res) {
+			MHI_SM_ERR("Failed to wakup host and enable ep-pcie\n");
+			goto exit;
+		}
+	}
+
+	link_enabled = true;
+	mhi_sm_mmio_set_mhistatus(MHI_DEV_SYSERR_STATE);
+
+	/* Tell the host, device move to SYSERR state */
+	res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev,
+				MHI_DEV_SYSERR_STATE);
+	if (res) {
+		MHI_SM_ERR("Failed to send %s state change event to host\n",
+			mhi_sm_mstate_str(MHI_DEV_SYSERR_STATE));
+		goto exit;
+	}
+
+exit:
+	if (!link_enabled)
+		MHI_SM_ERR("EP-PCIE Link is disable cannot set MMIO to %s\n",
+			mhi_sm_mstate_str(MHI_DEV_SYSERR_STATE));
+
+	/* Escape sequences fixed: were literal "/n" in the original. */
+	MHI_SM_ERR("\n\n\nASSERT ON DEVICE !!!!\n\n\n");
+	/* WARN_ON() takes a condition; a bare WARN_ON() does not compile. */
+	WARN_ON(1);
+
+	MHI_SM_FUNC_EXIT();
+	return res;
+}
+
+/**
+ * mhi_sm_dev_event_manager() - performs MHI state change
+ * @work: work_struct used by the work queue
+ *
+ * This function is called from mhi_sm_wq, and performs mhi state change
+ * if possible according to MHI state machine
+ */
+static void mhi_sm_dev_event_manager(struct work_struct *work)
+{
+	int res;
+	struct mhi_sm_device_event *chg_event = container_of(work,
+		struct mhi_sm_device_event, work);
+
+	MHI_SM_FUNC_ENTRY();
+
+	/* mhi_state_lock serializes all state transitions; it is held
+	 * for the entire handling of this event.
+	 */
+	mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+	MHI_SM_DBG("Start handling %s event, current states: %s & %s\n",
+		mhi_sm_dev_event_str(chg_event->event),
+		mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+		mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+
+	/* Once a SYSERR was raised, further device events are dropped. */
+	if (mhi_sm_ctx->syserr_occurred) {
+		MHI_SM_DBG("syserr occurred, Ignoring %s\n",
+			mhi_sm_dev_event_str(chg_event->event));
+		goto unlock_and_exit;
+	}
+
+	/* An illegal transition is treated as a system error. */
+	if (!mhi_sm_is_legal_event_on_state(mhi_sm_ctx->mhi_state,
+		chg_event->event)) {
+		MHI_SM_ERR("%s: illegal in current MHI state: %s and %s\n",
+			mhi_sm_dev_event_str(chg_event->event),
+			mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+			mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+		res = mhi_sm_handle_syserr();
+		if (res)
+			MHI_SM_ERR("Failed switching to SYSERR state\n");
+		goto unlock_and_exit;
+	}
+
+	switch (chg_event->event) {
+	case MHI_DEV_EVENT_M0_STATE:
+		res = mhi_sm_change_to_M0();
+		if (res)
+			MHI_SM_ERR("Failed switching to M0 state\n");
+		break;
+	case MHI_DEV_EVENT_M3_STATE:
+		res = mhi_sm_change_to_M3();
+		if (res)
+			MHI_SM_ERR("Failed switching to M3 state\n");
+		break;
+	case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+	case MHI_DEV_EVENT_CORE_WAKEUP:
+		res = mhi_sm_wakeup_host(chg_event->event);
+		if (res)
+			MHI_SM_ERR("Failed to wakeup MHI host\n");
+		break;
+	case MHI_DEV_EVENT_CTRL_TRIG:
+	case MHI_DEV_EVENT_M1_STATE:
+	case MHI_DEV_EVENT_M2_STATE:
+		MHI_SM_ERR("Error: %s event is not supported\n",
+			mhi_sm_dev_event_str(chg_event->event));
+		break;
+	default:
+		MHI_SM_ERR("Error: Invalid event, 0x%x", chg_event->event);
+		break;
+	}
+unlock_and_exit:
+	mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+	/* The event struct was allocated by the enqueuer; ownership
+	 * transfers here, so drop the pending count and free it.
+	 */
+	atomic_dec(&mhi_sm_ctx->pending_device_events);
+	kfree(chg_event);
+
+	MHI_SM_FUNC_EXIT();
+}
+
+/**
+ * mhi_sm_pcie_event_manager() - performs EP-PCIe link state change
+ * @work: work_struct embedded in the queued struct mhi_sm_ep_pcie_event
+ *
+ * This function is called from mhi_sm_wq, and performs ep-pcie link state
+ * change if possible according to current system state and MHI state machine
+ */
+static void mhi_sm_pcie_event_manager(struct work_struct *work)
+{
+	int res;
+	enum mhi_sm_ep_pcie_state old_dstate;
+	struct mhi_sm_ep_pcie_event *chg_event = container_of(work,
+		struct mhi_sm_ep_pcie_event, work);
+	enum ep_pcie_event pcie_event = chg_event->event;
+
+	MHI_SM_FUNC_ENTRY();
+
+	mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+	old_dstate = mhi_sm_ctx->d_state;
+
+	MHI_SM_DBG("Start handling %s event, current MHI state %s and %s\n",
+		mhi_sm_pcie_event_str(chg_event->event),
+		mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+		mhi_sm_dstate_str(old_dstate));
+
+	/* After a syserr only a LINKDOWN event is still processed */
+	if (mhi_sm_ctx->syserr_occurred &&
+			pcie_event != EP_PCIE_EVENT_LINKDOWN) {
+		MHI_SM_DBG("SYSERR occurred. Ignoring %s",
+			mhi_sm_pcie_event_str(pcie_event));
+		goto unlock_and_exit;
+	}
+
+	/* An event illegal for the current state escalates to SYSERR */
+	if (!mhi_sm_is_legal_pcie_event_on_state(mhi_sm_ctx->mhi_state,
+		old_dstate, pcie_event)) {
+		MHI_SM_ERR("%s: illegal in current MHI state: %s and %s\n",
+			mhi_sm_pcie_event_str(pcie_event),
+			mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+			mhi_sm_dstate_str(old_dstate));
+		res = mhi_sm_handle_syserr();
+		if (res)
+			MHI_SM_ERR("Failed switching to SYSERR state\n");
+		goto unlock_and_exit;
+	}
+
+	switch (pcie_event) {
+	case EP_PCIE_EVENT_LINKUP:
+		if (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_LINK_DISABLE)
+			mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+		break;
+	case EP_PCIE_EVENT_LINKDOWN:
+		/* the goto also serves as the break for this case */
+		res = mhi_sm_handle_syserr();
+		if (res)
+			MHI_SM_ERR("Failed switching to SYSERR state\n");
+		goto unlock_and_exit;
+	case EP_PCIE_EVENT_PM_D3_HOT:
+		if (old_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE) {
+			MHI_SM_DBG("cannot move to D3_HOT from D3_COLD\n");
+			break;
+		}
+		/* Backup MMIO is done on the callback function*/
+		mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D3_HOT_STATE;
+		break;
+	case EP_PCIE_EVENT_PM_D3_COLD:
+		if (old_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE) {
+			MHI_SM_DBG("Nothing to do, already in D3_COLD state\n");
+			break;
+		}
+		ep_pcie_disable_endpoint(mhi_sm_ctx->mhi_dev->phandle);
+		mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D3_COLD_STATE;
+		break;
+	case EP_PCIE_EVENT_PM_RST_DEAST:
+		/* power on, restore MMIO, then link train before leaving D3 */
+		if (old_dstate == MHI_SM_EP_PCIE_D0_STATE) {
+			MHI_SM_DBG("Nothing to do, already in D0 state\n");
+			break;
+		}
+		res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+			EP_PCIE_OPT_POWER_ON);
+		if (res) {
+			MHI_SM_ERR("Failed to power on ep_pcie, returned %d\n",
+				res);
+			goto unlock_and_exit;
+		}
+
+		mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev);
+
+		res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+			EP_PCIE_OPT_ENUM);
+		if (res) {
+			MHI_SM_ERR("ep-pcie failed to link train, return %d\n",
+				res);
+			goto unlock_and_exit;
+		}
+		mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+		break;
+	case EP_PCIE_EVENT_PM_D0:
+		if (old_dstate == MHI_SM_EP_PCIE_D0_STATE) {
+			MHI_SM_DBG("Nothing to do, already in D0 state\n");
+			break;
+		}
+		mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+		break;
+	default:
+		MHI_SM_ERR("Invalid EP_PCIE event, received 0x%x\n",
+			pcie_event);
+		break;
+	}
+
+unlock_and_exit:
+	mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+	atomic_dec(&mhi_sm_ctx->pending_pcie_events);
+	kfree(chg_event);
+
+	MHI_SM_FUNC_EXIT();
+}
+
+/**
+ * mhi_dev_sm_init() - Initialize MHI state machine.
+ * @mhi_dev: pointer to mhi device instance
+ *
+ * Assuming MHISTATUS register is in RESET state.
+ *
+ * Return:	0 success
+ *		-EINVAL: invalid param
+ *		-ENOMEM: allocating memory error
+ */
+int mhi_dev_sm_init(struct mhi_dev *mhi_dev)
+{
+	int res;
+	enum ep_pcie_link_status link_state;
+
+	MHI_SM_FUNC_ENTRY();
+
+	if (!mhi_dev) {
+		MHI_SM_ERR("Fail: Null argument\n");
+		return -EINVAL;
+	}
+
+	/* devm allocation: freed automatically when the device detaches */
+	mhi_sm_ctx = devm_kzalloc(mhi_dev->dev, sizeof(*mhi_sm_ctx),
+		GFP_KERNEL);
+	if (!mhi_sm_ctx) {
+		MHI_SM_ERR("devm_kzalloc err: mhi_sm_ctx\n");
+		return -ENOMEM;
+	}
+
+	/*init debugfs*/
+	mhi_sm_debugfs_init();
+	mhi_sm_ctx->mhi_sm_wq = create_singlethread_workqueue("mhi_sm_wq");
+	if (!mhi_sm_ctx->mhi_sm_wq) {
+		MHI_SM_ERR("Failed to create singlethread_workqueue: sm_wq\n");
+		res = -ENOMEM;
+		goto fail_init_wq;
+	}
+
+	mutex_init(&mhi_sm_ctx->mhi_state_lock);
+	mhi_sm_ctx->mhi_dev = mhi_dev;
+	mhi_sm_ctx->mhi_state = MHI_DEV_RESET_STATE;
+	mhi_sm_ctx->syserr_occurred = false;
+	atomic_set(&mhi_sm_ctx->pending_device_events, 0);
+	atomic_set(&mhi_sm_ctx->pending_pcie_events, 0);
+
+	/* start in D0 only if the ep-pcie link is already up */
+	link_state = ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle);
+	if (link_state == EP_PCIE_LINK_ENABLED)
+		mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+	else
+		mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_LINK_DISABLE;
+
+	MHI_SM_FUNC_EXIT();
+	return 0;
+
+fail_init_wq:
+	/* devm memory is not freed here; it is released on device detach */
+	mhi_sm_ctx = NULL;
+	mhi_sm_debugfs_destroy();
+	return res;
+}
+EXPORT_SYMBOL(mhi_dev_sm_init);
+
+/**
+ * mhi_dev_sm_get_mhi_state() -Get current MHI state.
+ * @state: return param
+ *
+ * Returns the current MHI state of the state machine.
+ *
+ * Return:	0 success
+ *		-EINVAL: invalid param
+ *		-EFAULT: state machine isn't initialized
+ */
+int mhi_dev_sm_get_mhi_state(enum mhi_dev_state *state)
+{
+	MHI_SM_FUNC_ENTRY();
+
+	if (!state) {
+		MHI_SM_ERR("Fail: Null argument\n");
+		return -EINVAL;
+	}
+	if (!mhi_sm_ctx) {
+		MHI_SM_ERR("Fail: MHI SM is not initialized\n");
+		return -EFAULT;
+	}
+	/* NOTE(review): mhi_state is read without mhi_state_lock, so the
+	 * returned value is a racy snapshot — confirm callers tolerate this.
+	 */
+	*state = mhi_sm_ctx->mhi_state;
+	MHI_SM_DBG("state machine states are: %s and %s\n",
+		mhi_sm_mstate_str(*state),
+		mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+
+	MHI_SM_FUNC_EXIT();
+	return 0;
+}
+EXPORT_SYMBOL(mhi_dev_sm_get_mhi_state);
+
+/**
+ * mhi_dev_sm_set_ready() -Set MHI state to ready.
+ *
+ * Set MHISTATUS register in mmio to READY.
+ * Synchronous function.
+ *
+ * Return:	0: success
+ *		-EINVAL: mhi state manager is not initialized
+ *		-EPERM: Operation not permitted as EP PCIE link is disabled.
+ *		-EFAULT: MHI state is not RESET
+ *		negative: other failure
+ */
+int mhi_dev_sm_set_ready(void)
+{
+	/* Must be initialized: the success path reaches unlock_and_exit
+	 * without assigning res (the original returned an indeterminate
+	 * value on success).
+	 */
+	int res = 0;
+	int is_ready;
+	enum mhi_dev_state state;
+
+	MHI_SM_FUNC_ENTRY();
+
+	if (!mhi_sm_ctx) {
+		MHI_SM_ERR("Failed, MHI SM isn't initialized\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+	if (mhi_sm_ctx->mhi_state != MHI_DEV_RESET_STATE) {
+		MHI_SM_ERR("Can not switch to READY state from %s state\n",
+			mhi_sm_mstate_str(mhi_sm_ctx->mhi_state));
+		res = -EFAULT;
+		goto unlock_and_exit;
+	}
+
+	/* sync d_state with the actual link status before proceeding */
+	if (mhi_sm_ctx->d_state != MHI_SM_EP_PCIE_D0_STATE) {
+		if (ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle) ==
+		    EP_PCIE_LINK_ENABLED) {
+			mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+		} else {
+			MHI_SM_ERR("ERROR: ep-pcie link is not enabled\n");
+			res = -EPERM;
+			goto unlock_and_exit;
+		}
+	}
+
+	/* verify that MHISTATUS is configured to RESET*/
+	mhi_dev_mmio_masked_read(mhi_sm_ctx->mhi_dev,
+		MHISTATUS, MHISTATUS_MHISTATE_MASK,
+		MHISTATUS_MHISTATE_SHIFT, &state);
+
+	mhi_dev_mmio_masked_read(mhi_sm_ctx->mhi_dev, MHISTATUS,
+		MHISTATUS_READY_MASK,
+		MHISTATUS_READY_SHIFT, &is_ready);
+
+	if (state != MHI_DEV_RESET_STATE || is_ready) {
+		MHI_SM_ERR("Cannot switch to READY, MHI is not in RESET state");
+		MHI_SM_ERR("-MHISTATE: %s, READY bit: 0x%x\n",
+			mhi_sm_mstate_str(state), is_ready);
+		res = -EFAULT;
+		goto unlock_and_exit;
+	}
+	mhi_sm_mmio_set_mhistatus(MHI_DEV_READY_STATE);
+
+unlock_and_exit:
+	mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+	MHI_SM_FUNC_EXIT();
+	return res;
+}
+EXPORT_SYMBOL(mhi_dev_sm_set_ready);
+
+/**
+ * mhi_dev_notify_sm_event() - MHI-core notify SM on trigger occurred
+ * @event: the device event that occurred
+ *
+ * Asynchronous: the event is counted and queued to the state-machine
+ * workqueue; no completion trigger is sent once it is handled.
+ *
+ * Return:	0: success
+ *		-EFAULT: SM isn't initialized or event isn't supported
+ *		-ENOMEM: allocating memory error
+ *		-EINVAL: invalid event
+ */
+int mhi_dev_notify_sm_event(enum mhi_dev_event event)
+{
+	struct mhi_sm_device_event *change_evt;
+	int ret;
+
+	MHI_SM_FUNC_ENTRY();
+
+	if (!mhi_sm_ctx) {
+		MHI_SM_ERR("Failed, MHI SM is not initialized\n");
+		return -EFAULT;
+	}
+
+	MHI_SM_DBG("received: %s\n",
+		mhi_sm_dev_event_str(event));
+
+	/* account for the event first; unsupported ones are rejected here */
+	switch (event) {
+	case MHI_DEV_EVENT_M0_STATE:
+		mhi_sm_ctx->stats.m0_event_cnt++;
+		break;
+	case MHI_DEV_EVENT_M3_STATE:
+		mhi_sm_ctx->stats.m3_event_cnt++;
+		break;
+	case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+		mhi_sm_ctx->stats.hw_acc_wakeup_event_cnt++;
+		break;
+	case MHI_DEV_EVENT_CORE_WAKEUP:
+		mhi_sm_ctx->stats.mhi_core_wakeup_event_cnt++;
+		break;
+	case MHI_DEV_EVENT_CTRL_TRIG:
+	case MHI_DEV_EVENT_M1_STATE:
+	case MHI_DEV_EVENT_M2_STATE:
+		MHI_SM_ERR("Not supported event: %s\n",
+			mhi_sm_dev_event_str(event));
+		ret = -EFAULT;
+		goto exit;
+	default:
+		MHI_SM_ERR("Invalid event, received: 0x%x event\n", event);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* hand the event to mhi_sm_dev_event_manager() via the SM workqueue */
+	change_evt = kzalloc(sizeof(*change_evt), GFP_ATOMIC);
+	if (!change_evt) {
+		MHI_SM_ERR("kzalloc error\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	change_evt->event = event;
+	INIT_WORK(&change_evt->work, mhi_sm_dev_event_manager);
+	atomic_inc(&mhi_sm_ctx->pending_device_events);
+	queue_work(mhi_sm_ctx->mhi_sm_wq, &change_evt->work);
+	ret = 0;
+
+exit:
+	MHI_SM_FUNC_EXIT();
+	return ret;
+}
+EXPORT_SYMBOL(mhi_dev_notify_sm_event);
+
+/**
+ * mhi_dev_sm_pcie_handler() - handler of ep_pcie events
+ * @notify: pointer to structure contains the ep_pcie event
+ *
+ * Callback function, called by ep_pcie driver to notify on pcie state change
+ * Asynchronous function
+ */
+void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify)
+{
+	struct mhi_sm_ep_pcie_event *dstate_change_evt;
+	enum ep_pcie_event event;
+
+	MHI_SM_FUNC_ENTRY();
+
+	if (!notify) {
+		MHI_SM_ERR("Null argument - notify\n");
+		return;
+	}
+
+	if (!mhi_sm_ctx) {
+		MHI_SM_ERR("Failed, MHI SM is not initialized\n");
+		return;
+	}
+
+	event = notify->event;
+	MHI_SM_DBG("received: %s\n",
+		mhi_sm_pcie_event_str(event));
+
+	dstate_change_evt = kzalloc(sizeof(*dstate_change_evt), GFP_ATOMIC);
+	if (!dstate_change_evt) {
+		MHI_SM_ERR("kzalloc error\n");
+		goto exit;
+	}
+
+	switch (event) {
+	case EP_PCIE_EVENT_LINKUP:
+		mhi_sm_ctx->stats.linkup_event_cnt++;
+		break;
+	case EP_PCIE_EVENT_PM_D3_COLD:
+		mhi_sm_ctx->stats.d3_cold_event_cnt++;
+		break;
+	case EP_PCIE_EVENT_PM_D3_HOT:
+		mhi_sm_ctx->stats.d3_hot_event_cnt++;
+		mhi_dev_backup_mmio(mhi_sm_ctx->mhi_dev);
+		break;
+	case EP_PCIE_EVENT_PM_RST_DEAST:
+		mhi_sm_ctx->stats.rst_deast_event_cnt++;
+		break;
+	case EP_PCIE_EVENT_PM_D0:
+		mhi_sm_ctx->stats.d0_event_cnt++;
+		break;
+	case EP_PCIE_EVENT_LINKDOWN:
+		mhi_sm_ctx->stats.linkdown_event_cnt++;
+		mhi_sm_ctx->syserr_occurred = true;
+		MHI_SM_ERR("got %s, ERROR occurred\n",
+			mhi_sm_pcie_event_str(event));
+		break;
+	case EP_PCIE_EVENT_MHI_A7:
+		/* handled inline, not queued: free the unused work item
+		 * (the original leaked dstate_change_evt on this path)
+		 */
+		ep_pcie_mask_irq_event(mhi_sm_ctx->mhi_dev->phandle,
+				EP_PCIE_INT_EVT_MHI_A7, false);
+		mhi_dev_notify_a7_event(mhi_sm_ctx->mhi_dev);
+		kfree(dstate_change_evt);
+		goto exit;
+	default:
+		MHI_SM_ERR("Invalid ep_pcie event, received 0x%x event\n",
+			event);
+		kfree(dstate_change_evt);
+		goto exit;
+	}
+
+	dstate_change_evt->event = event;
+	INIT_WORK(&dstate_change_evt->work, mhi_sm_pcie_event_manager);
+	queue_work(mhi_sm_ctx->mhi_sm_wq, &dstate_change_evt->work);
+	atomic_inc(&mhi_sm_ctx->pending_pcie_events);
+
+exit:
+	MHI_SM_FUNC_EXIT();
+}
+EXPORT_SYMBOL(mhi_dev_sm_pcie_handler);
+
+/**
+ * mhi_dev_sm_syserr() - switch to system error state.
+ *
+ * Called on system error condition.
+ * Switch MHI to SYSERR state, notify MHI-host and ASSERT on the device.
+ * Synchronous function.
+ *
+ * Return:	0: success
+ *		negative: failure
+ */
+int mhi_dev_sm_syserr(void)
+{
+	int res;
+
+	MHI_SM_FUNC_ENTRY();
+
+	if (!mhi_sm_ctx) {
+		MHI_SM_ERR("Failed, MHI SM is not initialized\n");
+		return -EFAULT;
+	}
+
+	/* serialize with the event managers before flipping state */
+	mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+	res = mhi_sm_handle_syserr();
+	if (res)
+		MHI_SM_ERR("mhi_sm_handle_syserr failed %d\n", res);
+	mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+
+	MHI_SM_FUNC_EXIT();
+	return res;
+}
+EXPORT_SYMBOL(mhi_dev_sm_syserr);
+
+/* debugfs read: render current SM state and event counters into dbg_buff */
+static ssize_t mhi_sm_debugfs_read(struct file *file, char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+
+	if (!mhi_sm_ctx) {
+		nbytes = scnprintf(dbg_buff, MHI_SM_MAX_MSG_LEN,
+				"Not initialized\n");
+	} else {
+		/* scnprintf never writes past the remaining length, so the
+		 * chain below degrades to truncation rather than overflow
+		 */
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"*************** MHI State machine status ***************\n");
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"D state: %s\n",
+			mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"M state: %s\n",
+			mhi_sm_mstate_str(mhi_sm_ctx->mhi_state));
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"pending device events: %d\n",
+			atomic_read(&mhi_sm_ctx->pending_device_events));
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"pending pcie events: %d\n",
+			atomic_read(&mhi_sm_ctx->pending_pcie_events));
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"*************** Statistics ***************\n");
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"M0 events: %d\n", mhi_sm_ctx->stats.m0_event_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"M3 events: %d\n", mhi_sm_ctx->stats.m3_event_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"HW_ACC wakeup events: %d\n",
+			mhi_sm_ctx->stats.hw_acc_wakeup_event_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"CORE wakeup events: %d\n",
+			mhi_sm_ctx->stats.mhi_core_wakeup_event_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"Linkup events: %d\n",
+			mhi_sm_ctx->stats.linkup_event_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"De-assert PERST events: %d\n",
+			mhi_sm_ctx->stats.rst_deast_event_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"D0 events: %d\n",
+			mhi_sm_ctx->stats.d0_event_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"D3_HOT events: %d\n",
+			mhi_sm_ctx->stats.d3_hot_event_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"D3_COLD events:%d\n",
+			mhi_sm_ctx->stats.d3_cold_event_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			MHI_SM_MAX_MSG_LEN - nbytes,
+			"Linkdown events: %d\n",
+			mhi_sm_ctx->stats.linkdown_event_cnt);
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+/* debugfs write: "echo 0" resets the statistics counters */
+static ssize_t mhi_sm_debugfs_write(struct file *file,
+					const char __user *ubuf,
+					size_t count,
+					loff_t *ppos)
+{
+	unsigned long missing;
+	s8 in_num = 0;
+
+	if (!mhi_sm_ctx) {
+		MHI_SM_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	/*
+	 * Reject input that doesn't fit with its NUL terminator. Written as
+	 * "count >= sizeof" to avoid the unsigned wrap-around the original
+	 * "sizeof(dbg_buff) < count + 1" hits when count == SIZE_MAX.
+	 */
+	if (count >= sizeof(dbg_buff))
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &in_num))
+		return -EFAULT;
+
+	switch (in_num) {
+	case 0:
+		if (atomic_read(&mhi_sm_ctx->pending_device_events) ||
+			atomic_read(&mhi_sm_ctx->pending_pcie_events))
+			MHI_SM_DBG("Note, there are pending events in sm_wq\n");
+
+		memset(&mhi_sm_ctx->stats, 0, sizeof(struct mhi_sm_stats));
+		break;
+	default:
+		MHI_SM_ERR("invalid argument: To reset statistics echo 0\n");
+		break;
+	}
+
+	return count;
+}
diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.h b/drivers/platform/msm/mhi_dev/mhi_sm.h
new file mode 100644
index 0000000..d477880
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_sm.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MHI_SM_H
+#define MHI_SM_H
+
+#include "mhi.h"
+#include <linux/slab.h>
+#include <linux/msm_ep_pcie.h>
+
+
+/**
+ * enum mhi_dev_event - MHI state change events
+ * @MHI_DEV_EVENT_CTRL_TRIG: CTRL register change event.
+ *				Not supported, for future use
+ * @MHI_DEV_EVENT_M0_STATE: M0 state change event
+ * @MHI_DEV_EVENT_M1_STATE: M1 state change event. Not supported, for future use
+ * @MHI_DEV_EVENT_M2_STATE: M2 state change event. Not supported, for future use
+ * @MHI_DEV_EVENT_M3_STATE: M3 state change event
+ * @MHI_DEV_EVENT_HW_ACC_WAKEUP: pending data on IPA, initiate Host wakeup
+ * @MHI_DEV_EVENT_CORE_WAKEUP: MHI core initiate Host wakeup
+ */
+enum mhi_dev_event {
+	MHI_DEV_EVENT_CTRL_TRIG,
+	MHI_DEV_EVENT_M0_STATE,
+	MHI_DEV_EVENT_M1_STATE,
+	MHI_DEV_EVENT_M2_STATE,
+	MHI_DEV_EVENT_M3_STATE,
+	MHI_DEV_EVENT_HW_ACC_WAKEUP,
+	MHI_DEV_EVENT_CORE_WAKEUP,
+	MHI_DEV_EVENT_MAX
+};
+
+int mhi_dev_sm_init(struct mhi_dev *dev);
+int mhi_dev_sm_set_ready(void);
+int mhi_dev_notify_sm_event(enum mhi_dev_event event);
+int mhi_dev_sm_get_mhi_state(enum mhi_dev_state *state);
+int mhi_dev_sm_syserr(void);
+void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify);
+
+#endif /* MHI_SM_H */
+
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
new file mode 100644
index 0000000..3279fa8
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -0,0 +1,835 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/ipc_logging.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_ipa.h>
+#include <linux/ipa.h>
+#include <uapi/linux/mhi.h>
+#include "mhi.h"
+
+#define MHI_DEV_NODE_NAME_LEN		13
+#define MHI_MAX_NR_OF_CLIENTS		23
+#define MHI_SOFTWARE_CLIENT_START	0
+#define MHI_SOFTWARE_CLIENT_LIMIT	(MHI_MAX_SOFTWARE_CHANNELS/2)
+#define MHI_UCI_IPC_LOG_PAGES		(100)
+
+#define MAX_NR_TRBS_PER_CHAN		1
+#define MHI_QTI_IFACE_ID		4
+#define DEVICE_NAME "mhi"
+
+/* Log severities for uci_log(); messages below the configured level are
+ * dropped (see mhi_uci_msg_lvl / mhi_uci_ipc_log_lvl module params).
+ */
+enum uci_dbg_level {
+	UCI_DBG_VERBOSE = 0x0,
+	UCI_DBG_INFO = 0x1,
+	UCI_DBG_DBG = 0x2,
+	UCI_DBG_WARNING = 0x3,
+	UCI_DBG_ERROR = 0x4,
+	UCI_DBG_CRITICAL = 0x5,
+	UCI_DBG_reserved = 0x80000000	/* forces 32-bit enum width */
+};
+
+static enum uci_dbg_level mhi_uci_msg_lvl = UCI_DBG_CRITICAL;
+static enum uci_dbg_level mhi_uci_ipc_log_lvl = UCI_DBG_INFO;
+static void *mhi_uci_ipc_log;
+
+
+/* Direction of an MHI channel relative to the device */
+enum mhi_chan_dir {
+	MHI_DIR_INVALID = 0x0,
+	MHI_DIR_OUT = 0x1,
+	MHI_DIR_IN = 0x2,
+	MHI_DIR__reserved = 0x80000000	/* forces 32-bit enum width */
+};
+
+/* Static attributes describing one software channel */
+struct chan_attr {
+	/* SW maintained channel id */
+	enum mhi_client_channel chan_id;
+	/* maximum buffer size for this channel */
+	size_t max_packet_size;
+	/* number of buffers supported in this channel */
+	u32 nr_trbs;
+	/* direction of the channel, see enum mhi_chan_dir */
+	enum mhi_chan_dir dir;
+	/* non-zero when this channel is owned by the UCI driver */
+	u32 uci_ownership;
+};
+
+/* Per-client state: one client owns an out/in channel pair */
+struct uci_client {
+	u32 client_index;
+	/* write channel - always odd*/
+	u32 out_chan;
+	/* read channel - always even */
+	u32 in_chan;
+	struct mhi_dev_client *out_handle;
+	struct mhi_dev_client *in_handle;
+	/* readers block here until the in channel has data */
+	wait_queue_head_t read_wq;
+	/* writers block here until the out channel has descriptors */
+	wait_queue_head_t write_wq;
+	atomic_t read_data_ready;
+	struct device *dev;
+	/* number of concurrent opens of this device node */
+	atomic_t ref_count;
+	int mhi_status;
+	/* partially-consumed inbound packet (location and remaining size) */
+	void *pkt_loc;
+	size_t pkt_size;
+	/* RX buffers allocated by mhi_init_read_chan() */
+	struct mhi_dev_iov *in_buf_list;
+	atomic_t write_data_ready;
+	/* set once both channels were opened successfully */
+	atomic_t mhi_chans_open;
+	struct mhi_uci_ctxt_t *uci_ctxt;
+	struct mutex in_chan_lock;
+	struct mutex out_chan_lock;
+};
+
+/* Driver-wide UCI context (single static instance: uci_ctxt) */
+struct mhi_uci_ctxt_t {
+	struct chan_attr chan_attrib[MHI_MAX_SOFTWARE_CHANNELS];
+	struct uci_client client_handles[MHI_SOFTWARE_CLIENT_LIMIT];
+	/* callback handed to mhi_dev_open_channel() for channel events */
+	void (*event_notifier)(struct mhi_dev_client_cb_reason *cb);
+	dev_t start_ctrl_nr;
+	struct cdev cdev[MHI_MAX_SOFTWARE_CHANNELS];
+	struct class *mhi_uci_class;
+	atomic_t mhi_disabled;
+	atomic_t mhi_enable_notif_wq_active;
+};
+
+/* Map a channel number to its client index (out/in pairs share a client) */
+#define CHAN_TO_CLIENT(_CHAN_NR) (_CHAN_NR / 2)
+
+/* Log to the kernel log when at/above mhi_uci_msg_lvl, and to the IPC log
+ * buffer when at/above mhi_uci_ipc_log_lvl.
+ */
+#define uci_log(_msg_lvl, _msg, ...) do { \
+	if (_msg_lvl >= mhi_uci_msg_lvl) { \
+		pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \
+	} \
+	if (mhi_uci_ipc_log && (_msg_lvl >= mhi_uci_ipc_log_lvl)) { \
+		ipc_log_string(mhi_uci_ipc_log,                     \
+			"[%s] " _msg, __func__, ##__VA_ARGS__);     \
+	} \
+} while (0)
+
+
+module_param(mhi_uci_msg_lvl, uint, 0644);
+MODULE_PARM_DESC(mhi_uci_msg_lvl, "uci dbg lvl");
+
+module_param(mhi_uci_ipc_log_lvl, uint, 0644);
+MODULE_PARM_DESC(mhi_uci_ipc_log_lvl, "ipc dbg lvl");
+
+static ssize_t mhi_uci_client_read(struct file *file, char __user *buf,
+		size_t count, loff_t *offp);
+static ssize_t mhi_uci_client_write(struct file *file,
+		const char __user *buf, size_t count, loff_t *offp);
+static int mhi_uci_client_open(struct inode *mhi_inode, struct file*);
+static int mhi_uci_client_release(struct inode *mhi_inode,
+		struct file *file_handle);
+static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait);
+static struct mhi_uci_ctxt_t uci_ctxt;
+
+/* Allocate the RX buffers (in_buf_list entries) for a read channel.
+ * On partial failure, frees everything allocated so far and returns -ENOMEM.
+ * NOTE(review): called on every open via open_client_mhi_channels() — confirm
+ * the buffers are freed on release, otherwise repeated open/close leaks.
+ */
+static int mhi_init_read_chan(struct uci_client *client_handle,
+		enum mhi_client_channel chan)
+{
+	int rc = 0;
+	u32 i, j;
+	struct chan_attr *chan_attributes;
+	size_t buf_size;
+	void *data_loc;
+
+	if (client_handle == NULL) {
+		uci_log(UCI_DBG_ERROR, "Bad Input data, quitting\n");
+		return -EINVAL;
+	}
+	if (chan >= MHI_MAX_SOFTWARE_CHANNELS) {
+		uci_log(UCI_DBG_ERROR, "Incorrect channel number %d\n", chan);
+		return -EINVAL;
+	}
+
+	chan_attributes = &uci_ctxt.chan_attrib[chan];
+	buf_size = chan_attributes->max_packet_size;
+
+	for (i = 0; i < (chan_attributes->nr_trbs); i++) {
+		data_loc = kmalloc(buf_size, GFP_KERNEL);
+		if (!data_loc) {
+			rc = -ENOMEM;
+			goto free_memory;
+		}
+		client_handle->in_buf_list[i].addr = data_loc;
+		client_handle->in_buf_list[i].buf_size = buf_size;
+	}
+
+	return rc;
+
+free_memory:
+	/* unwind only the buffers allocated before the failure */
+	for (j = 0; j < i; j++)
+		kfree(client_handle->in_buf_list[j].addr);
+
+	return rc;
+}
+
+/* Push one packet to the out channel.
+ * @client_handle: address of the client's out_handle
+ * @buf: payload; user-space pointer when is_uspace_buf != 0
+ * @size: payload length in bytes
+ * @is_uspace_buf: non-zero means buf must be copied from user space
+ *
+ * Return: bytes queued (0 means no descriptors free), or negative errno.
+ */
+static int mhi_uci_send_packet(struct mhi_dev_client **client_handle, void *buf,
+		u32 size, u32 is_uspace_buf)
+{
+	void *data_loc = NULL;
+	u32 data_inserted_so_far = 0;
+
+	if (!client_handle || !buf || !size)
+		return -EINVAL;
+
+	if (is_uspace_buf) {
+		data_loc = kmalloc(size, GFP_KERNEL);
+		if (!data_loc) {
+			uci_log(UCI_DBG_ERROR,
+				"Failed to allocate memory 0x%x\n",
+				size);
+			return -ENOMEM;
+		}
+		/* report copy failures instead of the original's silent
+		 * "0 bytes written", which made the caller wait forever
+		 */
+		if (copy_from_user(data_loc, buf, size)) {
+			kfree(data_loc);
+			return -EFAULT;
+		}
+	} else {
+		data_loc = buf;
+	}
+
+	data_inserted_so_far = mhi_dev_write_channel(*client_handle, data_loc,
+							size);
+
+	/* free only the bounce buffer we allocated; the original also
+	 * kfree'd the caller-owned buf on the kernel-buffer path
+	 */
+	if (is_uspace_buf)
+		kfree(data_loc);
+	return data_inserted_so_far;
+}
+
+/* poll(): report readable when the in channel has data and writable when
+ * the out channel has free descriptors, but only while MHI is enabled.
+ * NOTE(review): returns -ENODEV through an unsigned int mask — confirm
+ * callers interpret that as intended.
+ */
+static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask = 0;
+	struct uci_client *uci_handle;
+
+	uci_handle = file->private_data;
+
+	if (!uci_handle)
+		return -ENODEV;
+
+	poll_wait(file, &uci_handle->read_wq, wait);
+	poll_wait(file, &uci_handle->write_wq, wait);
+	if (!atomic_read(&uci_ctxt.mhi_disabled) &&
+		!mhi_dev_channel_isempty(uci_handle->in_handle)) {
+		uci_log(UCI_DBG_VERBOSE,
+		"Client can read chan %d\n", uci_handle->in_chan);
+		mask |= POLLIN | POLLRDNORM;
+	}
+	if (!atomic_read(&uci_ctxt.mhi_disabled) &&
+		!mhi_dev_channel_isempty(uci_handle->out_handle)) {
+		uci_log(UCI_DBG_VERBOSE,
+		"Client can write chan %d\n", uci_handle->out_chan);
+		mask |= POLLOUT | POLLWRNORM;
+	}
+
+	uci_log(UCI_DBG_VERBOSE,
+		"Client attempted to poll chan %d, returning mask 0x%x\n",
+		uci_handle->in_chan, mask);
+	return mask;
+}
+
+/* Open a client's out/in channel pair under both channel locks and allocate
+ * its RX buffers. Returns 0 on success, negative errno otherwise.
+ * NOTE(review): a mhi_init_read_chan() failure is logged but not fatal, and
+ * buffers it allocated are not freed on the later error paths — confirm
+ * this best-effort behavior is intended.
+ */
+static int open_client_mhi_channels(struct uci_client *uci_client)
+{
+	int rc = 0;
+
+	uci_log(UCI_DBG_DBG,
+			"Starting channels %d %d.\n",
+			uci_client->out_chan,
+			uci_client->in_chan);
+	mutex_lock(&uci_client->out_chan_lock);
+	mutex_lock(&uci_client->in_chan_lock);
+	uci_log(UCI_DBG_DBG,
+			"Initializing inbound chan %d.\n",
+			uci_client->in_chan);
+
+	rc = mhi_init_read_chan(uci_client, uci_client->in_chan);
+	if (rc < 0) {
+		uci_log(UCI_DBG_ERROR,
+			"Failed to init inbound 0x%x, ret 0x%x\n",
+			uci_client->in_chan, rc);
+	}
+
+	rc = mhi_dev_open_channel(uci_client->out_chan,
+			&uci_client->out_handle,
+			uci_ctxt.event_notifier);
+	if (rc < 0)
+		goto handle_not_rdy_err;
+
+	rc = mhi_dev_open_channel(uci_client->in_chan,
+			&uci_client->in_handle,
+			uci_ctxt.event_notifier);
+
+	if (rc < 0) {
+		uci_log(UCI_DBG_ERROR,
+			"Failed to open chan %d, ret 0x%x\n",
+			uci_client->out_chan, rc);
+		goto handle_in_err;
+	}
+	atomic_set(&uci_client->mhi_chans_open, 1);
+	mutex_unlock(&uci_client->in_chan_lock);
+	mutex_unlock(&uci_client->out_chan_lock);
+
+	return 0;
+
+handle_in_err:
+	/* in-channel open failed: roll back the out channel */
+	mhi_dev_close_channel(uci_client->out_handle);
+handle_not_rdy_err:
+	mutex_unlock(&uci_client->in_chan_lock);
+	mutex_unlock(&uci_client->out_chan_lock);
+	return rc;
+}
+
+/* open(): the first opener of a device node opens the MHI channel pair;
+ * later openers just take a reference.
+ * NOTE(review): iminor() is not range-checked against
+ * MHI_SOFTWARE_CLIENT_LIMIT here — confirm device node creation bounds it.
+ */
+static int mhi_uci_client_open(struct inode *mhi_inode,
+				struct file *file_handle)
+{
+	struct uci_client *uci_handle;
+	int rc = 0;
+
+	/* address of a static array element — never NULL, so the original's
+	 * post-increment NULL check was dead code and has been removed
+	 */
+	uci_handle =
+		&uci_ctxt.client_handles[iminor(mhi_inode)];
+
+	uci_log(UCI_DBG_DBG,
+		"Client opened struct device node 0x%x, ref count 0x%x\n",
+		iminor(mhi_inode), atomic_read(&uci_handle->ref_count));
+	if (atomic_add_return(1, &uci_handle->ref_count) == 1) {
+		uci_handle->uci_ctxt = &uci_ctxt;
+		if (!atomic_read(&uci_handle->mhi_chans_open)) {
+			uci_log(UCI_DBG_INFO,
+				"Opening channels client %d\n",
+				iminor(mhi_inode));
+			rc = open_client_mhi_channels(uci_handle);
+			if (rc) {
+				uci_log(UCI_DBG_INFO,
+					"Failed to open channels ret %d\n", rc);
+				/* drop the reference taken above so the next
+				 * open retries the channel setup (the
+				 * original left ref_count elevated)
+				 */
+				atomic_dec(&uci_handle->ref_count);
+				return rc;
+			}
+		}
+	}
+	file_handle->private_data = uci_handle;
+
+	return 0;
+}
+
+/* release(): drop a reference; the last closer closes both channels and
+ * frees the RX buffers allocated at open time.
+ */
+static int mhi_uci_client_release(struct inode *mhi_inode,
+		struct file *file_handle)
+{
+	struct uci_client *uci_handle = file_handle->private_data;
+	struct mhi_uci_ctxt_t *uci_ctxt;
+	u32 nr_in_bufs = 0;
+	int rc = 0;
+	int in_chan = 0;
+	u32 i;
+
+	/* validate before any dereference (the original read
+	 * uci_handle->uci_ctxt and chan_attrib[] before this check)
+	 */
+	if (!uci_handle)
+		return -EINVAL;
+
+	uci_ctxt = uci_handle->uci_ctxt;
+	in_chan = iminor(mhi_inode) + 1;
+	nr_in_bufs = uci_ctxt->chan_attrib[in_chan].nr_trbs;
+
+	if (atomic_sub_return(1, &uci_handle->ref_count) == 0) {
+		uci_log(UCI_DBG_DBG,
+				"Last client left, closing channel 0x%x\n",
+				iminor(mhi_inode));
+		if (atomic_read(&uci_handle->mhi_chans_open)) {
+			atomic_set(&uci_handle->mhi_chans_open, 0);
+
+			mutex_lock(&uci_handle->out_chan_lock);
+			rc = mhi_dev_close_channel(uci_handle->out_handle);
+			wake_up(&uci_handle->write_wq);
+			mutex_unlock(&uci_handle->out_chan_lock);
+
+			mutex_lock(&uci_handle->in_chan_lock);
+			rc = mhi_dev_close_channel(uci_handle->in_handle);
+			wake_up(&uci_handle->read_wq);
+			/* free the RX buffers from mhi_init_read_chan();
+			 * they are re-allocated on the next open (the
+			 * original computed nr_in_bufs but never freed)
+			 */
+			uci_handle->pkt_loc = NULL;
+			uci_handle->pkt_size = 0;
+			for (i = 0; i < nr_in_bufs; i++)
+				kfree(uci_handle->in_buf_list[i].addr);
+			mutex_unlock(&uci_handle->in_chan_lock);
+
+		}
+		atomic_set(&uci_handle->read_data_ready, 0);
+		atomic_set(&uci_handle->write_data_ready, 0);
+		file_handle->private_data = NULL;
+	} else {
+		uci_log(UCI_DBG_DBG,
+			"Client close chan %d, ref count 0x%x\n",
+			iminor(mhi_inode),
+			atomic_read(&uci_handle->ref_count));
+	}
+	return rc;
+}
+
+/* read(): pull one packet from the in channel into the client's RX buffer
+ * and copy as much as the user buffer holds; *bytes_pending tracks the
+ * remainder across calls. Blocks (interruptibly) while the channel is empty.
+ */
+static ssize_t mhi_uci_client_read(struct file *file, char __user *buf,
+		size_t uspace_buf_size, loff_t *bytes_pending)
+{
+	struct uci_client *uci_handle = NULL;
+	struct mhi_dev_client *client_handle = NULL;
+	int bytes_avail = 0;
+	int ret_val = 0;
+	struct mutex *mutex;
+	u32 chan = 0;
+	ssize_t bytes_copied = 0;
+	u32 addr_offset = 0;
+	uint32_t buf_size;
+	uint32_t chained = 0;
+	void *local_buf = NULL;
+
+	if (!file || !buf || !uspace_buf_size ||
+			!file->private_data)
+		return -EINVAL;
+
+	uci_handle = file->private_data;
+	client_handle = uci_handle->in_handle;
+	mutex = &uci_handle->in_chan_lock;
+	chan = uci_handle->in_chan;
+
+	mutex_lock(mutex);
+
+	local_buf = uci_handle->in_buf_list[0].addr;
+	buf_size = uci_handle->in_buf_list[0].buf_size;
+
+
+	uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n", chan);
+	do {
+		if (!uci_handle->pkt_loc &&
+				!atomic_read(&uci_ctxt.mhi_disabled)) {
+
+			bytes_avail = mhi_dev_read_channel(client_handle,
+					local_buf, buf_size, &chained);
+
+			uci_log(UCI_DBG_VERBOSE,
+				"reading from mhi_core local_buf = %p,buf_size = 0x%x bytes_read = 0x%x\n",
+				local_buf, buf_size, bytes_avail);
+
+			if (bytes_avail < 0) {
+				uci_log(UCI_DBG_ERROR,
+				"Failed to read channel ret %d\n",
+					bytes_avail);
+				ret_val =  -EIO;
+				goto error;
+			}
+
+			if (bytes_avail > 0) {
+				uci_handle->pkt_loc = (void *)local_buf;
+				uci_handle->pkt_size = bytes_avail;
+
+				*bytes_pending = (loff_t)uci_handle->pkt_size;
+				uci_log(UCI_DBG_VERBOSE,
+					"Got pkt of size 0x%x at addr %p, chan %d\n",
+					uci_handle->pkt_size, local_buf, chan);
+			} else {
+				uci_handle->pkt_loc = 0;
+				uci_handle->pkt_size = 0;
+			}
+		}
+		if (bytes_avail == 0) {
+
+			/* If nothing was copied yet, wait for data */
+			uci_log(UCI_DBG_VERBOSE,
+				"No data read_data_ready %d, chan %d\n",
+				atomic_read(&uci_handle->read_data_ready),
+				chan);
+
+			ret_val = wait_event_interruptible(uci_handle->read_wq,
+				(!mhi_dev_channel_isempty(client_handle)));
+
+			if (ret_val == -ERESTARTSYS) {
+				uci_log(UCI_DBG_ERROR, "Exit signal caught\n");
+				goto error;
+			}
+			uci_log(UCI_DBG_VERBOSE,
+				"Thread woke up. Got data on chan %d read_data_ready %d\n",
+				chan,
+				atomic_read(&uci_handle->read_data_ready));
+
+			/* A valid packet was returned from MHI */
+		} else if (bytes_avail > 0) {
+			uci_log(UCI_DBG_VERBOSE,
+				"Got packet: avail pkts %d phy_adr %p, chan %d\n",
+				atomic_read(&uci_handle->read_data_ready),
+				local_buf,
+				chan);
+			break;
+			/*
+			 * MHI did not return a valid packet, but we have one
+			 * which we did not finish returning to user
+			 */
+		} else {
+			uci_log(UCI_DBG_CRITICAL,
+				"chan %d err: avail pkts %d phy_adr %p",
+				chan,
+				atomic_read(&uci_handle->read_data_ready),
+				local_buf);
+			/* exit via the error path: the original did a bare
+			 * "return -EIO;" here, leaving in_chan_lock held
+			 */
+			ret_val = -EIO;
+			goto error;
+		}
+	} while (!uci_handle->pkt_loc);
+
+	if (uspace_buf_size >= *bytes_pending) {
+		addr_offset = uci_handle->pkt_size - *bytes_pending;
+		if (copy_to_user(buf, uci_handle->pkt_loc + addr_offset,
+							*bytes_pending)) {
+			ret_val = -EIO;
+			goto error;
+		}
+
+		bytes_copied = *bytes_pending;
+		*bytes_pending = 0;
+		uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x, chan %d\n",
+				bytes_copied, (u32)*bytes_pending, chan);
+	} else {
+		addr_offset = uci_handle->pkt_size - *bytes_pending;
+		if (copy_to_user(buf, (void *) (uintptr_t)uci_handle->pkt_loc +
+					addr_offset, uspace_buf_size)) {
+			ret_val = -EIO;
+			goto error;
+		}
+		bytes_copied = uspace_buf_size;
+		*bytes_pending -= uspace_buf_size;
+		uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x,chan %d\n",
+				bytes_copied,
+				(u32)*bytes_pending,
+				chan);
+	}
+	/* We finished with this buffer, map it back */
+	if (*bytes_pending == 0) {
+		uci_log(UCI_DBG_VERBOSE,
+				"All data consumed. Pkt loc %p ,chan %d\n",
+				uci_handle->pkt_loc, chan);
+		uci_handle->pkt_loc = 0;
+		uci_handle->pkt_size = 0;
+	}
+	uci_log(UCI_DBG_VERBOSE,
+			"Returning 0x%x bytes, 0x%x bytes left\n",
+			bytes_copied, (u32)*bytes_pending);
+	mutex_unlock(mutex);
+	return bytes_copied;
+error:
+	mutex_unlock(mutex);
+	uci_log(UCI_DBG_ERROR, "Returning %d\n", ret_val);
+	return ret_val;
+}
+
+static ssize_t mhi_uci_client_write(struct file *file,
+		const char __user *buf,
+		size_t count, loff_t *offp)
+{
+	struct uci_client *uci_handle = NULL;
+	int ret_val = 0;
+	u32 chan = 0xFFFFFFFF;
+
+	if (file == NULL || buf == NULL ||
+			!count || file->private_data == NULL)
+		return -EINVAL;
+
+	uci_handle = file->private_data;
+
+	if (atomic_read(&uci_ctxt.mhi_disabled)) {
+		uci_log(UCI_DBG_ERROR,
+			"Client %d attempted to write while MHI is disabled\n",
+			uci_handle->out_chan);
+		return -EIO;
+	}
+	chan = uci_handle->out_chan;
+	mutex_lock(&uci_handle->out_chan_lock);
+	while (!ret_val) {
+		ret_val = mhi_uci_send_packet(&uci_handle->out_handle,
+				(void *)buf, count, 1);
+		if (ret_val < 0) {
+			uci_log(UCI_DBG_ERROR,
+				"Error while writing data to MHI, chan %d, buf %p, size %d\n",
+				chan, (void *)buf, count);
+			ret_val = -EIO;
+			break;
+		}
+		if (!ret_val) {
+			uci_log(UCI_DBG_VERBOSE,
+				"No descriptors available, did we poll, chan %d?\n",
+				chan);
+			mutex_unlock(&uci_handle->out_chan_lock);
+			ret_val = wait_event_interruptible(uci_handle->write_wq,
+				!mhi_dev_channel_isempty(
+					uci_handle->out_handle));
+
+			mutex_lock(&uci_handle->out_chan_lock);
+			if (-ERESTARTSYS == ret_val) {
+				uci_log(UCI_DBG_WARNING,
+					    "Waitqueue cancelled by system\n");
+				break;
+			}
+		}
+	}
+	mutex_unlock(&uci_handle->out_chan_lock);
+	return ret_val;
+}
+
+static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt)
+{
+	u32 i = 0;
+	u32 data_size = TRB_MAX_DATA_SIZE;
+	u32 index = 0;
+	struct uci_client *client;
+	struct chan_attr *chan_attrib = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(uci_ctxt->chan_attrib); i++) {
+		chan_attrib = &uci_ctxt->chan_attrib[i];
+		switch (i) {
+		case MHI_CLIENT_LOOPBACK_OUT:
+		case MHI_CLIENT_LOOPBACK_IN:
+		case MHI_CLIENT_SAHARA_OUT:
+		case MHI_CLIENT_SAHARA_IN:
+		case MHI_CLIENT_EFS_OUT:
+		case MHI_CLIENT_EFS_IN:
+		case MHI_CLIENT_QMI_OUT:
+		case MHI_CLIENT_QMI_IN:
+		case MHI_CLIENT_IP_CTRL_0_OUT:
+		case MHI_CLIENT_IP_CTRL_0_IN:
+		case MHI_CLIENT_IP_CTRL_1_OUT:
+		case MHI_CLIENT_IP_CTRL_1_IN:
+		case MHI_CLIENT_DUN_OUT:
+		case MHI_CLIENT_DUN_IN:
+			chan_attrib->uci_ownership = 1;
+			break;
+		default:
+			chan_attrib->uci_ownership = 0;
+			break;
+		}
+		if (chan_attrib->uci_ownership) {
+			chan_attrib->chan_id = i;
+			chan_attrib->max_packet_size = data_size;
+			index = CHAN_TO_CLIENT(i);
+			client = &uci_ctxt->client_handles[index];
+			chan_attrib->nr_trbs = 9;
+			client->in_buf_list =
+			      kmalloc(sizeof(struct mhi_dev_iov) *
+					      chan_attrib->nr_trbs,
+					GFP_KERNEL);
+			if (client->in_buf_list == NULL)
+				return -ENOMEM;
+		}
+		if (i % 2 == 0)
+			chan_attrib->dir = MHI_DIR_OUT;
+		else
+			chan_attrib->dir = MHI_DIR_IN;
+	}
+	return 0;
+}
+
+
+static void uci_event_notifier(struct mhi_dev_client_cb_reason *reason)
+{
+	int client_index = 0;
+	struct uci_client *uci_handle = NULL;
+
+	if (reason->reason == MHI_DEV_TRE_AVAILABLE) {
+		client_index = reason->ch_id / 2;
+		uci_handle = &uci_ctxt.client_handles[client_index];
+		uci_log(UCI_DBG_DBG,
+			"received TRE available event for chan %d\n",
+					uci_handle->in_chan);
+
+		if (reason->ch_id % 2) {
+			atomic_set(&uci_handle->write_data_ready, 1);
+			wake_up(&uci_handle->write_wq);
+		} else {
+			atomic_set(&uci_handle->read_data_ready, 1);
+			wake_up(&uci_handle->read_wq);
+		}
+	}
+}
+
+static int mhi_register_client(struct uci_client *mhi_client, int index)
+{
+	init_waitqueue_head(&mhi_client->read_wq);
+	init_waitqueue_head(&mhi_client->write_wq);
+	mhi_client->out_chan = index * 2 + 1;
+	mhi_client->in_chan = index * 2;
+	mhi_client->client_index = index;
+
+	mutex_init(&mhi_client->in_chan_lock);
+	mutex_init(&mhi_client->out_chan_lock);
+
+	uci_log(UCI_DBG_DBG, "Registering chan %d.\n", mhi_client->out_chan);
+	return 0;
+}
+
+static long mhi_uci_client_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	struct uci_client *uci_handle = NULL;
+	int rc = 0;
+	struct ep_info epinfo;
+
+	if (file == NULL || file->private_data == NULL)
+		return -EINVAL;
+
+	uci_handle = file->private_data;
+
+	uci_log(UCI_DBG_DBG, "Received command %d for client:%d\n",
+		cmd, uci_handle->client_index);
+
+	if (cmd == MHI_UCI_EP_LOOKUP) {
+		uci_log(UCI_DBG_DBG, "EP_LOOKUP for client:%d\n",
+						uci_handle->client_index);
+		epinfo.ph_ep_info.ep_type = DATA_EP_TYPE_PCIE;
+		epinfo.ph_ep_info.peripheral_iface_id = MHI_QTI_IFACE_ID;
+		epinfo.ipa_ep_pair.cons_pipe_num =
+			ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD);
+		epinfo.ipa_ep_pair.prod_pipe_num =
+			ipa_get_ep_mapping(IPA_CLIENT_MHI_CONS);
+
+		uci_log(UCI_DBG_DBG, "client:%d ep_type:%d intf:%d\n",
+			uci_handle->client_index,
+			epinfo.ph_ep_info.ep_type,
+			epinfo.ph_ep_info.peripheral_iface_id);
+
+		uci_log(UCI_DBG_DBG, "ipa_cons_idx:%d ipa_prod_idx:%d\n",
+			epinfo.ipa_ep_pair.cons_pipe_num,
+			epinfo.ipa_ep_pair.prod_pipe_num);
+
+		rc = copy_to_user((void __user *)arg, &epinfo,
+			sizeof(epinfo));
+		if (rc)
+			uci_log(UCI_DBG_ERROR, "copying to user space failed");
+	} else {
+		uci_log(UCI_DBG_ERROR, "wrong parameter:%d\n", cmd);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static const struct file_operations mhi_uci_client_fops = {
+	.read = mhi_uci_client_read,
+	.write = mhi_uci_client_write,
+	.open = mhi_uci_client_open,
+	.release = mhi_uci_client_release,
+	.poll = mhi_uci_client_poll,
+	.unlocked_ioctl = mhi_uci_client_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = mhi_uci_client_ioctl,
+#endif
+};
+
+int mhi_uci_init(void)
+{
+	u32 i = 0;
+	int ret_val = 0;
+	struct uci_client *mhi_client = NULL;
+	s32 r = 0;
+
+	mhi_uci_ipc_log = ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES,
+						"mhi-uci", 0);
+	if (mhi_uci_ipc_log == NULL) {
+		uci_log(UCI_DBG_WARNING,
+				"Failed to create IPC logging context\n");
+	}
+	uci_ctxt.event_notifier = uci_event_notifier;
+
+	uci_log(UCI_DBG_DBG, "Setting up channel attributes.\n");
+
+	ret_val = uci_init_client_attributes(&uci_ctxt);
+	if (ret_val < 0) {
+		uci_log(UCI_DBG_ERROR,
+				"Failed to init client attributes\n");
+		return -EIO;
+	}
+
+	uci_log(UCI_DBG_DBG, "Initializing clients\n");
+	uci_log(UCI_DBG_INFO, "Registering for MHI events.\n");
+
+	for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
+		if (uci_ctxt.chan_attrib[i * 2].uci_ownership) {
+			mhi_client = &uci_ctxt.client_handles[i];
+
+			r = mhi_register_client(mhi_client, i);
+
+			if (r) {
+				uci_log(UCI_DBG_CRITICAL,
+					"Failed to reg client %d ret %d\n",
+					i, r);
+			}
+		}
+	}
+	uci_log(UCI_DBG_INFO, "Allocating char devices.\n");
+	r = alloc_chrdev_region(&uci_ctxt.start_ctrl_nr,
+			0, MHI_MAX_SOFTWARE_CHANNELS,
+			DEVICE_NAME);
+
+	if (r < 0) {
+		uci_log(UCI_DBG_ERROR,
+				"Failed to alloc char devs, ret 0x%x\n", r);
+		goto failed_char_alloc;
+	}
+	uci_log(UCI_DBG_INFO, "Creating class\n");
+	uci_ctxt.mhi_uci_class = class_create(THIS_MODULE,
+						DEVICE_NAME);
+	if (IS_ERR(uci_ctxt.mhi_uci_class)) {
+		uci_log(UCI_DBG_ERROR,
+			"Failed to instantiate class, ret 0x%x\n", r);
+		r = -ENOMEM;
+		goto failed_class_add;
+	}
+
+	uci_log(UCI_DBG_INFO, "Setting up device nodes.\n");
+	for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
+		if (uci_ctxt.chan_attrib[i*2].uci_ownership) {
+			cdev_init(&uci_ctxt.cdev[i], &mhi_uci_client_fops);
+			uci_ctxt.cdev[i].owner = THIS_MODULE;
+			r = cdev_add(&uci_ctxt.cdev[i],
+					uci_ctxt.start_ctrl_nr + i, 1);
+			if (r < 0) {
+				uci_log(UCI_DBG_ERROR,
+					"Failed to add cdev %d, ret 0x%x\n",
+					i, r);
+				goto failed_char_add;
+			}
+			uci_ctxt.client_handles[i].dev =
+				device_create(uci_ctxt.mhi_uci_class, NULL,
+						uci_ctxt.start_ctrl_nr + i,
+						NULL, DEVICE_NAME "_pipe_%d",
+						i * 2);
+
+			if (IS_ERR(uci_ctxt.client_handles[i].dev)) {
+				uci_log(UCI_DBG_ERROR,
+						"Failed to add cdev %d\n", i);
+				cdev_del(&uci_ctxt.cdev[i]);
+				goto failed_device_create;
+			}
+		}
+	}
+	return 0;
+
+failed_char_add:
+failed_device_create:
+	while (i-- > 0) {
+		cdev_del(&uci_ctxt.cdev[i]);
+		device_destroy(uci_ctxt.mhi_uci_class,
+		MKDEV(MAJOR(uci_ctxt.start_ctrl_nr), i * 2));
+	};
+	class_destroy(uci_ctxt.mhi_uci_class);
+failed_class_add:
+	unregister_chrdev_region(MAJOR(uci_ctxt.start_ctrl_nr),
+			MHI_MAX_SOFTWARE_CHANNELS);
+failed_char_alloc:
+	return r;
+}
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index de108f6..505ba01 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -231,6 +231,7 @@
 	POWER_SUPPLY_ATTR(pe_start),
 	POWER_SUPPLY_ATTR(set_ship_mode),
 	POWER_SUPPLY_ATTR(boost_current),
+	POWER_SUPPLY_ATTR(force_tlim),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 30d2a7f..474f914 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -35,11 +35,11 @@
 	  Level Cache. This provides interfaces to client's that use the LLCC.
 	  Say yes here to enable LLCC slice driver.
 
-config QCOM_MSMSKUNK_LLCC
-	tristate "Qualcomm Technologies, Inc. MSMSKUNK LLCC driver"
+config QCOM_SDM845_LLCC
+	tristate "Qualcomm Technologies, Inc. SDM845 LLCC driver"
 	depends on QCOM_LLCC
 	help
-	  Say yes here to enable the LLCC driver for MSMSKUNK. This is provides
+	  Say yes here to enable the LLCC driver for SDM845. This is provides
 	  data required to configure LLCC so that clients can start using the
 	  LLCC slices.
 
@@ -302,6 +302,19 @@
 	  allows for G-Link communication with remote subsystems that are
 	  external to the System-on-Chip.
 
+config MSM_SPCOM
+	depends on MSM_GLINK
+	bool "Secure Processor Communication over GLINK"
+	help
+	  spcom driver allows loading Secure Processor Applications and
+	  sending messages to Secure Processor Applications.
+	  spcom provides interface to both user space app and kernel driver.
+	  It is using glink as the transport layer, which provides multiple
+	  logical channels over single physical channel.
+	  The physical layer is based on shared memory and interrupts.
+	  spcom provides clients/server API, although currently only one client
+	  or server is allowed per logical channel.
+
 config TRACER_PKT
 	bool "Tracer Packet"
 	help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index c55ebf1..531685c 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,7 +1,7 @@
 obj-$(CONFIG_QCOM_CPUSS_DUMP) += cpuss_dump.o
 obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
 obj-$(CONFIG_QCOM_LLCC) += llcc-core.o llcc-slice.o
-obj-$(CONFIG_QCOM_MSMSKUNK_LLCC) += llcc-msmskunk.o
+obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
 obj-$(CONFIG_QCOM_LLCC_AMON) += llcc-amon.o
 obj-$(CONFIG_QCOM_PM)	+=	spm.o
 obj-$(CONFIG_QCOM_SMD) +=	smd.o
@@ -29,6 +29,7 @@
 obj-$(CONFIG_MSM_GLINK_LOOPBACK_SERVER) += glink_loopback_server.o
 obj-$(CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT) += glink_smem_native_xprt.o
 obj-$(CONFIG_MSM_GLINK_SPI_XPRT) += glink_spi_xprt.o
+obj-$(CONFIG_MSM_SPCOM) += spcom.o
 obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o
 obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/
 obj-$(CONFIG_QTI_RPMH_API) += rpmh.o
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 59d7f64..7c08c28 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -36,6 +36,7 @@
 #include <linux/thread_info.h>
 #include <linux/uaccess.h>
 #include <linux/qpnp/qpnp-adc.h>
+#include <linux/etherdevice.h>
 #include <soc/qcom/memory_dump.h>
 #include <soc/qcom/icnss.h>
 #include <soc/qcom/msm_qmi_interface.h>
@@ -50,12 +51,12 @@
 #include "wlan_firmware_service_v01.h"
 
 #ifdef CONFIG_ICNSS_DEBUG
-unsigned long qmi_timeout = 3000;
+unsigned long qmi_timeout = 10000;
 module_param(qmi_timeout, ulong, 0600);
 
 #define WLFW_TIMEOUT_MS			qmi_timeout
 #else
-#define WLFW_TIMEOUT_MS			3000
+#define WLFW_TIMEOUT_MS			10000
 #endif
 #define WLFW_SERVICE_INS_ID_V01		0
 #define WLFW_CLIENT_ID			0x4b4e454c
@@ -64,126 +65,6 @@
 #define NUM_REG_LOG_PAGES		4
 #define ICNSS_MAGIC			0x5abc5abc
 
-/*
- * Registers: MPM2_PSHOLD
- * Base Address: 0x10AC000
- */
-#define MPM_WCSSAON_CONFIG_OFFSET				0x18
-#define MPM_WCSSAON_CONFIG_ARES_N				BIT(0)
-#define MPM_WCSSAON_CONFIG_WLAN_DISABLE				BIT(1)
-#define MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD			BIT(6)
-#define MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD_VAL		BIT(7)
-#define MPM_WCSSAON_CONFIG_FORCE_ACTIVE				BIT(14)
-#define MPM_WCSSAON_CONFIG_FORCE_XO_ENABLE			BIT(19)
-#define MPM_WCSSAON_CONFIG_DISCONNECT_CLR			BIT(21)
-#define MPM_WCSSAON_CONFIG_M2W_CLAMP_EN				BIT(22)
-
-/*
- * Registers: WCSS_SR_SHADOW_REGISTERS
- * Base Address: 0x18820000
- */
-#define SR_WCSSAON_SR_LSB_OFFSET				0x22070
-#define SR_WCSSAON_SR_LSB_RETENTION_STATUS			BIT(20)
-
-#define SR_PMM_SR_MSB						0x2206C
-#define SR_PMM_SR_MSB_AHB_CLOCK_MASK				GENMASK(26, 22)
-#define SR_PMM_SR_MSB_XO_CLOCK_MASK				GENMASK(31, 27)
-
-/*
- * Registers: WCSS_HM_A_WCSS_CLK_CTL_WCSS_CC_REG
- * Base Address: 0x189D0000
- */
-#define WCSS_WLAN1_GDSCR_OFFSET					0x1D3004
-#define WCSS_WLAN1_GDSCR_SW_COLLAPSE				BIT(0)
-#define WCSS_WLAN1_GDSCR_HW_CONTROL				BIT(1)
-#define WCSS_WLAN1_GDSCR_PWR_ON					BIT(31)
-
-#define WCSS_RFACTRL_GDSCR_OFFSET				0x1D60C8
-#define WCSS_RFACTRL_GDSCR_SW_COLLAPSE				BIT(0)
-#define WCSS_RFACTRL_GDSCR_HW_CONTROL				BIT(1)
-#define WCSS_RFACTRL_GDSCR_PWR_ON				BIT(31)
-
-#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET			0x1D1004
-#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_SW_COLLAPSE			BIT(0)
-#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_HW_CONTROL			BIT(1)
-#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_PWR_ON			BIT(31)
-
-#define WCSS_CLK_CTL_NOC_CMD_RCGR_OFFSET			0x1D1030
-#define WCSS_CLK_CTL_NOC_CMD_RCGR_UPDATE			BIT(0)
-
-#define WCSS_CLK_CTL_NOC_CFG_RCGR_OFFSET			0x1D1034
-#define WCSS_CLK_CTL_NOC_CFG_RCGR_SRC_SEL			GENMASK(10, 8)
-
-#define WCSS_CLK_CTL_REF_CMD_RCGR_OFFSET			0x1D602C
-#define WCSS_CLK_CTL_REF_CMD_RCGR_UPDATE			BIT(0)
-
-#define WCSS_CLK_CTL_REF_CFG_RCGR_OFFSET			0x1D6030
-#define WCSS_CLK_CTL_REF_CFG_RCGR_SRC_SEL			GENMASK(10, 8)
-
-/*
- * Registers: WCSS_HM_A_WIFI_APB_3_A_WCMN_MAC_WCMN_REG
- * Base Address: 0x18AF0000
- */
-#define WCMN_PMM_WLAN1_CFG_REG1_OFFSET				0x2F0804
-#define WCMN_PMM_WLAN1_CFG_REG1_RFIF_ADC_PORDN_N		BIT(9)
-#define WCMN_PMM_WLAN1_CFG_REG1_ADC_DIGITAL_CLAMP		BIT(10)
-
-/*
- * Registers: WCSS_HM_A_PMM_PMM
- * Base Address: 0x18880000
- */
-#define WCSS_HM_A_PMM_ROOT_CLK_ENABLE				0x80010
-#define PMM_TCXO_CLK_ENABLE					BIT(13)
-
-#define PMM_COMMON_IDLEREQ_CSR_OFFSET				0x80120
-#define PMM_COMMON_IDLEREQ_CSR_SW_WNOC_IDLEREQ_SET		BIT(16)
-#define PMM_COMMON_IDLEREQ_CSR_WNOC_IDLEACK			BIT(26)
-#define PMM_COMMON_IDLEREQ_CSR_WNOC_IDLE			BIT(27)
-
-#define PMM_RFACTRL_IDLEREQ_CSR_OFFSET				0x80164
-#define PMM_RFACTRL_IDLEREQ_CSR_SW_RFACTRL_IDLEREQ_SET		BIT(16)
-#define PMM_RFACTRL_IDLEREQ_CSR_RFACTRL_IDLETACK		BIT(26)
-
-#define PMM_WSI_CMD_OFFSET					0x800E0
-#define PMM_WSI_CMD_USE_WLAN1_WSI				BIT(0)
-#define PMM_WSI_CMD_SW_USE_PMM_WSI				BIT(2)
-#define PMM_WSI_CMD_SW_BUS_SYNC					BIT(3)
-#define PMM_WSI_CMD_SW_RF_RESET					BIT(4)
-#define PMM_WSI_CMD_SW_REG_READ					BIT(5)
-#define PMM_WSI_CMD_SW_XO_DIS					BIT(8)
-#define PMM_WSI_CMD_SW_FORCE_IDLE				BIT(9)
-#define PMM_WSI_CMD_PMM_WSI_SM					GENMASK(24, 16)
-#define PMM_WSI_CMD_RF_CMD_IP					BIT(31)
-
-#define PMM_REG_RW_ADDR_OFFSET					0x800F0
-#define PMM_REG_RW_ADDR_SW_REG_RW_ADDR				GENMASK(15, 0)
-
-#define PMM_REG_READ_DATA_OFFSET				0x800F8
-
-#define PMM_RF_VAULT_REG_ADDR_OFFSET				0x800FC
-#define PMM_RF_VAULT_REG_ADDR_RF_VAULT_REG_ADDR			GENMASK(15, 0)
-
-#define PMM_RF_VAULT_REG_DATA_OFFSET				0x80100
-#define PMM_RF_VAULT_REG_DATA_RF_VAULT_REG_DATA			GENMASK(31, 0)
-
-#define PMM_XO_DIS_ADDR_OFFSET					0x800E8
-#define PMM_XO_DIS_ADDR_XO_DIS_ADDR				GENMASK(15, 0)
-
-#define PMM_XO_DIS_DATA_OFFSET					0x800EC
-#define PMM_XO_DIS_DATA_XO_DIS_DATA				GENMASK(31, 0)
-
-#define PMM_RF_RESET_ADDR_OFFSET				0x80104
-#define PMM_RF_RESET_ADDR_RF_RESET_ADDR				GENMASK(15, 0)
-
-#define PMM_RF_RESET_DATA_OFFSET				0x80108
-#define PMM_RF_RESET_DATA_RF_RESET_DATA				GENMASK(31, 0)
-
-#define ICNSS_HW_REG_RETRY					10
-
-#define WCSS_HM_A_PMM_HW_VERSION_V10				0x40000000
-#define WCSS_HM_A_PMM_HW_VERSION_V20				0x40010000
-#define WCSS_HM_A_PMM_HW_VERSION_Q10				0x40010001
-
 #define ICNSS_SERVICE_LOCATION_CLIENT_NAME			"ICNSS-WLAN"
 #define ICNSS_WLAN_SERVICE_NAME					"wlan/fw"
 
@@ -262,13 +143,18 @@
 	SSR_ONLY,
 	PDR_ONLY,
 	VBATT_DISABLE,
+	FW_REJUVENATE_ENABLE,
 };
 
-#define ICNSS_QUIRKS_DEFAULT		BIT(VBATT_DISABLE)
+#define ICNSS_QUIRKS_DEFAULT		(BIT(VBATT_DISABLE) | \
+					 BIT(FW_REJUVENATE_ENABLE))
 
 unsigned long quirks = ICNSS_QUIRKS_DEFAULT;
 module_param(quirks, ulong, 0600);
 
+uint64_t dynamic_feature_mask = QMI_WLFW_FW_REJUVENATE_V01;
+module_param(dynamic_feature_mask, ullong, 0600);
+
 void *icnss_ipc_log_context;
 
 #ifdef CONFIG_ICNSS_DEBUG
@@ -294,6 +180,7 @@
 
 struct icnss_event_pd_service_down_data {
 	bool crashed;
+	bool fw_rejuvenate;
 };
 
 struct icnss_driver_event {
@@ -325,38 +212,6 @@
 	irqreturn_t (*handler)(int, void *);
 };
 
-struct icnss_vreg_info {
-	struct regulator *reg;
-	const char *name;
-	u32 min_v;
-	u32 max_v;
-	u32 load_ua;
-	unsigned long settle_delay;
-	bool required;
-};
-
-struct icnss_clk_info {
-	struct clk *handle;
-	const char *name;
-	u32 freq;
-	bool required;
-};
-
-static struct icnss_vreg_info icnss_vreg_info[] = {
-	{NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, true},
-	{NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
-	{NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
-	{NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
-};
-
-#define ICNSS_VREG_INFO_SIZE		ARRAY_SIZE(icnss_vreg_info)
-
-static struct icnss_clk_info icnss_clk_info[] = {
-	{NULL, "cxo_ref_clk_pin", 0, false},
-};
-
-#define ICNSS_CLK_INFO_SIZE		ARRAY_SIZE(icnss_clk_info)
-
 struct icnss_stats {
 	struct {
 		uint32_t posted;
@@ -407,6 +262,21 @@
 	uint32_t vbatt_req;
 	uint32_t vbatt_resp;
 	uint32_t vbatt_req_err;
+	uint32_t rejuvenate_ack_req;
+	uint32_t rejuvenate_ack_resp;
+	uint32_t rejuvenate_ack_err;
+};
+
+#define MAX_NO_OF_MAC_ADDR 4
+struct icnss_wlan_mac_addr {
+	u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
+	uint32_t no_of_mac_addr_set;
+};
+
+struct service_notifier_context {
+	void *handle;
+	uint32_t instance_id;
+	char name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
 };
 
 static struct icnss_priv {
@@ -414,13 +284,9 @@
 	struct platform_device *pdev;
 	struct icnss_driver_ops *ops;
 	struct ce_irq_list ce_irq_list[ICNSS_MAX_IRQ_REGISTRATIONS];
-	struct icnss_vreg_info vreg_info[ICNSS_VREG_INFO_SIZE];
-	struct icnss_clk_info clk_info[ICNSS_CLK_INFO_SIZE];
 	u32 ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
 	phys_addr_t mem_base_pa;
 	void __iomem *mem_base_va;
-	phys_addr_t mpm_config_pa;
-	void __iomem *mpm_config_va;
 	struct dma_iommu_mapping *smmu_mapping;
 	dma_addr_t smmu_iova_start;
 	size_t smmu_iova_len;
@@ -450,7 +316,7 @@
 	spinlock_t on_off_lock;
 	struct icnss_stats stats;
 	struct work_struct service_notifier_work;
-	void **service_notifier;
+	struct service_notifier_context *service_notifier;
 	struct notifier_block service_notifier_nb;
 	int total_domains;
 	struct notifier_block get_service_nb;
@@ -466,73 +332,10 @@
 	uint64_t vph_pwr;
 	atomic_t pm_count;
 	struct ramdump_device *msa0_dump_dev;
+	bool is_wlan_mac_set;
+	struct icnss_wlan_mac_addr wlan_mac_addr;
 } *penv;
 
-static void icnss_hw_write_reg(void *base, u32 offset, u32 val)
-{
-	writel_relaxed(val, base + offset);
-	wmb(); /* Ensure data is written to hardware register */
-}
-
-static u32 icnss_hw_read_reg(void *base, u32 offset)
-{
-	u32 rdata = readl_relaxed(base + offset);
-
-	icnss_reg_dbg(" READ: offset: 0x%06x 0x%08x\n", offset, rdata);
-
-	return rdata;
-}
-
-static void icnss_hw_write_reg_field(void *base, u32 offset, u32 mask, u32 val)
-{
-	u32 shift = find_first_bit((void *)&mask, 32);
-	u32 rdata = readl_relaxed(base + offset);
-
-	val = (rdata & ~mask) | (val << shift);
-
-	icnss_reg_dbg("WRITE: offset: 0x%06x 0x%08x -> 0x%08x\n",
-		     offset, rdata, val);
-
-	icnss_hw_write_reg(base, offset, val);
-}
-
-static int icnss_hw_poll_reg_field(void *base, u32 offset, u32 mask, u32 val,
-				    unsigned long usecs, int retry)
-{
-	u32 shift;
-	u32 rdata;
-	int r = 0;
-
-	shift = find_first_bit((void *)&mask, 32);
-
-	val = val << shift;
-
-	rdata  = readl_relaxed(base + offset);
-
-	icnss_reg_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
-		     offset, val, rdata, mask);
-
-	while ((rdata & mask) != val) {
-		if (retry != 0 && r >= retry) {
-			icnss_pr_err("POLL FAILED: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
-				     offset, val, rdata, mask);
-
-			return -EIO;
-		}
-
-		r++;
-		udelay(usecs);
-		rdata = readl_relaxed(base + offset);
-
-		if (retry)
-			icnss_reg_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
-					 offset, val, rdata, mask);
-
-	}
-
-	return 0;
-}
-
 static void icnss_pm_stay_awake(struct icnss_priv *priv)
 {
 	if (atomic_inc_return(&priv->pm_count) != 1)
@@ -877,683 +680,6 @@
 	return ret;
 }
 
-static int icnss_vreg_on(struct icnss_priv *priv)
-{
-	int ret = 0;
-	struct icnss_vreg_info *vreg_info;
-	int i;
-
-	for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
-		vreg_info = &priv->vreg_info[i];
-
-		if (!vreg_info->reg)
-			continue;
-
-		icnss_pr_dbg("Regulator %s being enabled\n", vreg_info->name);
-
-		ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
-					    vreg_info->max_v);
-
-		if (ret) {
-			icnss_pr_err("Regulator %s, can't set voltage: min_v: %u, max_v: %u, ret: %d\n",
-				     vreg_info->name, vreg_info->min_v,
-				     vreg_info->max_v, ret);
-			break;
-		}
-
-		if (vreg_info->load_ua) {
-			ret = regulator_set_load(vreg_info->reg,
-						 vreg_info->load_ua);
-
-			if (ret < 0) {
-				icnss_pr_err("Regulator %s, can't set load: %u, ret: %d\n",
-					     vreg_info->name,
-					     vreg_info->load_ua, ret);
-				break;
-			}
-		}
-
-		ret = regulator_enable(vreg_info->reg);
-		if (ret) {
-			icnss_pr_err("Regulator %s, can't enable: %d\n",
-				     vreg_info->name, ret);
-			break;
-		}
-
-		if (vreg_info->settle_delay)
-			udelay(vreg_info->settle_delay);
-	}
-
-	if (!ret)
-		return 0;
-
-	for (; i >= 0; i--) {
-		vreg_info = &priv->vreg_info[i];
-
-		if (!vreg_info->reg)
-			continue;
-
-		regulator_disable(vreg_info->reg);
-
-		regulator_set_load(vreg_info->reg, 0);
-
-		regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
-	}
-
-	return ret;
-}
-
-static int icnss_vreg_off(struct icnss_priv *priv)
-{
-	int ret = 0;
-	struct icnss_vreg_info *vreg_info;
-	int i;
-
-	for (i = ICNSS_VREG_INFO_SIZE - 1; i >= 0; i--) {
-		vreg_info = &priv->vreg_info[i];
-
-		if (!vreg_info->reg)
-			continue;
-
-		icnss_pr_dbg("Regulator %s being disabled\n", vreg_info->name);
-
-		ret = regulator_disable(vreg_info->reg);
-		if (ret)
-			icnss_pr_err("Regulator %s, can't disable: %d\n",
-				     vreg_info->name, ret);
-
-		ret = regulator_set_load(vreg_info->reg, 0);
-		if (ret < 0)
-			icnss_pr_err("Regulator %s, can't set load: %d\n",
-				     vreg_info->name, ret);
-
-		ret = regulator_set_voltage(vreg_info->reg, 0,
-					    vreg_info->max_v);
-
-		if (ret)
-			icnss_pr_err("Regulator %s, can't set voltage: %d\n",
-				     vreg_info->name, ret);
-	}
-
-	return ret;
-}
-
-static int icnss_clk_init(struct icnss_priv *priv)
-{
-	struct icnss_clk_info *clk_info;
-	int i;
-	int ret = 0;
-
-	for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
-		clk_info = &priv->clk_info[i];
-
-		if (!clk_info->handle)
-			continue;
-
-		icnss_pr_dbg("Clock %s being enabled\n", clk_info->name);
-
-		if (clk_info->freq) {
-			ret = clk_set_rate(clk_info->handle, clk_info->freq);
-
-			if (ret) {
-				icnss_pr_err("Clock %s, can't set frequency: %u, ret: %d\n",
-					     clk_info->name, clk_info->freq,
-					     ret);
-				break;
-			}
-		}
-
-		ret = clk_prepare_enable(clk_info->handle);
-
-		if (ret) {
-			icnss_pr_err("Clock %s, can't enable: %d\n",
-				     clk_info->name, ret);
-			break;
-		}
-	}
-
-	if (ret == 0)
-		return 0;
-
-	for (; i >= 0; i--) {
-		clk_info = &priv->clk_info[i];
-
-		if (!clk_info->handle)
-			continue;
-
-		clk_disable_unprepare(clk_info->handle);
-	}
-
-	return ret;
-}
-
-static int icnss_clk_deinit(struct icnss_priv *priv)
-{
-	struct icnss_clk_info *clk_info;
-	int i;
-
-	for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
-		clk_info = &priv->clk_info[i];
-
-		if (!clk_info->handle)
-			continue;
-
-		icnss_pr_dbg("Clock %s being disabled\n", clk_info->name);
-
-		clk_disable_unprepare(clk_info->handle);
-	}
-
-	return 0;
-}
-
-static void icnss_hw_top_level_release_reset(struct icnss_priv *priv)
-{
-	icnss_pr_dbg("RESET: HW Release reset: state: 0x%lx\n", priv->state);
-
-	icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
-				 MPM_WCSSAON_CONFIG_ARES_N, 1);
-
-	icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
-				 MPM_WCSSAON_CONFIG_WLAN_DISABLE, 0x0);
-
-	icnss_hw_poll_reg_field(priv->mpm_config_va,
-				MPM_WCSSAON_CONFIG_OFFSET,
-				MPM_WCSSAON_CONFIG_ARES_N, 1, 10,
-				ICNSS_HW_REG_RETRY);
-}
-
-static void icnss_hw_top_level_reset(struct icnss_priv *priv)
-{
-	icnss_pr_dbg("RESET: HW top level reset: state: 0x%lx\n", priv->state);
-
-	icnss_hw_write_reg_field(priv->mpm_config_va,
-				 MPM_WCSSAON_CONFIG_OFFSET,
-				 MPM_WCSSAON_CONFIG_ARES_N, 0);
-
-	icnss_hw_poll_reg_field(priv->mpm_config_va,
-				MPM_WCSSAON_CONFIG_OFFSET,
-				MPM_WCSSAON_CONFIG_ARES_N, 0, 10,
-				ICNSS_HW_REG_RETRY);
-}
-
-static void icnss_hw_io_reset(struct icnss_priv *priv, bool on)
-{
-	u32 hw_version = priv->soc_info.soc_id;
-
-	if (on && !test_bit(ICNSS_FW_READY, &priv->state))
-		return;
-
-	icnss_pr_dbg("HW io reset: %s, SoC: 0x%x, state: 0x%lx\n",
-		     on ? "ON" : "OFF", priv->soc_info.soc_id, priv->state);
-
-	if (hw_version == WCSS_HM_A_PMM_HW_VERSION_V10 ||
-	    hw_version == WCSS_HM_A_PMM_HW_VERSION_V20) {
-		icnss_hw_write_reg_field(priv->mpm_config_va,
-				MPM_WCSSAON_CONFIG_OFFSET,
-				MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD_VAL, 0);
-		icnss_hw_write_reg_field(priv->mpm_config_va,
-				MPM_WCSSAON_CONFIG_OFFSET,
-				MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD, on);
-	} else if (hw_version == WCSS_HM_A_PMM_HW_VERSION_Q10) {
-		icnss_hw_write_reg_field(priv->mpm_config_va,
-				MPM_WCSSAON_CONFIG_OFFSET,
-				MPM_WCSSAON_CONFIG_M2W_CLAMP_EN,
-				on);
-	}
-}
-
-static int icnss_hw_reset_wlan_ss_power_down(struct icnss_priv *priv)
-{
-	u32 rdata;
-
-	icnss_pr_dbg("RESET: WLAN SS power down, state: 0x%lx\n", priv->state);
-
-	rdata = icnss_hw_read_reg(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET);
-
-	if ((rdata & WCSS_WLAN1_GDSCR_PWR_ON) == 0)
-		return 0;
-
-	icnss_hw_write_reg_field(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET,
-				 WCSS_WLAN1_GDSCR_HW_CONTROL, 0);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET,
-				 WCSS_WLAN1_GDSCR_SW_COLLAPSE, 1);
-
-	icnss_hw_poll_reg_field(priv->mem_base_va, WCSS_WLAN1_GDSCR_OFFSET,
-				WCSS_WLAN1_GDSCR_PWR_ON, 0, 10,
-				ICNSS_HW_REG_RETRY);
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 WCMN_PMM_WLAN1_CFG_REG1_OFFSET,
-				 WCMN_PMM_WLAN1_CFG_REG1_ADC_DIGITAL_CLAMP, 1);
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 WCMN_PMM_WLAN1_CFG_REG1_OFFSET,
-				 WCMN_PMM_WLAN1_CFG_REG1_RFIF_ADC_PORDN_N, 0);
-
-	return 0;
-}
-
-static int icnss_hw_reset_common_ss_power_down(struct icnss_priv *priv)
-{
-	u32 rdata;
-
-	icnss_pr_dbg("RESET: Common SS power down, state: 0x%lx\n",
-		     priv->state);
-
-	rdata = icnss_hw_read_reg(priv->mem_base_va,
-				  WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET);
-
-	if ((rdata & WCSS_CLK_CTL_WCSS_CSS_GDSCR_PWR_ON) == 0)
-		return 0;
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 PMM_COMMON_IDLEREQ_CSR_OFFSET,
-				 PMM_COMMON_IDLEREQ_CSR_SW_WNOC_IDLEREQ_SET,
-				 1);
-
-	icnss_hw_poll_reg_field(priv->mem_base_va,
-				PMM_COMMON_IDLEREQ_CSR_OFFSET,
-				PMM_COMMON_IDLEREQ_CSR_WNOC_IDLEACK,
-				1, 20, ICNSS_HW_REG_RETRY);
-
-	icnss_hw_poll_reg_field(priv->mem_base_va,
-				PMM_COMMON_IDLEREQ_CSR_OFFSET,
-				PMM_COMMON_IDLEREQ_CSR_WNOC_IDLE,
-				1, 10, ICNSS_HW_REG_RETRY);
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET,
-				 WCSS_CLK_CTL_WCSS_CSS_GDSCR_HW_CONTROL, 0);
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET,
-				 WCSS_CLK_CTL_WCSS_CSS_GDSCR_SW_COLLAPSE, 1);
-
-	icnss_hw_poll_reg_field(priv->mem_base_va,
-				WCSS_CLK_CTL_WCSS_CSS_GDSCR_OFFSET,
-				WCSS_CLK_CTL_WCSS_CSS_GDSCR_PWR_ON, 0, 10,
-				ICNSS_HW_REG_RETRY);
-
-	return 0;
-
-}
-
-static int icnss_hw_reset_wlan_rfactrl_power_down(struct icnss_priv *priv)
-{
-	u32 rdata;
-
-	icnss_pr_dbg("RESET: RFACTRL power down, state: 0x%lx\n", priv->state);
-
-	rdata = icnss_hw_read_reg(priv->mem_base_va, WCSS_RFACTRL_GDSCR_OFFSET);
-
-	if ((rdata & WCSS_RFACTRL_GDSCR_PWR_ON) == 0)
-		return 0;
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 PMM_RFACTRL_IDLEREQ_CSR_OFFSET,
-				 PMM_RFACTRL_IDLEREQ_CSR_SW_RFACTRL_IDLEREQ_SET,
-				 1);
-
-	icnss_hw_poll_reg_field(priv->mem_base_va,
-				PMM_RFACTRL_IDLEREQ_CSR_OFFSET,
-				PMM_RFACTRL_IDLEREQ_CSR_RFACTRL_IDLETACK,
-				1, 10, ICNSS_HW_REG_RETRY);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, WCSS_RFACTRL_GDSCR_OFFSET,
-				 WCSS_RFACTRL_GDSCR_HW_CONTROL, 0);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, WCSS_RFACTRL_GDSCR_OFFSET,
-				 WCSS_RFACTRL_GDSCR_SW_COLLAPSE, 1);
-
-	return 0;
-}
-
-static void icnss_hw_wsi_cmd_error_recovery(struct icnss_priv *priv)
-{
-	icnss_pr_dbg("RESET: WSI CMD Error recovery, state: 0x%lx\n",
-		     priv->state);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_FORCE_IDLE, 1);
-
-	icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				PMM_WSI_CMD_PMM_WSI_SM, 1, 100, 0);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_FORCE_IDLE, 0);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_BUS_SYNC, 1);
-
-	icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				PMM_WSI_CMD_RF_CMD_IP, 0, 100, 0);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_BUS_SYNC, 0);
-}
-
-static u32 icnss_hw_rf_register_read_command(struct icnss_priv *priv, u32 addr)
-{
-	u32 rdata = 0;
-	int ret;
-	int i;
-
-	icnss_pr_dbg("RF register read command, addr: 0x%04x, state: 0x%lx\n",
-		     addr, priv->state);
-
-	for (i = 0; i < ICNSS_HW_REG_RETRY; i++) {
-		icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-					 PMM_WSI_CMD_USE_WLAN1_WSI, 1);
-
-		icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-					 PMM_WSI_CMD_SW_USE_PMM_WSI, 1);
-
-		icnss_hw_write_reg_field(priv->mem_base_va,
-					 PMM_REG_RW_ADDR_OFFSET,
-					 PMM_REG_RW_ADDR_SW_REG_RW_ADDR,
-					 addr & 0xFFFF);
-
-		icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-					 PMM_WSI_CMD_SW_REG_READ, 1);
-
-		ret = icnss_hw_poll_reg_field(priv->mem_base_va,
-					      PMM_WSI_CMD_OFFSET,
-					      PMM_WSI_CMD_RF_CMD_IP, 0, 10,
-					      ICNSS_HW_REG_RETRY);
-		if (ret == 0)
-			break;
-
-		icnss_hw_wsi_cmd_error_recovery(priv);
-	}
-
-
-	rdata = icnss_hw_read_reg(priv->mem_base_va, PMM_REG_READ_DATA_OFFSET);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_USE_PMM_WSI, 0);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_REG_READ, 0);
-
-	icnss_pr_dbg("RF register read command, data: 0x%08x, state: 0x%lx\n",
-		     rdata, priv->state);
-
-	return rdata;
-}
-
-static int icnss_hw_reset_rf_reset_cmd(struct icnss_priv *priv)
-{
-	u32 rdata;
-	int ret;
-
-	icnss_pr_dbg("RESET: RF reset command, state: 0x%lx\n", priv->state);
-
-	rdata = icnss_hw_rf_register_read_command(priv, 0x5080);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_USE_WLAN1_WSI, 1);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_USE_PMM_WSI, 1);
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 PMM_RF_VAULT_REG_ADDR_OFFSET,
-				 PMM_RF_VAULT_REG_ADDR_RF_VAULT_REG_ADDR,
-				 0x5082);
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 PMM_RF_VAULT_REG_DATA_OFFSET,
-				 PMM_RF_VAULT_REG_DATA_RF_VAULT_REG_DATA,
-				 0x12AB8FAD);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_RF_RESET_ADDR_OFFSET,
-				 PMM_RF_RESET_ADDR_RF_RESET_ADDR, 0x5080);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_RF_RESET_DATA_OFFSET,
-				 PMM_RF_RESET_DATA_RF_RESET_DATA,
-				 rdata & 0xBFFF);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_RF_RESET, 1);
-
-	ret = icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				      PMM_WSI_CMD_RF_CMD_IP, 0, 10,
-				      ICNSS_HW_REG_RETRY);
-
-	if (ret) {
-		icnss_pr_err("RESET: RF reset command failed, state: 0x%lx\n",
-			     priv->state);
-		return ret;
-	}
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_USE_PMM_WSI, 0);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_RF_RESET, 0);
-
-	return 0;
-}
-
-static int icnss_hw_reset_switch_to_cxo(struct icnss_priv *priv)
-{
-	u32 rdata;
-
-	icnss_pr_dbg("RESET: Switch to CXO, state: 0x%lx\n", priv->state);
-
-	rdata = icnss_hw_read_reg(priv->mem_base_va,
-				  WCSS_HM_A_PMM_ROOT_CLK_ENABLE);
-
-	icnss_pr_dbg("RESET: PMM_TCXO_CLK_ENABLE : 0x%05lx\n",
-		     rdata & PMM_TCXO_CLK_ENABLE);
-
-	if ((rdata & PMM_TCXO_CLK_ENABLE) == 0) {
-		icnss_pr_dbg("RESET: Set PMM_TCXO_CLK_ENABLE to 1\n");
-
-		icnss_hw_write_reg_field(priv->mem_base_va,
-					 WCSS_HM_A_PMM_ROOT_CLK_ENABLE,
-					 PMM_TCXO_CLK_ENABLE, 1);
-		icnss_hw_poll_reg_field(priv->mem_base_va,
-					WCSS_HM_A_PMM_ROOT_CLK_ENABLE,
-					PMM_TCXO_CLK_ENABLE, 1, 10,
-					ICNSS_HW_REG_RETRY);
-	}
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 WCSS_CLK_CTL_NOC_CFG_RCGR_OFFSET,
-				 WCSS_CLK_CTL_NOC_CFG_RCGR_SRC_SEL, 0);
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 WCSS_CLK_CTL_NOC_CMD_RCGR_OFFSET,
-				 WCSS_CLK_CTL_NOC_CMD_RCGR_UPDATE, 1);
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 WCSS_CLK_CTL_REF_CFG_RCGR_OFFSET,
-				 WCSS_CLK_CTL_REF_CFG_RCGR_SRC_SEL, 0);
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 WCSS_CLK_CTL_REF_CMD_RCGR_OFFSET,
-				 WCSS_CLK_CTL_REF_CMD_RCGR_UPDATE, 1);
-
-	return 0;
-}
-
-static int icnss_hw_reset_xo_disable_cmd(struct icnss_priv *priv)
-{
-	int ret;
-
-	icnss_pr_dbg("RESET: XO disable command, state: 0x%lx\n", priv->state);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_USE_WLAN1_WSI, 1);
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_USE_PMM_WSI, 1);
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 PMM_RF_VAULT_REG_ADDR_OFFSET,
-				 PMM_RF_VAULT_REG_ADDR_RF_VAULT_REG_ADDR,
-				 0x5082);
-
-	icnss_hw_write_reg_field(priv->mem_base_va,
-				 PMM_RF_VAULT_REG_DATA_OFFSET,
-				 PMM_RF_VAULT_REG_DATA_RF_VAULT_REG_DATA,
-				 0x12AB8FAD);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_XO_DIS_ADDR_OFFSET,
-				 PMM_XO_DIS_ADDR_XO_DIS_ADDR, 0x5081);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_XO_DIS_DATA_OFFSET,
-				 PMM_XO_DIS_DATA_XO_DIS_DATA, 1);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_XO_DIS, 1);
-
-	ret = icnss_hw_poll_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				      PMM_WSI_CMD_RF_CMD_IP, 0, 10,
-				      ICNSS_HW_REG_RETRY);
-	if (ret) {
-		icnss_pr_err("RESET: XO disable command failed, state: 0x%lx\n",
-			     priv->state);
-		return ret;
-	}
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_USE_PMM_WSI, 0);
-
-	icnss_hw_write_reg_field(priv->mem_base_va, PMM_WSI_CMD_OFFSET,
-				 PMM_WSI_CMD_SW_XO_DIS, 0);
-
-	return 0;
-}
-
-static int icnss_hw_reset(struct icnss_priv *priv)
-{
-	u32 rdata;
-	u32 rdata1;
-	int i;
-	int ret = 0;
-
-	if (test_bit(HW_ONLY_TOP_LEVEL_RESET, &quirks))
-		goto top_level_reset;
-
-	icnss_pr_dbg("RESET: START, state: 0x%lx\n", priv->state);
-
-	icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
-				 MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 1);
-
-	icnss_hw_poll_reg_field(priv->mem_base_va, SR_WCSSAON_SR_LSB_OFFSET,
-				SR_WCSSAON_SR_LSB_RETENTION_STATUS, 1, 200,
-				ICNSS_HW_REG_RETRY);
-
-	for (i = 0; i < ICNSS_HW_REG_RETRY; i++) {
-		rdata = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
-		udelay(10);
-		rdata1 = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
-
-		icnss_pr_dbg("RESET: XO: 0x%05lx/0x%05lx, AHB: 0x%05lx/0x%05lx\n",
-			     rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK,
-			     rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK,
-			     rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK,
-			     rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK);
-
-		if ((rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK) !=
-		    (rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK) &&
-		    (rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK) !=
-		    (rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK))
-			break;
-
-		icnss_hw_write_reg_field(priv->mpm_config_va,
-					 MPM_WCSSAON_CONFIG_OFFSET,
-					 MPM_WCSSAON_CONFIG_FORCE_XO_ENABLE,
-					 0x1);
-		usleep_range(2000, 3000);
-	}
-
-	if (i >= ICNSS_HW_REG_RETRY)
-		goto top_level_reset;
-
-	icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
-				 MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0x1);
-
-	usleep_range(200, 300);
-
-	icnss_hw_reset_wlan_ss_power_down(priv);
-
-	icnss_hw_reset_common_ss_power_down(priv);
-
-	icnss_hw_reset_wlan_rfactrl_power_down(priv);
-
-	ret = icnss_hw_reset_rf_reset_cmd(priv);
-	if (ret) {
-		icnss_hw_write_reg_field(priv->mpm_config_va,
-					 MPM_WCSSAON_CONFIG_OFFSET,
-					 MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 0);
-		icnss_hw_write_reg_field(priv->mpm_config_va,
-					 MPM_WCSSAON_CONFIG_OFFSET,
-					 MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0);
-		icnss_hw_write_reg_field(priv->mpm_config_va,
-					 MPM_WCSSAON_CONFIG_OFFSET,
-					 MPM_WCSSAON_CONFIG_WLAN_DISABLE, 1);
-		goto top_level_reset;
-	}
-
-	icnss_hw_reset_switch_to_cxo(priv);
-
-	for (i = 0; i < ICNSS_HW_REG_RETRY; i++) {
-		rdata = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
-		usleep_range(5, 10);
-		rdata1 = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB);
-
-		icnss_pr_dbg("RESET: SR_PMM_SR_MSB: 0x%08x/0x%08x, XO: 0x%05lx/0x%05lx, AHB: 0x%05lx/0x%05lx\n",
-			     rdata, rdata1,
-			     rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK,
-			     rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK,
-			     rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK,
-			     rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK);
-
-		if ((rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK) !=
-		    (rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK) &&
-		    (rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK) !=
-		    (rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK))
-			break;
-		usleep_range(5, 10);
-	}
-
-	ret = icnss_hw_reset_xo_disable_cmd(priv);
-	if (ret) {
-		icnss_hw_write_reg_field(priv->mpm_config_va,
-					 MPM_WCSSAON_CONFIG_OFFSET,
-					 MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 0);
-		icnss_hw_write_reg_field(priv->mpm_config_va,
-					 MPM_WCSSAON_CONFIG_OFFSET,
-					 MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0);
-		icnss_hw_write_reg_field(priv->mpm_config_va,
-					 MPM_WCSSAON_CONFIG_OFFSET,
-					 MPM_WCSSAON_CONFIG_WLAN_DISABLE, 1);
-		goto top_level_reset;
-	}
-
-	icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
-				 MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 0);
-
-	icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
-				 MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0);
-
-	icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
-				 MPM_WCSSAON_CONFIG_WLAN_DISABLE, 1);
-
-	icnss_hw_poll_reg_field(priv->mem_base_va, SR_WCSSAON_SR_LSB_OFFSET,
-				BIT(26), 1, 200, ICNSS_HW_REG_RETRY);
-
-top_level_reset:
-	icnss_hw_top_level_reset(priv);
-
-	icnss_pr_dbg("RESET: DONE, state: 0x%lx\n", priv->state);
-
-	return 0;
-}
-
 static int icnss_hw_power_on(struct icnss_priv *priv)
 {
 	int ret = 0;
@@ -1569,21 +695,6 @@
 	set_bit(ICNSS_POWER_ON, &priv->state);
 	spin_unlock_irqrestore(&priv->on_off_lock, flags);
 
-	ret = icnss_vreg_on(priv);
-	if (ret)
-		goto out;
-
-	ret = icnss_clk_init(priv);
-	if (ret)
-		goto out;
-
-	icnss_hw_top_level_release_reset(priv);
-
-	icnss_hw_io_reset(penv, 1);
-
-	return ret;
-out:
-	clear_bit(ICNSS_POWER_ON, &priv->state);
 	return ret;
 }
 
@@ -1605,19 +716,6 @@
 	clear_bit(ICNSS_POWER_ON, &priv->state);
 	spin_unlock_irqrestore(&priv->on_off_lock, flags);
 
-	icnss_hw_io_reset(penv, 0);
-
-	icnss_hw_reset(priv);
-
-	icnss_clk_deinit(priv);
-
-	ret = icnss_vreg_off(priv);
-	if (ret)
-		goto out;
-
-	return ret;
-out:
-	set_bit(ICNSS_POWER_ON, &priv->state);
 	return ret;
 }
 
@@ -1637,6 +735,15 @@
 }
 EXPORT_SYMBOL(icnss_power_on);
 
+bool icnss_is_fw_ready(void)
+{
+	if (!penv)
+		return false;
+	else
+		return test_bit(ICNSS_FW_READY, &penv->state);
+}
+EXPORT_SYMBOL(icnss_is_fw_ready);
+
 int icnss_power_off(struct device *dev)
 {
 	struct icnss_priv *priv = dev_get_drvdata(dev);
@@ -1698,7 +805,7 @@
 	u32 size;
 	u32 dest_vmids[1] = {VMID_HLOS};
 	int source_vmlist[3] = {VMID_MSS_MSA, VMID_WLAN, 0};
-	int dest_perms[1] = {PERM_READ|PERM_WRITE};
+	int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
 	int source_nelems = 0;
 	int dest_nelems = sizeof(dest_vmids)/sizeof(u32);
 
@@ -1909,6 +1016,10 @@
 	req.msa_ready_enable = 1;
 	req.pin_connect_result_enable_valid = 1;
 	req.pin_connect_result_enable = 1;
+	if (test_bit(FW_REJUVENATE_ENABLE, &quirks)) {
+		req.rejuvenate_enable_valid = 1;
+		req.rejuvenate_enable = 1;
+	}
 
 	req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN;
 	req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01;
@@ -2124,7 +1235,7 @@
 	return ret;
 }
 
-static int wlfw_ini_send_sync_msg(bool enable_fw_log)
+static int wlfw_ini_send_sync_msg(uint8_t fw_log_mode)
 {
 	int ret;
 	struct wlfw_ini_req_msg_v01 req;
@@ -2134,14 +1245,14 @@
 	if (!penv || !penv->wlfw_clnt)
 		return -ENODEV;
 
-	icnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log: %d\n",
-		     penv->state, enable_fw_log);
+	icnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n",
+		     penv->state, fw_log_mode);
 
 	memset(&req, 0, sizeof(req));
 	memset(&resp, 0, sizeof(resp));
 
 	req.enablefwlog_valid = 1;
-	req.enablefwlog = enable_fw_log;
+	req.enablefwlog = fw_log_mode;
 
 	req_desc.max_msg_len = WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN;
 	req_desc.msg_id = QMI_WLFW_INI_REQ_V01;
@@ -2156,14 +1267,14 @@
 	ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
 			&resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS);
 	if (ret < 0) {
-		icnss_pr_err("Send INI req failed fw_log: %d, ret: %d\n",
-			     enable_fw_log, ret);
+		icnss_pr_err("Send INI req failed fw_log_mode: %d, ret: %d\n",
+			     fw_log_mode, ret);
 		goto out;
 	}
 
 	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
-		icnss_pr_err("QMI INI request rejected, fw_log:%d result:%d error:%d\n",
-			     enable_fw_log, resp.resp.result, resp.resp.error);
+		icnss_pr_err("QMI INI request rejected, fw_log_mode:%d result:%d error:%d\n",
+			     fw_log_mode, resp.resp.result, resp.resp.error);
 		ret = resp.resp.result;
 		goto out;
 	}
@@ -2298,6 +1409,114 @@
 	return ret;
 }
 
+static int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv)
+{
+	int ret;
+	struct wlfw_rejuvenate_ack_req_msg_v01 req;
+	struct wlfw_rejuvenate_ack_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	icnss_pr_dbg("Sending rejuvenate ack request, state: 0x%lx\n",
+		     priv->state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req_desc.max_msg_len = WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_REJUVENATE_ACK_REQ_V01;
+	req_desc.ei_array = wlfw_rejuvenate_ack_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_REJUVENATE_ACK_RESP_V01;
+	resp_desc.ei_array = wlfw_rejuvenate_ack_resp_msg_v01_ei;
+
+	priv->stats.rejuvenate_ack_req++;
+	ret = qmi_send_req_wait(priv->wlfw_clnt, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send rejuvenate ack req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI rejuvenate ack request rejected, result:%d error %d\n",
+			     resp.resp.result, resp.resp.error);
+		ret = resp.resp.result;
+		goto out;
+	}
+	priv->stats.rejuvenate_ack_resp++;
+	return 0;
+
+out:
+	priv->stats.rejuvenate_ack_err++;
+	ICNSS_ASSERT(false);
+	return ret;
+}
+
+static int wlfw_dynamic_feature_mask_send_sync_msg(struct icnss_priv *priv,
+					   uint64_t dynamic_feature_mask)
+{
+	int ret;
+	struct wlfw_dynamic_feature_mask_req_msg_v01 req;
+	struct wlfw_dynamic_feature_mask_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!test_bit(ICNSS_WLFW_QMI_CONNECTED, &priv->state)) {
+		icnss_pr_err("Invalid state for dynamic feature: 0x%lx\n",
+			     priv->state);
+		return -EINVAL;
+	}
+
+	if (!test_bit(FW_REJUVENATE_ENABLE, &quirks)) {
+		icnss_pr_dbg("FW rejuvenate is disabled from quirks\n");
+		return 0;
+	}
+
+	icnss_pr_dbg("Sending dynamic feature mask request, val 0x%llx, state: 0x%lx\n",
+		     dynamic_feature_mask, priv->state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.mask_valid = 1;
+	req.mask = dynamic_feature_mask;
+
+	req_desc.max_msg_len =
+		WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01;
+	req_desc.ei_array = wlfw_dynamic_feature_mask_req_msg_v01_ei;
+
+	resp_desc.max_msg_len =
+		WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01;
+	resp_desc.ei_array = wlfw_dynamic_feature_mask_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(priv->wlfw_clnt, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send dynamic feature mask req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI dynamic feature mask request rejected, result:%d error %d\n",
+			     resp.resp.result, resp.resp.error);
+		ret = resp.resp.result;
+		goto out;
+	}
+
+	icnss_pr_dbg("prev_mask_valid %u, prev_mask 0x%llx, curr_maks_valid %u, curr_mask 0x%llx\n",
+		     resp.prev_mask_valid, resp.prev_mask,
+		     resp.curr_mask_valid, resp.curr_mask);
+
+	return 0;
+
+out:
+	return ret;
+}
+
 static void icnss_qmi_wlfw_clnt_notify_work(struct work_struct *work)
 {
 	int ret;
@@ -2338,6 +1557,8 @@
 			  unsigned int msg_id, void *msg,
 			  unsigned int msg_len, void *ind_cb_priv)
 {
+	struct icnss_event_pd_service_down_data *event_data;
+
 	if (!penv)
 		return;
 
@@ -2358,6 +1579,17 @@
 			     msg_id);
 		icnss_qmi_pin_connect_result_ind(msg, msg_len);
 		break;
+	case QMI_WLFW_REJUVENATE_IND_V01:
+		icnss_pr_dbg("Received Rejuvenate Indication msg_id 0x%x, state: 0x%lx\n",
+			     msg_id, penv->state);
+		event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+		if (event_data == NULL)
+			return;
+		event_data->crashed = true;
+		event_data->fw_rejuvenate = true;
+		icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
+					0, event_data);
+		break;
 	default:
 		icnss_pr_err("Invalid msg_id 0x%x\n", msg_id);
 		break;
@@ -2430,6 +1662,9 @@
 	if (ret < 0)
 		goto err_setup_msa;
 
+	wlfw_dynamic_feature_mask_send_sync_msg(penv,
+						dynamic_feature_mask);
+
 	icnss_init_vph_monitor(penv);
 
 	return ret;
@@ -2489,19 +1724,21 @@
 
 out:
 	icnss_hw_power_off(priv);
-	penv->ops = NULL;
 	return ret;
 }
 
-static int icnss_call_driver_reinit(struct icnss_priv *priv)
+static int icnss_pd_restart_complete(struct icnss_priv *priv)
 {
-	int ret = 0;
+	int ret;
+
+	clear_bit(ICNSS_PD_RESTART, &priv->state);
+	icnss_pm_relax(priv);
 
 	if (!priv->ops || !priv->ops->reinit)
 		goto out;
 
 	if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
-		goto out;
+		goto call_probe;
 
 	icnss_pr_dbg("Calling driver reinit state: 0x%lx\n", priv->state);
 
@@ -2516,18 +1753,14 @@
 	}
 
 out:
-	clear_bit(ICNSS_PD_RESTART, &priv->state);
-
-	icnss_pm_relax(priv);
-
 	return 0;
 
+call_probe:
+	return icnss_call_driver_probe(priv);
+
 out_power_off:
 	icnss_hw_power_off(priv);
 
-	clear_bit(ICNSS_PD_RESTART, &priv->state);
-
-	icnss_pm_relax(priv);
 	return ret;
 }
 
@@ -2552,7 +1785,7 @@
 	}
 
 	if (test_bit(ICNSS_PD_RESTART, &penv->state))
-		ret = icnss_call_driver_reinit(penv);
+		ret = icnss_pd_restart_complete(penv);
 	else
 		ret = icnss_call_driver_probe(penv);
 
@@ -2681,6 +1914,9 @@
 	else
 		icnss_call_driver_remove(priv);
 
+	if (event_data->fw_rejuvenate)
+		wlfw_rejuvenate_ack_send_sync_msg(priv);
+
 out:
 	ret = icnss_hw_power_off(priv);
 
@@ -2813,7 +2049,8 @@
 
 	icnss_pr_dbg("Modem-Notify: event %lu\n", code);
 
-	if (code == SUBSYS_AFTER_SHUTDOWN) {
+	if (code == SUBSYS_AFTER_SHUTDOWN &&
+		notif->crashed == CRASH_STATUS_ERR_FATAL) {
 		icnss_remove_msa_permissions(priv);
 		icnss_pr_info("Collecting msa0 segment dump\n");
 		icnss_msa0_ramdump(priv);
@@ -2828,7 +2065,7 @@
 
 	icnss_pr_info("Modem went down, state: %lx\n", priv->state);
 
-	event_data = kzalloc(sizeof(*data), GFP_KERNEL);
+	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
 
 	if (event_data == NULL)
 		return notifier_from_errno(-ENOMEM);
@@ -2880,8 +2117,9 @@
 		return 0;
 
 	for (i = 0; i < priv->total_domains; i++)
-		service_notif_unregister_notifier(priv->service_notifier[i],
-						  &priv->service_notifier_nb);
+		service_notif_unregister_notifier(
+				priv->service_notifier[i].handle,
+				&priv->service_notifier_nb);
 
 	kfree(priv->service_notifier);
 
@@ -2902,7 +2140,7 @@
 	case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
 		icnss_pr_info("Service down, data: 0x%p, state: 0x%lx\n", data,
 			      priv->state);
-		event_data = kzalloc(sizeof(*data), GFP_KERNEL);
+		event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
 
 		if (event_data == NULL)
 			return notifier_from_errno(-ENOMEM);
@@ -2934,7 +2172,7 @@
 	int curr_state;
 	int ret;
 	int i;
-	void **handle;
+	struct service_notifier_context *notifier;
 
 	icnss_pr_dbg("Get service notify opcode: %lu, state: 0x%lx\n", opcode,
 		     priv->state);
@@ -2948,9 +2186,10 @@
 		goto out;
 	}
 
-	handle = kcalloc(pd->total_domains, sizeof(void *), GFP_KERNEL);
-
-	if (!handle) {
+	notifier = kcalloc(pd->total_domains,
+				sizeof(struct service_notifier_context),
+				GFP_KERNEL);
+	if (!notifier) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -2962,21 +2201,24 @@
 			     pd->domain_list[i].name,
 			     pd->domain_list[i].instance_id);
 
-		handle[i] =
+		notifier[i].handle =
 			service_notif_register_notifier(pd->domain_list[i].name,
 				pd->domain_list[i].instance_id,
 				&priv->service_notifier_nb, &curr_state);
+		notifier[i].instance_id = pd->domain_list[i].instance_id;
+		strlcpy(notifier[i].name, pd->domain_list[i].name,
+			QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1);
 
-		if (IS_ERR(handle[i])) {
+		if (IS_ERR(notifier[i].handle)) {
 			icnss_pr_err("%d: Unable to register notifier for %s(0x%x)\n",
 				     i, pd->domain_list->name,
 				     pd->domain_list->instance_id);
-			ret = PTR_ERR(handle[i]);
+			ret = PTR_ERR(notifier[i].handle);
 			goto free_handle;
 		}
 	}
 
-	priv->service_notifier = handle;
+	priv->service_notifier = notifier;
 	priv->total_domains = pd->total_domains;
 
 	set_bit(ICNSS_PDR_ENABLED, &priv->state);
@@ -2987,11 +2229,11 @@
 
 free_handle:
 	for (i = 0; i < pd->total_domains; i++) {
-		if (handle[i])
-			service_notif_unregister_notifier(handle[i],
+		if (notifier[i].handle)
+			service_notif_unregister_notifier(notifier[i].handle,
 					&priv->service_notifier_nb);
 	}
-	kfree(handle);
+	kfree(notifier);
 
 out:
 	icnss_pr_err("PD restart not enabled: %d, state: 0x%lx\n", ret,
@@ -3274,21 +2516,19 @@
 }
 EXPORT_SYMBOL(icnss_get_soc_info);
 
-int icnss_set_fw_debug_mode(bool enable_fw_log)
+int icnss_set_fw_log_mode(uint8_t fw_log_mode)
 {
 	int ret;
 
-	icnss_pr_dbg("%s FW debug mode",
-		     enable_fw_log ? "Enalbing" : "Disabling");
+	icnss_pr_dbg("FW log mode: %u\n", fw_log_mode);
 
-	ret = wlfw_ini_send_sync_msg(enable_fw_log);
+	ret = wlfw_ini_send_sync_msg(fw_log_mode);
 	if (ret)
-		icnss_pr_err("Fail to send ini, ret = %d, fw_log: %d\n", ret,
-		       enable_fw_log);
-
+		icnss_pr_err("Fail to send ini, ret = %d, fw_log_mode: %u\n",
+			     ret, fw_log_mode);
 	return ret;
 }
-EXPORT_SYMBOL(icnss_set_fw_debug_mode);
+EXPORT_SYMBOL(icnss_set_fw_log_mode);
 
 int icnss_athdiag_read(struct device *dev, uint32_t offset,
 		       uint32_t mem_type, uint32_t data_len,
@@ -3548,6 +2788,114 @@
 }
 EXPORT_SYMBOL(icnss_socinfo_get_serial_number);
 
+int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len)
+{
+	struct icnss_priv *priv = penv;
+	uint32_t no_of_mac_addr;
+	struct icnss_wlan_mac_addr *addr = NULL;
+	int iter;
+	u8 *temp = NULL;
+
+	if (!priv) {
+		icnss_pr_err("Priv data is NULL\n");
+		return -EINVAL;
+	}
+
+	if (priv->is_wlan_mac_set) {
+		icnss_pr_dbg("WLAN MAC address is already set\n");
+		return 0;
+	}
+
+	if (len == 0 || (len % ETH_ALEN) != 0) {
+		icnss_pr_err("Invalid length %d\n", len);
+		return -EINVAL;
+	}
+
+	no_of_mac_addr = len / ETH_ALEN;
+	if (no_of_mac_addr > MAX_NO_OF_MAC_ADDR) {
+		icnss_pr_err("Exceed maxinum supported MAC address %u %u\n",
+			     MAX_NO_OF_MAC_ADDR, no_of_mac_addr);
+		return -EINVAL;
+	}
+
+	priv->is_wlan_mac_set = true;
+	addr = &priv->wlan_mac_addr;
+	addr->no_of_mac_addr_set = no_of_mac_addr;
+	temp = &addr->mac_addr[0][0];
+
+	for (iter = 0; iter < no_of_mac_addr;
+	     ++iter, temp += ETH_ALEN, in += ETH_ALEN) {
+		ether_addr_copy(temp, in);
+		icnss_pr_dbg("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
+			     temp[0], temp[1], temp[2],
+			     temp[3], temp[4], temp[5]);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_set_wlan_mac_address);
+
+u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+	struct icnss_wlan_mac_addr *addr = NULL;
+
+	if (priv->magic != ICNSS_MAGIC) {
+		icnss_pr_err("Invalid drvdata: dev %p, data %p, magic 0x%x\n",
+			     dev, priv, priv->magic);
+		goto out;
+	}
+
+	if (!priv->is_wlan_mac_set) {
+		icnss_pr_dbg("WLAN MAC address is not set\n");
+		goto out;
+	}
+
+	addr = &priv->wlan_mac_addr;
+	*num = addr->no_of_mac_addr_set;
+	return &addr->mac_addr[0][0];
+out:
+	*num = 0;
+	return NULL;
+}
+EXPORT_SYMBOL(icnss_get_wlan_mac_address);
+
+int icnss_trigger_recovery(struct device *dev)
+{
+	int ret = 0;
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	if (priv->magic != ICNSS_MAGIC) {
+		icnss_pr_err("Invalid drvdata: magic 0x%x\n", priv->magic);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
+		icnss_pr_err("PD recovery already in progress: state: 0x%lx\n",
+			     priv->state);
+		ret = -EPERM;
+		goto out;
+	}
+
+	if (!priv->service_notifier[0].handle) {
+		icnss_pr_err("Invalid handle during recovery\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Initiate PDR, required only for the first instance
+	 */
+	ret = service_notif_pd_restart(priv->service_notifier[0].name,
+		priv->service_notifier[0].instance_id);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(icnss_trigger_recovery);
+
+
 static int icnss_smmu_init(struct icnss_priv *priv)
 {
 	struct dma_iommu_mapping *mapping;
@@ -3611,144 +2959,35 @@
 	priv->smmu_mapping = NULL;
 }
 
-static int icnss_get_vreg_info(struct device *dev,
-			       struct icnss_vreg_info *vreg_info)
-{
-	int ret = 0;
-	char prop_name[MAX_PROP_SIZE];
-	struct regulator *reg;
-	const __be32 *prop;
-	int len = 0;
-	int i;
-
-	reg = devm_regulator_get_optional(dev, vreg_info->name);
-
-	if (PTR_ERR(reg) == -EPROBE_DEFER) {
-		icnss_pr_err("EPROBE_DEFER for regulator: %s\n",
-			     vreg_info->name);
-		ret = PTR_ERR(reg);
-		goto out;
-	}
-
-	if (IS_ERR(reg)) {
-		ret = PTR_ERR(reg);
-
-		if (vreg_info->required) {
-
-			icnss_pr_err("Regulator %s doesn't exist: %d\n",
-				     vreg_info->name, ret);
-			goto out;
-		} else {
-			icnss_pr_dbg("Optional regulator %s doesn't exist: %d\n",
-				     vreg_info->name, ret);
-			goto done;
-		}
-	}
-
-	vreg_info->reg = reg;
-
-	snprintf(prop_name, MAX_PROP_SIZE,
-		 "qcom,%s-config", vreg_info->name);
-
-	prop = of_get_property(dev->of_node, prop_name, &len);
-
-	icnss_pr_dbg("Got regulator config, prop: %s, len: %d\n",
-		     prop_name, len);
-
-	if (!prop || len < (2 * sizeof(__be32))) {
-		icnss_pr_dbg("Property %s %s\n", prop_name,
-			     prop ? "invalid format" : "doesn't exist");
-		goto done;
-	}
-
-	for (i = 0; (i * sizeof(__be32)) < len; i++) {
-		switch (i) {
-		case 0:
-			vreg_info->min_v = be32_to_cpup(&prop[0]);
-			break;
-		case 1:
-			vreg_info->max_v = be32_to_cpup(&prop[1]);
-			break;
-		case 2:
-			vreg_info->load_ua = be32_to_cpup(&prop[2]);
-			break;
-		case 3:
-			vreg_info->settle_delay = be32_to_cpup(&prop[3]);
-			break;
-		default:
-			icnss_pr_dbg("Property %s, ignoring value at %d\n",
-				     prop_name, i);
-			break;
-		}
-	}
-
-done:
-	icnss_pr_dbg("Regulator: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n",
-		     vreg_info->name, vreg_info->min_v, vreg_info->max_v,
-		     vreg_info->load_ua, vreg_info->settle_delay);
-
-	return 0;
-
-out:
-	return ret;
-}
-
-static int icnss_get_clk_info(struct device *dev,
-			      struct icnss_clk_info *clk_info)
-{
-	struct clk *handle;
-	int ret = 0;
-
-	handle = devm_clk_get(dev, clk_info->name);
-
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		if (clk_info->required) {
-			icnss_pr_err("Clock %s isn't available: %d\n",
-				     clk_info->name, ret);
-			goto out;
-		} else {
-			icnss_pr_dbg("Ignoring clock %s: %d\n", clk_info->name,
-				     ret);
-			ret = 0;
-			goto out;
-		}
-	}
-
-	icnss_pr_dbg("Clock: %s, freq: %u\n", clk_info->name, clk_info->freq);
-
-	clk_info->handle = handle;
-out:
-	return ret;
-}
-
-static int icnss_test_mode_show(struct seq_file *s, void *data)
+static int icnss_fw_debug_show(struct seq_file *s, void *data)
 {
 	struct icnss_priv *priv = s->private;
 
-	seq_puts(s, "0 : Test mode disable\n");
-	seq_puts(s, "1 : WLAN Firmware test\n");
-	seq_puts(s, "2 : CCPM test\n");
+	seq_puts(s, "\nUsage: echo <CMD> <VAL> > <DEBUGFS>/icnss/fw_debug\n");
 
-	seq_puts(s, "\n");
+	seq_puts(s, "\nCMD: test_mode\n");
+	seq_puts(s, "  VAL: 0 (Test mode disable)\n");
+	seq_puts(s, "  VAL: 1 (WLAN FW test)\n");
+	seq_puts(s, "  VAL: 2 (CCPM test)\n");
+
+	seq_puts(s, "\nCMD: dynamic_feature_mask\n");
+	seq_puts(s, "  VAL: (64 bit feature mask)\n");
 
 	if (!test_bit(ICNSS_FW_READY, &priv->state)) {
-		seq_puts(s, "Firmware is not ready yet!, wait for FW READY\n");
+		seq_puts(s, "Firmware is not ready yet, can't run test_mode!\n");
 		goto out;
 	}
 
 	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state)) {
-		seq_puts(s, "Machine mode is running, can't run test mode!\n");
+		seq_puts(s, "Machine mode is running, can't run test_mode!\n");
 		goto out;
 	}
 
 	if (test_bit(ICNSS_FW_TEST_MODE, &priv->state)) {
-		seq_puts(s, "Test mode is running!\n");
+		seq_puts(s, "test_mode is running, can't run test_mode!\n");
 		goto out;
 	}
 
-	seq_puts(s, "Test can be run, Have fun!\n");
-
 out:
 	seq_puts(s, "\n");
 	return 0;
@@ -3834,31 +3073,61 @@
 	return ret;
 }
 
-static ssize_t icnss_test_mode_write(struct file *fp, const char __user *buf,
+static ssize_t icnss_fw_debug_write(struct file *fp,
+				    const char __user *user_buf,
 				    size_t count, loff_t *off)
 {
 	struct icnss_priv *priv =
 		((struct seq_file *)fp->private_data)->private;
-	int ret;
-	u32 val;
+	char buf[64];
+	char *sptr, *token;
+	unsigned int len = 0;
+	char *cmd;
+	uint64_t val;
+	const char *delim = " ";
+	int ret = 0;
 
-	ret = kstrtou32_from_user(buf, count, 0, &val);
-	if (ret)
-		return ret;
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EINVAL;
 
-	switch (val) {
-	case 0:
-		ret = icnss_test_mode_fw_test_off(priv);
-		break;
-	case 1:
-		ret = icnss_test_mode_fw_test(priv, ICNSS_WALTEST);
-		break;
-	case 2:
-		ret = icnss_test_mode_fw_test(priv, ICNSS_CCPM);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
+	buf[len] = '\0';
+	sptr = buf;
+
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+	if (!sptr)
+		return -EINVAL;
+	cmd = token;
+
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+	if (kstrtou64(token, 0, &val))
+		return -EINVAL;
+
+	if (strcmp(cmd, "test_mode") == 0) {
+		switch (val) {
+		case 0:
+			ret = icnss_test_mode_fw_test_off(priv);
+			break;
+		case 1:
+			ret = icnss_test_mode_fw_test(priv, ICNSS_WALTEST);
+			break;
+		case 2:
+			ret = icnss_test_mode_fw_test(priv, ICNSS_CCPM);
+			break;
+		case 3:
+			ret = icnss_trigger_recovery(&priv->pdev->dev);
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else if (strcmp(cmd, "dynamic_feature_mask") == 0) {
+		ret = wlfw_dynamic_feature_mask_send_sync_msg(priv, val);
+	} else {
+		return -EINVAL;
 	}
 
 	if (ret)
@@ -3870,16 +3139,16 @@
 	return count;
 }
 
-static int icnss_test_mode_open(struct inode *inode, struct file *file)
+static int icnss_fw_debug_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, icnss_test_mode_show, inode->i_private);
+	return single_open(file, icnss_fw_debug_show, inode->i_private);
 }
 
-static const struct file_operations icnss_test_mode_fops = {
+static const struct file_operations icnss_fw_debug_fops = {
 	.read		= seq_read,
-	.write		= icnss_test_mode_write,
+	.write		= icnss_fw_debug_write,
 	.release	= single_release,
-	.open		= icnss_test_mode_open,
+	.open		= icnss_fw_debug_open,
 	.owner		= THIS_MODULE,
 	.llseek		= seq_lseek,
 };
@@ -4049,6 +3318,9 @@
 	ICNSS_STATS_DUMP(s, priv, vbatt_req);
 	ICNSS_STATS_DUMP(s, priv, vbatt_resp);
 	ICNSS_STATS_DUMP(s, priv, vbatt_req_err);
+	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_req);
+	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_resp);
+	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_err);
 
 	seq_puts(s, "\n<------------------ PM stats ------------------->\n");
 	ICNSS_STATS_DUMP(s, priv, pm_suspend);
@@ -4304,8 +3576,8 @@
 
 	priv->root_dentry = root_dentry;
 
-	debugfs_create_file("test_mode", 0644, root_dentry, priv,
-			    &icnss_test_mode_fops);
+	debugfs_create_file("fw_debug", 0644, root_dentry, priv,
+			    &icnss_fw_debug_fops);
 
 	debugfs_create_file("stats", 0644, root_dentry, priv,
 			    &icnss_stats_fops);
@@ -4394,21 +3666,6 @@
 	if (ret == -EPROBE_DEFER)
 		goto out;
 
-	memcpy(priv->vreg_info, icnss_vreg_info, sizeof(icnss_vreg_info));
-	for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
-		ret = icnss_get_vreg_info(dev, &priv->vreg_info[i]);
-
-		if (ret)
-			goto out;
-	}
-
-	memcpy(priv->clk_info, icnss_clk_info, sizeof(icnss_clk_info));
-	for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
-		ret = icnss_get_clk_info(dev, &priv->clk_info[i]);
-		if (ret)
-			goto out;
-	}
-
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
 	if (!res) {
 		icnss_pr_err("Memory base not found in DT\n");
@@ -4428,26 +3685,6 @@
 	icnss_pr_dbg("MEM_BASE pa: %pa, va: 0x%p\n", &priv->mem_base_pa,
 		     priv->mem_base_va);
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-					   "mpm_config");
-	if (!res) {
-		icnss_pr_err("MPM Config not found\n");
-		ret = -EINVAL;
-		goto out;
-	}
-	priv->mpm_config_pa = res->start;
-	priv->mpm_config_va = devm_ioremap(dev, priv->mpm_config_pa,
-					   resource_size(res));
-	if (!priv->mpm_config_va) {
-		icnss_pr_err("MPM Config ioremap failed, phy addr: %pa\n",
-			     &priv->mpm_config_pa);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	icnss_pr_dbg("MPM_CONFIG pa: %pa, va: 0x%p\n", &priv->mpm_config_pa,
-		     priv->mpm_config_va);
-
 	for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
 		res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i);
 		if (!res) {
diff --git a/drivers/soc/qcom/llcc-msmskunk.c b/drivers/soc/qcom/llcc-sdm845.c
similarity index 82%
rename from drivers/soc/qcom/llcc-msmskunk.c
rename to drivers/soc/qcom/llcc-sdm845.c
index 41f55eb..0a28ee0 100644
--- a/drivers/soc/qcom/llcc-msmskunk.c
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -56,7 +56,7 @@
 		.activate_on_init = a,		\
 	}
 
-static struct llcc_slice_config msmskunk_data[] =  {
+static struct llcc_slice_config sdm845_data[] =  {
 	SCT_ENTRY("cpuss",       1, 1, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 1),
 	SCT_ENTRY("vidsc0",      2, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
 	SCT_ENTRY("vidsc1",      3, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
@@ -81,38 +81,38 @@
 	SCT_ENTRY("audiohw",     22, 22, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
 };
 
-static int msmskunk_qcom_llcc_probe(struct platform_device *pdev)
+static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
 {
-	return qcom_llcc_probe(pdev, msmskunk_data,
-				 ARRAY_SIZE(msmskunk_data));
+	return qcom_llcc_probe(pdev, sdm845_data,
+				 ARRAY_SIZE(sdm845_data));
 }
 
-static const struct of_device_id msmskunk_qcom_llcc_of_match[] = {
-	{ .compatible = "qcom,msmskunk-llcc", },
+static const struct of_device_id sdm845_qcom_llcc_of_match[] = {
+	{ .compatible = "qcom,sdm845-llcc", },
 	{ },
 };
 
-static struct platform_driver msmskunk_qcom_llcc_driver = {
+static struct platform_driver sdm845_qcom_llcc_driver = {
 	.driver = {
-		.name = "msmskunk-llcc",
+		.name = "sdm845-llcc",
 		.owner = THIS_MODULE,
-		.of_match_table = msmskunk_qcom_llcc_of_match,
+		.of_match_table = sdm845_qcom_llcc_of_match,
 	},
-	.probe = msmskunk_qcom_llcc_probe,
+	.probe = sdm845_qcom_llcc_probe,
 	.remove = qcom_llcc_remove,
 };
 
-static int __init msmskunk_init_qcom_llcc_init(void)
+static int __init sdm845_init_qcom_llcc_init(void)
 {
-	return platform_driver_register(&msmskunk_qcom_llcc_driver);
+	return platform_driver_register(&sdm845_qcom_llcc_driver);
 }
-module_init(msmskunk_init_qcom_llcc_init);
+module_init(sdm845_init_qcom_llcc_init);
 
-static void __exit msmskunk_exit_qcom_llcc_exit(void)
+static void __exit sdm845_exit_qcom_llcc_exit(void)
 {
-	platform_driver_unregister(&msmskunk_qcom_llcc_driver);
+	platform_driver_unregister(&sdm845_qcom_llcc_driver);
 }
-module_exit(msmskunk_exit_qcom_llcc_exit);
+module_exit(sdm845_exit_qcom_llcc_exit);
 
-MODULE_DESCRIPTION("QTI msmskunk LLCC driver");
+MODULE_DESCRIPTION("QTI sdm845 LLCC driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
index 6ba20a4..5606a73 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
@@ -27,7 +27,6 @@
 #include <linux/of_device.h>
 #include <linux/msm_audio_ion.h>
 #include <linux/export.h>
-#include <linux/qcom_iommu.h>
 #include <asm/dma-iommu.h>
 
 #define MSM_AUDIO_ION_PROBED (1 << 0)
@@ -40,16 +39,6 @@
 
 #define MSM_AUDIO_SMMU_SID_OFFSET 32
 
-struct addr_range {
-	dma_addr_t start;
-	size_t size;
-};
-
-struct context_bank_info {
-	const char *name;
-	struct addr_range addr_range;
-};
-
 struct msm_audio_ion_private {
 	bool smmu_enabled;
 	bool audioheap_enabled;
@@ -676,74 +665,12 @@
 	return rc;
 }
 
-static int msm_audio_smmu_init_legacy(struct device *dev)
-{
-	struct dma_iommu_mapping *mapping;
-	struct device_node *ctx_node = NULL;
-	struct context_bank_info *cb;
-	int ret;
-	u32 read_val[2];
-
-	cb = devm_kzalloc(dev, sizeof(struct context_bank_info), GFP_KERNEL);
-	if (!cb)
-		return -ENOMEM;
-
-	ctx_node = of_parse_phandle(dev->of_node, "iommus", 0);
-	if (!ctx_node) {
-		dev_err(dev, "%s Could not find any iommus for audio\n",
-			__func__);
-		return -EINVAL;
-	}
-	ret = of_property_read_string(ctx_node, "label", &(cb->name));
-	if (ret) {
-		dev_err(dev, "%s Could not find label\n", __func__);
-		return -EINVAL;
-	}
-	pr_debug("label found : %s\n", cb->name);
-	ret = of_property_read_u32_array(ctx_node,
-				"qcom,virtual-addr-pool",
-				read_val, 2);
-	if (ret) {
-		dev_err(dev, "%s Could not read addr pool for group : (%d)\n",
-			__func__, ret);
-		return -EINVAL;
-	}
-	msm_audio_ion_data.cb_dev = msm_iommu_get_ctx(cb->name);
-	cb->addr_range.start = (dma_addr_t) read_val[0];
-	cb->addr_range.size = (size_t) read_val[1];
-	dev_dbg(dev, "%s Legacy iommu usage\n", __func__);
-	mapping = arm_iommu_create_mapping(
-				msm_iommu_get_bus(msm_audio_ion_data.cb_dev),
-					   cb->addr_range.start,
-					   cb->addr_range.size);
-	if (IS_ERR(mapping))
-		return PTR_ERR(mapping);
-
-	ret = arm_iommu_attach_device(msm_audio_ion_data.cb_dev, mapping);
-	if (ret) {
-		dev_err(dev, "%s: Attach failed, err = %d\n",
-			__func__, ret);
-		goto fail_attach;
-	}
-
-	msm_audio_ion_data.mapping = mapping;
-	INIT_LIST_HEAD(&msm_audio_ion_data.alloc_list);
-	mutex_init(&(msm_audio_ion_data.list_mutex));
-
-	return 0;
-
-fail_attach:
-	arm_iommu_release_mapping(mapping);
-	return ret;
-}
-
 static int msm_audio_smmu_init(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
 	int ret;
 
-	mapping = arm_iommu_create_mapping(
-					msm_iommu_get_bus(dev),
+	mapping = arm_iommu_create_mapping(&platform_bus_type,
 					   MSM_AUDIO_ION_VA_START,
 					   MSM_AUDIO_ION_VA_LEN);
 	if (IS_ERR(mapping))
@@ -853,9 +780,7 @@
 		msm_audio_ion_data.smmu_sid_bits =
 			smmu_sid << MSM_AUDIO_SMMU_SID_OFFSET;
 
-		if (msm_audio_ion_data.smmu_version == 0x1) {
-			rc = msm_audio_smmu_init_legacy(dev);
-		} else if (msm_audio_ion_data.smmu_version == 0x2) {
+		if (msm_audio_ion_data.smmu_version == 0x2) {
 			rc = msm_audio_smmu_init(dev);
 		} else {
 			dev_err(dev, "%s: smmu version invalid %d\n",
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 7957e83..967b227 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -538,11 +538,11 @@
 	/* falcon ID */
 	[317] = {MSM_CPU_FALCON, "MSMFALCON"},
 
-	/* Skunk ID */
-	[321] = {MSM_CPU_SKUNK, "MSMSKUNK"},
+	/* sdm845 ID */
+	[321] = {MSM_CPU_SDM845, "SDM845"},
 
 	/* Bat ID */
-	[328] = {MSM_CPU_BAT, "SDMBAT"},
+	[328] = {MSM_CPU_SDM830, "SDM830"},
 
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
@@ -1221,13 +1221,13 @@
 		dummy_socinfo.id = 319;
 		strlcpy(dummy_socinfo.build_id, "apqcobalt - ",
 			sizeof(dummy_socinfo.build_id));
-	} else if (early_machine_is_msmskunk()) {
+	} else if (early_machine_is_sdm845()) {
 		dummy_socinfo.id = 321;
-		strlcpy(dummy_socinfo.build_id, "msmskunk - ",
+		strlcpy(dummy_socinfo.build_id, "sdm845 - ",
 			sizeof(dummy_socinfo.build_id));
-	} else if (early_machine_is_sdmbat()) {
+	} else if (early_machine_is_sdm830()) {
 		dummy_socinfo.id = 328;
-		strlcpy(dummy_socinfo.build_id, "sdmbat - ",
+		strlcpy(dummy_socinfo.build_id, "sdm830 - ",
 			sizeof(dummy_socinfo.build_id));
 	}
 
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
new file mode 100644
index 0000000..f381f16
--- /dev/null
+++ b/drivers/soc/qcom/spcom.c
@@ -0,0 +1,2560 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Secure-Processor-Communication (SPCOM).
+ *
+ * This driver provides communication to Secure Processor (SP)
+ * over G-Link transport layer.
+ *
+ * It provides interface to both User Space spcomlib and kernel drivers.
+ *
+ * User Space App shall use spcomlib for communication with SP.
+ * User Space App can be either Client or Server.
+ * spcomlib shall use write() file operation to send data,
+ * and read() file operation to read data.
+ *
+ * This driver uses glink as the transport layer.
+ * This driver exposes "/dev/<sp-channel-name>" file node for each glink
+ * logical channel.
+ * This driver exposes "/dev/spcom" file node for some debug/control command.
+ * The predefined channel "/dev/sp_kernel" is used for loading SP Application
+ * from HLOS.
+ * This driver exposes "/dev/sp_ssr" file node to allow user space poll for SSR.
+ * After the remote SP App is loaded, this driver exposes a new file node
+ * "/dev/<ch-name>" for the matching HLOS App to use.
+ * The access to predefined file node is restricted by using unix group
+ * and SELinux.
+ *
+ * No message routing is used; instead, the G-Link "multiplexing" feature
+ * provides a dedicated logical channel per HLOS and SP Application-Pair.
+ *
+ * Each HLOS/SP Application can be either Client or Server or both,
+ * Messaging is always point-to-point between 2 HLOS<=>SP applications.
+ *
+ * User Space Request & Response are synchronous.
+ * read() & write() operations are blocking until completed or terminated.
+ *
+ * This driver registers to G-Link callbacks to be aware on channel state.
+ * A notify callback is called upon channel connect/disconnect.
+ *
+ */
+
+/* Uncomment the line below to test spcom against modem rather than SP */
+/* #define SPCOM_TEST_HLOS_WITH_MODEM 1 */
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+
+#define pr_fmt(fmt)	"spcom [%s]: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/poll.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/msm_ion.h>
+
+#include <soc/qcom/glink.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/spcom.h>
+
+#include <uapi/linux/spcom.h>
+
+#include "glink_private.h"
+
+/* "SPCM" string */
+#define SPCOM_MAGIC_ID	((uint32_t)(0x5350434D))
+
+/* Request/Response */
+#define SPCOM_FLAG_REQ		BIT(0)
+#define SPCOM_FLAG_RESP	BIT(1)
+#define SPCOM_FLAG_ENCODED	BIT(2)
+#define SPCOM_FLAG_NON_ENCODED	BIT(3)
+
+/* SPCOM driver name */
+#define DEVICE_NAME	"spcom"
+
+#define SPCOM_MAX_CHANNELS	0x20
+
+/* maximum ION buffers should be >= SPCOM_MAX_CHANNELS  */
+#define SPCOM_MAX_ION_BUF_PER_CH (SPCOM_MAX_CHANNELS + 4)
+
+/* maximum ION buffer per send request/response command */
+#define SPCOM_MAX_ION_BUF_PER_CMD SPCOM_MAX_ION_BUF
+
+/* Maximum command size */
+#define SPCOM_MAX_COMMAND_SIZE	(PAGE_SIZE)
+
+/* Maximum input size */
+#define SPCOM_MAX_READ_SIZE	(PAGE_SIZE)
+
+/* Current Process ID */
+#define current_pid() ((u32)(current->pid))
+
+/* Maximum channel name size (including null) - matching GLINK_NAME_SIZE */
+#define MAX_CH_NAME_LEN	32
+
+/* Connection negotiation timeout, if remote channel is open */
+#define OPEN_CHANNEL_TIMEOUT_MSEC	100
+
+/*
+ * After both sides get CONNECTED,
+ * there is a race between one side queueing rx buffer and the other side
+ * trying to call glink_tx(), this race is only on the 1st tx.
+ * Do tx retry with some delay to allow the other side to queue rx buffer.
+ */
+#define TX_RETRY_DELAY_MSEC	100
+
+/* number of tx retries */
+#define TX_MAX_RETRY	3
+
+/* SPCOM_MAX_REQUEST_SIZE-or-SPCOM_MAX_RESPONSE_SIZE + header */
+#define SPCOM_RX_BUF_SIZE	300
+
+/* The SPSS RAM size is 256 KB so SP App must fit into it */
+#define SPCOM_MAX_APP_SIZE	SZ_256K
+
+/*
+ * ACK timeout from remote side for TX data.
+ * Normally, it takes a few msec for SPSS to respond with ACK for TX data.
+ * However, due to SPSS HW issue, the SPSS might disable interrupts
+ * for a very long time.
+ */
+#define TX_DONE_TIMEOUT_MSEC	5000
+
+/*
+ * Initial transaction id, use non-zero nonce for debug.
+ * Incremented by client on request, and copied back by server on response.
+ */
+#define INITIAL_TXN_ID	0x12345678
+
+/**
+ * struct spcom_msg_hdr - Request/Response message header between HLOS and SP.
+ *
+ * This header precedes any request-specific parameters.
+ * The transaction id is used to match request with response.
+ * Note: glink API provides the rx/tx data size, so user payload size is
+ * calculated by reducing the header size.
+ */
+struct spcom_msg_hdr {
+	uint32_t reserved;	/* for future use */
+	uint32_t txn_id;	/* transaction id */
+	char buf[0];		/* Variable buffer size, must be last field */
+} __packed;
+
+/**
+ * struct spcom_client - Client handle
+ */
+struct spcom_client {
+	struct spcom_channel *ch;
+};
+
+/**
+ * struct spcom_server - Server handle
+ */
+struct spcom_server {
+	struct spcom_channel *ch;
+};
+
+/**
+ * struct spcom_channel - channel context
+ */
+struct spcom_channel {
+	char name[MAX_CH_NAME_LEN];
+	struct mutex lock;
+	void *glink_handle;
+	uint32_t txn_id;	/* incrementing nonce per channel */
+	bool is_server;		/* for txn_id and response_timeout_msec */
+	uint32_t response_timeout_msec; /* for client only */
+
+	/* char dev */
+	struct cdev *cdev;
+	struct device *dev;
+	struct device_attribute attr;
+
+	/*
+	 * glink state: CONNECTED / LOCAL_DISCONNECTED, REMOTE_DISCONNECTED
+	 */
+	unsigned int glink_state;
+
+	/* Events notification */
+	struct completion connect;
+	struct completion disconnect;
+	struct completion tx_done;
+	struct completion rx_done;
+
+	/*
+	 * Only one client or server per channel.
+	 * Only one rx/tx transaction at a time (request + response).
+	 */
+	int ref_count;
+	u32 pid;
+
+	/* link UP/DOWN callback */
+	void (*notify_link_state_cb)(bool up);
+
+	/* abort flags */
+	bool rx_abort;
+	bool tx_abort;
+
+	/* rx data info */
+	int rx_buf_size;	/* allocated rx buffer size */
+	bool rx_buf_ready;
+	int actual_rx_size;	/* actual data size received */
+	const void *glink_rx_buf;
+
+	/* ION lock/unlock support */
+	int ion_fd_table[SPCOM_MAX_ION_BUF_PER_CH];
+	struct ion_handle *ion_handle_table[SPCOM_MAX_ION_BUF_PER_CH];
+};
+
+/**
+ * struct spcom_device - device state structure.
+ */
+struct spcom_device {
+	char predefined_ch_name[SPCOM_MAX_CHANNELS][MAX_CH_NAME_LEN];
+
+	/* char device info */
+	struct cdev cdev;
+	dev_t device_no;
+	struct class *driver_class;
+	struct device *class_dev;
+
+	/* G-Link channels */
+	struct spcom_channel channels[SPCOM_MAX_CHANNELS];
+	int channel_count;
+
+	/* private */
+	struct mutex lock;
+
+	/* Link state */
+	struct completion link_state_changed;
+	enum glink_link_state link_state;
+
+	/* ION support */
+	struct ion_client *ion_client;
+};
+
+#ifdef SPCOM_TEST_HLOS_WITH_MODEM
+	static const char *spcom_edge = "mpss";
+	static const char *spcom_transport = "smem";
+#else
+	static const char *spcom_edge = "spss";
+	static const char *spcom_transport = "mailbox";
+#endif
+
+/* Device Driver State */
+static struct spcom_device *spcom_dev;
+
+/* static functions declaration */
+static int spcom_create_channel_chardev(const char *name);
+static int spcom_open(struct spcom_channel *ch, unsigned int timeout_msec);
+static int spcom_close(struct spcom_channel *ch);
+static void spcom_notify_rx_abort(void *handle, const void *priv,
+				  const void *pkt_priv);
+static struct spcom_channel *spcom_find_channel_by_name(const char *name);
+static int spcom_unlock_ion_buf(struct spcom_channel *ch, int fd);
+
+/**
+ * spcom_is_ready() - driver is initialized and ready.
+ */
+static inline bool spcom_is_ready(void)
+{
+	return spcom_dev != NULL;
+}
+
+/**
+ * spcom_is_channel_open() - channel is open on this side.
+ *
+ * Channel might not be fully connected if remote side didn't open the channel
+ * yet.
+ */
+static inline bool spcom_is_channel_open(struct spcom_channel *ch)
+{
+	return ch->glink_handle != NULL;
+}
+
+/**
+ * spcom_is_channel_connected() - channel is fully connected by both sides.
+ */
+static inline bool spcom_is_channel_connected(struct spcom_channel *ch)
+{
+	return (ch->glink_state == GLINK_CONNECTED);
+}
+
+/**
+ * spcom_create_predefined_channels_chardev() - expose predefined channels to
+ * user space.
+ *
+ * Predefined channels list is provided by device tree.
+ * Typically, it is for known servers on remote side that are not loaded by the
+ * HLOS.
+ */
+static int spcom_create_predefined_channels_chardev(void)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < SPCOM_MAX_CHANNELS; i++) {
+		const char *name = spcom_dev->predefined_ch_name[i];
+
+		if (name[0] == 0)
+			break;
+		ret = spcom_create_channel_chardev(name);
+		if (ret) {
+			pr_err("failed to create chardev [%s], ret [%d].\n",
+			       name, ret);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+/*======================================================================*/
+/*		GLINK CALLBACKS						*/
+/*======================================================================*/
+
+/**
+ * spcom_link_state_notif_cb() - glink callback for link state change.
+ *
+ * glink notifies link layer is up, before any channel opened on remote side.
+ * Calling glink_open() locally allowed only after link is up.
+ * Notify link down, normally upon Remote Subsystem Reset (SSR).
+ * Note: upon SSR, glink will also notify each channel about remote disconnect,
+ * and abort any pending rx buffer.
+ */
+static void spcom_link_state_notif_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv)
+{
+	struct spcom_channel *ch = NULL;
+	const char *ch_name = "sp_kernel";
+
+	spcom_dev->link_state = cb_info->link_state;
+
+	pr_debug("spcom_link_state_notif_cb called. transport = %s edge = %s\n",
+		 cb_info->transport, cb_info->edge);
+
+	switch (cb_info->link_state) {
+	case GLINK_LINK_STATE_UP:
+		pr_info("GLINK_LINK_STATE_UP.\n");
+		spcom_create_predefined_channels_chardev();
+		break;
+	case GLINK_LINK_STATE_DOWN:
+		pr_err("GLINK_LINK_STATE_DOWN.\n");
+
+		/*
+		 * Free all the SKP ION buffers that were locked
+		 * for SPSS app swapping, when remote subsystem reset.
+		 */
+		pr_debug("Free all SKP ION buffers on SSR.\n");
+		ch = spcom_find_channel_by_name(ch_name);
+		if (!ch)
+			pr_err("failed to find channel [%s].\n", ch_name);
+		else
+			spcom_unlock_ion_buf(ch, SPCOM_ION_FD_UNLOCK_ALL);
+		break;
+	default:
+		pr_err("unknown link_state [%d].\n", cb_info->link_state);
+		break;
+	}
+	complete_all(&spcom_dev->link_state_changed);
+}
+
+/**
+ * spcom_notify_rx() - glink callback on receiving data.
+ *
+ * Glink notify rx data is ready. The glink internal rx buffer was
+ * allocated upon glink_queue_rx_intent().
+ */
+static void spcom_notify_rx(void *handle,
+			    const void *priv, const void *pkt_priv,
+			    const void *buf, size_t size)
+{
+	struct spcom_channel *ch = (struct spcom_channel *) priv;
+
+	if (!ch) {
+		pr_err("invalid ch parameter.\n");
+		return;
+	}
+
+	pr_debug("ch [%s] rx size [%d].\n", ch->name, (int) size);
+
+	ch->actual_rx_size = (int) size;
+	ch->glink_rx_buf = (void *) buf;
+
+	complete_all(&ch->rx_done);
+}
+
+/**
+ * spcom_notify_tx_done() - glink callback on ACK sent data.
+ *
+ * after calling glink_tx() the remote side ACK receiving the data.
+ */
+static void spcom_notify_tx_done(void *handle,
+				 const void *priv, const void *pkt_priv,
+				 const void *buf)
+{
+	struct spcom_channel *ch = (struct spcom_channel *) priv;
+	int *tx_buf = (int *) buf;
+
+	if (!ch) {
+		pr_err("invalid ch parameter.\n");
+		return;
+	}
+
+	pr_debug("ch [%s] buf[0] = [0x%x].\n", ch->name, tx_buf[0]);
+
+	complete_all(&ch->tx_done);
+}
+
+/**
+ * spcom_notify_state() - glink callback on channel connect/disconnect.
+ *
+ * Channel is fully CONNECTED after both sides opened the channel.
+ * Channel is LOCAL_DISCONNECTED after both sides closed the channel.
+ * If the remote side closed the channel, it is expected that the local side
+ * will also close the channel.
+ * Upon connection, rx buffer is allocated to receive data,
+ * the maximum transfer size is agreed by both sides.
+ */
+static void spcom_notify_state(void *handle, const void *priv,
+				unsigned int event)
+{
+	int ret;
+	struct spcom_channel *ch = (struct spcom_channel *) priv;
+
+	switch (event) {
+	case GLINK_CONNECTED:
+		pr_debug("GLINK_CONNECTED, ch name [%s].\n", ch->name);
+		complete_all(&ch->connect);
+
+		/*
+		 * if spcom_notify_state() is called within glink_open()
+		 * then ch->glink_handle is not updated yet.
+		 */
+		if (!ch->glink_handle) {
+			pr_debug("update glink_handle, ch [%s].\n", ch->name);
+			ch->glink_handle = handle;
+		}
+
+		/* prepare default rx buffer after connected */
+		ret = glink_queue_rx_intent(ch->glink_handle,
+					    ch, ch->rx_buf_size);
+		if (ret) {
+			pr_err("glink_queue_rx_intent() err [%d]\n", ret);
+		} else {
+			pr_debug("rx buf is ready, size [%d].\n",
+				 ch->rx_buf_size);
+			ch->rx_buf_ready = true;
+		}
+		break;
+	case GLINK_LOCAL_DISCONNECTED:
+		/*
+		 * Channel state is GLINK_LOCAL_DISCONNECTED
+		 * only after *both* sides closed the channel.
+		 */
+		pr_debug("GLINK_LOCAL_DISCONNECTED, ch [%s].\n", ch->name);
+		complete_all(&ch->disconnect);
+		break;
+	case GLINK_REMOTE_DISCONNECTED:
+		/*
+		 * Remote side initiates glink_close().
+		 * This is not expected on normal operation.
+		 * This may happen upon remote SSR.
+		 */
+		pr_err("GLINK_REMOTE_DISCONNECTED, ch [%s].\n", ch->name);
+
+		/*
+		 * Abort any blocking read() operation.
+		 * The glink notification might be after REMOTE_DISCONNECT.
+		 */
+		spcom_notify_rx_abort(NULL, ch, NULL);
+
+		/*
+		 * after glink_close(),
+		 * expecting notify GLINK_LOCAL_DISCONNECTED
+		 */
+		spcom_close(ch);
+		break;
+	default:
+		pr_err("unknown event id = %d, ch name [%s].\n",
+		       (int) event, ch->name);
+		return;
+	}
+
+	ch->glink_state = event;
+}
+
+/**
+ * spcom_notify_rx_intent_req() - glink callback on intent request.
+ *
+ * glink allows the remote side to request for a local rx buffer if such
+ * buffer is not ready.
+ * However, for spcom simplicity on SP, and to reduce latency, we decided
+ * that glink_tx() on both side is not using INTENT_REQ flag, so this
+ * callback should not be called.
+ * Anyhow, return "false" to reject the request.
+ */
+static bool spcom_notify_rx_intent_req(void *handle, const void *priv,
+				       size_t req_size)
+{
+	struct spcom_channel *ch = (struct spcom_channel *) priv;
+
+	pr_err("Unexpected intent request for ch [%s].\n", ch->name);
+
+	return false;
+}
+
+/**
+ * spcom_notify_rx_abort() - glink callback on aborting rx pending buffer.
+ *
+ * Rx abort may happen if channel is closed by remote side, while rx buffer is
+ * pending in the queue.
+ */
+static void spcom_notify_rx_abort(void *handle, const void *priv,
+				  const void *pkt_priv)
+{
+	struct spcom_channel *ch = (struct spcom_channel *) priv;
+
+	pr_debug("ch [%s] pending rx aborted.\n", ch->name);
+
+	if (spcom_is_channel_connected(ch) && (!ch->rx_abort)) {
+		ch->rx_abort = true;
+		complete_all(&ch->rx_done);
+	}
+}
+
+/**
+ * spcom_notify_tx_abort() - glink callback on aborting tx data.
+ *
+ * This is probably not relevant, since glink_txv() is not used.
+ * Tx abort may happen if channel is closed by remote side,
+ * while multiple tx buffers are in a middle of tx operation.
+ */
+static void spcom_notify_tx_abort(void *handle, const void *priv,
+				  const void *pkt_priv)
+{
+	struct spcom_channel *ch = (struct spcom_channel *) priv;
+
+	pr_debug("ch [%s] pending tx aborted.\n", ch->name);
+
+	if (spcom_is_channel_connected(ch) && (!ch->tx_abort)) {
+		ch->tx_abort = true;
+		complete_all(&ch->tx_done);
+	}
+}
+
+/*======================================================================*/
+/*		UTILITIES						*/
+/*======================================================================*/
+
+/**
+ * spcom_init_open_config() - Fill glink_open() configuration parameters.
+ *
+ * @cfg: glink configuration struct pointer
+ * @name: channel name
+ * @priv: private caller data, provided back by callbacks, channel state.
+ *
+ * specify callbacks and other parameters for glink open channel.
+ */
+static void spcom_init_open_config(struct glink_open_config *cfg,
+				   const char *name, void *priv)
+{
+	cfg->notify_rx		= spcom_notify_rx;
+	cfg->notify_rxv		= NULL;
+	cfg->notify_tx_done	= spcom_notify_tx_done;
+	cfg->notify_state	= spcom_notify_state;
+	cfg->notify_rx_intent_req = spcom_notify_rx_intent_req;
+	cfg->notify_rx_sigs	= NULL;
+	cfg->notify_rx_abort	= spcom_notify_rx_abort;
+	cfg->notify_tx_abort	= spcom_notify_tx_abort;
+
+	cfg->options	= 0; /* not using GLINK_OPT_INITIAL_XPORT */
+	cfg->priv	= priv; /* provided back by callbacks */
+
+	cfg->name	= name;
+
+	cfg->transport	= spcom_transport;
+	cfg->edge	= spcom_edge;
+}
+
+/**
+ * spcom_init_channel() - initialize channel state.
+ *
+ * @ch: channel state struct pointer
+ * @name: channel name
+ */
+static int spcom_init_channel(struct spcom_channel *ch, const char *name)
+{
+	if (!ch || !name || !name[0]) {
+		pr_err("invalid parameters.\n");
+		return -EINVAL;
+	}
+
+	strlcpy(ch->name, name, sizeof(ch->name));
+
+	init_completion(&ch->connect);
+	init_completion(&ch->disconnect);
+	init_completion(&ch->tx_done);
+	init_completion(&ch->rx_done);
+
+	mutex_init(&ch->lock);
+	ch->glink_state = GLINK_LOCAL_DISCONNECTED;
+	ch->actual_rx_size = 0;
+	ch->rx_buf_size = SPCOM_RX_BUF_SIZE;
+
+	return 0;
+}
+
+/**
+ * spcom_find_channel_by_name() - find a channel by name.
+ *
+ * @name: channel name
+ *
+ * Return: a channel state struct.
+ */
+static struct spcom_channel *spcom_find_channel_by_name(const char *name)
+{
+	int i;
+
+	for (i = 0 ; i < ARRAY_SIZE(spcom_dev->channels); i++) {
+		struct spcom_channel *ch = &spcom_dev->channels[i];
+
+		if (strcmp(ch->name, name) == 0)
+			return ch;
+	}
+
+	return NULL;
+}
+
+/**
+ * spcom_open() - Open glink channel and wait for connection ACK.
+ *
+ * @ch: channel state struct pointer
+ *
+ * Normally, a local client opens a channel after remote server has opened
+ * the channel.
+ * A local server may open the channel before remote client is running.
+ */
+static int spcom_open(struct spcom_channel *ch, unsigned int timeout_msec)
+{
+	struct glink_open_config cfg = {0};
+	unsigned long jiffies = msecs_to_jiffies(timeout_msec);
+	long timeleft;
+	const char *name;
+	void *handle;
+
+	mutex_lock(&ch->lock);
+	name = ch->name;
+
+	/* only one client/server may use the channel */
+	if (ch->ref_count) {
+		pr_err("channel [%s] already in use.\n", name);
+		goto exit_err;
+	}
+	ch->ref_count++;
+	ch->pid = current_pid();
+	ch->txn_id = INITIAL_TXN_ID;
+
+	pr_debug("ch [%s] opened by PID [%d], count [%d]\n",
+		 name, ch->pid, ch->ref_count);
+
+	pr_debug("Open channel [%s] timeout_msec [%d].\n", name, timeout_msec);
+
+	if (spcom_is_channel_open(ch)) {
+		pr_debug("channel [%s] already open.\n", name);
+		mutex_unlock(&ch->lock);
+		return 0;
+	}
+
+	spcom_init_open_config(&cfg, name, ch);
+
+	/* init completion before calling glink_open() */
+	reinit_completion(&ch->connect);
+
+	handle = glink_open(&cfg);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("glink_open failed.\n");
+		goto exit_err;
+	} else {
+		pr_debug("glink_open [%s] ok.\n", name);
+	}
+	ch->glink_handle = handle;
+
+	pr_debug("Wait for connection on channel [%s] timeout_msec [%d].\n",
+		 name, timeout_msec);
+
+	/* Wait for remote side to connect */
+	if (timeout_msec) {
+		timeleft = wait_for_completion_timeout(&(ch->connect), jiffies);
+		if (timeleft == 0)
+			pr_debug("Channel [%s] is NOT connected.\n", name);
+		else
+			pr_debug("Channel [%s] fully connect.\n", name);
+	} else {
+		pr_debug("wait for connection ch [%s] no timeout.\n", name);
+		wait_for_completion(&(ch->connect));
+		pr_debug("Channel [%s] opened, no timeout.\n", name);
+	}
+
+	mutex_unlock(&ch->lock);
+
+	return 0;
+exit_err:
+	mutex_unlock(&ch->lock);
+
+	return -EFAULT;
+}
+
+/**
+ * spcom_close() - Close glink channel.
+ *
+ * @ch: channel state struct pointer
+ *
+ * A calling API functions should wait for disconnecting by both sides.
+ */
+static int spcom_close(struct spcom_channel *ch)
+{
+	int ret = 0;
+
+	mutex_lock(&ch->lock);
+
+	if (!spcom_is_channel_open(ch)) {
+		pr_err("ch already closed.\n");
+		mutex_unlock(&ch->lock);
+		return 0;
+	}
+
+	ret = glink_close(ch->glink_handle);
+	if (ret)
+		pr_err("glink_close() fail, ret [%d].\n", ret);
+	else
+		pr_debug("glink_close() ok.\n");
+
+	ch->glink_handle = NULL;
+	ch->ref_count = 0;
+	ch->rx_abort = false;
+	ch->tx_abort = false;
+	ch->glink_state = GLINK_LOCAL_DISCONNECTED;
+	ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
+	ch->pid = 0;
+
+	pr_debug("Channel closed [%s].\n", ch->name);
+	mutex_unlock(&ch->lock);
+
+	return 0;
+}
+
+/**
+ * spcom_tx() - Send data and wait for ACK or timeout.
+ *
+ * @ch: channel state struct pointer
+ * @buf: buffer pointer
+ * @size: buffer size
+ *
+ * ACK is expected within a very short time (few msec).
+ */
+static int spcom_tx(struct spcom_channel *ch,
+		    void *buf,
+		    uint32_t size,
+		    uint32_t timeout_msec)
+{
+	int ret;
+	void *pkt_priv = NULL;
+	uint32_t tx_flags = 0 ; /* don't use GLINK_TX_REQ_INTENT */
+	unsigned long jiffies = msecs_to_jiffies(timeout_msec);
+	long timeleft;
+	int retry = 0;
+
+	mutex_lock(&ch->lock);
+
+	/* reset completion before calling glink */
+	reinit_completion(&ch->tx_done);
+
+	for (retry = 0; retry < TX_MAX_RETRY ; retry++) {
+		ret = glink_tx(ch->glink_handle, pkt_priv, buf, size, tx_flags);
+		if (ret == -EAGAIN) {
+			pr_err("glink_tx() fail, try again.\n");
+			/*
+			 * Delay to allow remote side to queue rx buffer.
+			 * This may happen after the first channel connection.
+			 */
+			msleep(TX_RETRY_DELAY_MSEC);
+		} else if (ret < 0) {
+			pr_err("glink_tx() error %d.\n", ret);
+			goto exit_err;
+		} else {
+			break; /* no retry needed */
+		}
+	}
+
+	pr_debug("Wait for Tx done.\n");
+
+	/* Wait for Tx Completion */
+	timeleft = wait_for_completion_timeout(&ch->tx_done, jiffies);
+	if (timeleft == 0) {
+		pr_err("tx_done timeout %d msec expired.\n", timeout_msec);
+		goto exit_err;
+	} else if (ch->tx_abort) {
+		pr_err("tx aborted.\n");
+		goto exit_err;
+	}
+
+	mutex_unlock(&ch->lock);
+
+	return ret;
+exit_err:
+	mutex_unlock(&ch->lock);
+	return -EFAULT;
+}
+
+/**
+ * spcom_rx() - Wait for received data until timeout, unless pending rx data is
+ * already ready
+ *
+ * @ch: channel state struct pointer
+ * @buf: buffer pointer
+ * @size: buffer size
+ *
+ * ACK is expected within a very short time (few msec).
+ */
+static int spcom_rx(struct spcom_channel *ch,
+		     void *buf,
+		     uint32_t size,
+		     uint32_t timeout_msec)
+{
+	int ret;
+	unsigned long jiffies = msecs_to_jiffies(timeout_msec);
+	long timeleft = 1;
+
+	mutex_lock(&ch->lock);
+
+	/* check for already pending data */
+	if (ch->actual_rx_size) {
+		pr_debug("already pending data size [%d].\n",
+			 ch->actual_rx_size);
+		goto copy_buf;
+	}
+
+	/* reset completion before calling glink */
+	reinit_completion(&ch->rx_done);
+
+	/* Wait for Rx response */
+	pr_debug("Wait for Rx done.\n");
+	if (timeout_msec)
+		timeleft = wait_for_completion_timeout(&ch->rx_done, jiffies);
+	else
+		wait_for_completion(&ch->rx_done);
+
+	if (timeleft == 0) {
+		pr_err("rx_done timeout [%d] msec expired.\n", timeout_msec);
+		goto exit_err;
+	} else if (ch->rx_abort) {
+		pr_err("rx aborted.\n");
+		goto exit_err;
+	} else if (ch->actual_rx_size) {
+		pr_debug("actual_rx_size is [%d].\n", ch->actual_rx_size);
+	} else {
+		pr_err("actual_rx_size is zero.\n");
+		goto exit_err;
+	}
+
+	if (!ch->glink_rx_buf) {
+		pr_err("invalid glink_rx_buf.\n");
+		goto exit_err;
+	}
+
+copy_buf:
+	/* Copy from glink buffer to spcom buffer */
+	size = min_t(int, ch->actual_rx_size, size);
+	memcpy(buf, ch->glink_rx_buf, size);
+
+	pr_debug("copy size [%d].\n", (int) size);
+
+	/* free glink buffer after copy to spcom buffer */
+	glink_rx_done(ch->glink_handle, ch->glink_rx_buf, false);
+	ch->glink_rx_buf = NULL;
+	ch->actual_rx_size = 0;
+
+	/* queue rx buffer for the next time */
+	ret = glink_queue_rx_intent(ch->glink_handle, ch, ch->rx_buf_size);
+	if (ret) {
+		pr_err("glink_queue_rx_intent() failed, ret [%d]", ret);
+		goto exit_err;
+	} else {
+		pr_debug("queue rx_buf, size [%d].\n", ch->rx_buf_size);
+	}
+
+	mutex_unlock(&ch->lock);
+
+	return size;
+exit_err:
+	mutex_unlock(&ch->lock);
+	return -EFAULT;
+}
+
+/**
+ * spcom_get_next_request_size() - get request size.
+ * already ready
+ *
+ * @ch: channel state struct pointer
+ *
+ * Server needs the size of the next request to allocate a request buffer.
+ * Initially used intent-request, however this complicated the remote side,
+ * so both sides are not using glink_tx() with INTENT_REQ anymore.
+ */
+static int spcom_get_next_request_size(struct spcom_channel *ch)
+{
+	int size = -1;
+
+	/* NOTE: Remote clients might not be connected yet.*/
+	mutex_lock(&ch->lock);
+	reinit_completion(&ch->rx_done);
+
+	/* check if already got it via callback */
+	if (ch->actual_rx_size) {
+		pr_debug("next-req-size already ready ch [%s] size [%d].\n",
+			 ch->name, ch->actual_rx_size);
+		goto exit_ready;
+	}
+
+	pr_debug("Wait for Rx Done, ch [%s].\n", ch->name);
+	wait_for_completion(&ch->rx_done);
+	if (ch->actual_rx_size <= 0) {
+		pr_err("invalid rx size [%d] ch [%s].\n",
+		       ch->actual_rx_size, ch->name);
+		goto exit_error;
+	}
+
+exit_ready:
+	size = ch->actual_rx_size;
+	if (size > sizeof(struct spcom_msg_hdr)) {
+		size -= sizeof(struct spcom_msg_hdr);
+	} else {
+		pr_err("rx size [%d] too small.\n", size);
+		goto exit_error;
+	}
+
+	mutex_unlock(&ch->lock);
+	return size;
+
+exit_error:
+	mutex_unlock(&ch->lock);
+	return -EFAULT;
+
+
+}
+
+/*======================================================================*/
+/*		General API for kernel drivers				*/
+/*======================================================================*/
+
+/**
+ * spcom_is_sp_subsystem_link_up() - check if SPSS link is up.
+ *
+ * return: true if link is up, false if link is down.
+ */
+bool spcom_is_sp_subsystem_link_up(void)
+{
+	return (spcom_dev->link_state == GLINK_LINK_STATE_UP);
+}
+EXPORT_SYMBOL(spcom_is_sp_subsystem_link_up);
+
+/*======================================================================*/
+/*		Client API for kernel drivers				*/
+/*======================================================================*/
+
+/**
+ * spcom_register_client() - register a client.
+ *
+ * @info: channel name and ssr callback.
+ *
+ * Return: client handle
+ */
+struct spcom_client *spcom_register_client(struct spcom_client_info *info)
+{
+	int ret;
+	const char *name;
+	struct spcom_channel *ch;
+	struct spcom_client *client;
+
+	if (!info) {
+		pr_err("Invalid parameter.\n");
+			return NULL;
+	}
+	name = info->ch_name;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return NULL;
+
+	ch = spcom_find_channel_by_name(name);
+	if (!ch) {
+		pr_err("channel %s doesn't exist, load App first.\n", name);
+		return NULL;
+	}
+
+	client->ch = ch; /* backtrack */
+
+	ret = spcom_open(ch, OPEN_CHANNEL_TIMEOUT_MSEC);
+	if (ret) {
+		pr_err("failed to open channel [%s].\n", name);
+		kfree(client);
+		client = NULL;
+	} else {
+		pr_info("remote side connect to channel [%s].\n", name);
+	}
+
+	return client;
+}
+EXPORT_SYMBOL(spcom_register_client);
+
+
+/**
+ * spcom_unregister_client() - unregister a client.
+ *
+ * @client: client handle
+ */
+int spcom_unregister_client(struct spcom_client *client)
+{
+	struct spcom_channel *ch;
+
+	if (!client) {
+		pr_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = client->ch;
+
+	kfree(client);
+
+	spcom_close(ch);
+
+	return 0;
+}
+EXPORT_SYMBOL(spcom_unregister_client);
+
+
+/**
+ * spcom_client_send_message_sync() - send request and wait for response.
+ *
+ * @client: client handle
+ * @req_ptr: request pointer
+ * @req_size: request size
+ * @resp_ptr: response pointer
+ * @resp_size: response size
+ * @timeout_msec: timeout waiting for response.
+ *
+ * The timeout depends on the specific request handling time at the remote side.
+ */
+int spcom_client_send_message_sync(struct spcom_client	*client,
+				    void	*req_ptr,
+				    uint32_t	req_size,
+				    void	*resp_ptr,
+				    uint32_t	resp_size,
+				    uint32_t	timeout_msec)
+{
+	int ret;
+	struct spcom_channel *ch;
+
+	if (!client || !req_ptr || !resp_ptr) {
+		pr_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = client->ch;
+
+	/* Check if remote side connect */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connect.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	ret = spcom_tx(ch, req_ptr, req_size, TX_DONE_TIMEOUT_MSEC);
+	if (ret < 0) {
+		pr_err("tx error %d.\n", ret);
+		return ret;
+	}
+
+	ret = spcom_rx(ch, resp_ptr, resp_size, timeout_msec);
+	if (ret < 0) {
+		pr_err("rx error %d.\n", ret);
+		return ret;
+	}
+
+	/* @todo verify response transaction id match the request */
+
+	return ret;
+}
+EXPORT_SYMBOL(spcom_client_send_message_sync);
+
+
+/**
+ * spcom_client_is_server_connected() - is remote server connected.
+ *
+ * @client: client handle
+ */
+bool spcom_client_is_server_connected(struct spcom_client *client)
+{
+	bool connected;
+
+	if (!client) {
+		pr_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	connected = spcom_is_channel_connected(client->ch);
+
+	return connected;
+}
+EXPORT_SYMBOL(spcom_client_is_server_connected);
+
+/*======================================================================*/
+/*		Server API for kernel drivers				*/
+/*======================================================================*/
+
+/**
+ * spcom_register_service() - register a server.
+ *
+ * @info: channel name and ssr callback.
+ *
+ * Return: server handle
+ */
+struct spcom_server *spcom_register_service(struct spcom_service_info *info)
+{
+	int ret;
+	const char *name;
+	struct spcom_channel *ch;
+	struct spcom_server *server;
+
+	if (!info) {
+		pr_err("Invalid parameter.\n");
+		return NULL;
+	}
+	name = info->ch_name;
+
+	server = kzalloc(sizeof(*server), GFP_KERNEL);
+	if (!server)
+		return NULL;
+
+	ch = spcom_find_channel_by_name(name);
+	if (!ch) {
+		pr_err("channel %s doesn't exist, load App first.\n", name);
+		return NULL;
+	}
+
+	server->ch = ch; /* backtrack */
+
+	ret = spcom_open(ch, 0);
+	if (ret) {
+		pr_err("failed to open channel [%s].\n", name);
+		kfree(server);
+		server = NULL;
+	}
+
+	return server;
+}
+EXPORT_SYMBOL(spcom_register_service);
+
+/**
+ * spcom_unregister_service() - unregister a server.
+ *
+ * @server: server handle
+ */
+int spcom_unregister_service(struct spcom_server *server)
+{
+	struct spcom_channel *ch;
+
+	if (!server) {
+		pr_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = server->ch;
+
+	kfree(server);
+
+	spcom_close(ch);
+
+	return 0;
+}
+EXPORT_SYMBOL(spcom_unregister_service);
+
+/**
+ * spcom_server_get_next_request_size() - get request size.
+ *
+ * @server: server handle
+ *
+ * Return: request size in bytes.
+ */
+int spcom_server_get_next_request_size(struct spcom_server *server)
+{
+	int size;
+	struct spcom_channel *ch;
+
+	if (!server) {
+		pr_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = server->ch;
+
+	/* Check if remote side connect */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connect.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	size = spcom_get_next_request_size(ch);
+
+	pr_debug("next_request_size [%d].\n", size);
+
+	return size;
+}
+EXPORT_SYMBOL(spcom_server_get_next_request_size);
+
+/**
+ * spcom_server_wait_for_request() - wait for request.
+ *
+ * @server: server handle
+ * @req_ptr: request buffer pointer
+ * @req_size: max request size
+ *
+ * Return: request size in bytes.
+ */
+int spcom_server_wait_for_request(struct spcom_server	*server,
+				  void			*req_ptr,
+				  uint32_t		req_size)
+{
+	int ret;
+	struct spcom_channel *ch;
+
+	if (!server || !req_ptr) {
+		pr_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = server->ch;
+
+	/* Check if remote side connect */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connect.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	ret = spcom_rx(ch, req_ptr, req_size, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL(spcom_server_wait_for_request);
+
+/**
+ * spcom_server_send_response() - Send response
+ *
+ * @server: server handle
+ * @resp_ptr: response buffer pointer
+ * @resp_size: response size
+ */
+int spcom_server_send_response(struct spcom_server	*server,
+			       void			*resp_ptr,
+			       uint32_t			resp_size)
+{
+	int ret;
+	struct spcom_channel *ch;
+
+	if (!server || !resp_ptr) {
+		pr_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = server->ch;
+
+	/* Check if remote side connect */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connect.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	ret = spcom_tx(ch, resp_ptr, resp_size, TX_DONE_TIMEOUT_MSEC);
+
+	return ret;
+}
+EXPORT_SYMBOL(spcom_server_send_response);
+
+/*======================================================================*/
+/*	USER SPACE commands handling					*/
+/*======================================================================*/
+
+/**
+ * spcom_handle_create_channel_command() - Handle Create Channel command from
+ * user space.
+ *
+ * @cmd_buf:	command buffer.
+ * @cmd_size:	command buffer size.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
+{
+	int ret = 0;
+	struct spcom_user_create_channel_command *cmd = cmd_buf;
+	const char *ch_name;
+
+	if (cmd_size != sizeof(*cmd)) {
+		pr_err("cmd_size [%d] , expected [%d].\n",
+		       (int) cmd_size,  (int) sizeof(*cmd));
+		return -EINVAL;
+	}
+
+	ch_name = cmd->ch_name;
+
+	pr_debug("ch_name [%s].\n", ch_name);
+
+	ret = spcom_create_channel_chardev(ch_name);
+
+	return ret;
+}
+
+/**
+ * spcom_handle_send_command() - Handle send request/response from user space.
+ *
+ * @buf:	command buffer.
+ * @buf_size:	command buffer size.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_send_command(struct spcom_channel *ch,
+					     void *cmd_buf, int size)
+{
+	int ret = 0;
+	struct spcom_send_command *cmd = cmd_buf;
+	uint32_t buf_size;
+	void *buf;
+	struct spcom_msg_hdr *hdr;
+	void *tx_buf;
+	int tx_buf_size;
+	uint32_t timeout_msec;
+
+	pr_debug("send req/resp ch [%s] size [%d] .\n", ch->name, size);
+
+	/*
+	 * check that cmd buf size is at least struct size,
+	 * to allow access to struct fields.
+	 */
+	if (size < sizeof(*cmd)) {
+		pr_err("ch [%s] invalid cmd buf.\n",
+			ch->name);
+		return -EINVAL;
+	}
+
+	/* Check if remote side connect */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connect.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	/* parse command buffer */
+	buf = &cmd->buf;
+	buf_size = cmd->buf_size;
+	timeout_msec = cmd->timeout_msec;
+
+	/* Check param validity */
+	if (buf_size > SPCOM_MAX_RESPONSE_SIZE) {
+		pr_err("ch [%s] invalid buf size [%d].\n",
+			ch->name, buf_size);
+		return -EINVAL;
+	}
+	if (size != sizeof(*cmd) + buf_size) {
+		pr_err("ch [%s] invalid cmd size [%d].\n",
+			ch->name, size);
+		return -EINVAL;
+	}
+
+	/* Allocate Buffers*/
+	tx_buf_size = sizeof(*hdr) + buf_size;
+	tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
+	if (!tx_buf)
+		return -ENOMEM;
+
+	/* Prepare Tx Buf */
+	hdr = tx_buf;
+
+	/* Header */
+	hdr->txn_id = ch->txn_id;
+	if (!ch->is_server) {
+		ch->txn_id++;   /* client sets the request txn_id */
+		ch->response_timeout_msec = timeout_msec;
+	}
+
+	/* user buf */
+	memcpy(hdr->buf, buf, buf_size);
+
+	/*
+	 * remote side should have rx buffer ready.
+	 * tx_done is expected to be received quickly.
+	 */
+	ret = spcom_tx(ch, tx_buf, tx_buf_size, TX_DONE_TIMEOUT_MSEC);
+	if (ret < 0)
+		pr_err("tx error %d.\n", ret);
+
+	kfree(tx_buf);
+
+	return ret;
+}
+
+/**
+ * modify_ion_addr() - replace the ION buffer virtual address with physical
+ * address in a request or response buffer.
+ *
+ * @buf: buffer to modify
+ * @buf_size: buffer size
+ * @ion_info: ION buffer info such as FD and offset in buffer.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int modify_ion_addr(void *buf,
+			    uint32_t buf_size,
+			    struct spcom_ion_info ion_info)
+{
+	struct ion_handle *handle = NULL;
+	ion_phys_addr_t ion_phys_addr;
+	size_t len;
+	int fd;
+	uint32_t buf_offset;
+	char *ptr = (char *)buf;
+	int ret;
+
+	fd = ion_info.fd;
+	buf_offset = ion_info.buf_offset;
+	ptr += buf_offset;
+
+	if (fd < 0) {
+		pr_err("invalid fd [%d].\n", fd);
+		return -ENODEV;
+	}
+
+	if (buf_size < sizeof(uint64_t)) {
+		pr_err("buf size too small [%d].\n", buf_size);
+		return -ENODEV;
+	}
+
+	if (buf_offset > buf_size - sizeof(uint64_t)) {
+		pr_err("invalid buf_offset [%d].\n", buf_offset);
+		return -ENODEV;
+	}
+
+	/* Get ION handle from fd */
+	handle = ion_import_dma_buf_fd(spcom_dev->ion_client, fd);
+	if (handle == NULL) {
+		pr_err("fail to get ion handle.\n");
+		return -EINVAL;
+	}
+	pr_debug("ion handle ok.\n");
+
+	/* Get the ION buffer Physical Address */
+	ret = ion_phys(spcom_dev->ion_client, handle, &ion_phys_addr, &len);
+	if (ret < 0) {
+		pr_err("fail to get ion phys addr.\n");
+		ion_free(spcom_dev->ion_client, handle);
+		return -EINVAL;
+	}
+	if (buf_offset % sizeof(uint64_t))
+		pr_debug("offset [%d] is NOT 64-bit aligned.\n", buf_offset);
+	else
+		pr_debug("offset [%d] is 64-bit aligned.\n", buf_offset);
+
+	/* Set the ION Physical Address at the buffer offset */
+	pr_debug("ion phys addr = [0x%lx].\n", (long int) ion_phys_addr);
+	memcpy(ptr, &ion_phys_addr, sizeof(uint64_t));
+
+	/* Release the ION handle */
+	ion_free(spcom_dev->ion_client, handle);
+
+	return 0;
+}
+
+/**
+ * spcom_handle_send_modified_command() - send a request/response with ION
+ * buffer address. Modify the request/response by replacing the ION buffer
+ * virtual address with the physical address.
+ *
+ * @ch: channel pointer
+ * @cmd_buf: User space command buffer
+ * @size: size of user command buffer
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_send_modified_command(struct spcom_channel *ch,
+					       void *cmd_buf, int size)
+{
+	int ret = 0;
+	struct spcom_user_send_modified_command *cmd = cmd_buf;
+	uint32_t buf_size;
+	void *buf;
+	struct spcom_msg_hdr *hdr;
+	void *tx_buf;
+	int tx_buf_size;
+	uint32_t timeout_msec;
+	struct spcom_ion_info ion_info[SPCOM_MAX_ION_BUF_PER_CMD];
+	int i;
+
+	pr_debug("send req/resp ch [%s] size [%d] .\n", ch->name, size);
+
+	/*
+	 * check that cmd buf size is at least struct size,
+	 * to allow access to struct fields.
+	 */
+	if (size < sizeof(*cmd)) {
+		pr_err("ch [%s] invalid cmd buf.\n",
+			ch->name);
+		return -EINVAL;
+	}
+
+	/* Check if remote side connect */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connect.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	/* parse command buffer */
+	buf = &cmd->buf;
+	buf_size = cmd->buf_size;
+	timeout_msec = cmd->timeout_msec;
+	memcpy(ion_info, cmd->ion_info, sizeof(ion_info));
+
+	/* Check param validity */
+	if (buf_size > SPCOM_MAX_RESPONSE_SIZE) {
+		pr_err("ch [%s] invalid buf size [%d].\n",
+			ch->name, buf_size);
+		return -EINVAL;
+	}
+	if (size != sizeof(*cmd) + buf_size) {
+		pr_err("ch [%s] invalid cmd size [%d].\n",
+			ch->name, size);
+		return -EINVAL;
+	}
+
+	/* Allocate Buffers*/
+	tx_buf_size = sizeof(*hdr) + buf_size;
+	tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
+	if (!tx_buf)
+		return -ENOMEM;
+
+	/* Prepare Tx Buf */
+	hdr = tx_buf;
+
+	/* Header */
+	hdr->txn_id = ch->txn_id;
+	if (!ch->is_server) {
+		ch->txn_id++;   /* client sets the request txn_id */
+		ch->response_timeout_msec = timeout_msec;
+	}
+
+	/* user buf */
+	memcpy(hdr->buf, buf, buf_size);
+
+	for (i = 0 ; i < ARRAY_SIZE(ion_info) ; i++) {
+		if (ion_info[i].fd >= 0) {
+			ret = modify_ion_addr(hdr->buf, buf_size, ion_info[i]);
+			if (ret < 0) {
+				pr_err("modify_ion_addr() error [%d].\n", ret);
+				kfree(tx_buf);
+				return -EFAULT;
+			}
+		}
+	}
+
+	/*
+	 * remote side should have rx buffer ready.
+	 * tx_done is expected to be received quickly.
+	 */
+	ret = spcom_tx(ch, tx_buf, tx_buf_size, TX_DONE_TIMEOUT_MSEC);
+	if (ret < 0)
+		pr_err("tx error %d.\n", ret);
+
+	kfree(tx_buf);
+
+	return ret;
+}
+
+
+/**
+ * spcom_handle_lock_ion_buf_command() - Lock an ION buffer.
+ *
+ * Lock an ION buffer, prevent it from being free if the user space App crash,
+ * while it is used by the remote subsystem.
+ */
+static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch,
+					      void *cmd_buf, int size)
+{
+	struct spcom_user_command *cmd = cmd_buf;
+	int fd = cmd->arg;
+	struct ion_handle *ion_handle;
+	int i;
+
+	if (size != sizeof(*cmd)) {
+		pr_err("cmd size [%d] , expected [%d].\n",
+		       (int) size,  (int) sizeof(*cmd));
+		return -EINVAL;
+	}
+
+	/* Check ION client */
+	if (spcom_dev->ion_client == NULL) {
+		pr_err("invalid ion client.\n");
+		return -ENODEV;
+	}
+
+	/* Get ION handle from fd - this increments the ref count */
+	ion_handle = ion_import_dma_buf_fd(spcom_dev->ion_client, fd);
+	if (ion_handle == NULL) {
+		pr_err("fail to get ion handle.\n");
+		return -EINVAL;
+	}
+	pr_debug("ion handle ok.\n");
+
+	/* Check if this ION buffer is already locked */
+	for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
+		if (ch->ion_handle_table[i] == ion_handle) {
+			pr_debug("fd [%d] ion buf is already locked.\n", fd);
+			/* decrement back the ref count */
+			ion_free(spcom_dev->ion_client, ion_handle);
+			return -EINVAL;
+		}
+	}
+
+       /* Store the ION handle */
+	for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
+		if (ch->ion_handle_table[i] == NULL) {
+			ch->ion_handle_table[i] = ion_handle;
+			ch->ion_fd_table[i] = fd;
+			pr_debug("locked ion buf#[%d], fd [%d].\n", i, fd);
+			return 0;
+		}
+	}
+
+	return -EFAULT;
+}
+
+/**
+ * spcom_unlock_ion_buf() - Unlock an ION buffer.
+ *
+ * Unlock an ION buffer, let it be free, when it is no longer being used by
+ * the remote subsystem.
+ */
+static int spcom_unlock_ion_buf(struct spcom_channel *ch, int fd)
+{
+	struct ion_client *ion_client = spcom_dev->ion_client;
+	int i;
+	bool found = false;
+
+	pr_debug("Unlock ion buf ch [%s] fd [%d].\n", ch->name, fd);
+
+	/* Check ION client */
+	if (ion_client == NULL) {
+		pr_err("fail to create ion client.\n");
+		return -ENODEV;
+	}
+
+	if (fd == (int) SPCOM_ION_FD_UNLOCK_ALL) {
+		pr_debug("unlocked ALL ion buf ch [%s].\n", ch->name);
+		found = true;
+		/* unlock all ION buf */
+		for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
+			if (ch->ion_handle_table[i] != NULL) {
+				ion_free(ion_client, ch->ion_handle_table[i]);
+				ch->ion_handle_table[i] = NULL;
+				ch->ion_fd_table[i] = -1;
+				pr_debug("unlocked ion buf#[%d].\n", i);
+			}
+		}
+	} else {
+		/* unlock specific ION buf */
+		for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
+			if (ch->ion_fd_table[i] == fd) {
+				ion_free(ion_client, ch->ion_handle_table[i]);
+				ch->ion_handle_table[i] = NULL;
+				ch->ion_fd_table[i] = -1;
+				pr_debug("unlocked ion buf#[%d].\n", i);
+				found = true;
+				break;
+			}
+		}
+	}
+
+	if (!found) {
+		pr_err("ch [%s] fd [%d] was not found.\n", ch->name, fd);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * spcom_handle_unlock_ion_buf_command() - Unlock an ION buffer.
+ *
+ * Unlock an ION buffer, let it be free, when it is no longer being used by
+ * the remote subsystem.
+ */
+static int spcom_handle_unlock_ion_buf_command(struct spcom_channel *ch,
+					      void *cmd_buf, int size)
+{
+	int ret;
+	struct spcom_user_command *cmd = cmd_buf;
+	int fd = cmd->arg;
+
+	if (size != sizeof(*cmd)) {
+		pr_err("cmd size [%d] , expected [%d].\n",
+		       (int) size,  (int) sizeof(*cmd));
+		return -EINVAL;
+	}
+
+	ret = spcom_unlock_ion_buf(ch, fd);
+
+	return ret;
+}
+
+/**
+ * spcom_handle_fake_ssr_command() - Handle fake ssr command from user space.
+ */
+static int spcom_handle_fake_ssr_command(struct spcom_channel *ch, int arg)
+{
+	pr_debug("Start Fake glink SSR subsystem [%s].\n", spcom_edge);
+	glink_ssr(spcom_edge);
+	pr_debug("Fake glink SSR subsystem [%s] done.\n", spcom_edge);
+
+	return 0;
+}
+
+/**
+ * spcom_handle_write() - Handle user space write commands.
+ *
+ * @buf:	command buffer.
+ * @buf_size:	command buffer size.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_write(struct spcom_channel *ch,
+			       void *buf,
+			       int buf_size)
+{
+	int ret = 0;
+	struct spcom_user_command *cmd = NULL;
+	int cmd_id = 0;
+	int swap_id;
+	char cmd_name[5] = {0}; /* debug only */
+
+	/* opcode field is the minimum length of cmd */
+	if (buf_size < sizeof(cmd->cmd_id)) {
+		pr_err("Invalid argument user buffer size %d.\n", buf_size);
+		return -EINVAL;
+	}
+
+	cmd = (struct spcom_user_command *)buf;
+	cmd_id = (int) cmd->cmd_id;
+	swap_id = htonl(cmd->cmd_id);
+	memcpy(cmd_name, &swap_id, sizeof(int));
+
+	pr_debug("cmd_id [0x%x] cmd_name [%s].\n", cmd_id, cmd_name);
+
+	switch (cmd_id) {
+	case SPCOM_CMD_SEND:
+		ret = spcom_handle_send_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_SEND_MODIFIED:
+		ret = spcom_handle_send_modified_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_LOCK_ION_BUF:
+		ret = spcom_handle_lock_ion_buf_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_UNLOCK_ION_BUF:
+		ret = spcom_handle_unlock_ion_buf_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_FSSR:
+		ret = spcom_handle_fake_ssr_command(ch, cmd->arg);
+		break;
+	case SPCOM_CMD_CREATE_CHANNEL:
+		ret = spcom_handle_create_channel_command(buf, buf_size);
+		break;
+	default:
+		pr_err("Invalid Command Id [0x%x].\n", (int) cmd->cmd_id);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * spcom_handle_get_req_size() - Handle user space get request size command
+ *
+ * @ch:	channel handle
+ * @buf:	command buffer.
+ * @size:	command buffer size.
+ *
+ * Return: size in bytes.
+ */
+static int spcom_handle_get_req_size(struct spcom_channel *ch,
+				      void *buf,
+				      uint32_t size)
+{
+	uint32_t next_req_size = 0;
+
+	if (size < sizeof(next_req_size)) {
+		pr_err("buf size [%d] too small.\n", (int) size);
+		return -EINVAL;
+	}
+
+	next_req_size = spcom_get_next_request_size(ch);
+
+	memcpy(buf, &next_req_size, sizeof(next_req_size));
+	pr_debug("next_req_size [%d].\n", next_req_size);
+
+	return sizeof(next_req_size); /* can't exceed user buffer size */
+}
+
+/**
+ * spcom_handle_read_req_resp() - Handle user space get request/response command
+ *
+ * @ch:	channel handle
+ * @buf:	command buffer.
+ * @size:	command buffer size.
+ *
+ * Return: size in bytes.
+ */
+static int spcom_handle_read_req_resp(struct spcom_channel *ch,
+				       void *buf,
+				       uint32_t size)
+{
+	int ret;
+	struct spcom_msg_hdr *hdr;
+	void *rx_buf;
+	int rx_buf_size;
+	uint32_t timeout_msec = 0; /* client only */
+
+	/* Check if remote side connect */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connect.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	/* Check param validity */
+	if (size > SPCOM_MAX_RESPONSE_SIZE) {
+		pr_err("ch [%s] inavlid size [%d].\n",
+			ch->name, size);
+		return -EINVAL;
+	}
+
+	/* Allocate Buffers*/
+	rx_buf_size = sizeof(*hdr) + size;
+	rx_buf = kzalloc(rx_buf_size, GFP_KERNEL);
+	if (!rx_buf)
+		return -ENOMEM;
+
+	/*
+	 * client response timeout depends on the request
+	 * handling time on the remote side .
+	 */
+	if (!ch->is_server) {
+		timeout_msec = ch->response_timeout_msec;
+		pr_debug("response_timeout_msec = %d.\n", (int) timeout_msec);
+	}
+
+	ret = spcom_rx(ch, rx_buf, rx_buf_size, timeout_msec);
+	if (ret < 0) {
+		pr_err("rx error %d.\n", ret);
+		goto exit_err;
+	} else {
+		size = ret; /* actual_rx_size */
+	}
+
+	hdr = rx_buf;
+
+	if (ch->is_server) {
+		ch->txn_id = hdr->txn_id;
+		pr_debug("request txn_id [0x%x].\n", ch->txn_id);
+	}
+
+	/* copy data to user without the header */
+	if (size > sizeof(*hdr)) {
+		size -= sizeof(*hdr);
+		memcpy(buf, hdr->buf, size);
+	} else {
+		pr_err("rx size [%d] too small.\n", size);
+		goto exit_err;
+	}
+
+	kfree(rx_buf);
+	return size;
+exit_err:
+	kfree(rx_buf);
+	return -EFAULT;
+
+}
+
+/**
+ * spcom_handle_read() - Handle user space read request/response or
+ * request-size command
+ *
+ * @ch:	channel handle
+ * @buf:	command buffer.
+ * @size:	command buffer size.
+ *
+ * A special size SPCOM_GET_NEXT_REQUEST_SIZE, which is bigger than the max
+ * response/request tells the kernel that user space only need the size.
+ *
+ * Return: size in bytes.
+ */
+static int spcom_handle_read(struct spcom_channel *ch,
+			      void *buf,
+			      uint32_t size)
+{
+	if (size == SPCOM_GET_NEXT_REQUEST_SIZE) {
+		pr_debug("get next request size, ch [%s].\n", ch->name);
+		size = spcom_handle_get_req_size(ch, buf, size);
+		ch->is_server = true;
+	} else {
+		pr_debug("get request/response, ch [%s].\n", ch->name);
+		size = spcom_handle_read_req_resp(ch, buf, size);
+	}
+
+	pr_debug("ch [%s] , size = %d.\n", ch->name, size);
+
+	return size;
+}
+
+/*======================================================================*/
+/*		CHAR DEVICE USER SPACE INTERFACE			*/
+/*======================================================================*/
+
+/**
+ * file_to_filename() - get the filename from file pointer.
+ *
+ * @filp: file pointer
+ *
+ * it is used for debug prints.
+ *
+ * Return: filename string or "unknown".
+ */
+static char *file_to_filename(struct file *filp)
+{
+	struct dentry *dentry = NULL;
+	char *filename = NULL;
+
+	if (!filp || !filp->f_path.dentry)
+		return "unknown";
+
+	dentry = filp->f_path.dentry;
+	filename = dentry->d_iname;
+
+	return filename;
+}
+
+/**
+ * spcom_device_open() - handle channel file open() from user space.
+ *
+ * @filp: file pointer
+ *
+ * The file name (without path) is the channel name.
+ * Open the relevant glink channel.
+ * Store the channel context in the file private
+ * date pointer for future read/write/close
+ * operations.
+ */
+static int spcom_device_open(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+
+	pr_debug("Open file [%s].\n", name);
+
+	if (strcmp(name, DEVICE_NAME) == 0) {
+		pr_debug("root dir skipped.\n");
+		return 0;
+	}
+
+	if (strcmp(name, "sp_ssr") == 0) {
+		pr_debug("sp_ssr dev node skipped.\n");
+		return 0;
+	}
+
+	ch = spcom_find_channel_by_name(name);
+	if (!ch) {
+		pr_err("channel %s doesn't exist, load App first.\n", name);
+		return -ENODEV;
+	}
+
+	filp->private_data = ch;
+
+	ret = spcom_open(ch, OPEN_CHANNEL_TIMEOUT_MSEC);
+	if (ret == -ETIMEDOUT) {
+		pr_err("Connection timeout channel [%s].\n", name);
+	} else if (ret) {
+		pr_err("failed to open channel [%s] , err=%d.\n", name, ret);
+		return ret;
+	}
+
+	pr_debug("finished.\n");
+
+	return 0;
+}
+
+/**
+ * spcom_device_release() - handle channel file close() from user space.
+ *
+ * @filp: file pointer
+ *
+ * The file name (without path) is the channel name.
+ * Open the relevant glink channel.
+ * Store the channel context in the file private
+ * date pointer for future read/write/close
+ * operations.
+ */
+static int spcom_device_release(struct inode *inode, struct file *filp)
+{
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+	bool connected = false;
+
+	pr_debug("Close file [%s].\n", name);
+
+	if (strcmp(name, DEVICE_NAME) == 0) {
+		pr_debug("root dir skipped.\n");
+		return 0;
+	}
+
+	if (strcmp(name, "sp_ssr") == 0) {
+		pr_debug("sp_ssr dev node skipped.\n");
+		return 0;
+	}
+
+	ch = filp->private_data;
+
+	if (!ch) {
+		pr_debug("ch is NULL, file name %s.\n", file_to_filename(filp));
+		return -ENODEV;
+	}
+
+	/* channel might be already closed or disconnected */
+	if (spcom_is_channel_open(ch) && spcom_is_channel_connected(ch))
+		connected = true;
+
+	reinit_completion(&ch->disconnect);
+
+	spcom_close(ch);
+
+	if (connected) {
+		pr_debug("Wait for event GLINK_LOCAL_DISCONNECTED, ch [%s].\n",
+			 name);
+		wait_for_completion(&ch->disconnect);
+		pr_debug("GLINK_LOCAL_DISCONNECTED signaled, ch [%s].\n", name);
+	}
+
+	return 0;
+}
+
+/**
+ * spcom_device_write() - handle channel file write() from user space.
+ *
+ * @filp: file pointer
+ *
+ * Return: On Success - same size as number of bytes to write.
+ * On Failure - negative value.
+ */
+static ssize_t spcom_device_write(struct file *filp,
+				   const char __user *user_buff,
+				   size_t size, loff_t *f_pos)
+{
+	int ret;
+	char *buf;
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+
+	pr_debug("Write file [%s] size [%d] pos [%d].\n",
+		 name, (int) size, (int) *f_pos);
+
+	if (!user_buff || !f_pos || !filp) {
+		pr_err("invalid null parameters.\n");
+		return -EINVAL;
+	}
+
+	ch = filp->private_data;
+	if (!ch) {
+		pr_debug("invalid ch pointer.\n");
+		/* Allow some special commands via /dev/spcom and /dev/sp_ssr */
+	} else {
+		/* Check if remote side connect */
+		if (!spcom_is_channel_connected(ch)) {
+			pr_err("ch [%s] remote side not connect.\n", ch->name);
+			return -ENOTCONN;
+		}
+	}
+
+	if (size > SPCOM_MAX_COMMAND_SIZE) {
+		pr_err("size [%d] > max size [%d].\n",
+			   (int) size, (int) SPCOM_MAX_COMMAND_SIZE);
+		return -EINVAL;
+	}
+
+	if (*f_pos != 0) {
+		pr_err("offset should be zero, no sparse buffer.\n");
+		return -EINVAL;
+	}
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	ret = copy_from_user(buf, user_buff, size);
+	if (ret) {
+		pr_err("Unable to copy from user (err %d).\n", ret);
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	ret = spcom_handle_write(ch, buf, size);
+	if (ret) {
+		pr_err("handle command error [%d].\n", ret);
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	kfree(buf);
+
+	return size;
+}
+
+/**
+ * spcom_device_read() - handle channel file write() from user space.
+ *
+ * @filp: file pointer
+ *
+ * Return: number of bytes to read on success, negative value on
+ * failure.
+ */
+static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
+				 size_t size, loff_t *f_pos)
+{
+	int ret = 0;
+	int actual_size = 0;
+	char *buf;
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+
+	pr_debug("Read file [%s], size = %d bytes.\n", name, (int) size);
+
+	if (!filp || !user_buff || !f_pos ||
+	    (size == 0) || (size > SPCOM_MAX_READ_SIZE)) {
+		pr_err("invalid parameters.\n");
+		return -EINVAL;
+	}
+
+	ch = filp->private_data;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	actual_size = spcom_handle_read(ch, buf, size);
+	if ((actual_size <= 0) || (actual_size > size)) {
+		pr_err("invalid actual_size [%d].\n", actual_size);
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	ret = copy_to_user(user_buff, buf, actual_size);
+
+	if (ret) {
+		pr_err("Unable to copy to user, err = %d.\n", ret);
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	kfree(buf);
+
+	pr_debug("ch [%s] ret [%d].\n", name, (int) actual_size);
+
+	return actual_size;
+}
+
+/**
+ * spcom_device_poll() - handle channel file poll() from user space.
+ *
+ * @filp: file pointer
+ *
+ * This allows user space to wait/check for channel connection,
+ * or wait for SSR event.
+ *
+ * Note: this implements non-standard poll() semantics: the requested
+ * event mask carries a command (SPCOM_POLL_LINK_STATE or
+ * SPCOM_POLL_CH_CONNECT), optionally OR-ed with SPCOM_POLL_WAIT_FLAG
+ * to block until the event fires.
+ *
+ * Return: event bitmask on success, set POLLERR on failure.
+ */
+static unsigned int spcom_device_poll(struct file *filp,
+				       struct poll_table_struct *poll_table)
+{
+	/*
+	 * when user call with timeout -1 for blocking mode,
+	 * any bit must be set in response
+	 */
+	unsigned int ret = SPCOM_POLL_READY_FLAG;
+	unsigned long mask;
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+	bool wait = false;
+	bool done = false;
+	/* Event types always implicitly polled for */
+	unsigned long reserved = POLLERR | POLLHUP | POLLNVAL;
+	int ready = 0; /* < 0 when the optional blocking wait is interrupted */
+
+	ch = filp->private_data;
+
+	mask = poll_requested_events(poll_table);
+
+	pr_debug("== ch [%s] mask [0x%x] ==.\n", name, (int) mask);
+
+	/* user space API has poll use "short" and not "long" */
+	mask &= 0x0000FFFF;
+
+	wait = mask & SPCOM_POLL_WAIT_FLAG;
+	if (wait)
+		pr_debug("ch [%s] wait for event flag is ON.\n", name);
+	mask &= ~SPCOM_POLL_WAIT_FLAG; /* clear the wait flag */
+	mask &= ~SPCOM_POLL_READY_FLAG; /* clear the ready flag */
+	mask &= ~reserved; /* clear the implicitly set reserved bits */
+
+	/* after masking, exactly one command bit is expected to remain */
+	switch (mask) {
+	case SPCOM_POLL_LINK_STATE:
+		pr_debug("ch [%s] SPCOM_POLL_LINK_STATE.\n", name);
+		if (wait) {
+			/*
+			 * NOTE(review): reinit before wait may miss an event
+			 * signaled just beforehand - confirm this is intended
+			 */
+			reinit_completion(&spcom_dev->link_state_changed);
+			ready = wait_for_completion_interruptible(
+				&spcom_dev->link_state_changed);
+			pr_debug("ch [%s] poll LINK_STATE signaled.\n", name);
+		}
+		done = (spcom_dev->link_state == GLINK_LINK_STATE_UP);
+		break;
+	case SPCOM_POLL_CH_CONNECT:
+		pr_debug("ch [%s] SPCOM_POLL_CH_CONNECT.\n", name);
+		if (wait) {
+			reinit_completion(&ch->connect);
+			ready = wait_for_completion_interruptible(&ch->connect);
+			pr_debug("ch [%s] poll CH_CONNECT signaled.\n", name);
+		}
+		done = completion_done(&ch->connect);
+		break;
+	default:
+		pr_err("ch [%s] poll, invalid mask [0x%x].\n",
+			 name, (int) mask);
+		ret = POLLERR;
+		break;
+	}
+
+	if (ready < 0) { /* wait was interrupted */
+		pr_debug("ch [%s] poll interrupted, ret [%d].\n", name, ready);
+		ret = POLLERR | SPCOM_POLL_READY_FLAG | mask;
+	}
+	/* report the command bit back to user space when the event occurred */
+	if (done)
+		ret |= mask;
+
+	pr_debug("ch [%s] poll, mask = 0x%x, ret=0x%x.\n",
+		 name, (int) mask, ret);
+
+	return ret;
+}
+
+/* Character-device callbacks exposed to user space */
+static const struct file_operations fops = {
+	.owner = THIS_MODULE,
+	.open = spcom_device_open,
+	.release = spcom_device_release,
+	.read = spcom_device_read,
+	.write = spcom_device_write,
+	.poll = spcom_device_poll,
+};
+
+/**
+ * spcom_create_channel_chardev() - Create a channel char-dev node file
+ * for user space interface
+ *
+ * @name: channel name, must not already exist.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int spcom_create_channel_chardev(const char *name)
+{
+	int ret;
+	struct device *dev;
+	struct spcom_channel *ch;
+	dev_t devt;
+	struct class *cls = spcom_dev->driver_class;
+	struct device *parent = spcom_dev->class_dev;
+	void *priv;
+	struct cdev *cdev;
+
+	pr_debug("Add channel [%s].\n", name);
+
+	ch = spcom_find_channel_by_name(name);
+	if (ch) {
+		pr_err("channel [%s] already exist.\n", name);
+		return -EINVAL;
+	}
+
+	ch = spcom_find_channel_by_name(""); /* find reserved channel */
+	if (!ch) {
+		pr_err("no free channel.\n");
+		return -ENODEV;
+	}
+
+	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+	if (!cdev)
+		return -ENOMEM;
+
+	/* reserve the next minor; commit channel_count only on success */
+	devt = spcom_dev->device_no + spcom_dev->channel_count + 1;
+	priv = ch;
+	dev = device_create(cls, parent, devt, priv, name);
+	/* device_create() returns ERR_PTR() on failure, never NULL */
+	if (IS_ERR_OR_NULL(dev)) {
+		pr_err("device_create failed.\n");
+		kfree(cdev);
+		return -ENODEV;
+	}
+
+	cdev_init(cdev, &fops);
+	cdev->owner = THIS_MODULE;
+
+	ret = cdev_add(cdev, devt, 1);
+	if (ret < 0) {
+		pr_err("cdev_add failed %d\n", ret);
+		goto exit_destroy_device;
+	}
+
+	spcom_init_channel(ch, name);
+
+	spcom_dev->channel_count++;
+	ch->cdev = cdev;
+	ch->dev = dev;
+
+	return 0;
+
+exit_destroy_device:
+	device_destroy(spcom_dev->driver_class, devt);
+	kfree(cdev);
+	return ret;
+}
+
+/**
+ * spcom_register_chardev() - register the spcom control device node.
+ *
+ * Allocates a char-dev region, creates the driver class and the
+ * control device, and adds the cdev covering all channel minors.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int __init spcom_register_chardev(void)
+{
+	int ret;
+	unsigned int baseminor = 0;
+	unsigned int count = 1;
+	void *priv = spcom_dev;
+
+	ret = alloc_chrdev_region(&spcom_dev->device_no, baseminor, count,
+				 DEVICE_NAME);
+	if (ret < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", ret);
+		return ret;
+	}
+
+	spcom_dev->driver_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(spcom_dev->driver_class)) {
+		/* propagate the real error instead of a blanket -ENOMEM */
+		ret = PTR_ERR(spcom_dev->driver_class);
+		pr_err("class_create failed %d\n", ret);
+		goto exit_unreg_chrdev_region;
+	}
+
+	spcom_dev->class_dev = device_create(spcom_dev->driver_class, NULL,
+				  spcom_dev->device_no, priv,
+				  DEVICE_NAME);
+
+	/* device_create() returns ERR_PTR() on failure, never NULL */
+	if (IS_ERR_OR_NULL(spcom_dev->class_dev)) {
+		ret = -ENODEV;
+		pr_err("class_device_create failed %d\n", ret);
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&spcom_dev->cdev, &fops);
+	spcom_dev->cdev.owner = THIS_MODULE;
+
+	ret = cdev_add(&spcom_dev->cdev,
+		       MKDEV(MAJOR(spcom_dev->device_no), 0),
+		       SPCOM_MAX_CHANNELS);
+	if (ret < 0) {
+		pr_err("cdev_add failed %d\n", ret);
+		goto exit_destroy_device;
+	}
+
+	pr_debug("char device created.\n");
+
+	return 0;
+
+exit_destroy_device:
+	device_destroy(spcom_dev->driver_class, spcom_dev->device_no);
+exit_destroy_class:
+	class_destroy(spcom_dev->driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(spcom_dev->device_no, 1);
+	return ret;
+}
+
+/* Tear down everything spcom_register_chardev() created, in reverse order */
+static void spcom_unregister_chrdev(void)
+{
+	cdev_del(&spcom_dev->cdev);
+	device_destroy(spcom_dev->driver_class, spcom_dev->device_no);
+	class_destroy(spcom_dev->driver_class);
+	unregister_chrdev_region(spcom_dev->device_no, 1);
+
+}
+
+/*======================================================================*/
+/*		Device Tree						*/
+/*======================================================================*/
+
+/**
+ * spcom_parse_dt() - read the predefined channel names from device tree.
+ *
+ * @np: the spcom device tree node.
+ *
+ * Return: number of predefined channels on success, negative on failure.
+ */
+static int spcom_parse_dt(struct device_node *np)
+{
+	int ret;
+	const char *propname = "qcom,spcom-ch-names";
+	int num_ch = of_property_count_strings(np, propname);
+	int i;
+	const char *name;
+
+	pr_debug("num of predefined channels [%d].\n", num_ch);
+
+	/* never overflow the fixed-size predefined channel name table */
+	if (num_ch > (int) ARRAY_SIZE(spcom_dev->predefined_ch_name)) {
+		pr_err("too many predefined channels [%d].\n", num_ch);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_ch; i++) {
+		ret = of_property_read_string_index(np, propname, i, &name);
+		if (ret) {
+			pr_err("failed to read DT channel [%d] name .\n", i);
+			return -EFAULT;
+		}
+		strlcpy(spcom_dev->predefined_ch_name[i],
+			name,
+			sizeof(spcom_dev->predefined_ch_name[i]));
+
+		pr_debug("found ch [%s].\n", name);
+	}
+
+	return num_ch;
+}
+
+/**
+ * spcom_probe() - driver probe; set up device state, char devs and
+ * glink link-state notification.
+ *
+ * @pdev: platform device from the "qcom,spcom" DT node.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int spcom_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct spcom_device *dev = NULL;
+	struct glink_link_info link_info;
+	struct device_node *np;
+	struct link_state_notifier_info *notif_handle;
+
+	if (!pdev) {
+		pr_err("invalid pdev.\n");
+		return -ENODEV;
+	}
+
+	np = pdev->dev.of_node;
+	if (!np) {
+		pr_err("invalid DT node.\n");
+		return -EINVAL;
+	}
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (dev == NULL)
+		return -ENOMEM;
+
+	spcom_dev = dev;
+	mutex_init(&dev->lock);
+	init_completion(&dev->link_state_changed);
+	spcom_dev->link_state = GLINK_LINK_STATE_DOWN;
+
+	ret = spcom_register_chardev();
+	if (ret) {
+		pr_err("create character device failed.\n");
+		/* nothing registered yet - must not unregister here */
+		goto fail_free_dev;
+	}
+
+	link_info.glink_link_state_notif_cb = spcom_link_state_notif_cb;
+	link_info.transport = spcom_transport;
+	link_info.edge = spcom_edge;
+
+	ret = spcom_parse_dt(np);
+	if (ret < 0)
+		goto fail_unreg_chardev;
+
+	/*
+	 * Register for glink link up/down notification.
+	 * glink channels can't be opened before link is up.
+	 */
+	pr_debug("register_link_state_cb(), transport [%s] edge [%s]\n",
+		link_info.transport, link_info.edge);
+	notif_handle = glink_register_link_state_cb(&link_info, spcom_dev);
+	if (!notif_handle) {
+		pr_err("glink_register_link_state_cb() failed\n");
+		ret = -ENODEV;
+		goto fail_unreg_chardev;
+	}
+
+	spcom_dev->ion_client = msm_ion_client_create(DEVICE_NAME);
+	if (spcom_dev->ion_client == NULL) {
+		pr_err("fail to create ion client.\n");
+		ret = -ENODEV;
+		goto fail_unreg_chardev;
+	}
+
+	pr_info("Driver Initialization ok.\n");
+
+	return 0;
+
+fail_unreg_chardev:
+	spcom_unregister_chrdev();
+fail_free_dev:
+	pr_err("Failed to init driver.\n");
+	kfree(dev);
+	spcom_dev = NULL;
+
+	return ret;
+}
+
+/* Matches the "qcom,spcom" device tree node that instantiates this driver */
+static const struct of_device_id spcom_match_table[] = {
+	{ .compatible = "qcom,spcom", },
+	{ }, /* sentinel */
+};
+
+/* Platform driver descriptor; probed against spcom_match_table above */
+static struct platform_driver spcom_driver = {
+	.probe = spcom_probe,
+	.driver = {
+		.name = DEVICE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(spcom_match_table),
+	},
+};
+
+/*======================================================================*/
+/*		Driver Init/Exit					*/
+/*======================================================================*/
+
+/**
+ * spcom_init() - module entry point; register the platform driver.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int __init spcom_init(void)
+{
+	int ret;
+
+	pr_info("spcom driver Ver 1.0 23-Nov-2015.\n");
+
+	ret = platform_driver_register(&spcom_driver);
+	if (ret)
+		pr_err("spcom_driver register failed %d\n", ret);
+
+	/* propagate the error so a failed registration fails module load */
+	return ret;
+}
+module_init(spcom_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Secure Processor Communication");
diff --git a/drivers/soc/qcom/wlan_firmware_service_v01.c b/drivers/soc/qcom/wlan_firmware_service_v01.c
index 23d18e3..96e7d68 100644
--- a/drivers/soc/qcom/wlan_firmware_service_v01.c
+++ b/drivers/soc/qcom/wlan_firmware_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -130,6 +130,23 @@
 	},
 };
 
+static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
+					   addr),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE, /* was duplicate .is_array */
+	},
+};
+
 static struct elem_info wlfw_memory_region_info_s_v01_ei[] = {
 	{
 		.data_type      = QMI_UNSIGNED_8_BYTE,
@@ -361,6 +378,78 @@
 					   client_id),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   request_mem_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   request_mem_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_mem_ready_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_mem_ready_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   cold_boot_cal_done_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   cold_boot_cal_done_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   rejuvenate_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   rejuvenate_enable),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
 		.is_array       = QMI_COMMON_TLV_TYPE,
@@ -647,6 +736,34 @@
 		.ei_array      = wlfw_shadow_reg_cfg_s_v01_ei,
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01,
+		.elem_size      = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2),
+		.ei_array      = wlfw_shadow_reg_v2_cfg_s_v01_ei,
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
 		.is_array       = QMI_COMMON_TLV_TYPE,
@@ -978,9 +1095,8 @@
 		.is_array       = NO_ARRAY,
 		.tlv_type       = 0x01,
 		.offset         = offsetof(
-				   struct
-				   wlfw_initiate_cal_download_ind_msg_v01,
-				   cal_id),
+			   struct wlfw_initiate_cal_download_ind_msg_v01,
+			   cal_id),
 	},
 	{
 		.data_type      = QMI_EOTI,
@@ -1657,3 +1773,318 @@
 		.is_array       = QMI_COMMON_TLV_TYPE,
 	},
 };
+
+struct elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   daemon_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   daemon_support),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_host_cap_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_request_mem_ind_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_respond_mem_req_msg_v01,
+					   addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_respond_mem_req_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_respond_mem_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   cause_for_rejuvenation_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   cause_for_rejuvenation),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   requesting_sub_system_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   requesting_sub_system),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   line_number_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   line_number),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   function_name_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   function_name),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_rejuvenate_ack_resp_msg_v01,
+				   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				   wlfw_dynamic_feature_mask_req_msg_v01,
+				   mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_dynamic_feature_mask_req_msg_v01,
+				   mask),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   prev_mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   prev_mask),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   curr_mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   curr_mask),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/soc/qcom/wlan_firmware_service_v01.h b/drivers/soc/qcom/wlan_firmware_service_v01.h
index 3c8a267..96c03d6 100644
--- a/drivers/soc/qcom/wlan_firmware_service_v01.h
+++ b/drivers/soc/qcom/wlan_firmware_service_v01.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,10 @@
 #define WLFW_SERVICE_VERS_V01 0x01
 
 #define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
 #define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
 #define QMI_WLFW_CAP_REQ_V01 0x0024
 #define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
 #define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
@@ -26,25 +29,34 @@
 #define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
 #define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
 #define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
 #define QMI_WLFW_MSA_READY_IND_V01 0x002B
 #define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
 #define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
 #define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
 #define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
 #define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
 #define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
 #define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
 #define QMI_WLFW_FW_READY_IND_V01 0x0021
 #define QMI_WLFW_MSA_READY_RESP_V01 0x002E
 #define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
 #define QMI_WLFW_INI_REQ_V01 0x002F
 #define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
 #define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
 #define QMI_WLFW_MSA_READY_REQ_V01 0x002E
 #define QMI_WLFW_CAP_RESP_V01 0x0024
+#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
 #define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
 #define QMI_WLFW_VBATT_REQ_V01 0x0032
 #define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
+#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
 #define QMI_WLFW_VBATT_RESP_V01 0x0032
 #define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
 #define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
@@ -55,12 +67,14 @@
 #define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2
 #define QMI_WLFW_MAX_NUM_CAL_V01 5
 #define QMI_WLFW_MAX_DATA_SIZE_V01 6144
+#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
 #define QMI_WLFW_MAX_NUM_CE_V01 12
 #define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
+#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
 #define QMI_WLFW_MAX_STR_LEN_V01 16
 #define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
 #define QMI_WLFW_MAC_ADDR_SIZE_V01 6
-#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36
 #define QMI_WLFW_MAX_NUM_SVC_V01 24
 
 enum wlfw_driver_mode_enum_v01 {
@@ -72,6 +86,7 @@
 	QMI_WLFW_OFF_V01 = 4,
 	QMI_WLFW_CCPM_V01 = 5,
 	QMI_WLFW_QVIT_V01 = 6,
+	QMI_WLFW_CALIBRATION_V01 = 7,
 	WLFW_DRIVER_MODE_ENUM_MAX_VAL_V01 = INT_MAX,
 };
 
@@ -104,6 +119,9 @@
 #define QMI_WLFW_ALREADY_REGISTERED_V01 ((uint64_t)0x01ULL)
 #define QMI_WLFW_FW_READY_V01 ((uint64_t)0x02ULL)
 #define QMI_WLFW_MSA_READY_V01 ((uint64_t)0x04ULL)
+#define QMI_WLFW_FW_MEM_READY_V01 ((uint64_t)0x08ULL)
+
+#define QMI_WLFW_FW_REJUVENATE_V01 ((uint64_t)0x01ULL)
 
 struct wlfw_ce_tgt_pipe_cfg_s_v01 {
 	uint32_t pipe_num;
@@ -124,6 +142,10 @@
 	uint16_t offset;
 };
 
+struct wlfw_shadow_reg_v2_cfg_s_v01 {
+	uint32_t addr;
+};
+
 struct wlfw_memory_region_info_s_v01 {
 	uint64_t region_addr;
 	uint32_t size;
@@ -161,8 +183,16 @@
 	uint8_t pin_connect_result_enable;
 	uint8_t client_id_valid;
 	uint32_t client_id;
+	uint8_t request_mem_enable_valid;
+	uint8_t request_mem_enable;
+	uint8_t fw_mem_ready_enable_valid;
+	uint8_t fw_mem_ready_enable;
+	uint8_t cold_boot_cal_done_enable_valid;
+	uint8_t cold_boot_cal_done_enable;
+	uint8_t rejuvenate_enable_valid;
+	uint32_t rejuvenate_enable;
 };
-#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 27
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46
 extern struct elem_info wlfw_ind_register_req_msg_v01_ei[];
 
 struct wlfw_ind_register_resp_msg_v01 {
@@ -223,8 +253,12 @@
 	uint32_t shadow_reg_len;
 	struct wlfw_shadow_reg_cfg_s_v01
 	shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
+	uint8_t shadow_reg_v2_valid;
+	uint32_t shadow_reg_v2_len;
+	struct wlfw_shadow_reg_v2_cfg_s_v01
+	shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01];
 };
-#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 655
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
 extern struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
 
 struct wlfw_wlan_cfg_resp_msg_v01 {
@@ -449,4 +483,90 @@
 #define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
 extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[];
 
+struct wlfw_host_cap_req_msg_v01 {
+	uint8_t daemon_support_valid;
+	uint8_t daemon_support;
+};
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_host_cap_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+	uint32_t size;
+};
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+	uint64_t addr;
+	uint32_t size;
+};
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_fw_mem_ready_ind_msg_v01 {
+	char placeholder;
+};
+#define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_cold_boot_cal_done_ind_msg_v01 {
+	char placeholder;
+};
+#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+	uint8_t cause_for_rejuvenation_valid;
+	uint8_t cause_for_rejuvenation;
+	uint8_t requesting_sub_system_valid;
+	uint8_t requesting_sub_system;
+	uint8_t line_number_valid;
+	uint16_t line_number;
+	uint8_t function_name_valid;
+	char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+};
+#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
+extern struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_req_msg_v01 {
+	char placeholder;
+};
+#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_req_msg_v01 {
+	uint8_t mask_valid;
+	uint64_t mask;
+};
+#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t prev_mask_valid;
+	uint64_t prev_mask;
+	uint8_t curr_mask_valid;
+	uint64_t curr_mask;
+};
+#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
+extern struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+
 #endif
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index ef8a351..a4c2f0c 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -9,7 +9,7 @@
 
 if SOUNDWIRE
 config SOUNDWIRE_WCD_CTRL
-	depends on WCD9335_CODEC
+	depends on WCD9335_CODEC || WCD934X_CODEC
 	tristate "QTI WCD CODEC Soundwire controller"
 	default n
 	help
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 68b3b24..2fda339 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -443,6 +443,17 @@
 	  to this driver. This driver reports the temperature by reading ADC
 	  channel and converts it to temperature based on lookup table.
 
+config THERMAL_QPNP_ADC_TM
+	tristate "Qualcomm Technologies Inc. Thermal Monitor ADC Driver"
+	depends on THERMAL
+	depends on SPMI
+	help
+	  This enables the thermal Sysfs driver for the ADC thermal monitoring
+	  device. It shows up in Sysfs as a thermal zone with multiple trip points.
+	  Disabling the thermal zone device via the mode file results in disabling
+	  the sensor. Also able to set threshold temperature for both hot and cold
+	  and update when a threshold is reached.
+
 menu "Qualcomm thermal drivers"
 depends on (ARCH_QCOM && OF) || COMPILE_TEST
 source "drivers/thermal/qcom/Kconfig"
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 7d3b5312..d9489a7 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -56,3 +56,4 @@
 obj-$(CONFIG_HISI_THERMAL)     += hisi_thermal.o
 obj-$(CONFIG_MTK_THERMAL)	+= mtk_thermal.o
 obj-$(CONFIG_GENERIC_ADC_THERMAL)	+= thermal-generic-adc.o
+obj-$(CONFIG_THERMAL_QPNP_ADC_TM)	+= qpnp-adc-tm.o
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
new file mode 100644
index 0000000..8d706cd
--- /dev/null
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -0,0 +1,3365 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+
+/* QPNP VADC TM register definition */
+#define QPNP_REVISION3					0x2
+#define QPNP_PERPH_SUBTYPE				0x5
+#define QPNP_PERPH_TYPE2				0x2
+#define QPNP_REVISION_EIGHT_CHANNEL_SUPPORT		2
+#define QPNP_PERPH_SUBTYPE_TWO_CHANNEL_SUPPORT		0x22
+#define QPNP_STATUS1					0x8
+#define QPNP_STATUS1_OP_MODE				4
+#define QPNP_STATUS1_MEAS_INTERVAL_EN_STS		BIT(2)
+#define QPNP_STATUS1_REQ_STS				BIT(1)
+#define QPNP_STATUS1_EOC				BIT(0)
+#define QPNP_STATUS2					0x9
+#define QPNP_STATUS2_CONV_SEQ_STATE			6
+#define QPNP_STATUS2_FIFO_NOT_EMPTY_FLAG		BIT(1)
+#define QPNP_STATUS2_CONV_SEQ_TIMEOUT_STS		BIT(0)
+#define QPNP_CONV_TIMEOUT_ERR				2
+
+#define QPNP_MODE_CTL					0x40
+#define QPNP_OP_MODE_SHIFT				3
+#define QPNP_VREF_XO_THM_FORCE				BIT(2)
+#define QPNP_AMUX_TRIM_EN				BIT(1)
+#define QPNP_ADC_TRIM_EN				BIT(0)
+#define QPNP_EN_CTL1					0x46
+#define QPNP_ADC_TM_EN					BIT(7)
+#define QPNP_BTM_CONV_REQ				0x47
+#define QPNP_ADC_CONV_REQ_EN				BIT(7)
+
+#define QPNP_ADC_CH_SEL_CTL				0x48
+#define QPNP_ADC_DIG_PARAM				0x50
+#define QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT		3
+#define QPNP_HW_SETTLE_DELAY				0x51
+#define QPNP_CONV_REQ					0x52
+#define QPNP_CONV_REQ_SET				BIT(7)
+#define QPNP_CONV_SEQ_CTL				0x54
+#define QPNP_CONV_SEQ_HOLDOFF_SHIFT			4
+#define QPNP_CONV_SEQ_TRIG_CTL				0x55
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL			0x57
+#define QPNP_ADC_TM_MEAS_INTERVAL_TIME_SHIFT		0x3
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2			0x58
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT		0x4
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK		0xf0
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK		0xf
+
+#define QPNP_ADC_MEAS_INTERVAL_OP_CTL			0x59
+#define QPNP_ADC_MEAS_INTERVAL_OP			BIT(7)
+
+#define QPNP_FAST_AVG_CTL				0x5a
+#define QPNP_FAST_AVG_EN				0x5b
+#define QPNP_FAST_AVG_ENABLED				BIT(7)
+
+#define QPNP_M0_LOW_THR_LSB				0x5c
+#define QPNP_M0_LOW_THR_MSB				0x5d
+#define QPNP_M0_HIGH_THR_LSB				0x5e
+#define QPNP_M0_HIGH_THR_MSB				0x5f
+#define QPNP_M1_ADC_CH_SEL_CTL				0x68
+#define QPNP_M1_LOW_THR_LSB				0x69
+#define QPNP_M1_LOW_THR_MSB				0x6a
+#define QPNP_M1_HIGH_THR_LSB				0x6b
+#define QPNP_M1_HIGH_THR_MSB				0x6c
+#define QPNP_M2_ADC_CH_SEL_CTL				0x70
+#define QPNP_M2_LOW_THR_LSB				0x71
+#define QPNP_M2_LOW_THR_MSB				0x72
+#define QPNP_M2_HIGH_THR_LSB				0x73
+#define QPNP_M2_HIGH_THR_MSB				0x74
+#define QPNP_M3_ADC_CH_SEL_CTL				0x78
+#define QPNP_M3_LOW_THR_LSB				0x79
+#define QPNP_M3_LOW_THR_MSB				0x7a
+#define QPNP_M3_HIGH_THR_LSB				0x7b
+#define QPNP_M3_HIGH_THR_MSB				0x7c
+#define QPNP_M4_ADC_CH_SEL_CTL				0x80
+#define QPNP_M4_LOW_THR_LSB				0x81
+#define QPNP_M4_LOW_THR_MSB				0x82
+#define QPNP_M4_HIGH_THR_LSB				0x83
+#define QPNP_M4_HIGH_THR_MSB				0x84
+#define QPNP_M5_ADC_CH_SEL_CTL				0x88
+#define QPNP_M5_LOW_THR_LSB				0x89
+#define QPNP_M5_LOW_THR_MSB				0x8a
+#define QPNP_M5_HIGH_THR_LSB				0x8b
+#define QPNP_M5_HIGH_THR_MSB				0x8c
+#define QPNP_M6_ADC_CH_SEL_CTL				0x90
+#define QPNP_M6_LOW_THR_LSB				0x91
+#define QPNP_M6_LOW_THR_MSB				0x92
+#define QPNP_M6_HIGH_THR_LSB				0x93
+#define QPNP_M6_HIGH_THR_MSB				0x94
+#define QPNP_M7_ADC_CH_SEL_CTL				0x98
+#define QPNP_M7_LOW_THR_LSB				0x99
+#define QPNP_M7_LOW_THR_MSB				0x9a
+#define QPNP_M7_HIGH_THR_LSB				0x9b
+#define QPNP_M7_HIGH_THR_MSB				0x9c
+
+#define QPNP_ADC_TM_MULTI_MEAS_EN			0x41
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M0			BIT(0)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M1			BIT(1)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M2			BIT(2)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M3			BIT(3)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M4			BIT(4)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M5			BIT(5)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M6			BIT(6)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M7			BIT(7)
+#define QPNP_ADC_TM_LOW_THR_INT_EN			0x42
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M0			BIT(0)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M1			BIT(1)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M2			BIT(2)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M3			BIT(3)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M4			BIT(4)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M5			BIT(5)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M6			BIT(6)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M7			BIT(7)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN			0x43
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M0			BIT(0)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M1			BIT(1)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M2			BIT(2)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M3			BIT(3)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M4			BIT(4)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M5			BIT(5)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M6			BIT(6)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M7			BIT(7)
+
+#define QPNP_ADC_TM_M0_MEAS_INTERVAL_CTL			0x59
+#define QPNP_ADC_TM_M1_MEAS_INTERVAL_CTL			0x6d
+#define QPNP_ADC_TM_M2_MEAS_INTERVAL_CTL			0x75
+#define QPNP_ADC_TM_M3_MEAS_INTERVAL_CTL			0x7d
+#define QPNP_ADC_TM_M4_MEAS_INTERVAL_CTL			0x85
+#define QPNP_ADC_TM_M5_MEAS_INTERVAL_CTL			0x8d
+#define QPNP_ADC_TM_M6_MEAS_INTERVAL_CTL			0x95
+#define QPNP_ADC_TM_M7_MEAS_INTERVAL_CTL			0x9d
+#define QPNP_ADC_TM_STATUS1				0x8
+#define QPNP_ADC_TM_STATUS_LOW				0xa
+#define QPNP_ADC_TM_STATUS_HIGH				0xb
+
+#define QPNP_ADC_TM_M0_LOW_THR				0x5d5c
+#define QPNP_ADC_TM_M0_HIGH_THR				0x5f5e
+#define QPNP_ADC_TM_MEAS_INTERVAL			0x0
+
+#define QPNP_ADC_TM_THR_LSB_MASK(val)			(val & 0xff)
+#define QPNP_ADC_TM_THR_MSB_MASK(val)			((val & 0xff00) >> 8)
+
+#define QPNP_MIN_TIME			2000
+#define QPNP_MAX_TIME			2100
+#define QPNP_RETRY			1000
+
+/* QPNP ADC TM HC start */
+#define QPNP_BTM_HC_STATUS1		0x08
+#define QPNP_BTM_HC_STATUS_LOW		0x0a
+#define QPNP_BTM_HC_STATUS_HIGH		0x0b
+
+#define QPNP_BTM_HC_ADC_DIG_PARAM	0x42
+#define QPNP_BTM_HC_FAST_AVG_CTL	0x43
+#define QPNP_BTM_EN_CTL1		0x46
+#define QPNP_BTM_CONV_REQ		0x47
+
+#define QPNP_BTM_MEAS_INTERVAL_CTL	0x50
+#define QPNP_BTM_MEAS_INTERVAL_CTL2	0x51
+
+#define QPNP_BTM_Mn_ADC_CH_SEL_CTL(n)		((n * 8) + 0x60)
+#define QPNP_BTM_Mn_LOW_THR0(n)			((n * 8) + 0x61)
+#define QPNP_BTM_Mn_LOW_THR1(n)			((n * 8) + 0x62)
+#define QPNP_BTM_Mn_HIGH_THR0(n)		((n * 8) + 0x63)
+#define QPNP_BTM_Mn_HIGH_THR1(n)		((n * 8) + 0x64)
+#define QPNP_BTM_Mn_MEAS_INTERVAL_CTL(n)	((n * 8) + 0x65)
+#define QPNP_BTM_Mn_CTL(n)			((n * 8) + 0x66)
+#define QPNP_BTM_CTL_HW_SETTLE_DELAY_MASK	0xf
+#define QPNP_BTM_CTL_CAL_SEL			0x30
+#define QPNP_BTM_CTL_CAL_SEL_MASK_SHIFT		4
+#define QPNP_BTM_CTL_CAL_VAL			0x40
+
+#define QPNP_BTM_Mn_EN(n)			((n * 8) + 0x67)
+#define QPNP_BTM_Mn_MEAS_EN			BIT(7)
+#define QPNP_BTM_Mn_HIGH_THR_INT_EN		BIT(1)
+#define QPNP_BTM_Mn_LOW_THR_INT_EN		BIT(0)
+
+#define QPNP_BTM_Mn_DATA0(n)			((n * 2) + 0xa0)
+#define QPNP_BTM_Mn_DATA1(n)			((n * 2) + 0xa1)
+
+/* QPNP ADC TM HC end */
+
+struct qpnp_adc_thr_info {
+	u8		status_low;
+	u8		status_high;
+	u8		qpnp_adc_tm_meas_en;
+	u8		adc_tm_low_enable;
+	u8		adc_tm_high_enable;
+	u8		adc_tm_low_thr_set;
+	u8		adc_tm_high_thr_set;
+};
+
+struct qpnp_adc_thr_client_info {
+	struct list_head		list;
+	struct qpnp_adc_tm_btm_param	*btm_param;
+	int32_t				low_thr_requested;
+	int32_t				high_thr_requested;
+	enum qpnp_state_request		state_requested;
+	enum qpnp_state_request		state_req_copy;
+	bool				low_thr_set;
+	bool				high_thr_set;
+	bool				notify_low_thr;
+	bool				notify_high_thr;
+};
+
+struct qpnp_adc_tm_sensor {
+	struct thermal_zone_device	*tz_dev;
+	struct qpnp_adc_tm_chip		*chip;
+	enum thermal_device_mode	mode;
+	uint32_t			sensor_num;
+	enum qpnp_adc_meas_timer_select	timer_select;
+	uint32_t			meas_interval;
+	uint32_t			low_thr;
+	uint32_t			high_thr;
+	uint32_t			btm_channel_num;
+	uint32_t			vadc_channel_num;
+	struct workqueue_struct		*req_wq;
+	struct work_struct		work;
+	bool				thermal_node;
+	uint32_t			scale_type;
+	struct list_head		thr_list;
+	bool				high_thr_triggered;
+	bool				low_thr_triggered;
+};
+
+struct qpnp_adc_tm_chip {
+	struct device			*dev;
+	struct qpnp_adc_drv		*adc;
+	struct list_head		list;
+	bool				adc_tm_initialized;
+	bool				adc_tm_recalib_check;
+	int				max_channels_available;
+	atomic_t			wq_cnt;
+	struct qpnp_vadc_chip		*vadc_dev;
+	struct workqueue_struct		*high_thr_wq;
+	struct workqueue_struct		*low_thr_wq;
+	struct workqueue_struct		*thr_wq;
+	struct work_struct		trigger_high_thr_work;
+	struct work_struct		trigger_low_thr_work;
+	struct work_struct		trigger_thr_work;
+	bool				adc_vote_enable;
+	struct qpnp_adc_thr_info	th_info;
+	bool				adc_tm_hc;
+	struct qpnp_adc_tm_sensor	sensor[0];
+};
+
+LIST_HEAD(qpnp_adc_tm_device_list);
+
+struct qpnp_adc_tm_trip_reg_type {
+	enum qpnp_adc_tm_channel_select	btm_amux_chan;
+	uint16_t			low_thr_lsb_addr;
+	uint16_t			low_thr_msb_addr;
+	uint16_t			high_thr_lsb_addr;
+	uint16_t			high_thr_msb_addr;
+	u8				multi_meas_en;
+	u8				low_thr_int_chan_en;
+	u8				high_thr_int_chan_en;
+	u8				meas_interval_ctl;
+};
+
+static struct qpnp_adc_tm_trip_reg_type adc_tm_data[] = {
+	[QPNP_ADC_TM_CHAN0] = {QPNP_ADC_TM_M0_ADC_CH_SEL_CTL,
+		QPNP_M0_LOW_THR_LSB,
+		QPNP_M0_LOW_THR_MSB, QPNP_M0_HIGH_THR_LSB,
+		QPNP_M0_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M0,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M0, QPNP_ADC_TM_HIGH_THR_INT_EN_M0,
+		QPNP_ADC_TM_M0_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN1] = {QPNP_ADC_TM_M1_ADC_CH_SEL_CTL,
+		QPNP_M1_LOW_THR_LSB,
+		QPNP_M1_LOW_THR_MSB, QPNP_M1_HIGH_THR_LSB,
+		QPNP_M1_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M1,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M1, QPNP_ADC_TM_HIGH_THR_INT_EN_M1,
+		QPNP_ADC_TM_M1_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN2] = {QPNP_ADC_TM_M2_ADC_CH_SEL_CTL,
+		QPNP_M2_LOW_THR_LSB,
+		QPNP_M2_LOW_THR_MSB, QPNP_M2_HIGH_THR_LSB,
+		QPNP_M2_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M2,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M2, QPNP_ADC_TM_HIGH_THR_INT_EN_M2,
+		QPNP_ADC_TM_M2_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN3] = {QPNP_ADC_TM_M3_ADC_CH_SEL_CTL,
+		QPNP_M3_LOW_THR_LSB,
+		QPNP_M3_LOW_THR_MSB, QPNP_M3_HIGH_THR_LSB,
+		QPNP_M3_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M3,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M3, QPNP_ADC_TM_HIGH_THR_INT_EN_M3,
+		QPNP_ADC_TM_M3_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN4] = {QPNP_ADC_TM_M4_ADC_CH_SEL_CTL,
+		QPNP_M4_LOW_THR_LSB,
+		QPNP_M4_LOW_THR_MSB, QPNP_M4_HIGH_THR_LSB,
+		QPNP_M4_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M4,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M4, QPNP_ADC_TM_HIGH_THR_INT_EN_M4,
+		QPNP_ADC_TM_M4_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN5] = {QPNP_ADC_TM_M5_ADC_CH_SEL_CTL,
+		QPNP_M5_LOW_THR_LSB,
+		QPNP_M5_LOW_THR_MSB, QPNP_M5_HIGH_THR_LSB,
+		QPNP_M5_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M5,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M5, QPNP_ADC_TM_HIGH_THR_INT_EN_M5,
+		QPNP_ADC_TM_M5_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN6] = {QPNP_ADC_TM_M6_ADC_CH_SEL_CTL,
+		QPNP_M6_LOW_THR_LSB,
+		QPNP_M6_LOW_THR_MSB, QPNP_M6_HIGH_THR_LSB,
+		QPNP_M6_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M6,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M6, QPNP_ADC_TM_HIGH_THR_INT_EN_M6,
+		QPNP_ADC_TM_M6_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN7] = {QPNP_ADC_TM_M7_ADC_CH_SEL_CTL,
+		QPNP_M7_LOW_THR_LSB,
+		QPNP_M7_LOW_THR_MSB, QPNP_M7_HIGH_THR_LSB,
+		QPNP_M7_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M7,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M7, QPNP_ADC_TM_HIGH_THR_INT_EN_M7,
+		QPNP_ADC_TM_M7_MEAS_INTERVAL_CTL},
+};
+
+static struct qpnp_adc_tm_reverse_scale_fn adc_tm_rscale_fn[] = {
+	[SCALE_R_VBATT] = {qpnp_adc_vbatt_rscaler},
+	[SCALE_RBATT_THERM] = {qpnp_adc_btm_scaler},
+	[SCALE_R_USB_ID] = {qpnp_adc_usb_scaler},
+	[SCALE_RPMIC_THERM] = {qpnp_adc_scale_millidegc_pmic_voltage_thr},
+	[SCALE_R_SMB_BATT_THERM] = {qpnp_adc_smb_btm_rscaler},
+	[SCALE_R_ABSOLUTE] = {qpnp_adc_absolute_rthr},
+	[SCALE_QRD_SKUH_RBATT_THERM] = {qpnp_adc_qrd_skuh_btm_scaler},
+	[SCALE_QRD_SKUT1_RBATT_THERM] = {qpnp_adc_qrd_skut1_btm_scaler},
+};
+
+static int32_t qpnp_adc_tm_read_reg(struct qpnp_adc_tm_chip *chip,
+					int16_t reg, u8 *data, int len)
+{
+	int rc = 0;
+
+	rc = regmap_bulk_read(chip->adc->regmap, (chip->adc->offset + reg),
+								data, len);
+	if (rc < 0)
+		pr_err("adc-tm read reg %d failed with %d\n", reg, rc);
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_write_reg(struct qpnp_adc_tm_chip *chip,
+					int16_t reg, u8 data, int len)
+{
+	int rc = 0;
+	u8 *buf;
+
+	buf = &data;
+
+	rc = regmap_bulk_write(chip->adc->regmap, (chip->adc->offset + reg),
+								buf, len);
+	if (rc < 0)
+		pr_err("adc-tm write reg %d failed with %d\n", reg, rc);
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_fast_avg_en(struct qpnp_adc_tm_chip *chip,
+				uint32_t *fast_avg_sample)
+{
+	int rc = 0, version = 0;
+	u8 fast_avg_en = 0;
+
+	version = qpnp_adc_get_revid_version(chip->dev);
+	if (!((version == QPNP_REV_ID_8916_1_0) ||
+		(version == QPNP_REV_ID_8916_1_1) ||
+		(version == QPNP_REV_ID_8916_2_0))) {
+		pr_debug("fast-avg-en not required for this version\n");
+		return rc;
+	}
+
+	fast_avg_en = QPNP_FAST_AVG_ENABLED;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_EN, fast_avg_en, 1);
+	if (rc < 0) {
+		pr_err("adc-tm fast-avg enable err\n");
+		return rc;
+	}
+
+	if (*fast_avg_sample >= 3)
+		*fast_avg_sample = 2;
+
+	return rc;
+}
+
+static int qpnp_adc_tm_check_vreg_vote(struct qpnp_adc_tm_chip *chip)
+{
+	int rc = 0;
+
+	if (!chip->adc_vote_enable) {
+		if (chip->adc->hkadc_ldo && chip->adc->hkadc_ldo_ok) {
+			rc = qpnp_adc_enable_voltage(chip->adc);
+			if (rc) {
+				pr_err("failed enabling VADC LDO\n");
+				return rc;
+			}
+			chip->adc_vote_enable = true;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_enable(struct qpnp_adc_tm_chip *chip)
+{
+	int rc = 0;
+	u8 data = 0;
+
+	rc = qpnp_adc_tm_check_vreg_vote(chip);
+	if (rc) {
+		pr_err("ADC TM VREG enable failed:%d\n", rc);
+		return rc;
+	}
+
+	data = QPNP_ADC_TM_EN;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data, 1);
+	if (rc < 0) {
+		pr_err("adc-tm enable failed\n");
+		return rc;
+	}
+
+	if (chip->adc_tm_hc) {
+		data = QPNP_ADC_CONV_REQ_EN;
+		rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
+		if (rc < 0) {
+			pr_err("adc-tm enable failed\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_disable(struct qpnp_adc_tm_chip *chip)
+{
+	u8 data = 0;
+	int rc = 0;
+
+	if (chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
+		if (rc < 0) {
+			pr_err("adc-tm enable failed\n");
+			return rc;
+		}
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data, 1);
+	if (rc < 0) {
+		pr_err("adc-tm disable failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+static int qpnp_adc_tm_is_valid(struct qpnp_adc_tm_chip *chip)
+{
+	struct qpnp_adc_tm_chip *adc_tm_chip = NULL;
+
+	list_for_each_entry(adc_tm_chip, &qpnp_adc_tm_device_list, list)
+		if (chip == adc_tm_chip)
+			return 0;
+
+	return -EINVAL;
+}
+
+static int32_t qpnp_adc_tm_rc_check_channel_en(struct qpnp_adc_tm_chip *chip)
+{
+	u8 adc_tm_ctl = 0, status_low = 0, status_high = 0;
+	int rc = 0, i = 0;
+	bool ldo_en = false;
+
+	for (i = 0; i < chip->max_channels_available; i++) {
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_BTM_Mn_CTL(i),
+							&adc_tm_ctl, 1);
+		if (rc) {
+			pr_err("adc-tm-tm read ctl failed with %d\n", rc);
+			return rc;
+		}
+
+		adc_tm_ctl &= QPNP_BTM_Mn_MEAS_EN;
+		status_low = adc_tm_ctl & QPNP_BTM_Mn_LOW_THR_INT_EN;
+		status_high = adc_tm_ctl & QPNP_BTM_Mn_HIGH_THR_INT_EN;
+
+		/* Enable only if there are pending measurement requests */
+		if ((adc_tm_ctl && status_high) ||
+					(adc_tm_ctl && status_low)) {
+			qpnp_adc_tm_enable(chip);
+			ldo_en = true;
+
+			/* Request conversion */
+			rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ,
+							QPNP_CONV_REQ_SET, 1);
+			if (rc < 0) {
+				pr_err("adc-tm request conversion failed\n");
+				return rc;
+			}
+		}
+		break;
+	}
+
+	if (!ldo_en) {
+		/* disable the vote if applicable */
+		if (chip->adc_vote_enable && chip->adc->hkadc_ldo &&
+					chip->adc->hkadc_ldo_ok) {
+			qpnp_adc_disable_voltage(chip->adc);
+			chip->adc_vote_enable = false;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_enable_if_channel_meas(
+					struct qpnp_adc_tm_chip *chip)
+{
+	u8 adc_tm_meas_en = 0, status_low = 0, status_high = 0;
+	int rc = 0;
+
+	if (chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_rc_check_channel_en(chip);
+		if (rc) {
+			pr_err("adc_tm channel check failed\n");
+			return rc;
+		}
+	} else {
+		/* Check if a measurement request is still required */
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+							&adc_tm_meas_en, 1);
+		if (rc) {
+			pr_err("read status high failed with %d\n", rc);
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+							&status_low, 1);
+		if (rc) {
+			pr_err("read status low failed with %d\n", rc);
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+							&status_high, 1);
+		if (rc) {
+			pr_err("read status high failed with %d\n", rc);
+			return rc;
+		}
+
+		/* Enable only if there are pending measurement requests */
+		if ((adc_tm_meas_en && status_high) ||
+				(adc_tm_meas_en && status_low)) {
+			qpnp_adc_tm_enable(chip);
+
+			/* Request conversion */
+			rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ,
+							QPNP_CONV_REQ_SET, 1);
+			if (rc < 0) {
+				pr_err("adc-tm request conversion failed\n");
+				return rc;
+			}
+		} else {
+			/* disable the vote if applicable */
+			if (chip->adc_vote_enable && chip->adc->hkadc_ldo &&
+					chip->adc->hkadc_ldo_ok) {
+				qpnp_adc_disable_voltage(chip->adc);
+				chip->adc_vote_enable = false;
+			}
+		}
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_mode_select(struct qpnp_adc_tm_chip *chip,
+								u8 mode_ctl)
+{
+	int rc;
+
+	mode_ctl |= (QPNP_ADC_TRIM_EN | QPNP_AMUX_TRIM_EN);
+
+	/* VADC_BTM current sets mode to recurring measurements */
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_MODE_CTL, mode_ctl, 1);
+	if (rc < 0)
+		pr_err("adc-tm write mode selection err\n");
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_req_sts_check(struct qpnp_adc_tm_chip *chip)
+{
+	u8 status1 = 0, mode_ctl = 0;
+	int rc, count = 0;
+
+	/* Re-enable the peripheral */
+	rc = qpnp_adc_tm_enable(chip);
+	if (rc) {
+		pr_err("adc-tm re-enable peripheral failed\n");
+		return rc;
+	}
+
+	/* The VADC_TM bank needs to be disabled for new conversion request */
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
+	if (rc) {
+		pr_err("adc-tm read status1 failed\n");
+		return rc;
+	}
+
+	/* Disable the bank if a conversion is occurring */
+	while (status1 & QPNP_STATUS1_REQ_STS) {
+		if (count > QPNP_RETRY) {
+			pr_err("retry error=%d with 0x%x\n", count, status1);
+			break;
+		}
+		/*
+		 * Wait time is based on the optimum sampling rate
+		 * and adding enough time buffer to account for ADC conversions
+		 * occurring on different peripheral banks
+		 */
+		usleep_range(QPNP_MIN_TIME, QPNP_MAX_TIME);
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1,
+							&status1, 1);
+		if (rc < 0) {
+			pr_err("adc-tm disable failed\n");
+			return rc;
+		}
+		count++;
+	}
+
+	if (!chip->adc_tm_hc) {
+		/* Change the mode back to recurring measurement mode */
+		mode_ctl = ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
+		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
+		if (rc < 0) {
+			pr_err("adc-tm mode change to recurring failed\n");
+			return rc;
+		}
+	}
+
+	/* Disable the peripheral */
+	rc = qpnp_adc_tm_disable(chip);
+	if (rc < 0) {
+		pr_err("adc-tm peripheral disable failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_get_btm_idx(struct qpnp_adc_tm_chip *chip,
+				uint32_t btm_chan, uint32_t *btm_chan_idx)
+{
+	int rc = 0, i;
+	bool chan_found = false;
+
+	if (!chip->adc_tm_hc) {
+		for (i = 0; i < QPNP_ADC_TM_CHAN_NONE; i++) {
+			if (adc_tm_data[i].btm_amux_chan == btm_chan) {
+				*btm_chan_idx = i;
+				chan_found = true;
+			}
+		}
+	} else {
+		for (i = 0; i < chip->max_channels_available; i++) {
+			if (chip->sensor[i].btm_channel_num == btm_chan) {
+				*btm_chan_idx = i;
+				chan_found = true;
+				break;
+			}
+		}
+	}
+
+	if (!chan_found)
+		return -EINVAL;
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_check_revision(struct qpnp_adc_tm_chip *chip,
+							uint32_t btm_chan_num)
+{
+	u8 rev, perph_subtype;
+	int rc = 0;
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_REVISION3, &rev, 1);
+	if (rc) {
+		pr_err("adc-tm revision read failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_PERPH_SUBTYPE, &perph_subtype, 1);
+	if (rc) {
+		pr_err("adc-tm perph_subtype read failed\n");
+		return rc;
+	}
+
+	if (perph_subtype == QPNP_PERPH_TYPE2) {
+		if ((rev < QPNP_REVISION_EIGHT_CHANNEL_SUPPORT) &&
+			(btm_chan_num > QPNP_ADC_TM_M4_ADC_CH_SEL_CTL)) {
+			pr_debug("Version does not support more than 5 channels\n");
+			return -EINVAL;
+		}
+	}
+
+	if (perph_subtype == QPNP_PERPH_SUBTYPE_TWO_CHANNEL_SUPPORT) {
+		if (btm_chan_num > QPNP_ADC_TM_M1_ADC_CH_SEL_CTL) {
+			pr_debug("Version does not support more than 2 channels\n");
+			return -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_timer_interval_select(
+		struct qpnp_adc_tm_chip *chip, uint32_t btm_chan,
+		struct qpnp_vadc_chan_properties *chan_prop)
+{
+	int rc, chan_idx = 0, i = 0;
+	bool chan_found = false;
+	u8 meas_interval_timer2 = 0, timer_interval_store = 0;
+	uint32_t btm_chan_idx = 0;
+
+	while (i < chip->max_channels_available) {
+		if (chip->sensor[i].btm_channel_num == btm_chan) {
+			chan_idx = i;
+			chan_found = true;
+			i++;
+		} else
+			i++;
+	}
+
+	if (!chan_found) {
+		pr_err("Channel not found\n");
+		return -EINVAL;
+	}
+
+	switch (chip->sensor[chan_idx].timer_select) {
+	case ADC_MEAS_TIMER_SELECT1:
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL,
+				chip->sensor[chan_idx].meas_interval, 1);
+		else
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_BTM_MEAS_INTERVAL_CTL,
+				chip->sensor[chan_idx].meas_interval, 1);
+		if (rc < 0) {
+			pr_err("timer1 configure failed\n");
+			return rc;
+		}
+	break;
+	case ADC_MEAS_TIMER_SELECT2:
+		/* Thermal channels uses timer2, default to 1 second */
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+				&meas_interval_timer2, 1);
+		else
+			rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_BTM_MEAS_INTERVAL_CTL2,
+				&meas_interval_timer2, 1);
+		if (rc < 0) {
+			pr_err("timer2 configure read failed\n");
+			return rc;
+		}
+		timer_interval_store = chip->sensor[chan_idx].meas_interval;
+		timer_interval_store <<= QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT;
+		timer_interval_store &= QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK;
+		meas_interval_timer2 |= timer_interval_store;
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+				meas_interval_timer2, 1);
+		else
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_BTM_MEAS_INTERVAL_CTL2,
+				meas_interval_timer2, 1);
+		if (rc < 0) {
+			pr_err("timer2 configure failed\n");
+			return rc;
+		}
+	break;
+	case ADC_MEAS_TIMER_SELECT3:
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+				&meas_interval_timer2, 1);
+		else
+			rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_BTM_MEAS_INTERVAL_CTL2,
+				&meas_interval_timer2, 1);
+		if (rc < 0) {
+			pr_err("timer3 read failed\n");
+			return rc;
+		}
+		timer_interval_store = chip->sensor[chan_idx].meas_interval;
+		timer_interval_store &= QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK;
+		meas_interval_timer2 |= timer_interval_store;
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+				meas_interval_timer2, 1);
+		else
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_BTM_MEAS_INTERVAL_CTL2,
+				meas_interval_timer2, 1);
+		if (rc < 0) {
+			pr_err("timer3 configure failed\n");
+			return rc;
+		}
+	break;
+	default:
+		pr_err("Invalid timer selection\n");
+		return -EINVAL;
+	}
+
+	/* Select the timer to use for the corresponding channel */
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+	if (!chip->adc_tm_hc)
+		rc = qpnp_adc_tm_write_reg(chip,
+			adc_tm_data[btm_chan_idx].meas_interval_ctl,
+				chip->sensor[chan_idx].timer_select, 1);
+	else
+		rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_BTM_Mn_MEAS_INTERVAL_CTL(btm_chan_idx),
+				chip->sensor[chan_idx].timer_select, 1);
+	if (rc < 0) {
+		pr_err("TM channel timer configure failed\n");
+		return rc;
+	}
+
+	pr_debug("timer select:%d, timer_value_within_select:%d, channel:%x\n",
+			chip->sensor[chan_idx].timer_select,
+			chip->sensor[chan_idx].meas_interval,
+			btm_chan);
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_add_to_list(struct qpnp_adc_tm_chip *chip,
+				uint32_t dt_index,
+				struct qpnp_adc_tm_btm_param *param,
+				struct qpnp_vadc_chan_properties *chan_prop)
+{
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	bool client_info_exists = false;
+
+	list_for_each_entry(client_info,
+			&chip->sensor[dt_index].thr_list, list) {
+		if (client_info->btm_param == param) {
+			client_info->low_thr_requested = chan_prop->low_thr;
+			client_info->high_thr_requested = chan_prop->high_thr;
+			client_info->state_requested = param->state_request;
+			client_info->state_req_copy = param->state_request;
+			client_info->notify_low_thr = false;
+			client_info->notify_high_thr = false;
+			client_info_exists = true;
+			pr_debug("client found\n");
+		}
+	}
+
+	if (!client_info_exists) {
+		client_info = devm_kzalloc(chip->dev,
+			sizeof(struct qpnp_adc_thr_client_info), GFP_KERNEL);
+		if (!client_info)
+			return -ENOMEM;
+
+		pr_debug("new client\n");
+		client_info->btm_param = param;
+		client_info->low_thr_requested = chan_prop->low_thr;
+		client_info->high_thr_requested = chan_prop->high_thr;
+		client_info->state_requested = param->state_request;
+		client_info->state_req_copy = param->state_request;
+
+		list_add_tail(&client_info->list,
+					&chip->sensor[dt_index].thr_list);
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_adc_tm_reg_update(struct qpnp_adc_tm_chip *chip,
+		uint16_t addr, u8 mask, bool state)
+{
+	u8 reg_value = 0;
+	int rc = 0;
+
+	rc = qpnp_adc_tm_read_reg(chip, addr, &reg_value, 1);
+	if (rc < 0) {
+		pr_err("read failed for addr:0x%x\n", addr);
+		return rc;
+	}
+
+	reg_value = reg_value & ~mask;
+	if (state)
+		reg_value |= mask;
+
+	pr_debug("state:%d, reg:0x%x with bits:0x%x and mask:0x%x\n",
+					state, addr, reg_value, ~mask);
+	rc = qpnp_adc_tm_write_reg(chip, addr, reg_value, 1);
+	if (rc < 0) {
+		pr_err("write failed for addr:%x\n", addr);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_read_thr_value(struct qpnp_adc_tm_chip *chip,
+			uint32_t btm_chan)
+{
+	int rc = 0;
+	u8 data_lsb = 0, data_msb = 0;
+	uint32_t btm_chan_idx = 0;
+	int32_t low_thr = 0, high_thr = 0;
+
+	if (!chip->adc_tm_hc) {
+		pr_err("Not applicable for VADC HC peripheral\n");
+		return -EINVAL;
+	}
+
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip,
+			adc_tm_data[btm_chan_idx].low_thr_lsb_addr,
+			&data_lsb, 1);
+	if (rc < 0) {
+		pr_err("low threshold lsb setting failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip,
+		adc_tm_data[btm_chan_idx].low_thr_msb_addr,
+		&data_msb, 1);
+	if (rc < 0) {
+		pr_err("low threshold msb setting failed\n");
+		return rc;
+	}
+
+	low_thr = (data_msb << 8) | data_lsb;
+
+	rc = qpnp_adc_tm_read_reg(chip,
+		adc_tm_data[btm_chan_idx].high_thr_lsb_addr,
+		&data_lsb, 1);
+	if (rc < 0) {
+		pr_err("high threshold lsb setting failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip,
+		adc_tm_data[btm_chan_idx].high_thr_msb_addr,
+		&data_msb, 1);
+	if (rc < 0) {
+		pr_err("high threshold msb setting failed\n");
+		return rc;
+	}
+
+	high_thr = (data_msb << 8) | data_lsb;
+
+	pr_debug("configured thresholds high:0x%x and low:0x%x\n",
+		high_thr, low_thr);
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_thr_update(struct qpnp_adc_tm_chip *chip,
+			uint32_t btm_chan, int32_t high_thr, int32_t low_thr)
+{
+	int rc = 0;
+	uint32_t btm_chan_idx = 0;
+
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	if (!chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_write_reg(chip,
+			adc_tm_data[btm_chan_idx].low_thr_lsb_addr,
+			QPNP_ADC_TM_THR_LSB_MASK(low_thr), 1);
+		if (rc < 0) {
+			pr_err("low threshold lsb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			adc_tm_data[btm_chan_idx].low_thr_msb_addr,
+			QPNP_ADC_TM_THR_MSB_MASK(low_thr), 1);
+		if (rc < 0) {
+			pr_err("low threshold msb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			adc_tm_data[btm_chan_idx].high_thr_lsb_addr,
+			QPNP_ADC_TM_THR_LSB_MASK(high_thr), 1);
+		if (rc < 0) {
+			pr_err("high threshold lsb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			adc_tm_data[btm_chan_idx].high_thr_msb_addr,
+			QPNP_ADC_TM_THR_MSB_MASK(high_thr), 1);
+		if (rc < 0)
+			pr_err("high threshold msb setting failed\n");
+	} else {
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_LOW_THR0(btm_chan_idx),
+			QPNP_ADC_TM_THR_LSB_MASK(low_thr), 1);
+		if (rc < 0) {
+			pr_err("low threshold lsb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_LOW_THR1(btm_chan_idx),
+			QPNP_ADC_TM_THR_MSB_MASK(low_thr), 1);
+		if (rc < 0) {
+			pr_err("low threshold msb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx),
+			QPNP_ADC_TM_THR_LSB_MASK(high_thr), 1);
+		if (rc < 0) {
+			pr_err("high threshold lsb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx),
+			QPNP_ADC_TM_THR_MSB_MASK(high_thr), 1);
+		if (rc < 0)
+			pr_err("high threshold msb setting failed\n");
+
+	}
+
+	pr_debug("client requested high:%d and low:%d\n",
+		high_thr, low_thr);
+
+	return rc;
+}
+
+/*
+ * Recompute the aggregate high/low thresholds for one BTM channel from all
+ * registered client requests (min of the high thresholds, max of the low
+ * thresholds), mark which clients own the programmed values, and write the
+ * result to the hardware via qpnp_adc_tm_thr_update().
+ *
+ * Returns 0 on success or the negative error from the threshold write.
+ */
+static int32_t qpnp_adc_tm_manage_thresholds(struct qpnp_adc_tm_chip *chip,
+		uint32_t dt_index, uint32_t btm_chan)
+{
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	struct list_head *thr_list;
+	int high_thr = 0, low_thr = 0, rc = 0;
+
+	/*
+	 * Seed high_thr/low_thr from any client's request and reset the
+	 * high_thr_set and low_thr_set flags since the thresholds will be
+	 * recomputed below.
+	 */
+	list_for_each(thr_list,
+			&chip->sensor[dt_index].thr_list) {
+		client_info = list_entry(thr_list,
+					struct qpnp_adc_thr_client_info, list);
+		high_thr = client_info->high_thr_requested;
+		low_thr = client_info->low_thr_requested;
+		client_info->high_thr_set = false;
+		client_info->low_thr_set = false;
+	}
+
+	pr_debug("init threshold is high:%d and low:%d\n", high_thr, low_thr);
+
+	/* Find the min of high_thr and max of low_thr */
+	list_for_each(thr_list,
+			&chip->sensor[dt_index].thr_list) {
+		client_info = list_entry(thr_list,
+					struct qpnp_adc_thr_client_info, list);
+		if ((client_info->state_req_copy == ADC_TM_HIGH_THR_ENABLE) ||
+			(client_info->state_req_copy ==
+						ADC_TM_HIGH_LOW_THR_ENABLE))
+			if (client_info->high_thr_requested < high_thr)
+				high_thr = client_info->high_thr_requested;
+
+		if ((client_info->state_req_copy == ADC_TM_LOW_THR_ENABLE) ||
+			(client_info->state_req_copy ==
+						ADC_TM_HIGH_LOW_THR_ENABLE))
+			if (client_info->low_thr_requested > low_thr)
+				low_thr = client_info->low_thr_requested;
+
+		pr_debug("threshold compared is high:%d and low:%d\n",
+				client_info->high_thr_requested,
+				client_info->low_thr_requested);
+		pr_debug("current threshold is high:%d and low:%d\n",
+							high_thr, low_thr);
+	}
+
+	/* Check which of the high_thr and low_thr got set */
+	list_for_each(thr_list,
+			&chip->sensor[dt_index].thr_list) {
+		client_info = list_entry(thr_list,
+					struct qpnp_adc_thr_client_info, list);
+		if ((client_info->state_req_copy == ADC_TM_HIGH_THR_ENABLE) ||
+			(client_info->state_req_copy ==
+						ADC_TM_HIGH_LOW_THR_ENABLE))
+			if (high_thr == client_info->high_thr_requested)
+				client_info->high_thr_set = true;
+
+		if ((client_info->state_req_copy == ADC_TM_LOW_THR_ENABLE) ||
+			(client_info->state_req_copy ==
+						ADC_TM_HIGH_LOW_THR_ENABLE))
+			if (low_thr == client_info->low_thr_requested)
+				client_info->low_thr_set = true;
+	}
+
+	rc = qpnp_adc_tm_thr_update(chip, btm_chan, high_thr, low_thr);
+	if (rc < 0)
+		pr_err("setting chan:%d threshold failed\n", btm_chan);
+
+	pr_debug("threshold written is high:%d and low:%d\n",
+							high_thr, low_thr);
+
+	/*
+	 * Propagate a threshold-write failure to the caller; previously a
+	 * failed qpnp_adc_tm_thr_update() was logged but silently dropped
+	 * even though callers check for rc < 0.
+	 */
+	return rc;
+}
+
+/*
+ * Enable monitoring for one BTM channel: for non-thermal-node sensors,
+ * recompute and program the aggregate client thresholds and enable the
+ * low/high threshold interrupts that are in use, then enable the channel's
+ * measurement (multi-measurement bit on legacy ADC-TM, per-channel Mn_EN
+ * on the HC peripheral).
+ */
+static int32_t qpnp_adc_tm_channel_configure(struct qpnp_adc_tm_chip *chip,
+			uint32_t btm_chan,
+			struct qpnp_vadc_chan_properties *chan_prop,
+			uint32_t amux_channel)
+{
+	int rc = 0, i = 0, chan_idx = 0;
+	bool chan_found = false, high_thr_set = false, low_thr_set = false;
+	u8 sensor_mask = 0;
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	uint32_t btm_chan_idx = 0;
+
+	/*
+	 * Map the BTM channel number back to its sensor index; stop at the
+	 * first match (each sensor owns a distinct btm_channel_num).
+	 */
+	for (i = 0; i < chip->max_channels_available; i++) {
+		if (chip->sensor[i].btm_channel_num == btm_chan) {
+			chan_idx = i;
+			chan_found = true;
+			break;
+		}
+	}
+
+	if (!chan_found) {
+		pr_err("Channel not found\n");
+		return -EINVAL;
+	}
+
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	sensor_mask = 1 << chan_idx;
+	if (!chip->sensor[chan_idx].thermal_node) {
+		/* Update low and high notification thresholds */
+		rc = qpnp_adc_tm_manage_thresholds(chip, chan_idx,
+				btm_chan);
+		if (rc < 0) {
+			pr_err("setting chan:%d threshold failed\n", btm_chan);
+			return rc;
+		}
+
+		/* Check whether any client still needs either threshold */
+		list_for_each_entry(client_info,
+				&chip->sensor[chan_idx].thr_list, list) {
+			if (client_info->high_thr_set == true)
+				high_thr_set = true;
+			if (client_info->low_thr_set == true)
+				low_thr_set = true;
+		}
+
+		if (low_thr_set) {
+			pr_debug("low sensor mask:%x with state:%d\n",
+					sensor_mask, chan_prop->state_request);
+			/* Enable low threshold's interrupt */
+			if (!chip->adc_tm_hc)
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_ADC_TM_LOW_THR_INT_EN,
+					sensor_mask, true);
+			else
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_BTM_Mn_EN(btm_chan_idx),
+					QPNP_BTM_Mn_LOW_THR_INT_EN, true);
+			if (rc < 0) {
+				pr_err("low thr enable err:%d\n", btm_chan);
+				return rc;
+			}
+		}
+
+		if (high_thr_set) {
+			/* Enable high threshold's interrupt */
+			pr_debug("high sensor mask:%x\n", sensor_mask);
+			if (!chip->adc_tm_hc)
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_ADC_TM_HIGH_THR_INT_EN,
+					sensor_mask, true);
+			else
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_BTM_Mn_EN(btm_chan_idx),
+					QPNP_BTM_Mn_HIGH_THR_INT_EN, true);
+			if (rc < 0) {
+				pr_err("high thr enable err:%d\n", btm_chan);
+				return rc;
+			}
+		}
+	}
+
+	/* Enable corresponding BTM channel measurement */
+	if (!chip->adc_tm_hc)
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, true);
+	else
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_MEAS_EN, true);
+	if (rc < 0) {
+		pr_err("multi measurement en failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * Program one BTM channel on the "HC" flavour of the ADC-TM peripheral.
+ * Sequence (ordering matters — the bank must be disabled while registers
+ * are rewritten): disable bank, set decimation and fast-average, select the
+ * AMUX input, program thresholds (via qpnp_adc_tm_channel_configure) and the
+ * measurement interval, set hw-settle/calibration, re-enable the bank and
+ * request a conversion.
+ */
+static int32_t qpnp_adc_tm_hc_configure(struct qpnp_adc_tm_chip *chip,
+			struct qpnp_adc_amux_properties *chan_prop)
+{
+	u8 decimation = 0, fast_avg_ctl = 0;
+	u8 buf[8];
+	int rc = 0;
+	uint32_t btm_chan = 0, cal_type = 0, btm_chan_idx = 0;
+
+	/* Disable bank */
+	rc = qpnp_adc_tm_disable(chip);
+	if (rc)
+		return rc;
+
+	/* Decimation setup */
+	decimation = chan_prop->decimation;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_HC_ADC_DIG_PARAM,
+						decimation, 1);
+	if (rc < 0) {
+		pr_err("adc-tm digital parameter setup err\n");
+		return rc;
+	}
+
+	/* Fast averaging setup/enable (read-modify-write, preserves other bits) */
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_BTM_HC_FAST_AVG_CTL,
+						&fast_avg_ctl, 1);
+	if (rc < 0) {
+		pr_err("adc-tm fast-avg enable read err\n");
+		return rc;
+	}
+	fast_avg_ctl |= chan_prop->fast_avg_setup;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_HC_FAST_AVG_CTL,
+						fast_avg_ctl, 1);
+	if (rc < 0) {
+		pr_err("adc-tm fast-avg enable write err\n");
+		return rc;
+	}
+
+	/* Read block registers for respective BTM channel */
+	btm_chan = chan_prop->chan_prop->tm_channel_select;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	/*
+	 * Block-read 8 registers starting at Mn_ADC_CH_SEL_CTL; buf[6] then
+	 * holds the current Mn_CTL value (rewritten below).
+	 */
+	rc = qpnp_adc_tm_read_reg(chip,
+			QPNP_BTM_Mn_ADC_CH_SEL_CTL(btm_chan_idx), buf, 8);
+	if (rc < 0) {
+		pr_err("qpnp adc configure block read failed\n");
+		return rc;
+	}
+
+	/* Update ADC channel sel */
+	rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_ADC_CH_SEL_CTL(btm_chan_idx),
+				chan_prop->amux_channel, 1);
+	if (rc < 0) {
+		pr_err("adc-tm channel amux select failed\n");
+		return rc;
+	}
+
+	/* Manage thresholds */
+	rc = qpnp_adc_tm_channel_configure(chip, btm_chan,
+			chan_prop->chan_prop, chan_prop->amux_channel);
+	if (rc < 0) {
+		pr_err("adc-tm channel threshold configure failed\n");
+		return rc;
+	}
+
+	/* Measurement interval setup */
+	rc = qpnp_adc_tm_timer_interval_select(chip, btm_chan,
+						chan_prop->chan_prop);
+	if (rc < 0) {
+		pr_err("adc-tm timer select failed\n");
+		return rc;
+	}
+
+	/* Set calibration select, hw_settle delay */
+	cal_type |= (chan_prop->calib_type << QPNP_BTM_CTL_CAL_SEL_MASK_SHIFT);
+	buf[6] &= ~QPNP_BTM_CTL_HW_SETTLE_DELAY_MASK;
+	buf[6] |= chan_prop->hw_settle_time;
+	buf[6] &= ~QPNP_BTM_CTL_CAL_SEL;
+	buf[6] |= cal_type;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_Mn_CTL(btm_chan_idx),
+								buf[6], 1);
+	if (rc < 0) {
+		pr_err("adc-tm hw-settle, calib sel failed\n");
+		return rc;
+	}
+
+	/* Enable bank */
+	rc = qpnp_adc_tm_enable(chip);
+	if (rc)
+		return rc;
+
+	/* Request conversion */
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ, QPNP_CONV_REQ_SET, 1);
+	if (rc < 0) {
+		pr_err("adc-tm request conversion failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Program one BTM channel on the legacy (non-HC) ADC-TM peripheral:
+ * force single-measurement mode, disable the bank, wait out any in-flight
+ * conversion, then set AMUX channel, decimation, hw-settle, fast-average,
+ * interval, thresholds and the recurring-measurement bit before re-enabling
+ * the bank and requesting a conversion.
+ */
+static int32_t qpnp_adc_tm_configure(struct qpnp_adc_tm_chip *chip,
+			struct qpnp_adc_amux_properties *chan_prop)
+{
+	u8 decimation = 0, op_cntrl = 0, mode_ctl = 0;
+	int rc = 0;
+	uint32_t btm_chan = 0;
+
+	/* Set measurement in single measurement mode */
+	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+	rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
+	if (rc < 0) {
+		pr_err("adc-tm single mode select failed\n");
+		return rc;
+	}
+
+	/* Disable bank */
+	rc = qpnp_adc_tm_disable(chip);
+	if (rc)
+		return rc;
+
+	/* Check if a conversion is in progress */
+	rc = qpnp_adc_tm_req_sts_check(chip);
+	if (rc < 0) {
+		pr_err("adc-tm req_sts check failed\n");
+		return rc;
+	}
+
+	/* Configure AMUX channel select for the corresponding BTM channel*/
+	btm_chan = chan_prop->chan_prop->tm_channel_select;
+	rc = qpnp_adc_tm_write_reg(chip, btm_chan, chan_prop->amux_channel, 1);
+	if (rc < 0) {
+		pr_err("adc-tm channel selection err\n");
+		return rc;
+	}
+
+	/* Digital parameter setup */
+	decimation |= chan_prop->decimation <<
+				QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_DIG_PARAM, decimation, 1);
+	if (rc < 0) {
+		pr_err("adc-tm digital parameter setup err\n");
+		return rc;
+	}
+
+	/* Hardware setting time */
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_HW_SETTLE_DELAY,
+					chan_prop->hw_settle_time, 1);
+	if (rc < 0) {
+		pr_err("adc-tm hw settling time setup err\n");
+		return rc;
+	}
+
+	/* Fast averaging setup/enable */
+	rc = qpnp_adc_tm_fast_avg_en(chip, &chan_prop->fast_avg_setup);
+	if (rc < 0) {
+		pr_err("adc-tm fast-avg enable err\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_CTL,
+				chan_prop->fast_avg_setup, 1);
+	if (rc < 0) {
+		pr_err("adc-tm fast-avg setup err\n");
+		return rc;
+	}
+
+	/* Measurement interval setup */
+	rc = qpnp_adc_tm_timer_interval_select(chip, btm_chan,
+						chan_prop->chan_prop);
+	if (rc < 0) {
+		pr_err("adc-tm timer select failed\n");
+		return rc;
+	}
+
+	/* Channel configuration setup */
+	rc = qpnp_adc_tm_channel_configure(chip, btm_chan,
+			chan_prop->chan_prop, chan_prop->amux_channel);
+	if (rc < 0) {
+		pr_err("adc-tm channel configure failed\n");
+		return rc;
+	}
+
+	/* Recurring interval measurement enable (read-modify-write) */
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
+							&op_cntrl, 1);
+	if (rc < 0) {
+		pr_err("adc-tm meas interval op ctl read failed\n");
+		return rc;
+	}
+	op_cntrl |= QPNP_ADC_MEAS_INTERVAL_OP;
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
+			op_cntrl, true);
+	if (rc < 0) {
+		pr_err("adc-tm meas interval op configure failed\n");
+		return rc;
+	}
+
+	/* Enable bank */
+	rc = qpnp_adc_tm_enable(chip);
+	if (rc)
+		return rc;
+
+	/* Request conversion */
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ, QPNP_CONV_REQ_SET, 1);
+	if (rc < 0) {
+		pr_err("adc-tm request conversion failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+/* thermal_zone_device callback: report the sensor's cached enable mode. */
+static int qpnp_adc_tm_get_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode *mode)
+{
+	struct qpnp_adc_tm_sensor *sensor = thermal->devdata;
+
+	/* Reject a bad devdata pointer or an unsupported PMIC revision */
+	if (IS_ERR(sensor))
+		return -EINVAL;
+	if (qpnp_adc_tm_check_revision(sensor->chip, sensor->btm_channel_num))
+		return -EINVAL;
+
+	*mode = sensor->mode;
+	return 0;
+}
+
+/*
+ * thermal_zone_device callback: enable or disable monitoring for one sensor.
+ * ENABLED reprograms the channel (legacy or HC path); DISABLED tears down
+ * the channel's measurement and re-enables the bank only if other channels
+ * remain active.  Runs under the shared adc_lock.
+ */
+static int qpnp_adc_tm_set_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode mode)
+{
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
+	int rc = 0, channel;
+	u8 sensor_mask = 0, mode_ctl = 0;
+	uint32_t btm_chan_idx = 0, btm_chan = 0;
+
+	if (qpnp_adc_tm_is_valid(chip)) {
+		pr_err("invalid device\n");
+		return -ENODEV;
+	}
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
+		return -EINVAL;
+
+	mutex_lock(&chip->adc->adc_lock);
+
+	btm_chan = adc_tm->btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		goto fail;
+	}
+
+	if (mode == THERMAL_DEVICE_ENABLED) {
+		chip->adc->amux_prop->amux_channel =
+					adc_tm->vadc_channel_num;
+		channel = adc_tm->sensor_num;
+		chip->adc->amux_prop->decimation =
+			chip->adc->adc_channels[channel].adc_decimation;
+		chip->adc->amux_prop->hw_settle_time =
+			chip->adc->adc_channels[channel].hw_settle_time;
+		chip->adc->amux_prop->fast_avg_setup =
+			chip->adc->adc_channels[channel].fast_avg_setup;
+		chip->adc->amux_prop->mode_sel =
+			ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
+		chip->adc->amux_prop->chan_prop->low_thr = adc_tm->low_thr;
+		chip->adc->amux_prop->chan_prop->high_thr = adc_tm->high_thr;
+		chip->adc->amux_prop->chan_prop->tm_channel_select =
+			adc_tm->btm_channel_num;
+		chip->adc->amux_prop->calib_type =
+			chip->adc->adc_channels[channel].calib_type;
+
+		if (!chip->adc_tm_hc) {
+			rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
+			if (rc) {
+				pr_err("adc-tm configure failed with %d\n", rc);
+				goto fail;
+			}
+		} else {
+			rc = qpnp_adc_tm_hc_configure(chip,
+							chip->adc->amux_prop);
+			if (rc) {
+				pr_err("hc configure failed with %d\n", rc);
+				goto fail;
+			}
+		}
+	} else if (mode == THERMAL_DEVICE_DISABLED) {
+		sensor_mask = 1 << adc_tm->sensor_num;
+
+		if (!chip->adc_tm_hc) {
+			mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+			rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
+			if (rc < 0) {
+				pr_err("adc-tm single mode select failed\n");
+				goto fail;
+			}
+		}
+
+		/* Disable bank */
+		rc = qpnp_adc_tm_disable(chip);
+		if (rc < 0) {
+			pr_err("adc-tm disable failed\n");
+			goto fail;
+		}
+
+		if (!chip->adc_tm_hc) {
+			/* Check if a conversion is in progress */
+			rc = qpnp_adc_tm_req_sts_check(chip);
+			if (rc < 0) {
+				pr_err("adc-tm req_sts check failed\n");
+				goto fail;
+			}
+
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, false);
+			if (rc < 0) {
+				pr_err("multi measurement update failed\n");
+				goto fail;
+			}
+		} else {
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_BTM_Mn_EN(btm_chan_idx),
+				QPNP_BTM_Mn_MEAS_EN, false);
+			if (rc < 0) {
+				pr_err("multi measurement disable failed\n");
+				goto fail;
+			}
+		}
+
+		rc = qpnp_adc_tm_enable_if_channel_meas(chip);
+		if (rc < 0) {
+			pr_err("re-enabling measurement failed\n");
+			goto fail;
+		}
+	}
+
+	adc_tm->mode = mode;
+
+fail:
+	mutex_unlock(&chip->adc->adc_lock);
+
+	/*
+	 * Return the actual status; previously errors reached via "goto fail"
+	 * were swallowed by an unconditional "return 0".
+	 */
+	return rc;
+}
+
+/*
+ * thermal_zone_device callback: map the driver's trip index to the generic
+ * thermal trip type (warm -> configurable-high, cool -> configurable-low).
+ */
+static int qpnp_adc_tm_get_trip_type(struct thermal_zone_device *thermal,
+				   int trip, enum thermal_trip_type *type)
+{
+	struct qpnp_adc_tm_sensor *sensor = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = sensor->chip;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+	if (qpnp_adc_tm_check_revision(chip, sensor->btm_channel_num))
+		return -EINVAL;
+
+	if (trip == ADC_TM_TRIP_HIGH_WARM)
+		*type = THERMAL_TRIP_CONFIGURABLE_HI;
+	else if (trip == ADC_TM_TRIP_LOW_COOL)
+		*type = THERMAL_TRIP_CONFIGURABLE_LOW;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * thermal_zone_device callback: read back a trip point's programmed ADC
+ * threshold and convert it to a temperature.  Note the register inversion:
+ * the WARM trip lives in the LOW threshold registers and the COOL trip in
+ * the HIGH threshold registers (higher temperature -> lower thermistor
+ * voltage; see qpnp_adc_tm_activate_trip_type).
+ */
+static int qpnp_adc_tm_get_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, int *temp)
+{
+	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm_sensor->chip;
+	int64_t result = 0;
+	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
+	unsigned int reg = 0;
+	/*
+	 * rc must be a signed int: it was previously "unsigned int", which
+	 * made every "rc < 0" error check below unconditionally false.
+	 */
+	int rc = 0;
+	uint16_t reg_low_thr_lsb, reg_low_thr_msb;
+	uint16_t reg_high_thr_lsb, reg_high_thr_msb;
+	uint32_t btm_chan_idx = 0, btm_chan = 0;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm_sensor->btm_channel_num))
+		return -EINVAL;
+
+	btm_chan = adc_tm_sensor->btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	/* Legacy peripheral uses per-channel lookup table; HC uses Mn regs */
+	if (!chip->adc_tm_hc) {
+		reg_low_thr_lsb = adc_tm_data[btm_chan_idx].low_thr_lsb_addr;
+		reg_low_thr_msb = adc_tm_data[btm_chan_idx].low_thr_msb_addr;
+		reg_high_thr_lsb = adc_tm_data[btm_chan_idx].high_thr_lsb_addr;
+		reg_high_thr_msb = adc_tm_data[btm_chan_idx].high_thr_msb_addr;
+	} else {
+		reg_low_thr_lsb = QPNP_BTM_Mn_LOW_THR0(btm_chan_idx);
+		reg_low_thr_msb = QPNP_BTM_Mn_LOW_THR1(btm_chan_idx);
+		reg_high_thr_lsb = QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx);
+		reg_high_thr_msb = QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx);
+	}
+
+	switch (trip) {
+	case ADC_TM_TRIP_HIGH_WARM:
+		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_lsb,
+						&trip_warm_thr0, 1);
+		if (rc) {
+			pr_err("adc-tm low_thr_lsb err\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_msb,
+						&trip_warm_thr1, 1);
+		if (rc) {
+			pr_err("adc-tm low_thr_msb err\n");
+			return rc;
+		}
+		reg = (trip_warm_thr1 << 8) | trip_warm_thr0;
+		break;
+	case ADC_TM_TRIP_LOW_COOL:
+		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_lsb,
+						&trip_cool_thr0, 1);
+		if (rc) {
+			pr_err("adc-tm_tm high_thr_lsb err\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_msb,
+						&trip_cool_thr1, 1);
+		if (rc) {
+			pr_err("adc-tm_tm high_thr_lsb err\n");
+			return rc;
+		}
+		reg = (trip_cool_thr1 << 8) | trip_cool_thr0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Convert the raw 16-bit threshold code to a temperature */
+	rc = qpnp_adc_tm_scale_voltage_therm_pu2(chip->vadc_dev,
+					chip->adc->adc_prop, reg, &result);
+	if (rc < 0) {
+		pr_err("Failed to lookup the therm thresholds\n");
+		return rc;
+	}
+
+	*temp = result;
+
+	return 0;
+}
+
+/*
+ * thermal_zone_device callback: convert the requested trip temperature to
+ * ADC threshold codes and program them.  The warm/cool <-> low/high register
+ * mapping is intentionally inverted for thermistor channels: the WARM trip
+ * is written to the LOW threshold registers and the COOL trip to the HIGH
+ * threshold registers, since higher temperature corresponds to lower
+ * voltage (see the comments in qpnp_adc_tm_activate_trip_type).
+ */
+static int qpnp_adc_tm_set_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, int temp)
+{
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
+	struct qpnp_adc_tm_config tm_config;
+	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
+	uint16_t reg_low_thr_lsb, reg_low_thr_msb;
+	uint16_t reg_high_thr_lsb, reg_high_thr_msb;
+	int rc = 0;
+	uint32_t btm_chan = 0, btm_chan_idx = 0;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
+		return -EINVAL;
+
+	tm_config.channel = adc_tm->vadc_channel_num;
+	tm_config.high_thr_temp = tm_config.low_thr_temp = 0;
+	switch (trip) {
+	case ADC_TM_TRIP_HIGH_WARM:
+		tm_config.high_thr_temp = temp;
+		break;
+	case ADC_TM_TRIP_LOW_COOL:
+		tm_config.low_thr_temp = temp;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	pr_debug("requested a high - %d and low - %d with trip - %d\n",
+			tm_config.high_thr_temp, tm_config.low_thr_temp, trip);
+	/* Translate temperatures to raw voltage threshold codes */
+	rc = qpnp_adc_tm_scale_therm_voltage_pu2(chip->vadc_dev,
+				chip->adc->adc_prop, &tm_config);
+	if (rc < 0) {
+		pr_err("Failed to lookup the adc-tm thresholds\n");
+		return rc;
+	}
+
+	/* Split each threshold code into LSB/MSB bytes */
+	trip_warm_thr0 = ((tm_config.low_thr_voltage << 24) >> 24);
+	trip_warm_thr1 = ((tm_config.low_thr_voltage << 16) >> 24);
+	trip_cool_thr0 = ((tm_config.high_thr_voltage << 24) >> 24);
+	trip_cool_thr1 = ((tm_config.high_thr_voltage << 16) >> 24);
+
+	pr_debug("low_thr:0x%llx, high_thr:0x%llx\n", tm_config.low_thr_voltage,
+				tm_config.high_thr_voltage);
+
+	btm_chan = adc_tm->btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	/* Legacy peripheral uses per-channel lookup table; HC uses Mn regs */
+	if (!chip->adc_tm_hc) {
+		reg_low_thr_lsb = adc_tm_data[btm_chan_idx].low_thr_lsb_addr;
+		reg_low_thr_msb = adc_tm_data[btm_chan_idx].low_thr_msb_addr;
+		reg_high_thr_lsb = adc_tm_data[btm_chan_idx].high_thr_lsb_addr;
+		reg_high_thr_msb = adc_tm_data[btm_chan_idx].high_thr_msb_addr;
+	} else {
+		reg_low_thr_lsb = QPNP_BTM_Mn_LOW_THR0(btm_chan_idx);
+		reg_low_thr_msb = QPNP_BTM_Mn_LOW_THR1(btm_chan_idx);
+		reg_high_thr_lsb = QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx);
+		reg_high_thr_msb = QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx);
+	}
+
+	switch (trip) {
+	case ADC_TM_TRIP_HIGH_WARM:
+		/* WARM trip -> LOW threshold registers (inversion, see above) */
+		rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_lsb,
+						trip_cool_thr0, 1);
+		if (rc) {
+			pr_err("adc-tm_tm read threshold err\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_msb,
+						trip_cool_thr1, 1);
+		if (rc) {
+			pr_err("adc-tm_tm read threshold err\n");
+			return rc;
+		}
+	adc_tm->low_thr = tm_config.high_thr_voltage;
+	break;
+	case ADC_TM_TRIP_LOW_COOL:
+		/* COOL trip -> HIGH threshold registers (inversion, see above) */
+		rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_lsb,
+						trip_warm_thr0, 1);
+		if (rc) {
+			pr_err("adc-tm_tm read threshold err\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_msb,
+						trip_warm_thr1, 1);
+		if (rc) {
+			pr_err("adc-tm_tm read threshold err\n");
+			return rc;
+		}
+	adc_tm->high_thr = tm_config.low_thr_voltage;
+	break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Deliver pending threshold notifications for a battery-therm sensor.
+ * For batt therm the voltage sense is inverted: a low-voltage trip is
+ * reported to the client as WARM and a high-voltage trip as COOL.
+ */
+static void notify_battery_therm(struct qpnp_adc_tm_sensor *adc_tm)
+{
+	struct qpnp_adc_thr_client_info *client;
+
+	list_for_each_entry(client, &adc_tm->thr_list, list) {
+		if (client->notify_low_thr) {
+			/* HIGH_STATE = WARM_TEMP for battery client */
+			client->btm_param->threshold_notification(
+					ADC_TM_WARM_STATE,
+					client->btm_param->btm_ctx);
+			client->notify_low_thr = false;
+		}
+
+		if (client->notify_high_thr) {
+			/* LOW_STATE = COOL_TEMP for battery client */
+			client->btm_param->threshold_notification(
+					ADC_TM_COOL_STATE,
+					client->btm_param->btm_ctx);
+			client->notify_high_thr = false;
+		}
+	}
+}
+
+/*
+ * Deliver pending threshold notifications for non battery-therm clients.
+ * Skips clients that did not register a threshold_notification callback.
+ */
+static void notify_clients(struct qpnp_adc_tm_sensor *adc_tm)
+{
+	struct qpnp_adc_thr_client_info *client;
+
+	list_for_each_entry(client, &adc_tm->thr_list, list) {
+		if (client->notify_low_thr &&
+				client->btm_param->threshold_notification) {
+			pr_debug("notify kernel with low state\n");
+			client->btm_param->threshold_notification(
+					ADC_TM_LOW_STATE,
+					client->btm_param->btm_ctx);
+			client->notify_low_thr = false;
+		}
+
+		if (client->notify_high_thr &&
+				client->btm_param->threshold_notification) {
+			pr_debug("notify kernel with high state\n");
+			client->btm_param->threshold_notification(
+					ADC_TM_HIGH_STATE,
+					client->btm_param->btm_ctx);
+			client->notify_high_thr = false;
+		}
+	}
+}
+
+/*
+ * Deferred notification work: thermal-node sensors get a sysfs poke for
+ * userspace, other sensors get in-kernel client callbacks.  Balances the
+ * wq_cnt taken when the work was queued.
+ */
+static void notify_adc_tm_fn(struct work_struct *work)
+{
+	struct qpnp_adc_tm_sensor *sensor = container_of(work,
+		struct qpnp_adc_tm_sensor, work);
+	struct qpnp_adc_tm_chip *chip = sensor->chip;
+
+	if (sensor->thermal_node) {
+		sysfs_notify(&sensor->tz_dev->device.kobj, NULL, "type");
+		pr_debug("notifying uspace client\n");
+	} else if (sensor->scale_type == SCALE_RBATT_THERM) {
+		notify_battery_therm(sensor);
+	} else {
+		notify_clients(sensor);
+	}
+
+	atomic_dec(&chip->wq_cnt);
+}
+
+/*
+ * Enable or disable the interrupt backing one thermal trip.  The trip/
+ * threshold mapping is inverted for thermistors: the WARM trip arms the
+ * LOW voltage threshold and the COOL trip arms the HIGH voltage threshold.
+ * Legacy ADC-TM uses a shared per-sensor mask register; HC uses per-channel
+ * Mn_EN bits.
+ */
+static int qpnp_adc_tm_activate_trip_type(struct thermal_zone_device *thermal,
+			int trip, enum thermal_trip_activation_mode mode)
+{
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
+	int rc = 0, sensor_mask = 0;
+	bool state = false;
+	uint32_t btm_chan_idx = 0, btm_chan = 0;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
+		return -EINVAL;
+
+	if (mode == THERMAL_TRIP_ACTIVATION_ENABLED)
+		state = true;
+
+	sensor_mask = 1 << adc_tm->sensor_num;
+
+	pr_debug("Sensor number:%x with state:%d\n",
+					adc_tm->sensor_num, state);
+
+	btm_chan = adc_tm->btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	/*
+	 * Note: the former local "thr_int_en" (loaded from adc_tm_data) was
+	 * never used and has been removed.
+	 */
+	switch (trip) {
+	case ADC_TM_TRIP_HIGH_WARM:
+		/* low_thr (lower voltage) for higher temp */
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_ADC_TM_LOW_THR_INT_EN,
+				sensor_mask, state);
+		else
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_BTM_Mn_EN(btm_chan_idx),
+				QPNP_BTM_Mn_LOW_THR_INT_EN, state);
+		if (rc)
+			pr_err("channel:%x failed\n", btm_chan);
+	break;
+	case ADC_TM_TRIP_LOW_COOL:
+		/* high_thr (higher voltage) for cooler temp */
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_ADC_TM_HIGH_THR_INT_EN,
+				sensor_mask, state);
+		else
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_BTM_Mn_EN(btm_chan_idx),
+				QPNP_BTM_Mn_HIGH_THR_INT_EN, state);
+		if (rc)
+			pr_err("channel:%x failed\n", btm_chan);
+	break;
+	default:
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * When adc_tm_recalib_check is enabled, double-check a threshold trip
+ * against a fresh VADC reading before notifying clients.  If the reading
+ * confirms the trip, *notify_check is set to 1 and the caller proceeds.
+ * If the trip looks spurious, *notify_check is set to 0 and the BTM
+ * channel is fully torn down and reconfigured so monitoring restarts
+ * silently.  status_high selects which stored threshold (high vs low) the
+ * fresh reading is compared against.
+ */
+static int qpnp_adc_tm_recalib_request_check(struct qpnp_adc_tm_chip *chip,
+			int sensor_num, u8 status_high, u8 *notify_check)
+{
+	int rc = 0;
+	u8 sensor_mask = 0, mode_ctl = 0;
+	int32_t old_thr = 0, new_thr = 0;
+	uint32_t channel, btm_chan_num, scale_type;
+	struct qpnp_vadc_result result;
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	struct list_head *thr_list;
+	bool status = false;
+
+	/* Recalibration check disabled: always allow the notification */
+	if (!chip->adc_tm_recalib_check) {
+		*notify_check = 1;
+		return rc;
+	}
+
+	list_for_each(thr_list, &chip->sensor[sensor_num].thr_list) {
+		client_info = list_entry(thr_list,
+				struct qpnp_adc_thr_client_info, list);
+		channel = client_info->btm_param->channel;
+		btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
+		sensor_mask = 1 << sensor_num;
+
+		/* Take a fresh one-shot VADC reading of the same channel */
+		rc = qpnp_vadc_read(chip->vadc_dev, channel, &result);
+		if (rc < 0) {
+			pr_err("failure to read vadc channel=%d\n",
+					client_info->btm_param->channel);
+			goto fail;
+		}
+		new_thr = result.physical;
+
+		if (status_high)
+			old_thr = client_info->btm_param->high_thr;
+		else
+			old_thr = client_info->btm_param->low_thr;
+
+		/*
+		 * The trip is genuine only if the fresh reading is on the
+		 * tripped side of the stored threshold.
+		 */
+		if (new_thr > old_thr)
+			status = (status_high) ? true : false;
+		else
+			status = (status_high) ? false : true;
+
+		pr_debug(
+			"recalib:sen=%d, new_thr=%d, new_thr_adc_code=0x%x, old_thr=%d status=%d valid_status=%d\n",
+			sensor_num, new_thr, result.adc_code,
+			old_thr, status_high, status);
+
+		rc = qpnp_adc_tm_read_thr_value(chip, btm_chan_num);
+		if (rc < 0) {
+			pr_err("adc-tm thresholds read failed\n");
+			goto fail;
+		}
+
+		if (status) {
+			*notify_check = 1;
+			pr_debug("Client can be notify\n");
+			return rc;
+		}
+
+		pr_debug("Client can not be notify, restart measurement\n");
+		/* Set measurement in single measurement mode */
+		mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
+		if (rc < 0) {
+			pr_err("adc-tm single mode select failed\n");
+			goto fail;
+		}
+
+		/* Disable bank */
+		rc = qpnp_adc_tm_disable(chip);
+		if (rc < 0) {
+			pr_err("adc-tm disable failed\n");
+			goto fail;
+		}
+
+		/* Check if a conversion is in progress */
+		rc = qpnp_adc_tm_req_sts_check(chip);
+		if (rc < 0) {
+			pr_err("adc-tm req_sts check failed\n");
+			goto fail;
+		}
+
+		/* Mask off this sensor's interrupts and measurement enable */
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+							sensor_mask, false);
+		if (rc < 0) {
+			pr_err("low threshold int write failed\n");
+			goto fail;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+							sensor_mask, false);
+		if (rc < 0) {
+			pr_err("high threshold int enable failed\n");
+			goto fail;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+							sensor_mask, false);
+		if (rc < 0) {
+			pr_err("multi measurement en failed\n");
+			goto fail;
+		}
+
+		/* restart measurement: rebuild amux properties and thresholds */
+		scale_type = chip->sensor[sensor_num].scale_type;
+		chip->adc->amux_prop->amux_channel = channel;
+		chip->adc->amux_prop->decimation =
+			chip->adc->adc_channels[sensor_num].adc_decimation;
+		chip->adc->amux_prop->hw_settle_time =
+			chip->adc->adc_channels[sensor_num].hw_settle_time;
+		chip->adc->amux_prop->fast_avg_setup =
+			chip->adc->adc_channels[sensor_num].fast_avg_setup;
+		chip->adc->amux_prop->mode_sel =
+			ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
+		adc_tm_rscale_fn[scale_type].chan(chip->vadc_dev,
+				client_info->btm_param,
+				&chip->adc->amux_prop->chan_prop->low_thr,
+				&chip->adc->amux_prop->chan_prop->high_thr);
+		qpnp_adc_tm_add_to_list(chip, sensor_num,
+				client_info->btm_param,
+				chip->adc->amux_prop->chan_prop);
+		chip->adc->amux_prop->chan_prop->tm_channel_select =
+				chip->sensor[sensor_num].btm_channel_num;
+		chip->adc->amux_prop->chan_prop->state_request =
+				client_info->btm_param->state_request;
+
+		rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
+		if (rc) {
+			pr_err("adc-tm configure failed with %d\n", rc);
+			goto fail;
+		}
+		*notify_check = 0;
+		pr_debug("BTM channel reconfigured for measuremnt\n");
+	}
+fail:
+	return rc;
+}
+
+/*
+ * Handle a tripped HIGH threshold for one sensor: optionally run the
+ * recalibration check (non-thermal-node clients), disarm the tripped
+ * threshold, update each client's pending state so the remaining
+ * threshold stays armed, recompute the aggregate thresholds, disable the
+ * channel's measurement and queue the deferred notification work.
+ */
+static int qpnp_adc_tm_disable_rearm_high_thresholds(
+			struct qpnp_adc_tm_chip *chip, int sensor_num)
+{
+
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	struct list_head *thr_list;
+	uint32_t btm_chan_num = 0;
+	u8 sensor_mask = 0, notify_check = 0;
+	int rc = 0;
+
+	btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
+	pr_debug("high:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
+		sensor_num, chip->th_info.adc_tm_high_enable,
+		chip->th_info.adc_tm_low_enable,
+		chip->th_info.qpnp_adc_tm_meas_en);
+	if (!chip->sensor[sensor_num].thermal_node) {
+		/*
+		 * For non thermal registered clients such as usb_id,
+		 * vbatt, pmic_therm
+		 */
+		sensor_mask = 1 << sensor_num;
+		pr_debug("non thermal node - mask:%x\n", sensor_mask);
+		rc = qpnp_adc_tm_recalib_request_check(chip,
+				sensor_num, true, &notify_check);
+		if (rc < 0 || !notify_check) {
+			/* Spurious trip: channel was re-armed, don't notify */
+			pr_debug("Calib recheck re-armed rc=%d\n", rc);
+			chip->th_info.adc_tm_high_enable = 0;
+			return rc;
+		}
+	} else {
+		/*
+		 * Uses the thermal sysfs registered device to disable
+		 * the corresponding high voltage threshold which
+		 * is triggered by low temp
+		 */
+		sensor_mask = 1 << sensor_num;
+		pr_debug("thermal node with mask:%x\n", sensor_mask);
+		rc = qpnp_adc_tm_activate_trip_type(
+			chip->sensor[sensor_num].tz_dev,
+			ADC_TM_TRIP_LOW_COOL,
+			THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc < 0) {
+			pr_err("notify error:%d\n", sensor_num);
+			return rc;
+		}
+	}
+	/*
+	 * Mark the tripped high threshold for notification and demote each
+	 * client's request so only its low threshold (if any) stays armed.
+	 */
+	list_for_each(thr_list, &chip->sensor[sensor_num].thr_list) {
+		client_info = list_entry(thr_list,
+				struct qpnp_adc_thr_client_info, list);
+		if (client_info->high_thr_set) {
+			client_info->high_thr_set = false;
+			client_info->notify_high_thr = true;
+			if (client_info->state_req_copy ==
+					ADC_TM_HIGH_LOW_THR_ENABLE)
+				client_info->state_req_copy =
+						ADC_TM_LOW_THR_ENABLE;
+			else
+				client_info->state_req_copy =
+						ADC_TM_HIGH_THR_DISABLE;
+		}
+	}
+	qpnp_adc_tm_manage_thresholds(chip, sensor_num, btm_chan_num);
+
+	/* Disable this channel's measurement (legacy mask vs HC Mn_EN bit) */
+	if (!chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_MULTI_MEAS_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("multi meas disable failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(sensor_num),
+			QPNP_BTM_Mn_MEAS_EN, false);
+		if (rc < 0) {
+			pr_err("multi meas disable failed\n");
+			return rc;
+		}
+	}
+
+	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
+	if (rc < 0) {
+		pr_err("re-enabling measurement failed\n");
+		return rc;
+	}
+
+	/* Defer client/userspace notification to the sensor's workqueue */
+	queue_work(chip->sensor[sensor_num].req_wq,
+				&chip->sensor[sensor_num].work);
+
+	return rc;
+}
+
+/*
+ * Handle a tripped LOW threshold for one sensor: optionally run the
+ * recalibration check (non-thermal-node clients), disarm the tripped
+ * threshold, update each client's pending state so the remaining
+ * threshold stays armed, recompute the aggregate thresholds, disable the
+ * channel's measurement and queue the deferred notification work.
+ * Mirrors qpnp_adc_tm_disable_rearm_high_thresholds.
+ */
+static int qpnp_adc_tm_disable_rearm_low_thresholds(
+			struct qpnp_adc_tm_chip *chip, int sensor_num)
+{
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	struct list_head *thr_list;
+	uint32_t btm_chan_num = 0;
+	u8 sensor_mask = 0, notify_check = 0;
+	int rc = 0;
+
+	btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
+	pr_debug("low:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
+		sensor_num, chip->th_info.adc_tm_high_enable,
+		chip->th_info.adc_tm_low_enable,
+		chip->th_info.qpnp_adc_tm_meas_en);
+	if (!chip->sensor[sensor_num].thermal_node) {
+		/*
+		 * For non thermal registered clients such as usb_id,
+		 * vbatt, pmic_therm
+		 */
+		/*
+		 * Compute the mask before logging it; it was previously
+		 * printed uninitialized (always 0) in this branch, unlike
+		 * the high-threshold counterpart.
+		 */
+		sensor_mask = 1 << sensor_num;
+		pr_debug("non thermal node - mask:%x\n", sensor_mask);
+		rc = qpnp_adc_tm_recalib_request_check(chip,
+				sensor_num, false, &notify_check);
+		if (rc < 0 || !notify_check) {
+			pr_debug("Calib recheck re-armed rc=%d\n", rc);
+			chip->th_info.adc_tm_low_enable = 0;
+			return rc;
+		}
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_LOW_THR_INT_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("low threshold int read failed\n");
+			return rc;
+		}
+	} else {
+		/*
+		 * Uses the thermal sysfs registered device to disable
+		 * the corresponding high voltage threshold which
+		 * is triggered by low temp
+		 */
+		sensor_mask = 1 << sensor_num;
+		pr_debug("thermal node with mask:%x\n", sensor_mask);
+		rc = qpnp_adc_tm_activate_trip_type(
+			chip->sensor[sensor_num].tz_dev,
+			ADC_TM_TRIP_HIGH_WARM,
+			THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc < 0) {
+			pr_err("notify error:%d\n", sensor_num);
+			return rc;
+		}
+	}
+	/*
+	 * Mark the tripped low threshold for notification and demote each
+	 * client's request so only its high threshold (if any) stays armed.
+	 */
+	list_for_each(thr_list, &chip->sensor[sensor_num].thr_list) {
+		client_info = list_entry(thr_list,
+				struct qpnp_adc_thr_client_info, list);
+		if (client_info->low_thr_set) {
+			client_info->low_thr_set = false;
+			client_info->notify_low_thr = true;
+			if (client_info->state_req_copy ==
+					ADC_TM_HIGH_LOW_THR_ENABLE)
+				client_info->state_req_copy =
+						ADC_TM_HIGH_THR_ENABLE;
+			else
+				client_info->state_req_copy =
+						ADC_TM_LOW_THR_DISABLE;
+		}
+	}
+	qpnp_adc_tm_manage_thresholds(chip, sensor_num, btm_chan_num);
+
+	/* Disable this channel's measurement (legacy mask vs HC Mn_EN bit) */
+	if (!chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_MULTI_MEAS_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("multi meas disable failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(sensor_num),
+			QPNP_BTM_Mn_MEAS_EN, false);
+		if (rc < 0) {
+			pr_err("multi meas disable failed\n");
+			return rc;
+		}
+	}
+
+	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
+	if (rc < 0) {
+		pr_err("re-enabling measurement failed\n");
+		return rc;
+	}
+
+	/* Defer client/userspace notification to the sensor's workqueue */
+	queue_work(chip->sensor[sensor_num].req_wq,
+				&chip->sensor[sensor_num].work);
+
+	return rc;
+}
+
+/*
+ * Scan all sensors for pending high/low threshold trips and service each
+ * one via the disable/rearm helpers. Runs from the threshold work items;
+ * the wq_cnt decrement at the end pairs with the atomic_inc done by the
+ * ISRs when they queued this work.
+ */
+static int qpnp_adc_tm_read_status(struct qpnp_adc_tm_chip *chip)
+{
+	int rc = 0, sensor_num = 0;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&chip->adc->adc_lock);
+
+	if (!chip->adc_tm_hc) {
+		/* Legacy HW: wait for any in-flight conversion to finish */
+		rc = qpnp_adc_tm_req_sts_check(chip);
+		if (rc) {
+			pr_err("adc-tm-tm req sts check failed with %d\n", rc);
+			goto fail;
+		}
+	}
+
+	/* First pass: sensors whose high threshold fired */
+	while (sensor_num < chip->max_channels_available) {
+		if (chip->sensor[sensor_num].high_thr_triggered) {
+			rc = qpnp_adc_tm_disable_rearm_high_thresholds(
+					chip, sensor_num);
+			if (rc) {
+				pr_err("rearm threshold failed\n");
+				goto fail;
+			}
+			chip->sensor[sensor_num].high_thr_triggered = false;
+		}
+		sensor_num++;
+	}
+
+	/* Second pass: sensors whose low threshold fired */
+	sensor_num = 0;
+	while (sensor_num < chip->max_channels_available) {
+		if (chip->sensor[sensor_num].low_thr_triggered) {
+			rc = qpnp_adc_tm_disable_rearm_low_thresholds(
+					chip, sensor_num);
+			if (rc) {
+				pr_err("rearm threshold failed\n");
+				goto fail;
+			}
+			chip->sensor[sensor_num].low_thr_triggered = false;
+		}
+		sensor_num++;
+	}
+
+fail:
+	mutex_unlock(&chip->adc->adc_lock);
+
+	/*
+	 * Balance the ISR's atomic_inc when this work item is done (error,
+	 * or no further threshold bits pending); suspend_noirq checks this
+	 * counter to block suspend while notifications are in flight.
+	 */
+	if (rc < 0 || (!chip->th_info.adc_tm_high_enable &&
+					!chip->th_info.adc_tm_low_enable))
+		atomic_dec(&chip->wq_cnt);
+
+	return rc;
+}
+
+/*
+ * Deferred handler for the high-threshold interrupt: drop the HKADC LDO
+ * vote if one is held, then service all pending threshold trips.
+ */
+static void qpnp_adc_tm_high_thr_work(struct work_struct *work)
+{
+	struct qpnp_adc_tm_chip *chip = container_of(work,
+			struct qpnp_adc_tm_chip, trigger_high_thr_work);
+
+	/* Release the HKADC LDO vote if we were holding one */
+	if (chip->adc_vote_enable && chip->adc->hkadc_ldo &&
+					chip->adc->hkadc_ldo_ok) {
+		qpnp_adc_disable_voltage(chip->adc);
+		chip->adc_vote_enable = false;
+	}
+
+	pr_debug("thr:0x%x\n", chip->th_info.adc_tm_high_enable);
+
+	if (qpnp_adc_tm_read_status(chip) < 0)
+		pr_err("adc-tm high thr work failed\n");
+}
+
+/*
+ * Legacy (non-HC) high-threshold ISR. Drops to single-measurement mode,
+ * latches STATUS_HIGH / interrupt-enable / meas-enable into th_info,
+ * masks the trip for the affected sensor and defers the remaining work
+ * to trigger_high_thr_work.
+ */
+static irqreturn_t qpnp_adc_tm_high_thr_isr(int irq, void *data)
+{
+	struct qpnp_adc_tm_chip *chip = data;
+	u8 mode_ctl = 0, status1 = 0, sensor_mask = 0;
+	int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0;
+
+	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+	/* Set measurement in single measurement mode */
+	qpnp_adc_tm_mode_select(chip, mode_ctl);
+
+	qpnp_adc_tm_disable(chip);
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
+	if (rc) {
+		pr_err("adc-tm read status1 failed\n");
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_HIGH,
+					&chip->th_info.status_high, 1);
+	if (rc) {
+		pr_err("adc-tm-tm read status high failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+				&chip->th_info.adc_tm_high_thr_set, 1);
+	if (rc) {
+		pr_err("adc-tm-tm read high thr failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * Check which interrupt threshold is lower and measure against the
+	 * enabled channel.
+	 */
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+				&chip->th_info.qpnp_adc_tm_meas_en, 1);
+	if (rc) {
+		pr_err("adc-tm-tm read status high failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	/* Only bits that are measured, tripped AND int-enabled count */
+	chip->th_info.adc_tm_high_enable = chip->th_info.qpnp_adc_tm_meas_en &
+						chip->th_info.status_high;
+	chip->th_info.adc_tm_high_enable &= chip->th_info.adc_tm_high_thr_set;
+
+	/*
+	 * NOTE(review): this scan leaves sensor_num at the highest-numbered
+	 * tripped sensor; lower-numbered simultaneous trips are handled later
+	 * in qpnp_adc_tm_read_status — confirm this is intentional.
+	 */
+	sensor_notify_num = chip->th_info.adc_tm_high_enable;
+	while (i < chip->max_channels_available) {
+		if ((sensor_notify_num & 0x1) == 1)
+			sensor_num = i;
+		sensor_notify_num >>= 1;
+		i++;
+	}
+
+	if (!chip->sensor[sensor_num].thermal_node) {
+		sensor_mask = 1 << sensor_num;
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_HIGH_THR_INT_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("high threshold int read failed\n");
+			return IRQ_HANDLED;
+		}
+	} else {
+		/*
+		 * Uses the thermal sysfs registered device to disable
+		 * the corresponding high voltage threshold which
+		 * is triggered by low temp
+		 */
+		/* NOTE(review): sensor_mask is still 0 here (debug only) */
+		pr_debug("thermal node with mask:%x\n", sensor_mask);
+		rc = qpnp_adc_tm_activate_trip_type(
+			chip->sensor[sensor_num].tz_dev,
+			ADC_TM_TRIP_LOW_COOL,
+			THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc < 0) {
+			pr_err("notify error:%d\n", sensor_num);
+			return IRQ_HANDLED;
+		}
+	}
+
+	/* Paired with the atomic_dec in qpnp_adc_tm_read_status() */
+	atomic_inc(&chip->wq_cnt);
+	queue_work(chip->high_thr_wq, &chip->trigger_high_thr_work);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Deferred handler for the low-threshold interrupt: drop the HKADC LDO
+ * vote if one is held, then service all pending threshold trips.
+ */
+static void qpnp_adc_tm_low_thr_work(struct work_struct *work)
+{
+	struct qpnp_adc_tm_chip *chip = container_of(work,
+			struct qpnp_adc_tm_chip, trigger_low_thr_work);
+
+	/* Release the HKADC LDO vote if we were holding one */
+	if (chip->adc_vote_enable && chip->adc->hkadc_ldo &&
+					chip->adc->hkadc_ldo_ok) {
+		qpnp_adc_disable_voltage(chip->adc);
+		chip->adc_vote_enable = false;
+	}
+
+	pr_debug("thr:0x%x\n", chip->th_info.adc_tm_low_enable);
+
+	if (qpnp_adc_tm_read_status(chip) < 0)
+		pr_err("adc-tm low thr work failed\n");
+}
+
+/*
+ * Legacy (non-HC) low-threshold ISR. Mirror image of the high-threshold
+ * ISR: latches STATUS_LOW / interrupt-enable / meas-enable into th_info,
+ * masks the trip for the affected sensor and defers the remaining work
+ * to trigger_low_thr_work.
+ */
+static irqreturn_t qpnp_adc_tm_low_thr_isr(int irq, void *data)
+{
+	struct qpnp_adc_tm_chip *chip = data;
+	u8 mode_ctl = 0, status1 = 0, sensor_mask = 0;
+	int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0;
+
+	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+	/* Set measurement in single measurement mode */
+	qpnp_adc_tm_mode_select(chip, mode_ctl);
+
+	qpnp_adc_tm_disable(chip);
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
+	if (rc) {
+		pr_err("adc-tm read status1 failed\n");
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_LOW,
+					&chip->th_info.status_low, 1);
+	if (rc) {
+		pr_err("adc-tm-tm read status low failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+				&chip->th_info.adc_tm_low_thr_set, 1);
+	if (rc) {
+		pr_err("adc-tm-tm read low thr failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+				&chip->th_info.qpnp_adc_tm_meas_en, 1);
+	if (rc) {
+		pr_err("adc-tm-tm read status high failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	/* Only bits that are measured, tripped AND int-enabled count */
+	chip->th_info.adc_tm_low_enable = chip->th_info.qpnp_adc_tm_meas_en &
+					chip->th_info.status_low;
+	chip->th_info.adc_tm_low_enable &= chip->th_info.adc_tm_low_thr_set;
+
+	/* Scan leaves sensor_num at the highest-numbered tripped sensor */
+	sensor_notify_num = chip->th_info.adc_tm_low_enable;
+	while (i < chip->max_channels_available) {
+		if ((sensor_notify_num & 0x1) == 1)
+			sensor_num = i;
+		sensor_notify_num >>= 1;
+		i++;
+	}
+
+	if (!chip->sensor[sensor_num].thermal_node) {
+		sensor_mask = 1 << sensor_num;
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_LOW_THR_INT_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("low threshold int read failed\n");
+			return IRQ_HANDLED;
+		}
+	} else {
+		/* Uses the thermal sysfs registered device to disable
+		 * the corresponding low voltage threshold which
+		 * is triggered by high temp
+		 */
+		/* NOTE(review): sensor_mask is still 0 here (debug only) */
+		pr_debug("thermal node with mask:%x\n", sensor_mask);
+		rc = qpnp_adc_tm_activate_trip_type(
+			chip->sensor[sensor_num].tz_dev,
+			ADC_TM_TRIP_HIGH_WARM,
+			THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc < 0) {
+			pr_err("notify error:%d\n", sensor_num);
+			return IRQ_HANDLED;
+		}
+	}
+
+	/* Paired with the atomic_dec in qpnp_adc_tm_read_status() */
+	atomic_inc(&chip->wq_cnt);
+	queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Check whether sensor index @i tripped its low and/or high threshold
+ * (bit 0 of @status_low / @status_high) and, if so, mask the per-channel
+ * interrupt enable (or deactivate the thermal-zone trip for thermal
+ * clients), accumulate the trip into the notify accumulators and mark
+ * the sensor for servicing by the threshold work.
+ *
+ * Returns 0 on success or a negative errno. (Previously the error paths
+ * returned the irqreturn_t value IRQ_HANDLED, masking the real errno;
+ * the caller only tests for non-zero, so returning rc is compatible.)
+ */
+static int qpnp_adc_tm_rc_check_sensor_trip(struct qpnp_adc_tm_chip *chip,
+			u8 status_low, u8 status_high, int i,
+			int *sensor_low_notify_num, int *sensor_high_notify_num)
+{
+	int rc = 0;
+	u8 ctl = 0, sensor_mask = 0;
+
+	if (((status_low & 0x1) == 1) || ((status_high & 0x1) == 1)) {
+		rc = qpnp_adc_tm_read_reg(chip,
+					QPNP_BTM_Mn_EN(i), &ctl, 1);
+		if (rc) {
+			pr_err("ctl read failed with %d\n", rc);
+			return rc;
+		}
+
+		if ((status_low & 0x1) && (ctl & QPNP_BTM_Mn_MEAS_EN)
+			&& (ctl & QPNP_BTM_Mn_LOW_THR_INT_EN)) {
+			/* Mask the corresponding low threshold interrupt en */
+			if (!chip->sensor[i].thermal_node) {
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_BTM_Mn_EN(i),
+					QPNP_BTM_Mn_LOW_THR_INT_EN, false);
+				if (rc < 0) {
+					pr_err("low thr_int en failed\n");
+					return rc;
+				}
+			} else {
+				/*
+				 * Uses the thermal sysfs registered device to
+				 * disable the corresponding low voltage
+				 * threshold which is triggered by high temp
+				 */
+				pr_debug("thermal node with mask:%x\n",
+							sensor_mask);
+				rc = qpnp_adc_tm_activate_trip_type(
+					chip->sensor[i].tz_dev,
+					ADC_TM_TRIP_HIGH_WARM,
+					THERMAL_TRIP_ACTIVATION_DISABLED);
+				if (rc < 0) {
+					pr_err("notify error:%d\n", i);
+					return rc;
+				}
+			}
+			*sensor_low_notify_num |= (status_low & 0x1);
+			chip->sensor[i].low_thr_triggered = true;
+		}
+
+		if ((status_high & 0x1) && (ctl & QPNP_BTM_Mn_MEAS_EN) &&
+					(ctl & QPNP_BTM_Mn_HIGH_THR_INT_EN)) {
+			/* Mask the corresponding high threshold interrupt en */
+			if (!chip->sensor[i].thermal_node) {
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_BTM_Mn_EN(i),
+					QPNP_BTM_Mn_HIGH_THR_INT_EN, false);
+				if (rc < 0) {
+					pr_err("high thr_int en failed\n");
+					return rc;
+				}
+			} else {
+				/*
+				 * Uses the thermal sysfs registered device to
+				 * disable the corresponding high voltage
+				 * threshold which is triggered by low temp
+				 */
+				pr_debug("thermal node with mask:%x\n", i);
+				rc = qpnp_adc_tm_activate_trip_type(
+					chip->sensor[i].tz_dev,
+					ADC_TM_TRIP_LOW_COOL,
+					THERMAL_TRIP_ACTIVATION_DISABLED);
+				if (rc < 0) {
+					pr_err("notify error:%d\n", i);
+					return rc;
+				}
+			}
+			*sensor_high_notify_num |= (status_high & 0x1);
+			chip->sensor[i].high_thr_triggered = true;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * HC-mode combined threshold ISR: reads the low/high status bitmaps,
+ * walks every sensor via qpnp_adc_tm_rc_check_sensor_trip() and queues
+ * the low/high threshold work items for whichever direction tripped.
+ */
+static irqreturn_t qpnp_adc_tm_rc_thr_isr(int irq, void *data)
+{
+	struct qpnp_adc_tm_chip *chip = data;
+	u8 status_low = 0, status_high = 0;
+	int rc = 0, sensor_low_notify_num = 0, i = 0;
+	int sensor_high_notify_num = 0;
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_LOW,
+						&status_low, 1);
+	if (rc) {
+		pr_err("adc-tm-tm read status low failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	/* Latch only non-zero status so earlier pending bits survive */
+	if (status_low)
+		chip->th_info.adc_tm_low_enable = status_low;
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_HIGH,
+							&status_high, 1);
+	if (rc) {
+		pr_err("adc-tm-tm read status high failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	if (status_high)
+		chip->th_info.adc_tm_high_enable = status_high;
+
+	/* Shift bit 0 of both bitmaps past every sensor index */
+	while (i < chip->max_channels_available) {
+		rc = qpnp_adc_tm_rc_check_sensor_trip(chip,
+				status_low, status_high, i,
+				&sensor_low_notify_num,
+				&sensor_high_notify_num);
+		if (rc) {
+			pr_err("Sensor trip read failed\n");
+			return IRQ_HANDLED;
+		}
+		status_low >>= 1;
+		status_high >>= 1;
+		i++;
+	}
+
+	/* Each queue_work pairs with an atomic_dec in read_status() */
+	if (sensor_low_notify_num) {
+		atomic_inc(&chip->wq_cnt);
+		queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work);
+	}
+
+	if (sensor_high_notify_num) {
+		atomic_inc(&chip->wq_cnt);
+		queue_work(chip->high_thr_wq, &chip->trigger_high_thr_work);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Thermal-zone get_temp callback: perform a one-shot VADC conversion on
+ * this sensor's channel and report the scaled physical value.
+ */
+static int qpnp_adc_read_temp(struct thermal_zone_device *thermal,
+			     int *temp)
+{
+	struct qpnp_adc_tm_sensor *sensor = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = sensor->chip;
+	struct qpnp_vadc_result result;
+	int rc;
+
+	rc = qpnp_vadc_read(chip->vadc_dev,
+				sensor->vadc_channel_num, &result);
+	if (!rc)
+		*temp = result.physical;
+
+	return rc;
+}
+
+/* Thermal-zone callbacks for sensors registered with qcom,thermal-node */
+static struct thermal_zone_device_ops qpnp_adc_tm_thermal_ops = {
+	.get_temp = qpnp_adc_read_temp,
+	.get_mode = qpnp_adc_tm_get_mode,
+	.set_mode = qpnp_adc_tm_set_mode,
+	.get_trip_type = qpnp_adc_tm_get_trip_type,
+	.activate_trip_type = qpnp_adc_tm_activate_trip_type,
+	.get_trip_temp = qpnp_adc_tm_get_trip_temp,
+	.set_trip_temp = qpnp_adc_tm_set_trip_temp,
+};
+
+/*
+ * Configure threshold monitoring on the channel given in @param: resolve
+ * the channel in the device-tree table, scale the client's thresholds to
+ * raw codes, add the client to the sensor's threshold list and program
+ * the measurement (legacy or HC register layout).
+ *
+ * param->threshold_notification must be set; returns 0 or negative errno.
+ */
+int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{
+	uint32_t channel, amux_prescaling, dt_index = 0, scale_type = 0;
+	int rc = 0, i = 0, version = 0;
+	bool chan_found = false;
+
+	if (qpnp_adc_tm_is_valid(chip)) {
+		pr_err("chip not valid\n");
+		return -ENODEV;
+	}
+
+	if (param->threshold_notification == NULL) {
+		pr_debug("No notification for high/low temp??\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&chip->adc->adc_lock);
+
+	channel = param->channel;
+
+	/* VSYS monitoring is not supported on PM8950 v1.0 */
+	if (channel == VSYS) {
+		version = qpnp_adc_get_revid_version(chip->dev);
+		if (version == QPNP_REV_ID_PM8950_1_0) {
+			pr_debug("Channel not supported\n");
+			rc = -EINVAL;
+			goto fail_unlock;
+		}
+	}
+
+	/*
+	 * NOTE(review): scan has no break, so the LAST matching table entry
+	 * wins if a channel number appears twice — confirm duplicates are
+	 * not expected in the device tree.
+	 */
+	while (i < chip->max_channels_available) {
+		if (chip->adc->adc_channels[i].channel_num ==
+							channel) {
+			dt_index = i;
+			chan_found = true;
+			i++;
+		} else
+			i++;
+	}
+
+	if (!chan_found)  {
+		pr_err("not a valid ADC_TM channel\n");
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	rc = qpnp_adc_tm_check_revision(chip,
+			chip->sensor[dt_index].btm_channel_num);
+	if (rc < 0)
+		goto fail_unlock;
+
+	scale_type = chip->adc->adc_channels[dt_index].adc_scale_fn;
+	if (scale_type >= SCALE_RSCALE_NONE) {
+		rc = -EBADF;
+		goto fail_unlock;
+	}
+
+
+	amux_prescaling =
+		chip->adc->adc_channels[dt_index].chan_path_prescaling;
+
+	if (amux_prescaling >= PATH_SCALING_NONE) {
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	pr_debug("channel:%d, scale_type:%d, dt_idx:%d",
+					channel, scale_type, dt_index);
+	param->gain_num = qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+	param->gain_den = qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+	param->adc_tm_hc = chip->adc_tm_hc;
+	chip->adc->amux_prop->amux_channel = channel;
+	chip->adc->amux_prop->decimation =
+			chip->adc->adc_channels[dt_index].adc_decimation;
+	chip->adc->amux_prop->hw_settle_time =
+			chip->adc->adc_channels[dt_index].hw_settle_time;
+	chip->adc->amux_prop->fast_avg_setup =
+			chip->adc->adc_channels[dt_index].fast_avg_setup;
+	chip->adc->amux_prop->mode_sel =
+		ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
+	/* Convert the client's physical thresholds to raw ADC codes */
+	adc_tm_rscale_fn[scale_type].chan(chip->vadc_dev, param,
+			&chip->adc->amux_prop->chan_prop->low_thr,
+			&chip->adc->amux_prop->chan_prop->high_thr);
+	qpnp_adc_tm_add_to_list(chip, dt_index, param,
+				chip->adc->amux_prop->chan_prop);
+	chip->adc->amux_prop->chan_prop->tm_channel_select =
+				chip->sensor[dt_index].btm_channel_num;
+	chip->adc->amux_prop->chan_prop->state_request =
+					param->state_request;
+	chip->adc->amux_prop->calib_type =
+			chip->adc->adc_channels[dt_index].calib_type;
+	if (!chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
+		if (rc) {
+			pr_err("adc-tm configure failed with %d\n", rc);
+			goto fail_unlock;
+		}
+	} else {
+		rc = qpnp_adc_tm_hc_configure(chip, chip->adc->amux_prop);
+		if (rc) {
+			pr_err("adc-tm hc configure failed with %d\n", rc);
+			goto fail_unlock;
+		}
+	}
+
+	chip->sensor[dt_index].scale_type = scale_type;
+
+fail_unlock:
+	mutex_unlock(&chip->adc->adc_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_adc_tm_channel_measure);
+
+/*
+ * Stop threshold monitoring on the channel given in @param: quiesce the
+ * hardware, disarm both threshold interrupts for the matching sensor and
+ * disable its measurement slot, then re-enable the bank if any other
+ * channel is still being measured.
+ *
+ * Fixes over the previous version:
+ *  - the channel-search loop indexed adc_channels[] before checking the
+ *    bound, reading one element past the table when no channel matched;
+ *  - the HC-mode error paths returned directly while still holding
+ *    chip->adc->adc_lock, leaking the mutex; they now goto fail.
+ */
+int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{
+	uint32_t channel, dt_index = 0, btm_chan_num;
+	u8 sensor_mask = 0, mode_ctl = 0;
+	int rc = 0;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	mutex_lock(&chip->adc->adc_lock);
+
+	if (!chip->adc_tm_hc) {
+		/* Set measurement in single measurement mode */
+		mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
+		if (rc < 0) {
+			pr_err("adc-tm single mode select failed\n");
+			goto fail;
+		}
+	}
+
+	/* Disable bank */
+	rc = qpnp_adc_tm_disable(chip);
+	if (rc < 0) {
+		pr_err("adc-tm disable failed\n");
+		goto fail;
+	}
+
+	if (!chip->adc_tm_hc) {
+		/* Check if a conversion is in progress */
+		rc = qpnp_adc_tm_req_sts_check(chip);
+		if (rc < 0) {
+			pr_err("adc-tm req_sts check failed\n");
+			goto fail;
+		}
+	}
+
+	channel = param->channel;
+	/* Bounds check must come first to avoid reading past the table */
+	while ((dt_index < chip->max_channels_available) &&
+		(chip->adc->adc_channels[dt_index].channel_num != channel))
+		dt_index++;
+
+	if (dt_index >= chip->max_channels_available) {
+		pr_err("not a valid ADC_TMN channel\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	btm_chan_num = chip->sensor[dt_index].btm_channel_num;
+
+	if (!chip->adc_tm_hc) {
+		sensor_mask = 1 << chip->sensor[dt_index].sensor_num;
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("low threshold int write failed\n");
+			goto fail;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("high threshold int enable failed\n");
+			goto fail;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("multi measurement en failed\n");
+			goto fail;
+		}
+	} else {
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+					QPNP_BTM_Mn_HIGH_THR_INT_EN, false);
+		if (rc < 0) {
+			pr_err("high thr disable err:%d\n", btm_chan_num);
+			goto fail;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+					QPNP_BTM_Mn_LOW_THR_INT_EN, false);
+		if (rc < 0) {
+			pr_err("low thr disable err:%d\n", btm_chan_num);
+			goto fail;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+					QPNP_BTM_Mn_MEAS_EN, false);
+		if (rc < 0) {
+			pr_err("multi measurement disable failed\n");
+			goto fail;
+		}
+	}
+
+	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
+	if (rc < 0)
+		pr_err("re-enabling measurement failed\n");
+
+fail:
+	mutex_unlock(&chip->adc->adc_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_adc_tm_disable_chan_meas);
+
+/* Convenience wrapper: start threshold monitoring on the USB_ID channel */
+int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
+				struct qpnp_adc_tm_btm_param *param)
+{
+	param->channel = LR_MUX10_PU2_AMUX_USB_ID_LV;
+	return qpnp_adc_tm_channel_measure(chip, param);
+}
+EXPORT_SYMBOL(qpnp_adc_tm_usbid_configure);
+
+/* Convenience wrapper: stop threshold monitoring on the USB_ID channel */
+int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip)
+{
+	struct qpnp_adc_tm_btm_param param;
+
+	/*
+	 * Select the USB_ID channel explicitly (mirrors usbid_configure):
+	 * qpnp_adc_tm_disable_chan_meas() reads param->channel, which was
+	 * previously left uninitialized on the stack.
+	 */
+	param.channel = LR_MUX10_PU2_AMUX_USB_ID_LV;
+
+	return qpnp_adc_tm_disable_chan_meas(chip, &param);
+}
+EXPORT_SYMBOL(qpnp_adc_tm_usbid_end);
+
+/*
+ * Look up a probed adc_tm chip via the consumer's "qcom,<name>-adc_tm"
+ * phandle. Returns -ENODEV if the phandle is absent, -EPROBE_DEFER if
+ * the referenced device has not probed yet.
+ */
+struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name)
+{
+	struct qpnp_adc_tm_chip *chip;
+	struct device_node *node = NULL;
+	char prop_name[QPNP_MAX_PROP_NAME_LEN];
+
+	snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-adc_tm", name);
+
+	node = of_parse_phandle(dev->of_node, prop_name, 0);
+	if (node == NULL)
+		return ERR_PTR(-ENODEV);
+
+	list_for_each_entry(chip, &qpnp_adc_tm_device_list, list)
+		if (chip->adc->pdev->dev.of_node == node)
+			return chip;
+
+	return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(qpnp_get_adc_tm);
+
+/*
+ * Legacy (non-HC) probe-time init: clear both threshold interrupt enable
+ * registers and the multi-measurement enable so no stale channel fires
+ * before a client configures it.
+ */
+static int qpnp_adc_tm_initial_setup(struct qpnp_adc_tm_chip *chip)
+{
+	u8 thr_init = 0;
+	int rc = 0;
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+							thr_init, 1);
+	if (rc < 0) {
+		pr_err("high thr init failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+							thr_init, 1);
+	if (rc < 0) {
+		pr_err("low thr init failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+							thr_init, 1);
+	if (rc < 0) {
+		pr_err("multi meas en failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+/* "-hc" compatible selects the newer HC register layout (adc_tm_hc) */
+static const struct of_device_id qpnp_adc_tm_match_table[] = {
+	{	.compatible = "qcom,qpnp-adc-tm" },
+	{	.compatible = "qcom,qpnp-adc-tm-hc" },
+	{}
+};
+
+/*
+ * Probe: parse the per-channel device-tree children, register thermal
+ * zones for qcom,thermal-node sensors, create the notification/threshold
+ * workqueues and request the threshold interrupts.
+ *
+ * Fixes over the previous version:
+ *  - several failure paths (invalid timer index, all four workqueue
+ *    allocations, HC irq request) jumped to fail with rc still 0, so
+ *    probe reported success after failing;
+ *  - the fail loop never advanced i and destroyed req_wq with the wrong
+ *    index (sen_idx); it also leaked req_wq for non-thermal sensors and
+ *    never destroyed thr_wq.
+ */
+static int qpnp_adc_tm_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node, *child;
+	struct qpnp_adc_tm_chip *chip;
+	struct qpnp_adc_drv *adc_qpnp;
+	int32_t count_adc_channel_list = 0, rc, sen_idx = 0, i = 0;
+	bool thermal_node = false;
+	const struct of_device_id *id;
+
+	for_each_child_of_node(node, child)
+		count_adc_channel_list++;
+
+	if (!count_adc_channel_list) {
+		pr_err("No channel listing\n");
+		return -EINVAL;
+	}
+
+	id = of_match_node(qpnp_adc_tm_match_table, node);
+	if (id == NULL) {
+		pr_err("qpnp_adc_tm_match of_node prop not present\n");
+		return -ENODEV;
+	}
+
+	/* chip plus one trailing sensor struct per child, devm-managed */
+	chip = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_adc_tm_chip) +
+			(count_adc_channel_list *
+			sizeof(struct qpnp_adc_tm_sensor)),
+				GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	adc_qpnp = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_adc_drv),
+			GFP_KERNEL);
+	if (!adc_qpnp) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	chip->dev = &(pdev->dev);
+	chip->adc = adc_qpnp;
+	chip->adc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chip->adc->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	if (of_device_is_compatible(node, "qcom,qpnp-adc-tm-hc")) {
+		chip->adc_tm_hc = true;
+		chip->adc->adc_hc = true;
+	}
+
+	rc = qpnp_adc_get_devicetree_data(pdev, chip->adc);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to read device tree\n");
+		goto fail;
+	}
+	mutex_init(&chip->adc->adc_lock);
+
+	/* Register the ADC peripheral interrupt (legacy layout only) */
+	if (!chip->adc_tm_hc) {
+		chip->adc->adc_high_thr_irq = platform_get_irq_byname(pdev,
+						"high-thr-en-set");
+		if (chip->adc->adc_high_thr_irq < 0) {
+			pr_err("Invalid irq\n");
+			rc = -ENXIO;
+			goto fail;
+		}
+
+		chip->adc->adc_low_thr_irq = platform_get_irq_byname(pdev,
+						"low-thr-en-set");
+		if (chip->adc->adc_low_thr_irq < 0) {
+			pr_err("Invalid irq\n");
+			rc = -ENXIO;
+			goto fail;
+		}
+	}
+
+	chip->vadc_dev = qpnp_get_vadc(&pdev->dev, "adc_tm");
+	if (IS_ERR(chip->vadc_dev)) {
+		rc = PTR_ERR(chip->vadc_dev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("vadc property missing, rc=%d\n", rc);
+		goto fail;
+	}
+
+	chip->adc_tm_recalib_check = of_property_read_bool(node,
+				"qcom,adc-tm-recalib-check");
+
+	for_each_child_of_node(node, child) {
+		char name[25];
+		int btm_channel_num, timer_select = 0;
+
+		rc = of_property_read_u32(child,
+				"qcom,btm-channel-number", &btm_channel_num);
+		if (rc) {
+			pr_err("Invalid btm channel number\n");
+			of_node_put(child);
+			goto fail;
+		}
+		rc = of_property_read_u32(child,
+				"qcom,meas-interval-timer-idx", &timer_select);
+		if (rc) {
+			pr_debug("Default to timer2 with interval of 1 sec\n");
+			chip->sensor[sen_idx].timer_select =
+							ADC_MEAS_TIMER_SELECT2;
+			chip->sensor[sen_idx].meas_interval =
+							ADC_MEAS2_INTERVAL_1S;
+		} else {
+			if (timer_select >= ADC_MEAS_TIMER_NUM) {
+				pr_err("Invalid timer selection number\n");
+				/* previously jumped to fail with rc == 0 */
+				rc = -EINVAL;
+				of_node_put(child);
+				goto fail;
+			}
+			chip->sensor[sen_idx].timer_select = timer_select;
+			if (timer_select == ADC_MEAS_TIMER_SELECT1)
+				chip->sensor[sen_idx].meas_interval =
+						ADC_MEAS1_INTERVAL_3P9MS;
+			else if (timer_select == ADC_MEAS_TIMER_SELECT3)
+				chip->sensor[sen_idx].meas_interval =
+						ADC_MEAS3_INTERVAL_4S;
+			else if (timer_select == ADC_MEAS_TIMER_SELECT2)
+				chip->sensor[sen_idx].meas_interval =
+						ADC_MEAS2_INTERVAL_1S;
+		}
+
+		chip->sensor[sen_idx].btm_channel_num = btm_channel_num;
+		chip->sensor[sen_idx].vadc_channel_num =
+				chip->adc->adc_channels[sen_idx].channel_num;
+		chip->sensor[sen_idx].sensor_num = sen_idx;
+		chip->sensor[sen_idx].chip = chip;
+		pr_debug("btm_chan:%x, vadc_chan:%x\n", btm_channel_num,
+			chip->adc->adc_channels[sen_idx].channel_num);
+		thermal_node = of_property_read_bool(child,
+					"qcom,thermal-node");
+		if (thermal_node) {
+			/* Register with the thermal zone */
+			pr_debug("thermal node%x\n", btm_channel_num);
+			chip->sensor[sen_idx].mode = THERMAL_DEVICE_DISABLED;
+			chip->sensor[sen_idx].thermal_node = true;
+			snprintf(name, sizeof(name), "%s",
+				chip->adc->adc_channels[sen_idx].name);
+			chip->sensor[sen_idx].meas_interval =
+				QPNP_ADC_TM_MEAS_INTERVAL;
+			chip->sensor[sen_idx].low_thr =
+						QPNP_ADC_TM_M0_LOW_THR;
+			chip->sensor[sen_idx].high_thr =
+						QPNP_ADC_TM_M0_HIGH_THR;
+			chip->sensor[sen_idx].tz_dev =
+				thermal_zone_device_register(name,
+				ADC_TM_TRIP_NUM, ADC_TM_WRITABLE_TRIPS_MASK,
+				&chip->sensor[sen_idx],
+				&qpnp_adc_tm_thermal_ops, NULL, 0, 0);
+			if (IS_ERR(chip->sensor[sen_idx].tz_dev))
+				pr_err("thermal device register failed.\n");
+		}
+		chip->sensor[sen_idx].req_wq = alloc_workqueue(
+				"qpnp_adc_notify_wq", WQ_HIGHPRI, 0);
+		if (!chip->sensor[sen_idx].req_wq) {
+			pr_err("Requesting priority wq failed\n");
+			rc = -ENOMEM;
+			of_node_put(child);
+			goto fail;
+		}
+		INIT_WORK(&chip->sensor[sen_idx].work, notify_adc_tm_fn);
+		INIT_LIST_HEAD(&chip->sensor[sen_idx].thr_list);
+		sen_idx++;
+	}
+	chip->max_channels_available = count_adc_channel_list;
+	chip->high_thr_wq = alloc_workqueue("qpnp_adc_tm_high_thr_wq",
+							WQ_HIGHPRI, 0);
+	if (!chip->high_thr_wq) {
+		pr_err("Requesting high thr priority wq failed\n");
+		rc = -ENOMEM;
+		goto fail;
+	}
+	chip->low_thr_wq = alloc_workqueue("qpnp_adc_tm_low_thr_wq",
+							WQ_HIGHPRI, 0);
+	if (!chip->low_thr_wq) {
+		pr_err("Requesting low thr priority wq failed\n");
+		rc = -ENOMEM;
+		goto fail;
+	}
+	chip->thr_wq = alloc_workqueue("qpnp_adc_tm_thr_wq",
+						WQ_HIGHPRI, 0);
+	if (!chip->thr_wq) {
+		pr_err("Requesting thr priority wq failed\n");
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_WORK(&chip->trigger_high_thr_work, qpnp_adc_tm_high_thr_work);
+	INIT_WORK(&chip->trigger_low_thr_work, qpnp_adc_tm_low_thr_work);
+	atomic_set(&chip->wq_cnt, 0);
+
+	if (!chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_initial_setup(chip);
+		if (rc)
+			goto fail;
+
+		rc = devm_request_irq(&pdev->dev, chip->adc->adc_high_thr_irq,
+				qpnp_adc_tm_high_thr_isr,
+		IRQF_TRIGGER_RISING, "qpnp_adc_tm_high_interrupt", chip);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request adc irq\n");
+			goto fail;
+		}
+		enable_irq_wake(chip->adc->adc_high_thr_irq);
+
+		rc = devm_request_irq(&pdev->dev, chip->adc->adc_low_thr_irq,
+					qpnp_adc_tm_low_thr_isr,
+			IRQF_TRIGGER_RISING, "qpnp_adc_tm_low_interrupt", chip);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request adc irq\n");
+			goto fail;
+		}
+		enable_irq_wake(chip->adc->adc_low_thr_irq);
+	} else {
+		rc = devm_request_irq(&pdev->dev, chip->adc->adc_irq_eoc,
+				qpnp_adc_tm_rc_thr_isr,
+			IRQF_TRIGGER_RISING, "qpnp_adc_tm_interrupt", chip);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request adc irq\n");
+			/* previously fell through and reported success */
+			goto fail;
+		}
+		enable_irq_wake(chip->adc->adc_irq_eoc);
+	}
+
+	chip->adc_vote_enable = false;
+	dev_set_drvdata(&pdev->dev, chip);
+	list_add(&chip->list, &qpnp_adc_tm_device_list);
+
+	pr_debug("OK\n");
+	return 0;
+fail:
+	/* Tear down only the sensors fully initialized so far (i < sen_idx) */
+	for_each_child_of_node(node, child) {
+		if (i >= sen_idx) {
+			of_node_put(child);
+			break;
+		}
+		thermal_node = of_property_read_bool(child,
+					"qcom,thermal-node");
+		if (thermal_node && !IS_ERR(chip->sensor[i].tz_dev))
+			thermal_zone_device_unregister(chip->sensor[i].tz_dev);
+		if (chip->sensor[i].req_wq)
+			destroy_workqueue(chip->sensor[i].req_wq);
+		i++;
+	}
+	if (chip->high_thr_wq)
+		destroy_workqueue(chip->high_thr_wq);
+	if (chip->low_thr_wq)
+		destroy_workqueue(chip->low_thr_wq);
+	if (chip->thr_wq)
+		destroy_workqueue(chip->thr_wq);
+	dev_set_drvdata(&pdev->dev, NULL);
+	return rc;
+}
+
+/*
+ * Remove: unregister thermal zones, destroy the per-sensor and shared
+ * workqueues and release the HKADC LDO resources.
+ */
+static int qpnp_adc_tm_remove(struct platform_device *pdev)
+{
+	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&pdev->dev);
+	struct device_node *node = pdev->dev.of_node, *child;
+	bool thermal_node = false;
+	int i = 0;
+
+	for_each_child_of_node(node, child) {
+		thermal_node = of_property_read_bool(child,
+					"qcom,thermal-node");
+		if (thermal_node) {
+			thermal_zone_device_unregister(chip->sensor[i].tz_dev);
+			/*
+			 * NOTE(review): req_wq is allocated for every child
+			 * in probe but only destroyed here for thermal
+			 * nodes — non-thermal sensors appear to leak their
+			 * workqueue; confirm and fix if so.
+			 */
+			if (chip->sensor[i].req_wq)
+				destroy_workqueue(chip->sensor[i].req_wq);
+		}
+		i++;
+	}
+
+	if (chip->high_thr_wq)
+		destroy_workqueue(chip->high_thr_wq);
+	if (chip->low_thr_wq)
+		destroy_workqueue(chip->low_thr_wq);
+	if (chip->adc->hkadc_ldo && chip->adc->hkadc_ldo_ok)
+		qpnp_adc_free_voltage_resource(chip->adc);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+/*
+ * Shutdown: quiesce the peripheral (single-measurement mode, bank off,
+ * multi-measurement cleared) so no threshold fires across reboot; errors
+ * are only logged since shutdown cannot fail.
+ */
+static void qpnp_adc_tm_shutdown(struct platform_device *pdev)
+{
+	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&pdev->dev);
+	int rc = 0;
+	u8 reg_val = 0, status1 = 0, en_ctl1 = 0;
+
+	/* Set measurement in single measurement mode */
+	reg_val = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+	rc = qpnp_adc_tm_mode_select(chip, reg_val);
+	if (rc < 0)
+		pr_err("adc-tm single mode select failed\n");
+
+	/* Disable bank */
+	rc = qpnp_adc_tm_disable(chip);
+	if (rc < 0)
+		pr_err("adc-tm disable failed\n");
+
+	/* Check if a conversion is in progress */
+	rc = qpnp_adc_tm_req_sts_check(chip);
+	if (rc < 0)
+		pr_err("adc-tm req_sts check failed\n");
+
+	/* Disable multimeasurement */
+	reg_val = 0;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN, reg_val, 1);
+	if (rc < 0)
+		pr_err("adc-tm multi-measurement mode disable failed\n");
+
+	/* Dump final state for debugging */
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
+	if (rc < 0)
+		pr_err("adc-tm status1 read failed\n");
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_EN_CTL1, &en_ctl1, 1);
+	if (rc < 0)
+		pr_err("adc-tm en_ctl1 read failed\n");
+
+	pr_debug("adc-tm status1=0%x, en_ctl1=0x%x\n", status1, en_ctl1);
+}
+
+/*
+ * Refuse suspend while any threshold notification work is still queued
+ * or running (wq_cnt is incremented by the ISRs and decremented once
+ * qpnp_adc_tm_read_status finishes).
+ */
+static int qpnp_adc_tm_suspend_noirq(struct device *dev)
+{
+	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(dev);
+
+	if (atomic_read(&chip->wq_cnt)) {
+		pr_err(
+			"Aborting suspend, adc_tm notification running while suspending\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/* Only suspend_noirq is needed: it gates suspend on pending notifications */
+static const struct dev_pm_ops qpnp_adc_tm_pm_ops = {
+	.suspend_noirq	= qpnp_adc_tm_suspend_noirq,
+};
+
+static struct platform_driver qpnp_adc_tm_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-adc-tm",
+		.of_match_table	= qpnp_adc_tm_match_table,
+		.pm		= &qpnp_adc_tm_pm_ops,
+	},
+	.probe		= qpnp_adc_tm_probe,
+	.remove		= qpnp_adc_tm_remove,
+	.shutdown	= qpnp_adc_tm_shutdown,
+};
+
+/* Standard platform-driver registration boilerplate */
+static int __init qpnp_adc_tm_init(void)
+{
+	return platform_driver_register(&qpnp_adc_tm_driver);
+}
+module_init(qpnp_adc_tm_init);
+
+static void __exit qpnp_adc_tm_exit(void)
+{
+	platform_driver_unregister(&qpnp_adc_tm_driver);
+}
+module_exit(qpnp_adc_tm_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC ADC Threshold Monitoring driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 644e978..001c807 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -105,6 +105,8 @@
 
 source "drivers/usb/isp1760/Kconfig"
 
+source "drivers/usb/pd/Kconfig"
+
 comment "USB port drivers"
 
 if USB
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index dca7856..7a11362 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -61,3 +61,5 @@
 obj-$(CONFIG_USB_COMMON)	+= common/
 
 obj-$(CONFIG_USBIP_CORE)	+= usbip/
+
+obj-$(CONFIG_USB_PD)		+= pd/
diff --git a/drivers/usb/pd/Kconfig b/drivers/usb/pd/Kconfig
new file mode 100644
index 0000000..f3facc6
--- /dev/null
+++ b/drivers/usb/pd/Kconfig
@@ -0,0 +1,29 @@
+#
+# USB Power Delivery driver configuration
+#
+menu "USB Power Delivery"
+
+config USB_PD
+	def_bool n
+
+config USB_PD_POLICY
+	tristate "USB Power Delivery Protocol and Policy Engine"
+	depends on EXTCON
+	depends on DUAL_ROLE_USB_INTF
+	select USB_PD
+	help
+	  Say Y here to enable USB PD protocol and policy engine.
+	  This driver provides a class that implements the upper
+	  layers of the USB Power Delivery stack. It requires a
+	  PD PHY driver in order to transmit and receive PD
+	  messages on its behalf.
+
+config QPNP_USB_PDPHY
+	tristate "QPNP USB Power Delivery PHY"
+	depends on SPMI
+	help
+	  Say Y here to enable QPNP USB PD PHY peripheral driver
+	  which communicates over the SPMI bus. This is used to handle
+	  the PHY layer communication of the Power Delivery stack.
+
+endmenu
diff --git a/drivers/usb/pd/Makefile b/drivers/usb/pd/Makefile
new file mode 100644
index 0000000..f487070
--- /dev/null
+++ b/drivers/usb/pd/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for USB Power Delivery drivers
+#
+
+obj-$(CONFIG_USB_PD_POLICY)	+= policy_engine.o
+obj-$(CONFIG_QPNP_USB_PDPHY)	+= qpnp-pdphy.o
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
new file mode 100644
index 0000000..2e731af
--- /dev/null
+++ b/drivers/usb/pd/policy_engine.c
@@ -0,0 +1,3250 @@
+/* Copyright (c) 2016-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/extcon.h>
+#include <linux/usb/class-dual-role.h>
+#include <linux/usb/usbpd.h>
+#include "usbpd.h"
+
+enum usbpd_state {
+	PE_UNKNOWN,
+	PE_ERROR_RECOVERY,
+	PE_SRC_DISABLED,
+	PE_SRC_STARTUP,
+	PE_SRC_SEND_CAPABILITIES,
+	PE_SRC_SEND_CAPABILITIES_WAIT, /* substate to wait for Request */
+	PE_SRC_NEGOTIATE_CAPABILITY,
+	PE_SRC_TRANSITION_SUPPLY,
+	PE_SRC_READY,
+	PE_SRC_HARD_RESET,
+	PE_SRC_SOFT_RESET,
+	PE_SRC_SEND_SOFT_RESET,
+	PE_SRC_DISCOVERY,
+	PE_SRC_TRANSITION_TO_DEFAULT,
+	PE_SNK_STARTUP,
+	PE_SNK_DISCOVERY,
+	PE_SNK_WAIT_FOR_CAPABILITIES,
+	PE_SNK_EVALUATE_CAPABILITY,
+	PE_SNK_SELECT_CAPABILITY,
+	PE_SNK_TRANSITION_SINK,
+	PE_SNK_READY,
+	PE_SNK_HARD_RESET,
+	PE_SNK_SOFT_RESET,
+	PE_SNK_SEND_SOFT_RESET,
+	PE_SNK_TRANSITION_TO_DEFAULT,
+	PE_DRS_SEND_DR_SWAP,
+	PE_PRS_SNK_SRC_SEND_SWAP,
+	PE_PRS_SNK_SRC_TRANSITION_TO_OFF,
+	PE_PRS_SNK_SRC_SOURCE_ON,
+	PE_PRS_SRC_SNK_SEND_SWAP,
+	PE_PRS_SRC_SNK_TRANSITION_TO_OFF,
+	PE_PRS_SRC_SNK_WAIT_SOURCE_ON,
+	PE_VCS_WAIT_FOR_VCONN,
+};
+
+static const char * const usbpd_state_strings[] = {
+	"UNKNOWN",
+	"ERROR_RECOVERY",
+	"SRC_Disabled",
+	"SRC_Startup",
+	"SRC_Send_Capabilities",
+	"SRC_Send_Capabilities (Wait for Request)",
+	"SRC_Negotiate_Capability",
+	"SRC_Transition_Supply",
+	"SRC_Ready",
+	"SRC_Hard_Reset",
+	"SRC_Soft_Reset",
+	"SRC_Send_Soft_Reset",
+	"SRC_Discovery",
+	"SRC_Transition_to_default",
+	"SNK_Startup",
+	"SNK_Discovery",
+	"SNK_Wait_for_Capabilities",
+	"SNK_Evaluate_Capability",
+	"SNK_Select_Capability",
+	"SNK_Transition_Sink",
+	"SNK_Ready",
+	"SNK_Hard_Reset",
+	"SNK_Soft_Reset",
+	"SNK_Send_Soft_Reset",
+	"SNK_Transition_to_default",
+	"DRS_Send_DR_Swap",
+	"PRS_SNK_SRC_Send_Swap",
+	"PRS_SNK_SRC_Transition_to_off",
+	"PRS_SNK_SRC_Source_on",
+	"PRS_SRC_SNK_Send_Swap",
+	"PRS_SRC_SNK_Transition_to_off",
+	"PRS_SRC_SNK_Wait_Source_on",
+	"VCS_Wait_for_VCONN",
+};
+
+enum usbpd_control_msg_type {
+	MSG_RESERVED = 0,
+	MSG_GOODCRC,
+	MSG_GOTOMIN,
+	MSG_ACCEPT,
+	MSG_REJECT,
+	MSG_PING,
+	MSG_PS_RDY,
+	MSG_GET_SOURCE_CAP,
+	MSG_GET_SINK_CAP,
+	MSG_DR_SWAP,
+	MSG_PR_SWAP,
+	MSG_VCONN_SWAP,
+	MSG_WAIT,
+	MSG_SOFT_RESET,
+};
+
+enum usbpd_data_msg_type {
+	MSG_SOURCE_CAPABILITIES = 1,
+	MSG_REQUEST,
+	MSG_BIST,
+	MSG_SINK_CAPABILITIES,
+	MSG_VDM = 0xF,
+};
+
+enum vdm_state {
+	VDM_NONE,
+	DISCOVERED_ID,
+	DISCOVERED_SVIDS,
+	DISCOVERED_MODES,
+	MODE_ENTERED,
+	MODE_EXITED,
+};
+
+static void *usbpd_ipc_log;
+#define usbpd_dbg(dev, fmt, ...) do { \
+	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+			##__VA_ARGS__); \
+	dev_dbg(dev, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define usbpd_info(dev, fmt, ...) do { \
+	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+			##__VA_ARGS__); \
+	dev_info(dev, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define usbpd_warn(dev, fmt, ...) do { \
+	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+			##__VA_ARGS__); \
+	dev_warn(dev, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define usbpd_err(dev, fmt, ...) do { \
+	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+			##__VA_ARGS__); \
+	dev_err(dev, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define NUM_LOG_PAGES		10
+
+/* Timeouts (in ms) */
+#define ERROR_RECOVERY_TIME	25
+#define SENDER_RESPONSE_TIME	26
+#define SINK_WAIT_CAP_TIME	500
+#define PS_TRANSITION_TIME	450
+#define SRC_CAP_TIME		120
+#define SRC_TRANSITION_TIME	25
+#define SRC_RECOVER_TIME	750
+#define PS_HARD_RESET_TIME	25
+#define PS_SOURCE_ON		400
+#define PS_SOURCE_OFF		750
+#define SWAP_SOURCE_START_TIME	20
+#define VDM_BUSY_TIME		50
+#define VCONN_ON_TIME		100
+
+/* tPSHardReset + tSafe0V */
+#define SNK_HARD_RESET_VBUS_OFF_TIME	(35 + 650)
+
+/* tSrcRecover + tSrcTurnOn */
+#define SNK_HARD_RESET_VBUS_ON_TIME	(1000 + 275)
+
+#define PD_CAPS_COUNT		50
+
+#define PD_MAX_MSG_ID		7
+
+/*
+ * Build/parse a 16-bit PD message header. Every macro argument is
+ * parenthesized (including "rev", previously unparenthesized) so that
+ * expression arguments expand with the intended precedence.
+ */
+#define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \
+	(((type) & 0xF) | ((dr) << 5) | ((rev) << 6) | \
+	 ((pr) << 8) | ((id) << 9) | ((cnt) << 12))
+#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7)
+#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0xF)
+#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7)
+#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+
+#define PD_RDO_FIXED(obj, gb, mismatch, usb_comm, no_usb_susp, curr1, curr2) \
+		(((obj) << 28) | ((gb) << 27) | ((mismatch) << 26) | \
+		 ((usb_comm) << 25) | ((no_usb_susp) << 24) | \
+		 ((curr1) << 10) | (curr2))
+
+#define PD_RDO_AUGMENTED(obj, mismatch, usb_comm, no_usb_susp, volt, curr) \
+		(((obj) << 28) | ((mismatch) << 26) | ((usb_comm) << 25) | \
+		 ((no_usb_susp) << 24) | ((volt) << 9) | (curr))
+
+#define PD_RDO_OBJ_POS(rdo)		((rdo) >> 28 & 7)
+#define PD_RDO_GIVEBACK(rdo)		((rdo) >> 27 & 1)
+#define PD_RDO_MISMATCH(rdo)		((rdo) >> 26 & 1)
+#define PD_RDO_USB_COMM(rdo)		((rdo) >> 25 & 1)
+#define PD_RDO_NO_USB_SUSP(rdo)		((rdo) >> 24 & 1)
+#define PD_RDO_FIXED_CURR(rdo)		((rdo) >> 10 & 0x3FF)
+#define PD_RDO_FIXED_CURR_MINMAX(rdo)	((rdo) & 0x3FF)
+#define PD_RDO_PROG_VOLTAGE(rdo)	((rdo) >> 9 & 0x7FF)
+#define PD_RDO_PROG_CURR(rdo)		((rdo) & 0x7F)
+
+#define PD_SRC_PDO_TYPE(pdo)		(((pdo) >> 30) & 3)
+#define PD_SRC_PDO_TYPE_FIXED		0
+#define PD_SRC_PDO_TYPE_BATTERY		1
+#define PD_SRC_PDO_TYPE_VARIABLE	2
+#define PD_SRC_PDO_TYPE_AUGMENTED	3
+
+#define PD_SRC_PDO_FIXED_PR_SWAP(pdo)		(((pdo) >> 29) & 1)
+#define PD_SRC_PDO_FIXED_USB_SUSP(pdo)		(((pdo) >> 28) & 1)
+#define PD_SRC_PDO_FIXED_EXT_POWERED(pdo)	(((pdo) >> 27) & 1)
+#define PD_SRC_PDO_FIXED_USB_COMM(pdo)		(((pdo) >> 26) & 1)
+#define PD_SRC_PDO_FIXED_DR_SWAP(pdo)		(((pdo) >> 25) & 1)
+#define PD_SRC_PDO_FIXED_PEAK_CURR(pdo)		(((pdo) >> 20) & 3)
+#define PD_SRC_PDO_FIXED_VOLTAGE(pdo)		(((pdo) >> 10) & 0x3FF)
+#define PD_SRC_PDO_FIXED_MAX_CURR(pdo)		((pdo) & 0x3FF)
+
+#define PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo)	(((pdo) >> 20) & 0x3FF)
+#define PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo)	(((pdo) >> 10) & 0x3FF)
+#define PD_SRC_PDO_VAR_BATT_MAX(pdo)		((pdo) & 0x3FF)
+
+#define PD_APDO_PPS(pdo)			(((pdo) >> 28) & 3)
+#define PD_APDO_MAX_VOLT(pdo)			(((pdo) >> 17) & 0xFF)
+#define PD_APDO_MIN_VOLT(pdo)			(((pdo) >> 8) & 0xFF)
+#define PD_APDO_MAX_CURR(pdo)			((pdo) & 0x7F)
+
+/* Vendor Defined Messages */
+#define MAX_CRC_RECEIVE_TIME	9 /* ~(2 * tReceive_max(1.1ms) * # retry 4) */
+#define MAX_VDM_RESPONSE_TIME	60 /* 2 * tVDMSenderResponse_max(30ms) */
+#define MAX_VDM_BUSY_TIME	100 /* 2 * tVDMBusy (50ms) */
+
+/* VDM header is the first 32-bit object following the 16-bit PD header */
+#define VDM_HDR_SVID(hdr)	((hdr) >> 16)
+#define VDM_IS_SVDM(hdr)	((hdr) & 0x8000)
+#define SVDM_HDR_OBJ_POS(hdr)	(((hdr) >> 8) & 0x7)
+#define SVDM_HDR_CMD_TYPE(hdr)	(((hdr) >> 6) & 0x3)
+#define SVDM_HDR_CMD(hdr)	((hdr) & 0x1f)
+
+#define SVDM_HDR(svid, ver, obj, cmd_type, cmd) \
+	(((svid) << 16) | (1 << 15) | ((ver) << 13) \
+	| ((obj) << 8) | ((cmd_type) << 6) | (cmd))
+
+/* discover id response vdo bit fields */
+#define ID_HDR_USB_HOST		BIT(31)
+#define ID_HDR_USB_DEVICE	BIT(30)
+#define ID_HDR_MODAL_OPR	BIT(26)
+#define ID_HDR_PRODUCT_TYPE(n)	((n) >> 27)
+#define ID_HDR_PRODUCT_PER_MASK	(2 << 27)
+#define ID_HDR_PRODUCT_HUB	1
+#define ID_HDR_PRODUCT_PER	2
+#define ID_HDR_PRODUCT_AMA	5
+#define ID_HDR_VID		0x05c6 /* qcom */
+#define PROD_VDO_PID		0x0a00 /* TBD */
+
+static bool check_vsafe0v = true;
+module_param(check_vsafe0v, bool, 0600);
+
+static int min_sink_current = 900;
+module_param(min_sink_current, int, 0600);
+
+static const u32 default_src_caps[] = { 0x36019096 };	/* VSafe5V @ 1.5A */
+static const u32 default_snk_caps[] = { 0x2601905A };	/* 5V @ 900mA */
+
+struct vdm_tx {
+	u32			data[7];
+	int			size;
+};
+
+struct rx_msg {
+	u8			type;
+	u8			len;
+	u32			payload[7];
+	struct list_head	entry;
+};
+
+#define IS_DATA(m, t) ((m) && ((m)->len) && ((m)->type == (t)))
+#define IS_CTRL(m, t) ((m) && !((m)->len) && ((m)->type == (t)))
+
+struct usbpd {
+	struct device		dev;
+	struct workqueue_struct	*wq;
+	struct work_struct	sm_work;
+	struct hrtimer		timer;
+	bool			sm_queued;
+
+	struct extcon_dev	*extcon;
+
+	enum usbpd_state	current_state;
+	bool			hard_reset_recvd;
+	struct list_head	rx_q;
+	spinlock_t		rx_lock;
+
+	u32			received_pdos[7];
+	int			src_cap_id;
+	u8			selected_pdo;
+	u8			requested_pdo;
+	u32			rdo;	/* can be either source or sink */
+	int			current_voltage;	/* uV */
+	int			requested_voltage;	/* uV */
+	int			requested_current;	/* mA */
+	bool			pd_connected;
+	bool			in_explicit_contract;
+	bool			peer_usb_comm;
+	bool			peer_pr_swap;
+	bool			peer_dr_swap;
+
+	struct power_supply	*usb_psy;
+	struct notifier_block	psy_nb;
+
+	enum power_supply_typec_mode typec_mode;
+	enum power_supply_type	psy_type;
+	enum power_supply_typec_power_role forced_pr;
+	bool			vbus_present;
+
+	enum pd_spec_rev	spec_rev;
+	enum data_role		current_dr;
+	enum power_role		current_pr;
+	bool			in_pr_swap;
+	bool			pd_phy_opened;
+	struct completion	swap_complete;
+
+	struct dual_role_phy_instance	*dual_role;
+	struct dual_role_phy_desc	dr_desc;
+	bool			send_pr_swap;
+	bool			send_dr_swap;
+
+	struct regulator	*vbus;
+	struct regulator	*vconn;
+	bool			vbus_enabled;
+	bool			vconn_enabled;
+	bool			vconn_is_external;
+
+	u8			tx_msgid;
+	u8			rx_msgid;
+	int			caps_count;
+	int			hard_reset_count;
+
+	enum vdm_state		vdm_state;
+	u16			*discovered_svids;
+	int			num_svids;
+	struct vdm_tx		*vdm_tx;
+	struct vdm_tx		*vdm_tx_retry;
+	struct list_head	svid_handlers;
+
+	struct list_head	instance;
+};
+
+static LIST_HEAD(_usbpd);	/* useful for debugging */
+
+static const unsigned int usbpd_extcon_cable[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_USB_CC,
+	EXTCON_USB_SPEED,
+	EXTCON_NONE,
+};
+
+/* EXTCON_USB and EXTCON_USB_HOST are mutually exclusive */
+static const u32 usbpd_extcon_exclusive[] = {0x3, 0};
+
+/**
+ * usbpd_get_plug_orientation - query which CC line the cable is attached on
+ * @pd: the usbpd instance
+ *
+ * Reads POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION from the USB power supply.
+ *
+ * Return: the reported orientation, or ORIENTATION_NONE if the property
+ * read fails.
+ */
+enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd)
+{
+	int ret;
+	union power_supply_propval val;
+
+	ret = power_supply_get_property(pd->usb_psy,
+		POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION, &val);
+	if (ret)
+		return ORIENTATION_NONE;
+
+	return val.intval;
+}
+EXPORT_SYMBOL(usbpd_get_plug_orientation);
+
+/* Drop the host-mode extcon cable state to stop USB host mode. */
+static inline void stop_usb_host(struct usbpd *pd)
+{
+	extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 0);
+}
+
+/*
+ * Start USB host mode: publish CC orientation and link speed (@ss) via
+ * extcon before raising EXTCON_USB_HOST, so the controller driver sees
+ * consistent state when it reacts to the host-cable event.
+ */
+static inline void start_usb_host(struct usbpd *pd, bool ss)
+{
+	enum plug_orientation cc = usbpd_get_plug_orientation(pd);
+
+	extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
+			cc == ORIENTATION_CC2);
+	extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED, ss);
+	extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 1);
+}
+
+/* Drop the device-mode extcon cable state to stop USB peripheral mode. */
+static inline void stop_usb_peripheral(struct usbpd *pd)
+{
+	extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
+}
+
+/*
+ * Start USB peripheral mode; orientation and speed are published first,
+ * mirroring start_usb_host(). Speed is always set to 1 here.
+ */
+static inline void start_usb_peripheral(struct usbpd *pd)
+{
+	enum plug_orientation cc = usbpd_get_plug_orientation(pd);
+
+	extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
+			cc == ORIENTATION_CC2);
+	extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED, 1);
+	extcon_set_cable_state_(pd->extcon, EXTCON_USB, 1);
+}
+
+/*
+ * Map the PD power role onto the charger/Type-C power-supply property.
+ * All three enum power_role values are handled; val is zero-initialized
+ * as a safety net.
+ *
+ * Return: result of power_supply_set_property().
+ */
+static int set_power_role(struct usbpd *pd, enum power_role pr)
+{
+	union power_supply_propval val = {0};
+
+	switch (pr) {
+	case PR_NONE:
+		val.intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		break;
+	case PR_SINK:
+		val.intval = POWER_SUPPLY_TYPEC_PR_SINK;
+		break;
+	case PR_SRC:
+		val.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+		break;
+	}
+
+	return power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
+}
+
+/*
+ * Linear search of the registered SVID handler list.
+ *
+ * Return: the handler registered for @svid, or NULL if none.
+ * NOTE(review): no locking around svid_handlers here — presumably all
+ * access is from the single sm_work context; confirm.
+ */
+static struct usbpd_svid_handler *find_svid_handler(struct usbpd *pd, u16 svid)
+{
+	struct usbpd_svid_handler *handler;
+
+	list_for_each_entry(handler, &pd->svid_handlers, entry)
+		if (svid == handler->svid)
+			return handler;
+
+	return NULL;
+}
+
+/* Reset protocol layer: restart both Tx and Rx MessageID counters. */
+static inline void pd_reset_protocol(struct usbpd *pd)
+{
+	/*
+	 * first Rx ID should be 0; set this to a sentinel of -1 so that in
+	 * phy_msg_received() we can check if we had seen it before.
+	 * (rx_msgid is a u8, so -1 is stored as 255 — outside the valid
+	 * 0..7 MessageID range, which is what makes it a usable sentinel.)
+	 */
+	pd->rx_msgid = -1;
+	pd->tx_msgid = 0;
+}
+
+/*
+ * Build a PD header from current roles/spec rev and hand the message to
+ * the PHY for transmission.
+ *
+ * @num_data: number of 32-bit data objects in @data (0 for control msgs).
+ *
+ * Return: 0 on success; negative errno from the PHY, or -EIO if the PHY
+ * wrote fewer bytes than requested.
+ */
+static int pd_send_msg(struct usbpd *pd, u8 hdr_type, const u32 *data,
+		size_t num_data, enum pd_msg_type type)
+{
+	int ret;
+	u16 hdr;
+
+	hdr = PD_MSG_HDR(hdr_type, pd->current_dr, pd->current_pr,
+			pd->tx_msgid, num_data, pd->spec_rev);
+	ret = pd_phy_write(hdr, (u8 *)data, num_data * sizeof(u32), type, 15);
+	/* TODO figure out timeout. based on tReceive=1.1ms x nRetryCount? */
+
+	/* MessageID incremented regardless of Tx error */
+	pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
+
+	if (ret < 0)
+		return ret;
+	else if (ret != num_data * sizeof(u32))
+		return -EIO;
+	return 0;
+}
+
+/*
+ * Build an RDO for the source PDO at (1-based) position @pdo_pos and
+ * record the resulting requested voltage/current on @pd.
+ *
+ * @uv/@ua: requested voltage (uV) and current (uA); only consulted for
+ * Augmented (PPS) PDOs — Fixed PDOs derive both from the PDO itself.
+ *
+ * Return: 0 on success, -EINVAL for out-of-range PPS requests,
+ * -ENOTSUPP for unsupported PDO types or VCONN-limited voltages.
+ */
+static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
+{
+	int curr;
+	int max_current;
+	bool mismatch = false;
+	u8 type;
+	u32 pdo = pd->received_pdos[pdo_pos - 1];
+
+	type = PD_SRC_PDO_TYPE(pdo);
+	if (type == PD_SRC_PDO_TYPE_FIXED) {
+		/* Fixed PDO current field is in 10 mA units */
+		curr = max_current = PD_SRC_PDO_FIXED_MAX_CURR(pdo) * 10;
+
+		/*
+		 * Check if the PDO has enough current, otherwise set the
+		 * Capability Mismatch flag
+		 */
+		if (curr < min_sink_current) {
+			mismatch = true;
+			max_current = min_sink_current;
+		}
+
+		/* Fixed PDO voltage field is in 50 mV units; store uV */
+		pd->requested_voltage =
+			PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50 * 1000;
+		pd->rdo = PD_RDO_FIXED(pdo_pos, 0, mismatch, 1, 1, curr / 10,
+				max_current / 10);
+	} else if (type == PD_SRC_PDO_TYPE_AUGMENTED) {
+		/* PPS APDO: voltage in 100 mV units, current in 50 mA units */
+		if ((uv / 100000) > PD_APDO_MAX_VOLT(pdo) ||
+			(uv / 100000) < PD_APDO_MIN_VOLT(pdo) ||
+			(ua / 50000) > PD_APDO_MAX_CURR(pdo) || (ua < 0)) {
+			usbpd_err(&pd->dev, "uv (%d) and ua (%d) out of range of APDO\n",
+					uv, ua);
+			return -EINVAL;
+		}
+
+		curr = ua / 1000;
+		pd->requested_voltage = uv;
+		pd->rdo = PD_RDO_AUGMENTED(pdo_pos, mismatch, 1, 1,
+				uv / 20000, ua / 50000);
+	} else {
+		usbpd_err(&pd->dev, "Only Fixed or Programmable PDOs supported\n");
+		return -ENOTSUPP;
+	}
+
+	/* Can't sink more than 5V if VCONN is sourced from the VBUS input */
+	if (pd->vconn_enabled && !pd->vconn_is_external &&
+			pd->requested_voltage > 5000000)
+		return -ENOTSUPP;
+
+	pd->requested_current = curr;
+	pd->requested_pdo = pdo_pos;
+
+	return 0;
+}
+
+/*
+ * Evaluate a freshly received Source_Capabilities message: cache the
+ * peer's capability flags from the first (vSafe5V Fixed) PDO and select
+ * it as our initial request.
+ *
+ * Return: 0 on success, -EINVAL if the first PDO is not of Fixed type.
+ * NOTE(review): pd_select_pdo()'s return value is ignored here — it can
+ * still fail (e.g. VCONN-from-VBUS >5V check); confirm this is benign.
+ */
+static int pd_eval_src_caps(struct usbpd *pd)
+{
+	union power_supply_propval val;
+	u32 first_pdo = pd->received_pdos[0];
+
+	if (PD_SRC_PDO_TYPE(first_pdo) != PD_SRC_PDO_TYPE_FIXED) {
+		usbpd_err(&pd->dev, "First src_cap invalid! %08x\n", first_pdo);
+		return -EINVAL;
+	}
+
+	pd->peer_usb_comm = PD_SRC_PDO_FIXED_USB_COMM(first_pdo);
+	pd->peer_pr_swap = PD_SRC_PDO_FIXED_PR_SWAP(first_pdo);
+	pd->peer_dr_swap = PD_SRC_PDO_FIXED_DR_SWAP(first_pdo);
+
+	val.intval = PD_SRC_PDO_FIXED_USB_SUSP(first_pdo);
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);
+
+	/* Select the first PDO (vSafe5V) immediately. */
+	pd_select_pdo(pd, 1, 0, 0);
+
+	return 0;
+}
+
+/*
+ * Signal a PD Hard Reset on the wire and bump the hard-reset counter.
+ * Any in-progress power-role swap is abandoned.
+ */
+static void pd_send_hard_reset(struct usbpd *pd)
+{
+	usbpd_dbg(&pd->dev, "send hard reset");
+
+	/* Force CC logic to source/sink to keep Rp/Rd unchanged */
+	set_power_role(pd, pd->current_pr);
+	pd->hard_reset_count++;
+	pd_phy_signal(HARD_RESET_SIG, 5); /* tHardResetComplete */
+	pd->in_pr_swap = false;
+}
+
+/*
+ * Schedule the state-machine work, either immediately (ms == 0) or
+ * after @ms milliseconds via the hrtimer (whose handler queues sm_work).
+ * Takes a wakeup source so the SM can run before the system suspends.
+ */
+static void kick_sm(struct usbpd *pd, int ms)
+{
+	pm_stay_awake(&pd->dev);
+	pd->sm_queued = true;
+
+	if (ms)
+		hrtimer_start(&pd->timer, ms_to_ktime(ms), HRTIMER_MODE_REL);
+	else
+		queue_work(pd->wq, &pd->sm_work);
+}
+
+/*
+ * PHY callback for PD signaling. Only Hard Reset is expected; flag it
+ * and kick the state machine, which performs the actual recovery.
+ */
+static void phy_sig_received(struct usbpd *pd, enum pd_sig_type type)
+{
+	if (type != HARD_RESET_SIG) {
+		usbpd_err(&pd->dev, "invalid signal (%d) received\n", type);
+		return;
+	}
+
+	usbpd_dbg(&pd->dev, "hard reset received\n");
+
+	/* Force CC logic to source/sink to keep Rp/Rd unchanged */
+	set_power_role(pd, pd->current_pr);
+	pd->hard_reset_recvd = true;
+	kick_sm(pd, 0);
+}
+
+/*
+ * PHY callback for a received PD message. Validates the frame (SOP only,
+ * header present, payload a multiple of 4 bytes, header count matching
+ * length), performs duplicate-MessageID filtering and spec-rev
+ * downgrade, then queues the message on rx_q for the state machine.
+ *
+ * NOTE(review): uses kzalloc(GFP_KERNEL) — assumes this callback never
+ * runs in atomic context; confirm against the PHY driver.
+ */
+static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
+		u8 *buf, size_t len)
+{
+	struct rx_msg *rx_msg;
+	unsigned long flags;
+	u16 header;
+
+	if (type != SOP_MSG) {
+		usbpd_err(&pd->dev, "invalid msg type (%d) received; only SOP supported\n",
+				type);
+		return;
+	}
+
+	if (len < 2) {
+		usbpd_err(&pd->dev, "invalid message received, len=%zd\n", len);
+		return;
+	}
+
+	header = *((u16 *)buf);
+	buf += sizeof(u16);
+	len -= sizeof(u16);
+
+	if (len % 4 != 0) {
+		usbpd_err(&pd->dev, "len=%zd not multiple of 4\n", len);
+		return;
+	}
+
+	/* if MSGID already seen, discard */
+	if (PD_MSG_HDR_ID(header) == pd->rx_msgid &&
+			PD_MSG_HDR_TYPE(header) != MSG_SOFT_RESET) {
+		usbpd_dbg(&pd->dev, "MessageID already seen, discarding\n");
+		return;
+	}
+
+	pd->rx_msgid = PD_MSG_HDR_ID(header);
+
+	/* discard Pings */
+	if (PD_MSG_HDR_TYPE(header) == MSG_PING && !len)
+		return;
+
+	/* check header's count field to see if it matches len */
+	if (PD_MSG_HDR_COUNT(header) != (len / 4)) {
+		usbpd_err(&pd->dev, "header count (%d) mismatch, len=%zd\n",
+				PD_MSG_HDR_COUNT(header), len);
+		return;
+	}
+
+	/* if spec rev differs (i.e. is older), update PHY */
+	if (PD_MSG_HDR_REV(header) < pd->spec_rev) {
+		pd->spec_rev = PD_MSG_HDR_REV(header);
+		pd_phy_update_spec_rev(pd->spec_rev);
+	}
+
+	rx_msg = kzalloc(sizeof(*rx_msg), GFP_KERNEL);
+	if (!rx_msg)
+		return;
+
+	rx_msg->type = PD_MSG_HDR_TYPE(header);
+	rx_msg->len = PD_MSG_HDR_COUNT(header);
+	/* header count is 3 bits, so len <= 28 bytes fits payload[7] */
+	memcpy(&rx_msg->payload, buf, min(len, sizeof(rx_msg->payload)));
+
+	spin_lock_irqsave(&pd->rx_lock, flags);
+	list_add_tail(&rx_msg->entry, &pd->rx_q);
+	spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+	usbpd_dbg(&pd->dev, "received message: type(%d) len(%d)\n",
+			rx_msg->type, rx_msg->len);
+
+	kick_sm(pd, 0);
+}
+
+/* PHY shutdown callback: currently only logs; no state to tear down. */
+static void phy_shutdown(struct usbpd *pd)
+{
+	usbpd_dbg(&pd->dev, "shutdown");
+}
+
+/*
+ * hrtimer expiry handler armed by kick_sm(): queues the state-machine
+ * work once and does not rearm itself.
+ */
+static enum hrtimer_restart pd_timeout(struct hrtimer *timer)
+{
+	struct usbpd *pd = container_of(timer, struct usbpd, timer);
+
+	usbpd_dbg(&pd->dev, "timeout");
+	queue_work(pd->wq, &pd->sm_work);
+
+	return HRTIMER_NORESTART;
+}
+
+/*
+ * Enters new state and executes actions on entry.
+ *
+ * May recurse into itself for follow-on transitions (e.g. the
+ * PE_SRC_NEGOTIATE_CAPABILITY arm enters PE_SRC_READY or the soft-reset
+ * states), and several cases deliberately fall through to the next
+ * case's entry actions. States with no entry action hit the default arm.
+ */
+static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
+{
+	struct pd_phy_params phy_params = {
+		.signal_cb		= phy_sig_received,
+		.msg_rx_cb		= phy_msg_received,
+		.shutdown_cb		= phy_shutdown,
+		.frame_filter_val	= FRAME_FILTER_EN_SOP |
+					  FRAME_FILTER_EN_HARD_RESET,
+		.spec_rev		= USBPD_REV_20,
+	};
+	union power_supply_propval val = {0};
+	unsigned long flags;
+	int ret;
+
+	usbpd_dbg(&pd->dev, "%s -> %s\n",
+			usbpd_state_strings[pd->current_state],
+			usbpd_state_strings[next_state]);
+
+	pd->current_state = next_state;
+
+	switch (next_state) {
+	case PE_ERROR_RECOVERY: /* perform hard disconnect/reconnect */
+		pd->in_pr_swap = false;
+		pd->current_pr = PR_NONE;
+		set_power_role(pd, PR_NONE);
+		pd->typec_mode = POWER_SUPPLY_TYPEC_NONE;
+		kick_sm(pd, 0);
+		break;
+
+	/* Source states */
+	case PE_SRC_STARTUP:
+		if (pd->current_dr == DR_NONE) {
+			pd->current_dr = DR_DFP;
+			/*
+			 * Defer starting USB host mode until PE_SRC_READY or
+			 * when PE_SRC_SEND_CAPABILITIES fails
+			 */
+		}
+
+		dual_role_instance_changed(pd->dual_role);
+
+		/* Set CC back to DRP toggle for the next disconnect */
+		val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
+
+		/* support only PD 2.0 as a source */
+		pd->spec_rev = USBPD_REV_20;
+		pd_reset_protocol(pd);
+
+		if (!pd->in_pr_swap) {
+			if (pd->pd_phy_opened) {
+				pd_phy_close();
+				pd->pd_phy_opened = false;
+			}
+
+			phy_params.data_role = pd->current_dr;
+			phy_params.power_role = pd->current_pr;
+			phy_params.spec_rev = pd->spec_rev;
+
+			ret = pd_phy_open(&phy_params);
+			if (ret) {
+				WARN_ON_ONCE(1);
+				usbpd_err(&pd->dev, "error opening PD PHY %d\n",
+						ret);
+				pd->current_state = PE_UNKNOWN;
+				return;
+			}
+
+			pd->pd_phy_opened = true;
+		} else {
+			pd_phy_update_spec_rev(pd->spec_rev);
+		}
+
+		pd->current_state = PE_SRC_SEND_CAPABILITIES;
+		if (pd->in_pr_swap) {
+			kick_sm(pd, SWAP_SOURCE_START_TIME);
+			pd->in_pr_swap = false;
+			break;
+		}
+
+		/* fall-through */
+
+	case PE_SRC_SEND_CAPABILITIES:
+		kick_sm(pd, 0);
+		break;
+
+	case PE_SRC_NEGOTIATE_CAPABILITY:
+		if (PD_RDO_OBJ_POS(pd->rdo) != 1 ||
+			PD_RDO_FIXED_CURR(pd->rdo) >
+				PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps) ||
+			PD_RDO_FIXED_CURR_MINMAX(pd->rdo) >
+				PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps)) {
+			/* send Reject */
+			ret = pd_send_msg(pd, MSG_REJECT, NULL, 0, SOP_MSG);
+			if (ret) {
+				usbpd_err(&pd->dev, "Error sending Reject\n");
+				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+				break;
+			}
+
+			usbpd_err(&pd->dev, "Invalid request: %08x\n", pd->rdo);
+
+			if (pd->in_explicit_contract)
+				usbpd_set_state(pd, PE_SRC_READY);
+			else
+				/*
+				 * bypass PE_SRC_Capability_Response and
+				 * PE_SRC_Wait_New_Capabilities in this
+				 * implementation for simplicity.
+				 */
+				usbpd_set_state(pd, PE_SRC_SEND_CAPABILITIES);
+			break;
+		}
+
+		/* PE_SRC_TRANSITION_SUPPLY pseudo-state */
+		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error sending Accept\n");
+			usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+			break;
+		}
+
+		/* tSrcTransition required after ACCEPT */
+		usleep_range(SRC_TRANSITION_TIME * USEC_PER_MSEC,
+				(SRC_TRANSITION_TIME + 5) * USEC_PER_MSEC);
+
+		/*
+		 * Normally a voltage change should occur within tSrcReady
+		 * but since we only support VSafe5V there is nothing more to
+		 * prepare from the power supply so send PS_RDY right away.
+		 */
+		ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error sending PS_RDY\n");
+			usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+			break;
+		}
+
+		usbpd_set_state(pd, PE_SRC_READY);
+		break;
+
+	case PE_SRC_READY:
+		pd->in_explicit_contract = true;
+		if (pd->current_dr == DR_DFP) {
+			/* don't start USB host until after SVDM discovery */
+			if (pd->vdm_state == VDM_NONE)
+				usbpd_send_svdm(pd, USBPD_SID,
+						USBPD_SVDM_DISCOVER_IDENTITY,
+						SVDM_CMD_TYPE_INITIATOR, 0,
+						NULL, 0);
+		}
+
+		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+		complete(&pd->swap_complete);
+		dual_role_instance_changed(pd->dual_role);
+		break;
+
+	case PE_SRC_HARD_RESET:
+	case PE_SNK_HARD_RESET:
+		/* hard reset may sleep; handle it in the workqueue */
+		kick_sm(pd, 0);
+		break;
+
+	case PE_SRC_SEND_SOFT_RESET:
+	case PE_SNK_SEND_SOFT_RESET:
+		pd_reset_protocol(pd);
+
+		ret = pd_send_msg(pd, MSG_SOFT_RESET, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error sending Soft Reset, do Hard Reset\n");
+			usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+					PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+			break;
+		}
+
+		/* wait for ACCEPT */
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+		break;
+
+	/* Sink states */
+	case PE_SNK_STARTUP:
+		if (pd->current_dr == DR_NONE || pd->current_dr == DR_UFP) {
+			pd->current_dr = DR_UFP;
+
+			if (pd->psy_type == POWER_SUPPLY_TYPE_USB ||
+				pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP)
+				start_usb_peripheral(pd);
+		}
+
+		dual_role_instance_changed(pd->dual_role);
+
+		ret = power_supply_get_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_ALLOWED, &val);
+		if (ret) {
+			usbpd_err(&pd->dev, "Unable to read USB PROP_PD_ALLOWED: %d\n",
+					ret);
+			break;
+		}
+
+		if (!val.intval)
+			break;
+
+		/*
+		 * support up to PD 3.0 as a sink; if source is 2.0,
+		 * phy_msg_received() will handle the downgrade.
+		 */
+		pd->spec_rev = USBPD_REV_30;
+		pd_reset_protocol(pd);
+
+		if (!pd->in_pr_swap) {
+			if (pd->pd_phy_opened) {
+				pd_phy_close();
+				pd->pd_phy_opened = false;
+			}
+
+			phy_params.data_role = pd->current_dr;
+			phy_params.power_role = pd->current_pr;
+			phy_params.spec_rev = pd->spec_rev;
+
+			ret = pd_phy_open(&phy_params);
+			if (ret) {
+				WARN_ON_ONCE(1);
+				usbpd_err(&pd->dev, "error opening PD PHY %d\n",
+						ret);
+				pd->current_state = PE_UNKNOWN;
+				return;
+			}
+
+			pd->pd_phy_opened = true;
+		} else {
+			pd_phy_update_spec_rev(pd->spec_rev);
+		}
+
+		pd->current_voltage = pd->requested_voltage = 5000000;
+		val.intval = pd->requested_voltage; /* set max range to 5V */
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_VOLTAGE_MAX, &val);
+
+		if (!pd->vbus_present) {
+			pd->current_state = PE_SNK_DISCOVERY;
+			/* max time for hard reset to turn vbus back on */
+			kick_sm(pd, SNK_HARD_RESET_VBUS_ON_TIME);
+			break;
+		}
+
+		pd->current_state = PE_SNK_WAIT_FOR_CAPABILITIES;
+		/* fall-through */
+
+	case PE_SNK_WAIT_FOR_CAPABILITIES:
+		spin_lock_irqsave(&pd->rx_lock, flags);
+		if (list_empty(&pd->rx_q))
+			kick_sm(pd, SINK_WAIT_CAP_TIME);
+		spin_unlock_irqrestore(&pd->rx_lock, flags);
+		break;
+
+	case PE_SNK_EVALUATE_CAPABILITY:
+		pd->pd_connected = true; /* we know peer is PD capable */
+		pd->hard_reset_count = 0;
+
+		/* evaluate PDOs and select one */
+		ret = pd_eval_src_caps(pd);
+		if (ret < 0) {
+			usbpd_err(&pd->dev, "Invalid src_caps received. Skipping request\n");
+			break;
+		}
+		pd->current_state = PE_SNK_SELECT_CAPABILITY;
+		/* fall-through */
+
+	case PE_SNK_SELECT_CAPABILITY:
+		ret = pd_send_msg(pd, MSG_REQUEST, &pd->rdo, 1, SOP_MSG);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error sending Request\n");
+			usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+			break;
+		}
+
+		/* wait for ACCEPT */
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+		break;
+
+	case PE_SNK_TRANSITION_SINK:
+		/* wait for PS_RDY */
+		kick_sm(pd, PS_TRANSITION_TIME);
+		break;
+
+	case PE_SNK_READY:
+		pd->in_explicit_contract = true;
+		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+		complete(&pd->swap_complete);
+		dual_role_instance_changed(pd->dual_role);
+		break;
+
+	case PE_SNK_TRANSITION_TO_DEFAULT:
+		if (pd->current_dr != DR_UFP) {
+			stop_usb_host(pd);
+			start_usb_peripheral(pd);
+			pd->current_dr = DR_UFP;
+			pd_phy_update_roles(pd->current_dr, pd->current_pr);
+		}
+		if (pd->vconn_enabled) {
+			regulator_disable(pd->vconn);
+			pd->vconn_enabled = false;
+		}
+
+		/* max time for hard reset to turn vbus off */
+		kick_sm(pd, SNK_HARD_RESET_VBUS_OFF_TIME);
+		break;
+
+	case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
+		val.intval = pd->requested_current = 0; /* suspend charging */
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+
+		pd->in_explicit_contract = false;
+
+		/*
+		 * need to update PR bit in message header so that
+		 * proper GoodCRC is sent when receiving next PS_RDY
+		 */
+		pd_phy_update_roles(pd->current_dr, PR_SRC);
+
+		/* wait for PS_RDY */
+		kick_sm(pd, PS_SOURCE_OFF);
+		break;
+
+	default:
+		usbpd_dbg(&pd->dev, "No action for state %s\n",
+				usbpd_state_strings[pd->current_state]);
+		break;
+	}
+}
+
+/**
+ * usbpd_register_svid - register a handler for a specific SVID
+ * @pd: the usbpd instance
+ * @hdlr: handler with svid and mandatory connect/disconnect callbacks
+ *
+ * If SVID discovery already completed and the peer advertised this SVID,
+ * the connect callback is invoked immediately.
+ *
+ * Return: 0 on success, -EINVAL if the SVID is already registered or the
+ * connect/disconnect callbacks are missing.
+ */
+int usbpd_register_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr)
+{
+	if (find_svid_handler(pd, hdlr->svid)) {
+		usbpd_err(&pd->dev, "SVID 0x%04x already registered\n",
+				hdlr->svid);
+		return -EINVAL;
+	}
+
+	/* require connect/disconnect callbacks be implemented */
+	if (!hdlr->connect || !hdlr->disconnect) {
+		usbpd_err(&pd->dev, "SVID 0x%04x connect/disconnect must be non-NULL\n",
+				hdlr->svid);
+		return -EINVAL;
+	}
+
+	usbpd_dbg(&pd->dev, "registered handler for SVID 0x%04x\n", hdlr->svid);
+
+	list_add_tail(&hdlr->entry, &pd->svid_handlers);
+
+	/* already connected with this SVID discovered? */
+	if (pd->vdm_state >= DISCOVERED_SVIDS) {
+		int i;
+
+		for (i = 0; i < pd->num_svids; i++) {
+			if (pd->discovered_svids[i] == hdlr->svid) {
+				hdlr->connect(hdlr);
+				hdlr->discovered = true;
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(usbpd_register_svid);
+
+/**
+ * usbpd_unregister_svid - remove a previously registered SVID handler
+ * @pd: the usbpd instance
+ * @hdlr: handler to remove
+ *
+ * NOTE(review): this does not invoke hdlr->disconnect() even if the
+ * handler was connected, nor clear hdlr->discovered — callers appear
+ * responsible for their own teardown; confirm with users of this API.
+ */
+void usbpd_unregister_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr)
+{
+	list_del_init(&hdlr->entry);
+}
+EXPORT_SYMBOL(usbpd_unregister_svid);
+
+/**
+ * usbpd_send_vdm - queue a Vendor Defined Message for transmission
+ * @pd: the usbpd instance
+ * @vdm_hdr: the 32-bit VDM header object
+ * @vdos: optional array of VDOs following the header (may be NULL)
+ * @num_vdos: number of entries in @vdos (0..6)
+ *
+ * The message is actually sent from the PE_SRC/SNK_READY state handling;
+ * only one VDM may be pending at a time.
+ *
+ * Return: 0 on success, -EBUSY if not in an explicit contract or a VDM
+ * is already queued, -EINVAL if @num_vdos exceeds the payload capacity,
+ * -ENOMEM on allocation failure.
+ */
+int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos, int num_vdos)
+{
+	struct vdm_tx *vdm_tx;
+
+	if (!pd->in_explicit_contract || pd->vdm_tx)
+		return -EBUSY;
+
+	/* data[] holds the VDM header plus at most 6 VDOs */
+	if (num_vdos < 0 || num_vdos > ARRAY_SIZE(vdm_tx->data) - 1)
+		return -EINVAL;
+
+	vdm_tx = kzalloc(sizeof(*vdm_tx), GFP_KERNEL);
+	if (!vdm_tx)
+		return -ENOMEM;
+
+	vdm_tx->data[0] = vdm_hdr;
+	if (vdos && num_vdos)
+		memcpy(&vdm_tx->data[1], vdos, num_vdos * sizeof(u32));
+	vdm_tx->size = num_vdos + 1; /* include the header */
+
+	/* VDM will get sent in PE_SRC/SNK_READY state handling */
+	pd->vdm_tx = vdm_tx;
+
+	/* slight delay before queuing to prioritize handling of incoming VDM */
+	kick_sm(pd, 2);
+
+	return 0;
+}
+EXPORT_SYMBOL(usbpd_send_vdm);
+
+/**
+ * usbpd_send_svdm - build and queue a Structured VDM
+ * @pd: the usbpd instance
+ * @svid: Standard/Vendor ID for the SVDM header
+ * @cmd: SVDM command
+ * @cmd_type: initiator/ACK/NAK/BUSY
+ * @obj_pos: object position field of the header
+ * @vdos: optional VDOs following the header
+ * @num_vdos: number of entries in @vdos
+ *
+ * Thin wrapper over usbpd_send_vdm(); see it for return values.
+ */
+int usbpd_send_svdm(struct usbpd *pd, u16 svid, u8 cmd,
+		enum usbpd_svdm_cmd_type cmd_type, int obj_pos,
+		const u32 *vdos, int num_vdos)
+{
+	u32 svdm_hdr = SVDM_HDR(svid, 0, obj_pos, cmd_type, cmd);
+
+	usbpd_dbg(&pd->dev, "VDM tx: svid:%x cmd:%x cmd_type:%x svdm_hdr:%x\n",
+			svid, cmd, cmd_type, svdm_hdr);
+
+	return usbpd_send_vdm(pd, svdm_hdr, vdos, num_vdos);
+}
+EXPORT_SYMBOL(usbpd_send_svdm);
+
+static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+	u32 vdm_hdr = rx_msg->payload[0];
+	u32 *vdos = &rx_msg->payload[1];
+	u16 svid = VDM_HDR_SVID(vdm_hdr);
+	u16 *psvid;
+	u8 i, num_vdos = rx_msg->len - 1;	/* num objects minus header */
+	u8 cmd = SVDM_HDR_CMD(vdm_hdr);
+	u8 cmd_type = SVDM_HDR_CMD_TYPE(vdm_hdr);
+	bool has_dp = false;
+	struct usbpd_svid_handler *handler;
+
+	usbpd_dbg(&pd->dev, "VDM rx: svid:%x cmd:%x cmd_type:%x vdm_hdr:%x\n",
+			svid, cmd, cmd_type, vdm_hdr);
+
+	/* if it's a supported SVID, pass the message to the handler */
+	handler = find_svid_handler(pd, svid);
+
+	/* Unstructured VDM */
+	if (!VDM_IS_SVDM(vdm_hdr)) {
+		if (handler && handler->vdm_received)
+			handler->vdm_received(handler, vdm_hdr, vdos, num_vdos);
+		return;
+	}
+
+	/* if this interrupts a previous exchange, abort queued response */
+	if (cmd_type == SVDM_CMD_TYPE_INITIATOR && pd->vdm_tx) {
+		usbpd_dbg(&pd->dev, "Discarding previously queued SVDM tx (SVID:0x%04x)\n",
+				VDM_HDR_SVID(pd->vdm_tx->data[0]));
+
+		kfree(pd->vdm_tx);
+		pd->vdm_tx = NULL;
+	}
+
+	if (handler && handler->svdm_received) {
+		handler->svdm_received(handler, cmd, cmd_type, vdos, num_vdos);
+		return;
+	}
+
+	/* Standard Discovery or unhandled messages go here */
+	switch (cmd_type) {
+	case SVDM_CMD_TYPE_INITIATOR:
+		if (svid == USBPD_SID && cmd == USBPD_SVDM_DISCOVER_IDENTITY) {
+			u32 tx_vdos[3] = {
+				ID_HDR_USB_HOST | ID_HDR_USB_DEVICE |
+					ID_HDR_PRODUCT_PER_MASK | ID_HDR_VID,
+				0x0, /* TBD: Cert Stat VDO */
+				(PROD_VDO_PID << 16),
+				/* TBD: Get these from gadget */
+			};
+
+			usbpd_send_svdm(pd, USBPD_SID, cmd,
+					SVDM_CMD_TYPE_RESP_ACK, 0, tx_vdos, 3);
+		} else if (cmd != USBPD_SVDM_ATTENTION) {
+			usbpd_send_svdm(pd, svid, cmd, SVDM_CMD_TYPE_RESP_NAK,
+					SVDM_HDR_OBJ_POS(vdm_hdr), NULL, 0);
+		}
+		break;
+
+	case SVDM_CMD_TYPE_RESP_ACK:
+		if (svid != USBPD_SID) {
+			usbpd_err(&pd->dev, "unhandled ACK for SVID:0x%x\n",
+					svid);
+			break;
+		}
+
+		switch (cmd) {
+		case USBPD_SVDM_DISCOVER_IDENTITY:
+			kfree(pd->vdm_tx_retry);
+			pd->vdm_tx_retry = NULL;
+
+			pd->vdm_state = DISCOVERED_ID;
+			usbpd_send_svdm(pd, USBPD_SID,
+					USBPD_SVDM_DISCOVER_SVIDS,
+					SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
+			break;
+
+		case USBPD_SVDM_DISCOVER_SVIDS:
+			pd->vdm_state = DISCOVERED_SVIDS;
+
+			kfree(pd->vdm_tx_retry);
+			pd->vdm_tx_retry = NULL;
+
+			if (!pd->discovered_svids) {
+				pd->num_svids = 2 * num_vdos;
+				pd->discovered_svids = kcalloc(pd->num_svids,
+								sizeof(u16),
+								GFP_KERNEL);
+				if (!pd->discovered_svids) {
+					usbpd_err(&pd->dev, "unable to allocate SVIDs\n");
+					break;
+				}
+
+				psvid = pd->discovered_svids;
+			} else { /* handle > 12 SVIDs */
+				void *ptr;
+				size_t oldsize = pd->num_svids * sizeof(u16);
+				size_t newsize = oldsize +
+						(2 * num_vdos * sizeof(u16));
+
+				ptr = krealloc(pd->discovered_svids, newsize,
+						GFP_KERNEL);
+				if (!ptr) {
+					usbpd_err(&pd->dev, "unable to realloc SVIDs\n");
+					break;
+				}
+
+				pd->discovered_svids = ptr;
+				psvid = pd->discovered_svids + pd->num_svids;
+				memset(psvid, 0, (2 * num_vdos));
+				pd->num_svids += 2 * num_vdos;
+			}
+
+			/* convert 32-bit VDOs to list of 16-bit SVIDs */
+			for (i = 0; i < num_vdos * 2; i++) {
+				/*
+				 * Within each 32-bit VDO,
+				 *    SVID[i]: upper 16-bits
+				 *    SVID[i+1]: lower 16-bits
+				 * where i is even.
+				 */
+				if (!(i & 1))
+					svid = vdos[i >> 1] >> 16;
+				else
+					svid = vdos[i >> 1] & 0xFFFF;
+
+				/*
+				 * There are some devices that incorrectly
+				 * swap the order of SVIDs within a VDO. So in
+				 * case of an odd-number of SVIDs it could end
+				 * up with SVID[i] as 0 while SVID[i+1] is
+				 * non-zero. Just skip over the zero ones.
+				 */
+				if (svid) {
+					usbpd_dbg(&pd->dev, "Discovered SVID: 0x%04x\n",
+							svid);
+					*psvid++ = svid;
+				}
+			}
+
+			/* if more than 12 SVIDs, resend the request */
+			if (num_vdos == 6 && vdos[5] != 0) {
+				usbpd_send_svdm(pd, USBPD_SID,
+						USBPD_SVDM_DISCOVER_SVIDS,
+						SVDM_CMD_TYPE_INITIATOR, 0,
+						NULL, 0);
+				break;
+			}
+
+			/* now that all SVIDs are discovered, notify handlers */
+			for (i = 0; i < pd->num_svids; i++) {
+				svid = pd->discovered_svids[i];
+				if (svid) {
+					handler = find_svid_handler(pd, svid);
+					if (handler) {
+						handler->connect(handler);
+						handler->discovered = true;
+					}
+				}
+
+				if (svid == 0xFF01)
+					has_dp = true;
+			}
+
+			/*
+			 * Finally start USB host now that we have determined
+			 * if DisplayPort mode is present or not and limit USB
+			 * to HS-only mode if so.
+			 */
+			start_usb_host(pd, !has_dp);
+
+			break;
+
+		default:
+			usbpd_dbg(&pd->dev, "unhandled ACK for command:0x%x\n",
+					cmd);
+			break;
+		}
+		break;
+
+	case SVDM_CMD_TYPE_RESP_NAK:
+		usbpd_info(&pd->dev, "VDM NAK received for SVID:0x%04x command:0x%x\n",
+				svid, cmd);
+
+		switch (cmd) {
+		case USBPD_SVDM_DISCOVER_IDENTITY:
+		case USBPD_SVDM_DISCOVER_SVIDS:
+			start_usb_host(pd, true);
+			break;
+		default:
+			break;
+		}
+
+		break;
+
+	case SVDM_CMD_TYPE_RESP_BUSY:
+		switch (cmd) {
+		case USBPD_SVDM_DISCOVER_IDENTITY:
+		case USBPD_SVDM_DISCOVER_SVIDS:
+			if (!pd->vdm_tx_retry) {
+				usbpd_err(&pd->dev, "Discover command %d VDM was unexpectedly freed\n",
+						cmd);
+				break;
+			}
+
+			/* wait tVDMBusy, then retry */
+			pd->vdm_tx = pd->vdm_tx_retry;
+			pd->vdm_tx_retry = NULL;
+			kick_sm(pd, VDM_BUSY_TIME);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+}
+
+static void handle_vdm_tx(struct usbpd *pd)
+{
+	int ret;
+	unsigned long flags;
+
+	/* only send one VDM at a time */
+	if (pd->vdm_tx) {
+		u32 vdm_hdr = pd->vdm_tx->data[0];
+
+		/* bail out and try again later if a message just arrived */
+		spin_lock_irqsave(&pd->rx_lock, flags);
+		if (!list_empty(&pd->rx_q)) {
+			spin_unlock_irqrestore(&pd->rx_lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+		ret = pd_send_msg(pd, MSG_VDM, pd->vdm_tx->data,
+				pd->vdm_tx->size, SOP_MSG);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error (%d) sending VDM command %d\n",
+					ret, SVDM_HDR_CMD(pd->vdm_tx->data[0]));
+
+			/* retry when hitting PE_SRC/SNK_Ready again */
+			if (ret != -EBUSY)
+				usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+					PE_SRC_SEND_SOFT_RESET :
+					PE_SNK_SEND_SOFT_RESET);
+
+			return;
+		}
+
+		/*
+		 * special case: keep initiated Discover ID/SVIDs
+		 * around in case we need to re-try when receiving BUSY
+		 */
+		if (VDM_IS_SVDM(vdm_hdr) &&
+			SVDM_HDR_CMD_TYPE(vdm_hdr) == SVDM_CMD_TYPE_INITIATOR &&
+			SVDM_HDR_CMD(vdm_hdr) <= USBPD_SVDM_DISCOVER_SVIDS) {
+			if (pd->vdm_tx_retry) {
+				usbpd_dbg(&pd->dev, "Previous Discover VDM command %d not ACKed/NAKed\n",
+					SVDM_HDR_CMD(
+						pd->vdm_tx_retry->data[0]));
+				kfree(pd->vdm_tx_retry);
+			}
+			pd->vdm_tx_retry = pd->vdm_tx;
+		} else {
+			kfree(pd->vdm_tx);
+		}
+
+		pd->vdm_tx = NULL;
+	}
+}
+
+static void reset_vdm_state(struct usbpd *pd)
+{
+	struct usbpd_svid_handler *handler;
+
+	list_for_each_entry(handler, &pd->svid_handlers, entry) {
+		if (handler->discovered) {
+			handler->disconnect(handler);
+			handler->discovered = false;
+		}
+	}
+
+	pd->vdm_state = VDM_NONE;
+	kfree(pd->vdm_tx_retry);
+	pd->vdm_tx_retry = NULL;
+	kfree(pd->discovered_svids);
+	pd->discovered_svids = NULL;
+	pd->num_svids = 0;
+	kfree(pd->vdm_tx);
+	pd->vdm_tx = NULL;
+}
+
+static void dr_swap(struct usbpd *pd)
+{
+	reset_vdm_state(pd);
+
+	if (pd->current_dr == DR_DFP) {
+		stop_usb_host(pd);
+		start_usb_peripheral(pd);
+		pd->current_dr = DR_UFP;
+	} else if (pd->current_dr == DR_UFP) {
+		stop_usb_peripheral(pd);
+		pd->current_dr = DR_DFP;
+
+		/* don't start USB host until after SVDM discovery */
+		usbpd_send_svdm(pd, USBPD_SID, USBPD_SVDM_DISCOVER_IDENTITY,
+				SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
+	}
+
+	pd_phy_update_roles(pd->current_dr, pd->current_pr);
+}
+
+
+static void vconn_swap(struct usbpd *pd)
+{
+	int ret;
+
+	if (pd->vconn_enabled) {
+		pd->current_state = PE_VCS_WAIT_FOR_VCONN;
+		kick_sm(pd, VCONN_ON_TIME);
+	} else {
+		ret = regulator_enable(pd->vconn);
+		if (ret) {
+			usbpd_err(&pd->dev, "Unable to enable vconn\n");
+			return;
+		}
+
+		pd->vconn_enabled = true;
+
+		/*
+		 * Small delay to ensure Vconn has ramped up. This is well
+		 * below tVCONNSourceOn (100ms) so we still send PS_RDY within
+		 * the allowed time.
+		 */
+		usleep_range(5000, 10000);
+
+		ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error sending PS_RDY\n");
+			usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+					PE_SRC_SEND_SOFT_RESET :
+					PE_SNK_SEND_SOFT_RESET);
+			return;
+		}
+	}
+}
+
+static int enable_vbus(struct usbpd *pd)
+{
+	union power_supply_propval val = {0};
+	int count = 100;
+	int ret;
+
+	if (!check_vsafe0v)
+		goto enable_reg;
+
+	/*
+	 * Check to make sure there's no lingering charge on
+	 * VBUS before enabling it as a source. If so poll here
+	 * until it goes below VSafe0V (0.8V) before proceeding.
+	 */
+	while (count--) {
+		ret = power_supply_get_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+		if (ret || val.intval <= 800000)
+			break;
+		usleep_range(20000, 30000);
+	}
+
+	if (count < 99)
+		msleep(100);	/* need to wait an additional tCCDebounce */
+
+enable_reg:
+	ret = regulator_enable(pd->vbus);
+	if (ret)
+		usbpd_err(&pd->dev, "Unable to enable vbus (%d)\n", ret);
+	else
+		pd->vbus_enabled = true;
+
+	return ret;
+}
+
+static inline void rx_msg_cleanup(struct usbpd *pd)
+{
+	struct rx_msg *msg, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pd->rx_lock, flags);
+	list_for_each_entry_safe(msg, tmp, &pd->rx_q, entry) {
+		list_del(&msg->entry);
+		kfree(msg);
+	}
+	spin_unlock_irqrestore(&pd->rx_lock, flags);
+}
+
+/* For PD 3.0, check SinkTxOk before allowing initiating AMS */
+static inline bool is_sink_tx_ok(struct usbpd *pd)
+{
+	if (pd->spec_rev == USBPD_REV_30)
+		return pd->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+
+	return true;
+}
+
+/* Handles current state and determines transitions */
+static void usbpd_sm(struct work_struct *w)
+{
+	struct usbpd *pd = container_of(w, struct usbpd, sm_work);
+	union power_supply_propval val = {0};
+	int ret;
+	struct rx_msg *rx_msg = NULL;
+	unsigned long flags;
+
+	usbpd_dbg(&pd->dev, "handle state %s\n",
+			usbpd_state_strings[pd->current_state]);
+
+	hrtimer_cancel(&pd->timer);
+	pd->sm_queued = false;
+
+	spin_lock_irqsave(&pd->rx_lock, flags);
+	if (!list_empty(&pd->rx_q)) {
+		rx_msg = list_first_entry(&pd->rx_q, struct rx_msg, entry);
+		list_del(&rx_msg->entry);
+	}
+	spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+	/* Disconnect? */
+	if (pd->current_pr == PR_NONE) {
+		if (pd->current_state == PE_UNKNOWN)
+			goto sm_done;
+
+		usbpd_info(&pd->dev, "USB Type-C disconnect\n");
+
+		if (pd->pd_phy_opened) {
+			pd_phy_close();
+			pd->pd_phy_opened = false;
+		}
+
+		pd->in_pr_swap = false;
+		pd->pd_connected = false;
+		pd->in_explicit_contract = false;
+		pd->hard_reset_recvd = false;
+		pd->caps_count = 0;
+		pd->hard_reset_count = 0;
+		pd->src_cap_id = 0;
+		pd->requested_voltage = 0;
+		pd->requested_current = 0;
+		memset(&pd->received_pdos, 0, sizeof(pd->received_pdos));
+		rx_msg_cleanup(pd);
+
+		val.intval = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
+				&val);
+
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+
+		if (pd->vbus_enabled) {
+			regulator_disable(pd->vbus);
+			pd->vbus_enabled = false;
+		}
+
+		if (pd->vconn_enabled) {
+			regulator_disable(pd->vconn);
+			pd->vconn_enabled = false;
+		}
+
+		if (pd->current_dr == DR_UFP)
+			stop_usb_peripheral(pd);
+		else if (pd->current_dr == DR_DFP)
+			stop_usb_host(pd);
+
+		pd->current_pr = PR_NONE;
+		pd->current_dr = DR_NONE;
+
+		reset_vdm_state(pd);
+
+		if (pd->current_state == PE_ERROR_RECOVERY)
+			/* forced disconnect, wait before resetting to DRP */
+			usleep_range(ERROR_RECOVERY_TIME * USEC_PER_MSEC,
+				(ERROR_RECOVERY_TIME + 5) * USEC_PER_MSEC);
+
+		/* set due to dual_role class "mode" change */
+		if (pd->forced_pr != POWER_SUPPLY_TYPEC_PR_NONE)
+			val.intval = pd->forced_pr;
+		else
+			/* Set CC back to DRP toggle */
+			val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
+		pd->forced_pr = POWER_SUPPLY_TYPEC_PR_NONE;
+
+		pd->current_state = PE_UNKNOWN;
+
+		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+		dual_role_instance_changed(pd->dual_role);
+
+		goto sm_done;
+	}
+
+	/* Hard reset? */
+	if (pd->hard_reset_recvd) {
+		pd->hard_reset_recvd = false;
+
+		val.intval = 1;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+		pd->in_pr_swap = false;
+		rx_msg_cleanup(pd);
+		reset_vdm_state(pd);
+
+		if (pd->current_pr == PR_SINK) {
+			usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
+		} else {
+			pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
+			kick_sm(pd, PS_HARD_RESET_TIME);
+		}
+
+		goto sm_done;
+	}
+
+	/* Soft reset? */
+	if (IS_CTRL(rx_msg, MSG_SOFT_RESET)) {
+		usbpd_dbg(&pd->dev, "Handle soft reset\n");
+
+		if (pd->current_pr == PR_SRC)
+			pd->current_state = PE_SRC_SOFT_RESET;
+		else if (pd->current_pr == PR_SINK)
+			pd->current_state = PE_SNK_SOFT_RESET;
+	}
+
+	switch (pd->current_state) {
+	case PE_UNKNOWN:
+		if (pd->current_pr == PR_SINK) {
+			usbpd_set_state(pd, PE_SNK_STARTUP);
+		} else if (pd->current_pr == PR_SRC) {
+			enable_vbus(pd);
+			if (!pd->vconn_enabled &&
+					pd->typec_mode ==
+					POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE) {
+				ret = regulator_enable(pd->vconn);
+				if (ret)
+					usbpd_err(&pd->dev, "Unable to enable vconn\n");
+				else
+					pd->vconn_enabled = true;
+			}
+
+			usbpd_set_state(pd, PE_SRC_STARTUP);
+		}
+		break;
+
+	case PE_SRC_STARTUP:
+		usbpd_set_state(pd, PE_SRC_STARTUP);
+		break;
+
+	case PE_SRC_SEND_CAPABILITIES:
+		ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps,
+				ARRAY_SIZE(default_src_caps), SOP_MSG);
+		if (ret) {
+			pd->caps_count++;
+
+			if (pd->caps_count == 10 && pd->current_dr == DR_DFP) {
+				/* Likely not PD-capable, start host now */
+				start_usb_host(pd, true);
+			} else if (pd->caps_count >= PD_CAPS_COUNT) {
+				usbpd_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n");
+				usbpd_set_state(pd, PE_SRC_DISABLED);
+
+				val.intval = 0;
+				power_supply_set_property(pd->usb_psy,
+						POWER_SUPPLY_PROP_PD_ACTIVE,
+						&val);
+				break;
+			}
+
+			kick_sm(pd, SRC_CAP_TIME);
+			break;
+		}
+
+		/* transmit was successful if GoodCRC was received */
+		pd->caps_count = 0;
+		pd->hard_reset_count = 0;
+		pd->pd_connected = true; /* we know peer is PD capable */
+
+		/* wait for REQUEST */
+		pd->current_state = PE_SRC_SEND_CAPABILITIES_WAIT;
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+
+		val.intval = 1;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+		break;
+
+	case PE_SRC_SEND_CAPABILITIES_WAIT:
+		if (IS_DATA(rx_msg, MSG_REQUEST)) {
+			pd->rdo = rx_msg->payload[0];
+			usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
+		} else if (rx_msg) {
+			usbpd_err(&pd->dev, "Unexpected message received\n");
+			usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+		} else {
+			usbpd_set_state(pd, PE_SRC_HARD_RESET);
+		}
+		break;
+
+	case PE_SRC_READY:
+		if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) {
+			ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
+					default_src_caps,
+					ARRAY_SIZE(default_src_caps), SOP_MSG);
+			if (ret) {
+				usbpd_err(&pd->dev, "Error sending SRC CAPs\n");
+				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+				break;
+			}
+		} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
+			ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
+					default_snk_caps,
+					ARRAY_SIZE(default_snk_caps), SOP_MSG);
+			if (ret) {
+				usbpd_err(&pd->dev, "Error sending Sink Caps\n");
+				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+			}
+		} else if (IS_DATA(rx_msg, MSG_REQUEST)) {
+			pd->rdo = rx_msg->payload[0];
+			usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
+		} else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) {
+			if (pd->vdm_state == MODE_ENTERED) {
+				usbpd_set_state(pd, PE_SRC_HARD_RESET);
+				break;
+			}
+
+			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+			if (ret) {
+				usbpd_err(&pd->dev, "Error sending Accept\n");
+				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+				break;
+			}
+
+			dr_swap(pd);
+		} else if (IS_CTRL(rx_msg, MSG_PR_SWAP)) {
+			/* lock in current mode */
+			set_power_role(pd, pd->current_pr);
+
+			/* we'll happily accept Src->Sink requests anytime */
+			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+			if (ret) {
+				usbpd_err(&pd->dev, "Error sending Accept\n");
+				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+				break;
+			}
+
+			pd->current_state = PE_PRS_SRC_SNK_TRANSITION_TO_OFF;
+			kick_sm(pd, SRC_TRANSITION_TIME);
+			break;
+		} else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
+			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+			if (ret) {
+				usbpd_err(&pd->dev, "Error sending Accept\n");
+				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+				break;
+			}
+
+			vconn_swap(pd);
+		} else if (IS_DATA(rx_msg, MSG_VDM)) {
+			handle_vdm_rx(pd, rx_msg);
+		} else if (pd->send_pr_swap) {
+			pd->send_pr_swap = false;
+			ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
+			if (ret) {
+				dev_err(&pd->dev, "Error sending PR Swap\n");
+				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+				break;
+			}
+
+			pd->current_state = PE_PRS_SRC_SNK_SEND_SWAP;
+			kick_sm(pd, SENDER_RESPONSE_TIME);
+		} else if (pd->send_dr_swap) {
+			pd->send_dr_swap = false;
+			ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG);
+			if (ret) {
+				dev_err(&pd->dev, "Error sending DR Swap\n");
+				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+				break;
+			}
+
+			pd->current_state = PE_DRS_SEND_DR_SWAP;
+			kick_sm(pd, SENDER_RESPONSE_TIME);
+		} else {
+			handle_vdm_tx(pd);
+		}
+		break;
+
+	case PE_SRC_TRANSITION_TO_DEFAULT:
+		if (pd->vconn_enabled)
+			regulator_disable(pd->vconn);
+		if (pd->vbus_enabled)
+			regulator_disable(pd->vbus);
+
+		if (pd->current_dr != DR_DFP) {
+			extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
+			pd->current_dr = DR_DFP;
+			pd_phy_update_roles(pd->current_dr, pd->current_pr);
+		}
+
+		msleep(SRC_RECOVER_TIME);
+
+		pd->vbus_enabled = false;
+		enable_vbus(pd);
+
+		if (pd->vconn_enabled) {
+			ret = regulator_enable(pd->vconn);
+			if (ret) {
+				usbpd_err(&pd->dev, "Unable to enable vconn\n");
+				pd->vconn_enabled = false;
+			}
+		}
+
+		val.intval = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+		usbpd_set_state(pd, PE_SRC_STARTUP);
+		break;
+
+	case PE_SRC_HARD_RESET:
+		val.intval = 1;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+		pd_send_hard_reset(pd);
+		pd->in_explicit_contract = false;
+		rx_msg_cleanup(pd);
+		reset_vdm_state(pd);
+
+		pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
+		kick_sm(pd, PS_HARD_RESET_TIME);
+		break;
+
+	case PE_SNK_STARTUP:
+		usbpd_set_state(pd, PE_SNK_STARTUP);
+		break;
+
+	case PE_SNK_DISCOVERY:
+		if (!rx_msg) {
+			if (pd->vbus_present)
+				usbpd_set_state(pd,
+						PE_SNK_WAIT_FOR_CAPABILITIES);
+
+			/*
+			 * Handle disconnection in the middle of PR_Swap.
+			 * Since in psy_changed() if pd->in_pr_swap is true
+			 * we ignore the typec_mode==NONE change since that is
+			 * expected to happen. However if the cable really did
+			 * get disconnected we need to check for it here after
+			 * waiting for VBUS presence times out.
+			 */
+			if (!pd->typec_mode) {
+				pd->current_pr = PR_NONE;
+				kick_sm(pd, 0);
+			}
+
+			break;
+		}
+		/* else fall-through */
+
+	case PE_SNK_WAIT_FOR_CAPABILITIES:
+		pd->in_pr_swap = false;
+
+		if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
+			val.intval = 0;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+					&val);
+
+			/* save the PDOs so userspace can further evaluate */
+			memcpy(&pd->received_pdos, rx_msg->payload,
+					sizeof(pd->received_pdos));
+			pd->src_cap_id++;
+
+			usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
+
+			val.intval = 1;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+		} else if (pd->hard_reset_count < 3) {
+			usbpd_set_state(pd, PE_SNK_HARD_RESET);
+		} else if (pd->pd_connected) {
+			usbpd_info(&pd->dev, "Sink hard reset count exceeded, forcing reconnect\n");
+
+			val.intval = 0;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+					&val);
+
+			usbpd_set_state(pd, PE_ERROR_RECOVERY);
+		} else {
+			usbpd_dbg(&pd->dev, "Sink hard reset count exceeded, disabling PD\n");
+
+			val.intval = 0;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+					&val);
+
+			val.intval = 0;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+		}
+		break;
+
+	case PE_SNK_SELECT_CAPABILITY:
+		if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
+			usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);
+
+			/* prepare for voltage increase/decrease */
+			val.intval = pd->requested_voltage;
+			power_supply_set_property(pd->usb_psy,
+				pd->requested_voltage >= pd->current_voltage ?
+					POWER_SUPPLY_PROP_VOLTAGE_MAX :
+					POWER_SUPPLY_PROP_VOLTAGE_MIN,
+					&val);
+
+			/*
+			 * if we are changing voltages, we must lower input
+			 * current to pSnkStdby (2.5W). Calculate it and set
+			 * PD_CURRENT_MAX accordingly.
+			 */
+			if (pd->requested_voltage != pd->current_voltage) {
+				int mv = max(pd->requested_voltage,
+						pd->current_voltage) / 1000;
+				val.intval = (2500000 / mv) * 1000;
+				power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+			} else {
+				/* decreasing current? */
+				ret = power_supply_get_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+				if (!ret &&
+					pd->requested_current < val.intval) {
+					val.intval =
+						pd->requested_current * 1000;
+					power_supply_set_property(pd->usb_psy,
+					     POWER_SUPPLY_PROP_PD_CURRENT_MAX,
+					     &val);
+				}
+			}
+
+			pd->selected_pdo = pd->requested_pdo;
+		} else if (IS_CTRL(rx_msg, MSG_REJECT) ||
+				IS_CTRL(rx_msg, MSG_WAIT)) {
+			if (pd->in_explicit_contract)
+				usbpd_set_state(pd, PE_SNK_READY);
+			else
+				usbpd_set_state(pd,
+						PE_SNK_WAIT_FOR_CAPABILITIES);
+		} else if (rx_msg) {
+			usbpd_err(&pd->dev, "Invalid response to sink request\n");
+			usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+		} else {
+			/* timed out; go to hard reset */
+			usbpd_set_state(pd, PE_SNK_HARD_RESET);
+		}
+		break;
+
+	case PE_SNK_TRANSITION_SINK:
+		if (IS_CTRL(rx_msg, MSG_PS_RDY)) {
+			val.intval = pd->requested_voltage;
+			power_supply_set_property(pd->usb_psy,
+				pd->requested_voltage >= pd->current_voltage ?
+					POWER_SUPPLY_PROP_VOLTAGE_MIN :
+					POWER_SUPPLY_PROP_VOLTAGE_MAX, &val);
+			pd->current_voltage = pd->requested_voltage;
+
+			/* resume charging */
+			val.intval = pd->requested_current * 1000; /* mA->uA */
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+
+			usbpd_set_state(pd, PE_SNK_READY);
+		} else {
+			/* timed out; go to hard reset */
+			usbpd_set_state(pd, PE_SNK_HARD_RESET);
+		}
+		break;
+
+	case PE_SNK_READY:
+		if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
+			/* save the PDOs so userspace can further evaluate */
+			memcpy(&pd->received_pdos, rx_msg->payload,
+					sizeof(pd->received_pdos));
+			pd->src_cap_id++;
+
+			usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
+		} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
+			ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
+					default_snk_caps,
+					ARRAY_SIZE(default_snk_caps), SOP_MSG);
+			if (ret) {
+				usbpd_err(&pd->dev, "Error sending Sink Caps\n");
+				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+			}
+		} else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) {
+			ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
+					default_src_caps,
+					ARRAY_SIZE(default_src_caps), SOP_MSG);
+			if (ret) {
+				usbpd_err(&pd->dev, "Error sending SRC CAPs\n");
+				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+				break;
+			}
+		} else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) {
+			if (pd->vdm_state == MODE_ENTERED) {
+				usbpd_set_state(pd, PE_SNK_HARD_RESET);
+				break;
+			}
+
+			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+			if (ret) {
+				usbpd_err(&pd->dev, "Error sending Accept\n");
+				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+				break;
+			}
+
+			dr_swap(pd);
+		} else if (IS_CTRL(rx_msg, MSG_PR_SWAP)) {
+			/* lock in current mode */
+			set_power_role(pd, pd->current_pr);
+
+			/* TODO: should we Reject in certain circumstances? */
+			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+			if (ret) {
+				usbpd_err(&pd->dev, "Error sending Accept\n");
+				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+				break;
+			}
+
+			pd->in_pr_swap = true;
+			usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
+			break;
+		} else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
+			/*
+			 * if VCONN is connected to VBUS, make sure we are
+			 * not in high voltage contract, otherwise reject.
+			 */
+			if (!pd->vconn_is_external &&
+					(pd->requested_voltage > 5000000)) {
+				ret = pd_send_msg(pd, MSG_REJECT, NULL, 0,
+						SOP_MSG);
+				if (ret) {
+					usbpd_err(&pd->dev, "Error sending Reject\n");
+					usbpd_set_state(pd,
+							PE_SNK_SEND_SOFT_RESET);
+				}
+
+				break;
+			}
+
+			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+			if (ret) {
+				usbpd_err(&pd->dev, "Error sending Accept\n");
+				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+				break;
+			}
+
+			vconn_swap(pd);
+		} else if (IS_DATA(rx_msg, MSG_VDM)) {
+			handle_vdm_rx(pd, rx_msg);
+		} else if (pd->send_pr_swap && is_sink_tx_ok(pd)) {
+			pd->send_pr_swap = false;
+			ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
+			if (ret) {
+				dev_err(&pd->dev, "Error sending PR Swap\n");
+				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+				break;
+			}
+
+			pd->current_state = PE_PRS_SNK_SRC_SEND_SWAP;
+			kick_sm(pd, SENDER_RESPONSE_TIME);
+		} else if (pd->send_dr_swap && is_sink_tx_ok(pd)) {
+			pd->send_dr_swap = false;
+			ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG);
+			if (ret) {
+				dev_err(&pd->dev, "Error sending DR Swap\n");
+				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+				break;
+			}
+
+			pd->current_state = PE_DRS_SEND_DR_SWAP;
+			kick_sm(pd, SENDER_RESPONSE_TIME);
+		} else if (is_sink_tx_ok(pd)) {
+			handle_vdm_tx(pd);
+		}
+		break;
+
+	case PE_SNK_TRANSITION_TO_DEFAULT:
+		usbpd_set_state(pd, PE_SNK_STARTUP);
+		break;
+
+	case PE_SRC_SOFT_RESET:
+	case PE_SNK_SOFT_RESET:
+		pd_reset_protocol(pd);
+
+		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_err(&pd->dev, "%s: Error sending Accept, do Hard Reset\n",
+					usbpd_state_strings[pd->current_state]);
+			usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+					PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+			break;
+		}
+
+		usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+				PE_SRC_SEND_CAPABILITIES :
+				PE_SNK_WAIT_FOR_CAPABILITIES);
+		break;
+
+	case PE_SRC_SEND_SOFT_RESET:
+	case PE_SNK_SEND_SOFT_RESET:
+		if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
+			usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+					PE_SRC_SEND_CAPABILITIES :
+					PE_SNK_WAIT_FOR_CAPABILITIES);
+		} else {
+			usbpd_err(&pd->dev, "%s: Did not see Accept, do Hard Reset\n",
+					usbpd_state_strings[pd->current_state]);
+			usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+					PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+		}
+		break;
+
+	case PE_SNK_HARD_RESET:
+		/* prepare charger for VBUS change */
+		val.intval = 1;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+		pd->requested_voltage = 5000000;
+
+		if (pd->requested_current) {
+			val.intval = pd->requested_current = 0;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+		}
+
+		val.intval = pd->requested_voltage;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_VOLTAGE_MIN, &val);
+
+		pd_send_hard_reset(pd);
+		pd->in_explicit_contract = false;
+		reset_vdm_state(pd);
+		usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
+		break;
+
+	case PE_DRS_SEND_DR_SWAP:
+		if (IS_CTRL(rx_msg, MSG_ACCEPT))
+			dr_swap(pd);
+
+		usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+				PE_SRC_READY : PE_SNK_READY);
+		break;
+
+	case PE_PRS_SRC_SNK_SEND_SWAP:
+		if (!IS_CTRL(rx_msg, MSG_ACCEPT)) {
+			pd->current_state = PE_SRC_READY;
+			break;
+		}
+
+		pd->current_state = PE_PRS_SRC_SNK_TRANSITION_TO_OFF;
+		kick_sm(pd, SRC_TRANSITION_TIME);
+		break;
+
+	case PE_PRS_SRC_SNK_TRANSITION_TO_OFF:
+		pd->in_pr_swap = true;
+		pd->in_explicit_contract = false;
+
+		if (pd->vbus_enabled) {
+			regulator_disable(pd->vbus);
+			pd->vbus_enabled = false;
+		}
+
+		/* PE_PRS_SRC_SNK_Assert_Rd */
+		pd->current_pr = PR_SINK;
+		set_power_role(pd, pd->current_pr);
+		pd_phy_update_roles(pd->current_dr, pd->current_pr);
+
+		/* allow time for Vbus discharge, must be < tSrcSwapStdby */
+		msleep(500);
+
+		ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error sending PS_RDY\n");
+			usbpd_set_state(pd, PE_ERROR_RECOVERY);
+			break;
+		}
+
+		pd->current_state = PE_PRS_SRC_SNK_WAIT_SOURCE_ON;
+		kick_sm(pd, PS_SOURCE_ON);
+		break;
+
+	case PE_PRS_SRC_SNK_WAIT_SOURCE_ON:
+		if (IS_CTRL(rx_msg, MSG_PS_RDY))
+			usbpd_set_state(pd, PE_SNK_STARTUP);
+		else
+			usbpd_set_state(pd, PE_ERROR_RECOVERY);
+		break;
+
+	case PE_PRS_SNK_SRC_SEND_SWAP:
+		if (!IS_CTRL(rx_msg, MSG_ACCEPT)) {
+			pd->current_state = PE_SNK_READY;
+			break;
+		}
+
+		pd->in_pr_swap = true;
+		usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
+		break;
+
+	case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
+		if (!IS_CTRL(rx_msg, MSG_PS_RDY)) {
+			usbpd_set_state(pd, PE_ERROR_RECOVERY);
+			break;
+		}
+
+		/* PE_PRS_SNK_SRC_Assert_Rp */
+		pd->current_pr = PR_SRC;
+		set_power_role(pd, pd->current_pr);
+		pd->current_state = PE_PRS_SNK_SRC_SOURCE_ON;
+
+		/* fall-through */
+
+	case PE_PRS_SNK_SRC_SOURCE_ON:
+		enable_vbus(pd);
+		msleep(200); /* allow time VBUS ramp-up, must be < tNewSrc */
+
+		ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error sending PS_RDY\n");
+			usbpd_set_state(pd, PE_ERROR_RECOVERY);
+			break;
+		}
+
+		usbpd_set_state(pd, PE_SRC_STARTUP);
+		break;
+
+	case PE_VCS_WAIT_FOR_VCONN:
+		if (IS_CTRL(rx_msg, MSG_PS_RDY)) {
+			/*
+			 * hopefully redundant check but in case not enabled
+			 * avoids unbalanced regulator disable count
+			 */
+			if (pd->vconn_enabled)
+				regulator_disable(pd->vconn);
+			pd->vconn_enabled = false;
+
+			pd->current_state = pd->current_pr == PR_SRC ?
+				PE_SRC_READY : PE_SNK_READY;
+		} else {
+			/* timed out; go to hard reset */
+			usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+					PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+		}
+
+		break;
+
+	default:
+		usbpd_err(&pd->dev, "Unhandled state %s\n",
+				usbpd_state_strings[pd->current_state]);
+		break;
+	}
+
+sm_done:
+	kfree(rx_msg);
+
+	if (!pd->sm_queued)
+		pm_relax(&pd->dev);
+}
+
+static inline const char *src_current(enum power_supply_typec_mode typec_mode)
+{
+	switch (typec_mode) {
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+		return "default";
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+		return "medium - 1.5A";
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		return "high - 3.0A";
+	default:
+		return "";
+	}
+}
+
/*
 * power_supply notifier callback: watches the "usb" power supply for
 * Type-C mode, VBUS presence and charger type changes and kicks the PD
 * state machine when the connection state changes.
 *
 * Returns 0 on ignored events, or a negative errno if a required
 * power_supply property could not be read.
 */
static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
{
	struct usbpd *pd = container_of(nb, struct usbpd, psy_nb);
	union power_supply_propval val;
	enum power_supply_typec_mode typec_mode;
	int ret;

	/* only react to property changes on our own USB power supply */
	if (ptr != pd->usb_psy || evt != PSY_EVENT_PROP_CHANGED)
		return 0;

	ret = power_supply_get_property(pd->usb_psy,
			POWER_SUPPLY_PROP_TYPEC_MODE, &val);
	if (ret) {
		usbpd_err(&pd->dev, "Unable to read USB TYPEC_MODE: %d\n", ret);
		return ret;
	}

	typec_mode = val.intval;

	ret = power_supply_get_property(pd->usb_psy,
			POWER_SUPPLY_PROP_PE_START, &val);
	if (ret) {
		usbpd_err(&pd->dev, "Unable to read USB PROP_PE_START: %d\n",
				ret);
		return ret;
	}

	/* Don't proceed if PE_START=0 as other props may still change */
	if (!val.intval && !pd->pd_connected &&
			typec_mode != POWER_SUPPLY_TYPEC_NONE)
		return 0;

	ret = power_supply_get_property(pd->usb_psy,
			POWER_SUPPLY_PROP_PRESENT, &val);
	if (ret) {
		usbpd_err(&pd->dev, "Unable to read USB PRESENT: %d\n", ret);
		return ret;
	}

	pd->vbus_present = val.intval;

	ret = power_supply_get_property(pd->usb_psy,
			POWER_SUPPLY_PROP_TYPE, &val);
	if (ret) {
		usbpd_err(&pd->dev, "Unable to read USB TYPE: %d\n", ret);
		return ret;
	}

	pd->psy_type = val.intval;

	/*
	 * For sink hard reset, state machine needs to know when VBUS changes
	 *   - when in PE_SNK_TRANSITION_TO_DEFAULT, notify when VBUS falls
	 *   - when in PE_SNK_DISCOVERY, notify when VBUS rises
	 */
	if (typec_mode && ((!pd->vbus_present &&
			pd->current_state == PE_SNK_TRANSITION_TO_DEFAULT) ||
		(pd->vbus_present && pd->current_state == PE_SNK_DISCOVERY))) {
		usbpd_dbg(&pd->dev, "hard reset: typec mode:%d present:%d\n",
			typec_mode, pd->vbus_present);
		pd->typec_mode = typec_mode;
		/* wake the state machine immediately for the VBUS transition */
		kick_sm(pd, 0);
		return 0;
	}

	/* no CC state change: nothing further to do */
	if (pd->typec_mode == typec_mode)
		return 0;

	pd->typec_mode = typec_mode;

	usbpd_dbg(&pd->dev, "typec mode:%d present:%d type:%d orientation:%d\n",
			typec_mode, pd->vbus_present, pd->psy_type,
			usbpd_get_plug_orientation(pd));

	switch (typec_mode) {
	/* Disconnect */
	case POWER_SUPPLY_TYPEC_NONE:
		if (pd->in_pr_swap) {
			/* CC open is expected mid power-role swap; don't tear down */
			usbpd_dbg(&pd->dev, "Ignoring disconnect due to PR swap\n");
			return 0;
		}

		pd->current_pr = PR_NONE;
		break;

	/* Sink states */
	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
		usbpd_info(&pd->dev, "Type-C Source (%s) connected\n",
				src_current(typec_mode));

		/* if waiting for SinkTxOk to start an AMS */
		if (pd->spec_rev == USBPD_REV_30 &&
			typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH &&
			(pd->send_pr_swap || pd->send_dr_swap || pd->vdm_tx))
			break;

		/* already operating as sink; no state change needed */
		if (pd->current_pr == PR_SINK)
			return 0;

		pd->current_pr = PR_SINK;
		break;

	/* Source states */
	case POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE:
	case POWER_SUPPLY_TYPEC_SINK:
		usbpd_info(&pd->dev, "Type-C Sink%s connected\n",
				typec_mode == POWER_SUPPLY_TYPEC_SINK ?
					"" : " (powered)");

		/* already operating as source; no state change needed */
		if (pd->current_pr == PR_SRC)
			return 0;

		pd->current_pr = PR_SRC;
		break;

	case POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY:
		usbpd_info(&pd->dev, "Type-C Debug Accessory connected\n");
		break;
	case POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER:
		usbpd_info(&pd->dev, "Type-C Analog Audio Adapter connected\n");
		break;
	default:
		usbpd_warn(&pd->dev, "Unsupported typec mode:%d\n",
				typec_mode);
		break;
	}

	/* queue state machine due to CC state change */
	kick_sm(pd, 0);
	return 0;
}
+
/* Properties exposed through the Android dual_role_usb class device */
static enum dual_role_property usbpd_dr_properties[] = {
	DUAL_ROLE_PROP_SUPPORTED_MODES,
	DUAL_ROLE_PROP_MODE,
	DUAL_ROLE_PROP_PR,
	DUAL_ROLE_PROP_DR,
};
+
+static int usbpd_dr_get_property(struct dual_role_phy_instance *dual_role,
+		enum dual_role_property prop, unsigned int *val)
+{
+	struct usbpd *pd = dual_role_get_drvdata(dual_role);
+
+	if (!pd)
+		return -ENODEV;
+
+	switch (prop) {
+	case DUAL_ROLE_PROP_MODE:
+		/* For now associate UFP/DFP with data role only */
+		if (pd->current_dr == DR_UFP)
+			*val = DUAL_ROLE_PROP_MODE_UFP;
+		else if (pd->current_dr == DR_DFP)
+			*val = DUAL_ROLE_PROP_MODE_DFP;
+		else
+			*val = DUAL_ROLE_PROP_MODE_NONE;
+		break;
+	case DUAL_ROLE_PROP_PR:
+		if (pd->current_pr == PR_SRC)
+			*val = DUAL_ROLE_PROP_PR_SRC;
+		else if (pd->current_pr == PR_SINK)
+			*val = DUAL_ROLE_PROP_PR_SNK;
+		else
+			*val = DUAL_ROLE_PROP_PR_NONE;
+		break;
+	case DUAL_ROLE_PROP_DR:
+		if (pd->current_dr == DR_UFP)
+			*val = DUAL_ROLE_PROP_DR_DEVICE;
+		else if (pd->current_dr == DR_DFP)
+			*val = DUAL_ROLE_PROP_DR_HOST;
+		else
+			*val = DUAL_ROLE_PROP_DR_NONE;
+		break;
+	default:
+		usbpd_warn(&pd->dev, "unsupported property %d\n", prop);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
/*
 * dual_role set_property callback: initiate a mode change (forced CC
 * disconnect/reconnect), a PD Data_Role swap or a PD Power_Role swap.
 * Blocks until the requested operation completes or times out.
 *
 * Returns 0 on success; -ENODEV/-ENOTSUPP/-EAGAIN/-ETIMEDOUT/-EPROTO
 * on the various failure paths below.
 */
static int usbpd_dr_set_property(struct dual_role_phy_instance *dual_role,
		enum dual_role_property prop, const unsigned int *val)
{
	struct usbpd *pd = dual_role_get_drvdata(dual_role);
	bool do_swap = false;

	if (!pd)
		return -ENODEV;

	switch (prop) {
	case DUAL_ROLE_PROP_MODE:
		usbpd_dbg(&pd->dev, "Setting mode to %d\n", *val);

		/*
		 * Forces disconnect on CC and re-establishes connection.
		 * This does not use PD-based PR/DR swap
		 */
		if (*val == DUAL_ROLE_PROP_MODE_UFP)
			pd->forced_pr = POWER_SUPPLY_TYPEC_PR_SINK;
		else if (*val == DUAL_ROLE_PROP_MODE_DFP)
			pd->forced_pr = POWER_SUPPLY_TYPEC_PR_SOURCE;

		/* new mode will be applied in disconnect handler */
		set_power_role(pd, PR_NONE);

		/*
		 * wait until it takes effect
		 *
		 * NOTE(review): this poll loop has no upper bound; if the
		 * disconnect handler never clears forced_pr this blocks the
		 * caller indefinitely — consider a bounded wait. TODO confirm.
		 */
		while (pd->forced_pr != POWER_SUPPLY_TYPEC_PR_NONE)
			msleep(20);

		break;

	case DUAL_ROLE_PROP_DR:
		usbpd_dbg(&pd->dev, "Setting data_role to %d\n", *val);

		if (*val == DUAL_ROLE_PROP_DR_HOST) {
			if (pd->current_dr == DR_UFP)
				do_swap = true;
		} else if (*val == DUAL_ROLE_PROP_DR_DEVICE) {
			if (pd->current_dr == DR_DFP)
				do_swap = true;
		} else {
			usbpd_warn(&pd->dev, "setting data_role to 'none' unsupported\n");
			return -ENOTSUPP;
		}

		if (do_swap) {
			/* DR_Swap is only valid from an established contract */
			if (pd->current_state != PE_SRC_READY &&
					pd->current_state != PE_SNK_READY) {
				usbpd_err(&pd->dev, "data_role swap not allowed: PD not in Ready state\n");
				return -EAGAIN;
			}

			/* PD 3.0: sink may only start an AMS on SinkTxOk */
			if (pd->current_state == PE_SNK_READY &&
					!is_sink_tx_ok(pd)) {
				usbpd_err(&pd->dev, "Rp indicates SinkTxNG\n");
				return -EAGAIN;
			}

			reinit_completion(&pd->swap_complete);
			pd->send_dr_swap = true;
			kick_sm(pd, 0);

			/* wait for operation to complete */
			if (!wait_for_completion_timeout(&pd->swap_complete,
					msecs_to_jiffies(100))) {
				usbpd_err(&pd->dev, "data_role swap timed out\n");
				return -ETIMEDOUT;
			}

			/* verify the swap actually landed in the role asked for */
			if ((*val == DUAL_ROLE_PROP_DR_HOST &&
					pd->current_dr != DR_DFP) ||
				(*val == DUAL_ROLE_PROP_DR_DEVICE &&
					 pd->current_dr != DR_UFP)) {
				usbpd_err(&pd->dev, "incorrect state (%s) after data_role swap\n",
						pd->current_dr == DR_DFP ?
						"dfp" : "ufp");
				return -EPROTO;
			}
		}

		break;

	case DUAL_ROLE_PROP_PR:
		usbpd_dbg(&pd->dev, "Setting power_role to %d\n", *val);

		if (*val == DUAL_ROLE_PROP_PR_SRC) {
			if (pd->current_pr == PR_SINK)
				do_swap = true;
		} else if (*val == DUAL_ROLE_PROP_PR_SNK) {
			if (pd->current_pr == PR_SRC)
				do_swap = true;
		} else {
			usbpd_warn(&pd->dev, "setting power_role to 'none' unsupported\n");
			return -ENOTSUPP;
		}

		if (do_swap) {
			/* PR_Swap is only valid from an established contract */
			if (pd->current_state != PE_SRC_READY &&
					pd->current_state != PE_SNK_READY) {
				usbpd_err(&pd->dev, "power_role swap not allowed: PD not in Ready state\n");
				return -EAGAIN;
			}

			/* PD 3.0: sink may only start an AMS on SinkTxOk */
			if (pd->current_state == PE_SNK_READY &&
					!is_sink_tx_ok(pd)) {
				usbpd_err(&pd->dev, "Rp indicates SinkTxNG\n");
				return -EAGAIN;
			}

			reinit_completion(&pd->swap_complete);
			pd->send_pr_swap = true;
			kick_sm(pd, 0);

			/* wait for operation to complete */
			if (!wait_for_completion_timeout(&pd->swap_complete,
					msecs_to_jiffies(2000))) {
				usbpd_err(&pd->dev, "power_role swap timed out\n");
				return -ETIMEDOUT;
			}

			/* verify the swap actually landed in the role asked for */
			if ((*val == DUAL_ROLE_PROP_PR_SRC &&
					pd->current_pr != PR_SRC) ||
				(*val == DUAL_ROLE_PROP_PR_SNK &&
					 pd->current_pr != PR_SINK)) {
				usbpd_err(&pd->dev, "incorrect state (%s) after power_role swap\n",
						pd->current_pr == PR_SRC ?
						"source" : "sink");
				return -EPROTO;
			}
		}
		break;

	default:
		usbpd_warn(&pd->dev, "unsupported property %d\n", prop);
		return -ENOTSUPP;
	}

	return 0;
}
+
+static int usbpd_dr_prop_writeable(struct dual_role_phy_instance *dual_role,
+		enum dual_role_property prop)
+{
+	switch (prop) {
+	case DUAL_ROLE_PROP_MODE:
+	case DUAL_ROLE_PROP_DR:
+	case DUAL_ROLE_PROP_PR:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int usbpd_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int i;
+
+	add_uevent_var(env, "DATA_ROLE=%s", pd->current_dr == DR_DFP ?
+			"dfp" : "ufp");
+
+	if (pd->current_pr == PR_SINK) {
+		add_uevent_var(env, "POWER_ROLE=sink");
+		add_uevent_var(env, "SRC_CAP_ID=%d", pd->src_cap_id);
+
+		for (i = 0; i < ARRAY_SIZE(pd->received_pdos); i++)
+			add_uevent_var(env, "PDO%d=%08x", i,
+					pd->received_pdos[i]);
+
+		add_uevent_var(env, "REQUESTED_PDO=%d", pd->requested_pdo);
+		add_uevent_var(env, "SELECTED_PDO=%d", pd->selected_pdo);
+	} else {
+		add_uevent_var(env, "POWER_ROLE=source");
+		for (i = 0; i < ARRAY_SIZE(default_src_caps); i++)
+			add_uevent_var(env, "PDO%d=%08x", i,
+					default_src_caps[i]);
+	}
+
+	add_uevent_var(env, "RDO=%08x", pd->rdo);
+	add_uevent_var(env, "CONTRACT=%s", pd->in_explicit_contract ?
+				"explicit" : "implicit");
+	add_uevent_var(env, "ALT_MODE=%d", pd->vdm_state == MODE_ENTERED);
+
+	return 0;
+}
+
+static ssize_t contract_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			pd->in_explicit_contract ?  "explicit" : "implicit");
+}
+static DEVICE_ATTR_RO(contract);
+
+static ssize_t current_pr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *pr = "none";
+
+	if (pd->current_pr == PR_SINK)
+		pr = "sink";
+	else if (pd->current_pr == PR_SRC)
+		pr = "source";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", pr);
+}
+static DEVICE_ATTR_RO(current_pr);
+
+static ssize_t initial_pr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *pr = "none";
+
+	if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+		pr = "sink";
+	else if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SINK)
+		pr = "source";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", pr);
+}
+static DEVICE_ATTR_RO(initial_pr);
+
+static ssize_t current_dr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *dr = "none";
+
+	if (pd->current_dr == DR_UFP)
+		dr = "ufp";
+	else if (pd->current_dr == DR_DFP)
+		dr = "dfp";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", dr);
+}
+static DEVICE_ATTR_RO(current_dr);
+
+static ssize_t initial_dr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *dr = "none";
+
+	if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+		dr = "ufp";
+	else if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SINK)
+		dr = "dfp";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", dr);
+}
+static DEVICE_ATTR_RO(initial_dr);
+
+static ssize_t src_cap_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", pd->src_cap_id);
+}
+static DEVICE_ATTR_RO(src_cap_id);
+
+/* Dump received source PDOs in human-readable format */
+static ssize_t pdo_h_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int i;
+	ssize_t cnt = 0;
+
+	for (i = 0; i < ARRAY_SIZE(pd->received_pdos); i++) {
+		u32 pdo = pd->received_pdos[i];
+
+		if (pdo == 0)
+			break;
+
+		cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt, "PDO %d\n", i + 1);
+
+		if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_FIXED) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tFixed supply\n"
+					"\tDual-Role Power:%d\n"
+					"\tUSB Suspend Supported:%d\n"
+					"\tExternally Powered:%d\n"
+					"\tUSB Communications Capable:%d\n"
+					"\tData Role Swap:%d\n"
+					"\tPeak Current:%d\n"
+					"\tVoltage:%d (mV)\n"
+					"\tMax Current:%d (mA)\n",
+					PD_SRC_PDO_FIXED_PR_SWAP(pdo),
+					PD_SRC_PDO_FIXED_USB_SUSP(pdo),
+					PD_SRC_PDO_FIXED_EXT_POWERED(pdo),
+					PD_SRC_PDO_FIXED_USB_COMM(pdo),
+					PD_SRC_PDO_FIXED_DR_SWAP(pdo),
+					PD_SRC_PDO_FIXED_PEAK_CURR(pdo),
+					PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50,
+					PD_SRC_PDO_FIXED_MAX_CURR(pdo) * 10);
+		} else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_BATTERY) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tBattery supply\n"
+					"\tMax Voltage:%d (mV)\n"
+					"\tMin Voltage:%d (mV)\n"
+					"\tMax Power:%d (mW)\n",
+					PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MAX(pdo) * 250);
+		} else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_VARIABLE) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tVariable supply\n"
+					"\tMax Voltage:%d (mV)\n"
+					"\tMin Voltage:%d (mV)\n"
+					"\tMax Current:%d (mA)\n",
+					PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MAX(pdo) * 10);
+		} else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_AUGMENTED) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tProgrammable Power supply\n"
+					"\tMax Voltage:%d (mV)\n"
+					"\tMin Voltage:%d (mV)\n"
+					"\tMax Current:%d (mA)\n",
+					PD_APDO_MAX_VOLT(pdo) * 100,
+					PD_APDO_MIN_VOLT(pdo) * 100,
+					PD_APDO_MAX_CURR(pdo) * 50);
+		} else {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"Invalid PDO\n");
+		}
+
+		buf[cnt++] = '\n';
+	}
+
+	return cnt;
+}
+static DEVICE_ATTR_RO(pdo_h);
+
static ssize_t pdo_n_show(struct device *dev, struct device_attribute *attr,
		char *buf);

/* Read-only sysfs attribute "pdo<n>", all sharing the pdo_n_show handler */
#define PDO_ATTR(n) {					\
	.attr	= { .name = __stringify(pdo##n), .mode = 0444 },	\
	.show	= pdo_n_show,				\
}
/* pdo1..pdo7: raw hex dump of each received PDO (1-based names, 0-based array) */
static struct device_attribute dev_attr_pdos[] = {
	PDO_ATTR(1),
	PDO_ATTR(2),
	PDO_ATTR(3),
	PDO_ATTR(4),
	PDO_ATTR(5),
	PDO_ATTR(6),
	PDO_ATTR(7),
};
+
+static ssize_t pdo_n_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev_attr_pdos); i++)
+		if (attr == &dev_attr_pdos[i])
+			/* dump the PDO as a hex string */
+			return snprintf(buf, PAGE_SIZE, "%08x\n",
+					pd->received_pdos[i]);
+
+	usbpd_err(&pd->dev, "Invalid PDO index\n");
+	return -EINVAL;
+}
+
+static ssize_t select_pdo_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int src_cap_id;
+	int pdo, uv = 0, ua = 0;
+	int ret;
+
+	/* Only allowed if we are already in explicit sink contract */
+	if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+		usbpd_err(&pd->dev, "select_pdo: Cannot select new PDO yet\n");
+		return -EBUSY;
+	}
+
+	ret = sscanf(buf, "%d %d %d %d", &src_cap_id, &pdo, &uv, &ua);
+	if (ret != 2 && ret != 4) {
+		usbpd_err(&pd->dev, "select_pdo: Must specify <src cap id> <PDO> [<uV> <uA>]\n");
+		return -EINVAL;
+	}
+
+	if (src_cap_id != pd->src_cap_id) {
+		usbpd_err(&pd->dev, "select_pdo: src_cap_id mismatch.  Requested:%d, current:%d\n",
+				src_cap_id, pd->src_cap_id);
+		return -EINVAL;
+	}
+
+	if (pdo < 1 || pdo > 7) {
+		usbpd_err(&pd->dev, "select_pdo: invalid PDO:%d\n", pdo);
+		return -EINVAL;
+	}
+
+	ret = pd_select_pdo(pd, pdo, uv, ua);
+	if (ret)
+		return ret;
+
+	usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
+
+	return size;
+}
+
+static ssize_t select_pdo_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", pd->selected_pdo);
+}
+static DEVICE_ATTR_RW(select_pdo);
+
+static ssize_t rdo_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	/* dump the RDO as a hex string */
+	return snprintf(buf, PAGE_SIZE, "%08x\n", pd->rdo);
+}
+static DEVICE_ATTR_RO(rdo);
+
+static ssize_t rdo_h_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int pos = PD_RDO_OBJ_POS(pd->rdo);
+	int type = PD_SRC_PDO_TYPE(pd->received_pdos[pos]);
+	int len;
+
+	len = scnprintf(buf, PAGE_SIZE, "Request Data Object\n"
+			"\tObj Pos:%d\n"
+			"\tGiveback:%d\n"
+			"\tCapability Mismatch:%d\n"
+			"\tUSB Communications Capable:%d\n"
+			"\tNo USB Suspend:%d\n",
+			PD_RDO_OBJ_POS(pd->rdo),
+			PD_RDO_GIVEBACK(pd->rdo),
+			PD_RDO_MISMATCH(pd->rdo),
+			PD_RDO_USB_COMM(pd->rdo),
+			PD_RDO_NO_USB_SUSP(pd->rdo));
+
+	switch (type) {
+	case PD_SRC_PDO_TYPE_FIXED:
+	case PD_SRC_PDO_TYPE_VARIABLE:
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"(Fixed/Variable)\n"
+				"\tOperating Current:%d (mA)\n"
+				"\t%s Current:%d (mA)\n",
+				PD_RDO_FIXED_CURR(pd->rdo) * 10,
+				PD_RDO_GIVEBACK(pd->rdo) ? "Min" : "Max",
+				PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 10);
+		break;
+
+	case PD_SRC_PDO_TYPE_BATTERY:
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"(Battery)\n"
+				"\tOperating Power:%d (mW)\n"
+				"\t%s Power:%d (mW)\n",
+				PD_RDO_FIXED_CURR(pd->rdo) * 250,
+				PD_RDO_GIVEBACK(pd->rdo) ? "Min" : "Max",
+				PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 250);
+		break;
+
+	case PD_SRC_PDO_TYPE_AUGMENTED:
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"(Programmable)\n"
+				"\tOutput Voltage:%d (mV)\n"
+				"\tOperating Current:%d (mA)\n",
+				PD_RDO_PROG_VOLTAGE(pd->rdo) * 20,
+				PD_RDO_PROG_CURR(pd->rdo) * 50);
+		break;
+	}
+
+	return len;
+}
+static DEVICE_ATTR_RO(rdo_h);
+
+static ssize_t hard_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int val = 0;
+
+	if (sscanf(buf, "%d\n", &val) != 1)
+		return -EINVAL;
+
+	if (val)
+		usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+				PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+
+	return size;
+}
+static DEVICE_ATTR_WO(hard_reset);
+
/* All sysfs attributes published under /sys/class/usbpd/usbpd<N>/ */
static struct attribute *usbpd_attrs[] = {
	&dev_attr_contract.attr,
	&dev_attr_initial_pr.attr,
	&dev_attr_current_pr.attr,
	&dev_attr_initial_dr.attr,
	&dev_attr_current_dr.attr,
	&dev_attr_src_cap_id.attr,
	&dev_attr_pdo_h.attr,
	&dev_attr_pdos[0].attr,
	&dev_attr_pdos[1].attr,
	&dev_attr_pdos[2].attr,
	&dev_attr_pdos[3].attr,
	&dev_attr_pdos[4].attr,
	&dev_attr_pdos[5].attr,
	&dev_attr_pdos[6].attr,
	&dev_attr_select_pdo.attr,
	&dev_attr_rdo.attr,
	&dev_attr_rdo_h.attr,
	&dev_attr_hard_reset.attr,
	NULL,
};
ATTRIBUTE_GROUPS(usbpd);
+
/* "usbpd" device class: carries the sysfs attributes and uevent hook above */
static struct class usbpd_class = {
	.name = "usbpd",
	.owner = THIS_MODULE,
	.dev_uevent = usbpd_uevent,
	.dev_groups = usbpd_groups,
};
+
+static int match_usbpd_device(struct device *dev, const void *data)
+{
+	return dev->parent == data;
+}
+
+static void devm_usbpd_put(struct device *dev, void *res)
+{
+	struct usbpd **ppd = res;
+
+	put_device(&(*ppd)->dev);
+}
+
+struct usbpd *devm_usbpd_get_by_phandle(struct device *dev, const char *phandle)
+{
+	struct usbpd **ptr, *pd = NULL;
+	struct device_node *pd_np;
+	struct platform_device *pdev;
+	struct device *pd_dev;
+
+	if (!usbpd_class.p) /* usbpd_init() not yet called */
+		return ERR_PTR(-EAGAIN);
+
+	if (!dev->of_node)
+		return ERR_PTR(-EINVAL);
+
+	pd_np = of_parse_phandle(dev->of_node, phandle, 0);
+	if (!pd_np)
+		return ERR_PTR(-ENXIO);
+
+	pdev = of_find_device_by_node(pd_np);
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	pd_dev = class_find_device(&usbpd_class, NULL, &pdev->dev,
+			match_usbpd_device);
+	if (!pd_dev) {
+		platform_device_put(pdev);
+		/* device was found but maybe hadn't probed yet, so defer */
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	ptr = devres_alloc(devm_usbpd_put, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr) {
+		put_device(pd_dev);
+		platform_device_put(pdev);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pd = dev_get_drvdata(pd_dev);
+	if (!pd)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	*ptr = pd;
+	devres_add(dev, ptr);
+
+	return pd;
+}
+EXPORT_SYMBOL(devm_usbpd_get_by_phandle);
+
+static int num_pd_instances;
+
+/**
+ * usbpd_create - Create a new instance of USB PD protocol/policy engine
+ * @parent - parent device to associate with
+ *
+ * This creates a new usbpd class device which manages the state of a
+ * USB PD-capable port. The parent device that is passed in should be
+ * associated with the physical device port, e.g. a PD PHY.
+ *
+ * Return: struct usbpd pointer, or an ERR_PTR value
+ */
+struct usbpd *usbpd_create(struct device *parent)
+{
+	int ret;
+	struct usbpd *pd;
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	device_initialize(&pd->dev);
+	pd->dev.class = &usbpd_class;
+	pd->dev.parent = parent;
+	dev_set_drvdata(&pd->dev, pd);
+
+	ret = dev_set_name(&pd->dev, "usbpd%d", num_pd_instances++);
+	if (ret)
+		goto free_pd;
+
+	ret = device_init_wakeup(&pd->dev, true);
+	if (ret)
+		goto free_pd;
+
+	ret = device_add(&pd->dev);
+	if (ret)
+		goto free_pd;
+
+	pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE);
+	if (!pd->wq) {
+		ret = -ENOMEM;
+		goto del_pd;
+	}
+	INIT_WORK(&pd->sm_work, usbpd_sm);
+	hrtimer_init(&pd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	pd->timer.function = pd_timeout;
+
+	pd->usb_psy = power_supply_get_by_name("usb");
+	if (!pd->usb_psy) {
+		usbpd_dbg(&pd->dev, "Could not get USB power_supply, deferring probe\n");
+		ret = -EPROBE_DEFER;
+		goto destroy_wq;
+	}
+
+	/*
+	 * associate extcon with the parent dev as it could have a DT
+	 * node which will be useful for extcon_get_edev_by_phandle()
+	 */
+	pd->extcon = devm_extcon_dev_allocate(parent, usbpd_extcon_cable);
+	if (IS_ERR(pd->extcon)) {
+		usbpd_err(&pd->dev, "failed to allocate extcon device\n");
+		ret = PTR_ERR(pd->extcon);
+		goto put_psy;
+	}
+
+	pd->extcon->mutually_exclusive = usbpd_extcon_exclusive;
+	ret = devm_extcon_dev_register(parent, pd->extcon);
+	if (ret) {
+		usbpd_err(&pd->dev, "failed to register extcon device\n");
+		goto put_psy;
+	}
+
+	pd->vbus = devm_regulator_get(parent, "vbus");
+	if (IS_ERR(pd->vbus)) {
+		ret = PTR_ERR(pd->vbus);
+		goto put_psy;
+	}
+
+	pd->vconn = devm_regulator_get(parent, "vconn");
+	if (IS_ERR(pd->vconn)) {
+		ret = PTR_ERR(pd->vconn);
+		goto put_psy;
+	}
+
+	pd->vconn_is_external = device_property_present(parent,
+					"qcom,vconn-uses-external-source");
+
+	/*
+	 * Register the Android dual-role class (/sys/class/dual_role_usb/).
+	 * The first instance should be named "otg_default" as that's what
+	 * Android expects.
+	 * Note this is different than the /sys/class/usbpd/ created above.
+	 */
+	pd->dr_desc.name = (num_pd_instances == 1) ?
+				"otg_default" : dev_name(&pd->dev);
+	pd->dr_desc.supported_modes = DUAL_ROLE_SUPPORTED_MODES_DFP_AND_UFP;
+	pd->dr_desc.properties = usbpd_dr_properties;
+	pd->dr_desc.num_properties = ARRAY_SIZE(usbpd_dr_properties);
+	pd->dr_desc.get_property = usbpd_dr_get_property;
+	pd->dr_desc.set_property = usbpd_dr_set_property;
+	pd->dr_desc.property_is_writeable = usbpd_dr_prop_writeable;
+
+	pd->dual_role = devm_dual_role_instance_register(&pd->dev,
+			&pd->dr_desc);
+	if (IS_ERR(pd->dual_role)) {
+		usbpd_err(&pd->dev, "could not register dual_role instance\n");
+		goto put_psy;
+	} else {
+		pd->dual_role->drv_data = pd;
+	}
+
+	pd->current_pr = PR_NONE;
+	pd->current_dr = DR_NONE;
+	list_add_tail(&pd->instance, &_usbpd);
+
+	spin_lock_init(&pd->rx_lock);
+	INIT_LIST_HEAD(&pd->rx_q);
+	INIT_LIST_HEAD(&pd->svid_handlers);
+	init_completion(&pd->swap_complete);
+
+	pd->psy_nb.notifier_call = psy_changed;
+	ret = power_supply_reg_notifier(&pd->psy_nb);
+	if (ret)
+		goto del_inst;
+
+	/* force read initial power_supply values */
+	psy_changed(&pd->psy_nb, PSY_EVENT_PROP_CHANGED, pd->usb_psy);
+
+	return pd;
+
+del_inst:
+	list_del(&pd->instance);
+put_psy:
+	power_supply_put(pd->usb_psy);
+destroy_wq:
+	destroy_workqueue(pd->wq);
+del_pd:
+	device_del(&pd->dev);
+free_pd:
+	num_pd_instances--;
+	kfree(pd);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(usbpd_create);
+
/**
 * usbpd_destroy - Removes and frees a usbpd instance
 * @pd: the instance to destroy
 *
 * Safe to call with a NULL @pd. Reverses the registrations made in
 * usbpd_create() in reverse order.
 */
void usbpd_destroy(struct usbpd *pd)
{
	if (!pd)
		return;

	list_del(&pd->instance);
	power_supply_unreg_notifier(&pd->psy_nb);
	power_supply_put(pd->usb_psy);
	destroy_workqueue(pd->wq);
	device_del(&pd->dev);
	/*
	 * NOTE(review): pd embeds an initialized struct device; put_device()
	 * instead of kfree() may be the safer release if other code still
	 * holds device references — TODO confirm.
	 */
	kfree(pd);
}
EXPORT_SYMBOL(usbpd_destroy);
+
/* Module init: set up the IPC debug log and register the usbpd class */
static int __init usbpd_init(void)
{
	/* failure leaves usbpd_ipc_log NULL; only debug logging is lost */
	usbpd_ipc_log = ipc_log_context_create(NUM_LOG_PAGES, "usb_pd", 0);
	return class_register(&usbpd_class);
}
module_init(usbpd_init);
+
/* Module exit: tear down the usbpd class registered in usbpd_init() */
static void __exit usbpd_exit(void)
{
	class_unregister(&usbpd_class);
}
module_exit(usbpd_exit);
+
+MODULE_DESCRIPTION("USB Power Delivery Policy Engine");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
new file mode 100644
index 0000000..4caee72
--- /dev/null
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -0,0 +1,903 @@
+/* Copyright (c) 2016-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include "usbpd.h"
+
+#define USB_PDPHY_MAX_DATA_OBJ_LEN	28
+#define USB_PDPHY_MSG_HDR_LEN		2
+
+/* PD PHY register offsets and bit fields */
+#define USB_PDPHY_MSG_CONFIG		0x40
+#define MSG_CONFIG_PORT_DATA_ROLE	BIT(3)
+#define MSG_CONFIG_PORT_POWER_ROLE	BIT(2)
+#define MSG_CONFIG_SPEC_REV_MASK	(BIT(1) | BIT(0))
+
+#define USB_PDPHY_EN_CONTROL		0x46
+#define CONTROL_ENABLE			BIT(0)
+
+#define USB_PDPHY_RX_STATUS		0x4A
+#define RX_FRAME_TYPE			(BIT(0) | BIT(1) | BIT(2))
+
+#define USB_PDPHY_FRAME_FILTER		0x4C
+#define FRAME_FILTER_EN_HARD_RESET	BIT(5)
+#define FRAME_FILTER_EN_SOP		BIT(0)
+
+#define USB_PDPHY_TX_SIZE		0x42
+#define TX_SIZE_MASK			0xF
+
+#define USB_PDPHY_TX_CONTROL		0x44
+#define TX_CONTROL_RETRY_COUNT		(BIT(6) | BIT(5))
+#define TX_CONTROL_FRAME_TYPE		(BIT(4) | BIT(3) | BIT(2))
+#define TX_CONTROL_FRAME_TYPE_CABLE_RESET (0x1 << 2)
+#define TX_CONTROL_SEND_SIGNAL		BIT(1)
+#define TX_CONTROL_SEND_MSG		BIT(0)
+
+#define USB_PDPHY_RX_SIZE		0x48
+
+#define USB_PDPHY_RX_ACKNOWLEDGE	0x4B
+#define RX_BUFFER_TOKEN			BIT(0)
+
+#define USB_PDPHY_BIST_MODE		0x4E
+#define BIST_MODE_MASK			0xF
+#define BIST_ENABLE			BIT(7)
+#define PD_MSG_BIST			0x3
+#define PD_BIST_TEST_DATA_MODE		0x8
+
+#define USB_PDPHY_TX_BUFFER_HDR		0x60
+#define USB_PDPHY_TX_BUFFER_DATA	0x62
+
+#define USB_PDPHY_RX_BUFFER		0x80
+
+#define USB_PDPHY_SEC_ACCESS		0xD0
+#define USB_PDPHY_TRIM_3		0xF3
+
+/* VDD regulator */
+#define VDD_PDPHY_VOL_MIN		3088000 /* uV */
+#define VDD_PDPHY_VOL_MAX		3088000 /* uV */
+#define VDD_PDPHY_HPM_LOAD		3000 /* uA */
+
+/* Per-device state for the QPNP PD PHY peripheral. */
+struct usb_pdphy {
+	struct device *dev;
+	struct regmap *regmap;		/* parent PMIC regmap for register I/O */
+
+	u16 base;			/* peripheral base address (DT "reg") */
+	struct regulator *vdd_pdphy;
+
+	/* irqs */
+	int sig_tx_irq;
+	int sig_rx_irq;
+	int msg_tx_irq;
+	int msg_rx_irq;
+	int msg_tx_failed_irq;
+	int msg_tx_discarded_irq;
+	int msg_rx_discarded_irq;
+
+	/* callbacks into the policy engine, registered via pd_phy_open() */
+	void (*signal_cb)(struct usbpd *pd, enum pd_sig_type type);
+	void (*msg_rx_cb)(struct usbpd *pd, enum pd_msg_type type,
+			  u8 *buf, size_t len);
+	void (*shutdown_cb)(struct usbpd *pd);
+
+	/* write waitq */
+	wait_queue_head_t tx_waitq;
+
+	bool is_opened;
+	int tx_status;			/* -EINPROGRESS while a TX is pending */
+	u8 frame_filter_val;
+	bool in_test_data_mode;		/* BIST test-data mode; msg-rx irq off */
+
+	enum data_role data_role;
+	enum power_role power_role;
+
+	struct usbpd *usbpd;
+
+	/* debug */
+	struct dentry *debug_root;
+	unsigned int tx_bytes; /* hdr + data */
+	unsigned int rx_bytes; /* hdr + data */
+	unsigned int sig_tx_cnt;
+	unsigned int sig_rx_cnt;
+	unsigned int msg_tx_cnt;
+	unsigned int msg_rx_cnt;
+	unsigned int msg_tx_failed_cnt;
+	unsigned int msg_tx_discarded_cnt;
+	unsigned int msg_rx_discarded_cnt;
+};
+
+static struct usb_pdphy *__pdphy;
+
+/* debugfs "status" show: dump driver state and interrupt statistics. */
+static int pdphy_dbg_status(struct seq_file *s, void *p)
+{
+	struct usb_pdphy *pdphy = s->private;
+
+	seq_printf(s,
+		"PD Phy driver status\n"
+		"==================================================\n");
+	seq_printf(s, "opened:         %10d\n", pdphy->is_opened);
+	seq_printf(s, "tx status:      %10d\n", pdphy->tx_status);
+	seq_printf(s, "tx bytes:       %10u\n", pdphy->tx_bytes);
+	seq_printf(s, "rx bytes:       %10u\n", pdphy->rx_bytes);
+	seq_printf(s, "data role:      %10u\n", pdphy->data_role);
+	seq_printf(s, "power role:     %10u\n", pdphy->power_role);
+	seq_printf(s, "frame filter:   %10u\n", pdphy->frame_filter_val);
+	seq_printf(s, "sig tx cnt:     %10u\n", pdphy->sig_tx_cnt);
+	seq_printf(s, "sig rx cnt:     %10u\n", pdphy->sig_rx_cnt);
+	seq_printf(s, "msg tx cnt:     %10u\n", pdphy->msg_tx_cnt);
+	seq_printf(s, "msg rx cnt:     %10u\n", pdphy->msg_rx_cnt);
+	seq_printf(s, "msg tx failed cnt:    %10u\n",
+			pdphy->msg_tx_failed_cnt);
+	seq_printf(s, "msg tx discarded cnt: %10u\n",
+			pdphy->msg_tx_discarded_cnt);
+	seq_printf(s, "msg rx discarded cnt: %10u\n",
+			pdphy->msg_rx_discarded_cnt);
+
+	return 0;
+}
+
+/* debugfs open: bind pdphy_dbg_status; i_private carries the pdphy. */
+static int pdphy_dbg_status_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pdphy_dbg_status, inode->i_private);
+}
+
+static const struct file_operations status_ops = {
+	.owner		= THIS_MODULE,
+	.open		= pdphy_dbg_status_open,
+	.llseek		= seq_lseek,
+	.read		= seq_read,
+	.release	= single_release,
+};
+
+/*
+ * Create debugfs dir "usb-pdphy" with a read-only (0400) "status" file.
+ * Failures are non-fatal: the driver works without debugfs.
+ */
+static void pdphy_create_debugfs_entries(struct usb_pdphy *pdphy)
+{
+	struct dentry *ent;
+
+	pdphy->debug_root = debugfs_create_dir("usb-pdphy", NULL);
+	if (!pdphy->debug_root) {
+		dev_warn(pdphy->dev, "Couldn't create debug dir\n");
+		return;
+	}
+
+	ent = debugfs_create_file("status", 0400, pdphy->debug_root, pdphy,
+				  &status_ops);
+	if (!ent) {
+		dev_warn(pdphy->dev, "Couldn't create status file\n");
+		debugfs_remove(pdphy->debug_root);
+	}
+}
+
+/*
+ * Switch the PD PHY VDD supply on (HPM load + voltage + enable) or off.
+ *
+ * Fix: on the power-up failure paths the original error in @ret was
+ * overwritten by the cleanup calls (e.g. a failed regulator_set_voltage()
+ * fell through to regulator_set_load(), whose success made the function
+ * return 0).  Cleanup results now go into a separate @rc so the first
+ * failure is what the caller sees; the power-off path still reports the
+ * cleanup result as before.
+ */
+static int pdphy_enable_power(struct usb_pdphy *pdphy, bool on)
+{
+	int ret = 0;
+	int rc = 0;
+
+	dev_dbg(pdphy->dev, "%s turn %s regulator.\n", __func__,
+		on ? "on" : "off");
+
+	if (!on)
+		goto disable_pdphy_vdd;
+
+	ret = regulator_set_load(pdphy->vdd_pdphy, VDD_PDPHY_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(pdphy->dev, "Unable to set HPM of vdd_pdphy:%d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(pdphy->vdd_pdphy, VDD_PDPHY_VOL_MIN,
+						VDD_PDPHY_VOL_MAX);
+	if (ret) {
+		dev_err(pdphy->dev,
+				"set voltage failed for vdd_pdphy:%d\n", ret);
+		goto put_pdphy_vdd_lpm;
+	}
+
+	ret = regulator_enable(pdphy->vdd_pdphy);
+	if (ret) {
+		dev_err(pdphy->dev, "Unable to enable vdd_pdphy:%d\n", ret);
+		goto unset_pdphy_vdd;
+	}
+
+	dev_dbg(pdphy->dev, "%s: PD PHY regulator turned ON.\n", __func__);
+	return 0;
+
+disable_pdphy_vdd:
+	rc = regulator_disable(pdphy->vdd_pdphy);
+	if (rc)
+		dev_err(pdphy->dev, "Unable to disable vdd_pdphy:%d\n", rc);
+
+unset_pdphy_vdd:
+	rc = regulator_set_voltage(pdphy->vdd_pdphy, 0, VDD_PDPHY_VOL_MAX);
+	if (rc)
+		dev_err(pdphy->dev,
+			"Unable to set (0) voltage for vdd_pdphy:%d\n", rc);
+
+put_pdphy_vdd_lpm:
+	rc = regulator_set_load(pdphy->vdd_pdphy, 0);
+	if (rc < 0)
+		dev_err(pdphy->dev, "Unable to set (0) HPM of vdd_pdphy\n");
+
+	/* on failure report the original error; on power-off the cleanup rc */
+	return ret ? ret : rc;
+}
+
+/*
+ * Gate all PD PHY interrupts on open/close.  sig-rx and msg-rx are also
+ * made wakeup-capable so incoming PD traffic can wake the system.
+ * msg-rx is skipped while in BIST test-data mode because the rx handler
+ * already disabled it with disable_irq_nosync().
+ *
+ * NOTE(review): only used in this file yet not static, and not declared
+ * in usbpd.h — consider making it static.
+ * NOTE(review): if in_test_data_mode changes between the disable and the
+ * next enable call, the enable/disable counts could go unbalanced —
+ * confirm the callers' ordering guarantees.
+ */
+void pdphy_enable_irq(struct usb_pdphy *pdphy, bool enable)
+{
+	if (enable) {
+		enable_irq(pdphy->sig_tx_irq);
+		enable_irq(pdphy->sig_rx_irq);
+		enable_irq_wake(pdphy->sig_rx_irq);
+		enable_irq(pdphy->msg_tx_irq);
+		if (!pdphy->in_test_data_mode) {
+			enable_irq(pdphy->msg_rx_irq);
+			enable_irq_wake(pdphy->msg_rx_irq);
+		}
+		enable_irq(pdphy->msg_tx_failed_irq);
+		enable_irq(pdphy->msg_tx_discarded_irq);
+		enable_irq(pdphy->msg_rx_discarded_irq);
+		return;
+	}
+
+	disable_irq(pdphy->sig_tx_irq);
+	disable_irq(pdphy->sig_rx_irq);
+	disable_irq_wake(pdphy->sig_rx_irq);
+	disable_irq(pdphy->msg_tx_irq);
+	if (!pdphy->in_test_data_mode) {
+		disable_irq(pdphy->msg_rx_irq);
+		disable_irq_wake(pdphy->msg_rx_irq);
+	}
+	disable_irq(pdphy->msg_tx_failed_irq);
+	disable_irq(pdphy->msg_tx_discarded_irq);
+	disable_irq(pdphy->msg_rx_discarded_irq);
+}
+
+/* Read @count bytes starting at base-relative @addr into @val. */
+static int pdphy_reg_read(struct usb_pdphy *pdphy, u8 *val, u16 addr, int count)
+{
+	int ret;
+
+	ret = regmap_bulk_read(pdphy->regmap, pdphy->base + addr, val, count);
+	if (ret) {
+		dev_err(pdphy->dev, "read failed: addr=0x%04x, ret=%d\n",
+			pdphy->base + addr, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Write multiple registers to device with block of data */
+static int pdphy_bulk_reg_write(struct usb_pdphy *pdphy, u16 addr,
+	const void *val, u8 val_cnt)
+{
+	int ret;
+
+	ret = regmap_bulk_write(pdphy->regmap, pdphy->base + addr,
+			val, val_cnt);
+	if (ret) {
+		dev_err(pdphy->dev, "bulk write failed: addr=0x%04x, ret=%d\n",
+				pdphy->base + addr, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Writes a single byte to the specified register */
+static inline int pdphy_reg_write(struct usb_pdphy *pdphy, u16 addr, u8 val)
+{
+	return pdphy_bulk_reg_write(pdphy, addr, &val, 1);
+}
+
+/* Writes to the specified register limited by the bit mask */
+static int pdphy_masked_write(struct usb_pdphy *pdphy, u16 addr,
+	u8 mask, u8 val)
+{
+	int ret;
+
+	ret = regmap_update_bits(pdphy->regmap, pdphy->base + addr, mask, val);
+	if (ret) {
+		dev_err(pdphy->dev, "write failed: addr=0x%04x, ret=%d\n",
+				pdphy->base + addr, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Update the data/power role bits used by HW for GoodCRC generation.
+ *
+ * Fix: guard against a NULL __pdphy (driver not probed yet), matching the
+ * checks in the other exported pd_phy_* entry points.
+ */
+int pd_phy_update_roles(enum data_role dr, enum power_role pr)
+{
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	return pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
+		(MSG_CONFIG_PORT_DATA_ROLE | MSG_CONFIG_PORT_POWER_ROLE),
+		((dr == DR_DFP ? MSG_CONFIG_PORT_DATA_ROLE : 0) |
+		 (pr == PR_SRC ? MSG_CONFIG_PORT_POWER_ROLE : 0)));
+}
+EXPORT_SYMBOL(pd_phy_update_roles);
+
+/*
+ * Program the PD spec revision field of MSG_CONFIG.
+ *
+ * Fix: guard against a NULL __pdphy (driver not probed yet), matching the
+ * checks in the other exported pd_phy_* entry points.
+ */
+int pd_phy_update_spec_rev(enum pd_spec_rev rev)
+{
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	return pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
+			MSG_CONFIG_SPEC_REV_MASK, rev);
+}
+EXPORT_SYMBOL(pd_phy_update_spec_rev);
+
+/**
+ * pd_phy_open() - Configure and enable the PD PHY for message transfer.
+ * @params: callbacks, initial roles, spec revision and frame filter.
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: any register-write failure after pdphy_enable_power(pdphy, true)
+ * previously returned with the VDD supply left enabled; the error path
+ * now powers it back off.
+ */
+int pd_phy_open(struct pd_phy_params *params)
+{
+	int ret;
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	if (pdphy->is_opened) {
+		dev_err(pdphy->dev, "%s: already opened\n", __func__);
+		return -EBUSY;
+	}
+
+	pdphy->signal_cb = params->signal_cb;
+	pdphy->msg_rx_cb = params->msg_rx_cb;
+	pdphy->shutdown_cb = params->shutdown_cb;
+	pdphy->data_role = params->data_role;
+	pdphy->power_role = params->power_role;
+	pdphy->frame_filter_val = params->frame_filter_val;
+
+	dev_dbg(pdphy->dev, "%s: DR %x PR %x frame filter val %x\n", __func__,
+		pdphy->data_role, pdphy->power_role, pdphy->frame_filter_val);
+
+	ret = pdphy_enable_power(pdphy, true);
+	if (ret)
+		return ret;
+
+	/* update data and power role to be used in GoodCRC generation */
+	ret = pd_phy_update_roles(pdphy->data_role, pdphy->power_role);
+	if (ret)
+		goto power_off;
+
+	ret = pd_phy_update_spec_rev(params->spec_rev);
+	if (ret)
+		goto power_off;
+
+	/* toggle ENABLE to reset the block before use */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_EN_CONTROL, 0);
+	if (ret)
+		goto power_off;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_EN_CONTROL, CONTROL_ENABLE);
+	if (ret)
+		goto power_off;
+
+	/* update frame filter */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER,
+			pdphy->frame_filter_val);
+	if (ret)
+		goto power_off;
+
+	/* initialize Rx buffer ownership to PDPHY HW */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_RX_ACKNOWLEDGE, 0);
+	if (ret)
+		goto power_off;
+
+	pdphy->is_opened = true;
+	pdphy_enable_irq(pdphy, true);
+
+	return 0;
+
+power_off:
+	pdphy_enable_power(pdphy, false);
+	return ret;
+}
+EXPORT_SYMBOL(pd_phy_open);
+
+/**
+ * pd_phy_signal() - Transmit a Hard Reset or Cable Reset signal.
+ * @type: HARD_RESET_SIG or CABLE_RESET_SIG
+ * @timeout_ms: how long to wait for the sig-tx completion interrupt
+ *
+ * Fix: the dev_dbg() dereferenced @pdphy before the NULL check — moved the
+ * check first.  Also appended the missing newline to the failure log.
+ */
+int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms)
+{
+	u8 val;
+	int ret;
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	dev_dbg(pdphy->dev, "%s: type %d timeout %u\n", __func__, type,
+			timeout_ms);
+
+	if (!pdphy->is_opened) {
+		dev_dbg(pdphy->dev, "%s: pdphy disabled\n", __func__);
+		return -ENODEV;
+	}
+
+	pdphy->tx_status = -EINPROGRESS;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+	if (ret)
+		return ret;
+
+	/* short delay between clearing and arming TX_CONTROL —
+	 * presumably a HW requirement; confirm against the PMIC spec.
+	 */
+	usleep_range(2, 3);
+
+	val = (type == CABLE_RESET_SIG ? TX_CONTROL_FRAME_TYPE_CABLE_RESET : 0)
+		| TX_CONTROL_SEND_SIGNAL;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, val);
+	if (ret)
+		return ret;
+
+	/* tx_status is set by the sig-tx interrupt handler */
+	ret = wait_event_interruptible_timeout(pdphy->tx_waitq,
+		pdphy->tx_status != -EINPROGRESS, msecs_to_jiffies(timeout_ms));
+	if (ret <= 0) {
+		dev_err(pdphy->dev, "%s: failed ret %d\n", __func__, ret);
+		return ret ? ret : -ETIMEDOUT;
+	}
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+
+	if (pdphy->tx_status)
+		return pdphy->tx_status;
+
+	if (type == HARD_RESET_SIG)
+		/* Frame filter is reconfigured in pd_phy_open() */
+		return pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(pd_phy_signal);
+
+/**
+ * pd_phy_write() - Transmit a PD message and wait for completion.
+ * @hdr: 2-byte PD message header
+ * @data: data objects (may be NULL when @data_len is 0)
+ * @data_len: length of @data, at most USB_PDPHY_MAX_DATA_OBJ_LEN
+ * @type: SOP frame type, programmed into TX_CONTROL
+ * @timeout_ms: how long to wait for the msg-tx completion interrupt
+ *
+ * Returns @data_len on success, a negative errno otherwise.
+ *
+ * Fix: dev_dbg()/hex dump dereferenced @pdphy before the NULL check —
+ * the check now comes first.  Also appended the missing newline to the
+ * failure log.
+ */
+int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
+	enum pd_msg_type type, unsigned int timeout_ms)
+{
+	u8 val;
+	int ret;
+	size_t total_len = data_len + USB_PDPHY_MSG_HDR_LEN;
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	dev_dbg(pdphy->dev, "%s: hdr %x frame type %d timeout %u\n",
+			__func__, hdr, type, timeout_ms);
+
+	if (data && data_len)
+		print_hex_dump_debug("tx data obj:", DUMP_PREFIX_NONE, 32, 4,
+				data, data_len, false);
+
+	if (!pdphy->is_opened) {
+		dev_dbg(pdphy->dev, "%s: pdphy disabled\n", __func__);
+		return -ENODEV;
+	}
+
+	if (data_len > USB_PDPHY_MAX_DATA_OBJ_LEN) {
+		dev_err(pdphy->dev, "%s: invalid data object len %zu\n",
+			__func__, data_len);
+		return -EINVAL;
+	}
+
+	pdphy->tx_status = -EINPROGRESS;
+
+	/* write 2 byte SOP message header
+	 * NOTE(review): &hdr is written as-is, assuming a little-endian CPU
+	 * matching the PD wire order — confirm.
+	 */
+	ret = pdphy_bulk_reg_write(pdphy, USB_PDPHY_TX_BUFFER_HDR, (u8 *)&hdr,
+			USB_PDPHY_MSG_HDR_LEN);
+	if (ret)
+		return ret;
+
+	if (data && data_len) {
+		/* write data objects of SOP message */
+		ret = pdphy_bulk_reg_write(pdphy, USB_PDPHY_TX_BUFFER_DATA,
+				data, data_len);
+		if (ret)
+			return ret;
+	}
+
+	/* TX_SIZE register holds (message length - 1) */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_SIZE, total_len - 1);
+	if (ret)
+		return ret;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+	if (ret)
+		return ret;
+
+	/* short settle delay before arming — presumably HW requirement */
+	usleep_range(2, 3);
+
+	val = TX_CONTROL_RETRY_COUNT | (type << 2) | TX_CONTROL_SEND_MSG;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, val);
+	if (ret)
+		return ret;
+
+	/* tx_status is set by the msg-tx/failed/discarded irq handler */
+	ret = wait_event_interruptible_timeout(pdphy->tx_waitq,
+		pdphy->tx_status != -EINPROGRESS, msecs_to_jiffies(timeout_ms));
+	if (ret <= 0) {
+		dev_err(pdphy->dev, "%s: failed ret %d\n", __func__, ret);
+		return ret ? ret : -ETIMEDOUT;
+	}
+
+	if (hdr && !pdphy->tx_status)
+		pdphy->tx_bytes += data_len + USB_PDPHY_MSG_HDR_LEN;
+
+	return pdphy->tx_status ? pdphy->tx_status : data_len;
+}
+EXPORT_SYMBOL(pd_phy_write);
+
+/**
+ * pd_phy_close() - Disable the PD PHY and power it off.
+ *
+ * Fix: a failed register write previously returned early and skipped
+ * pdphy_enable_power(pdphy, false), leaving the VDD supply enabled.
+ * Close is now best-effort: register-write failures are ignored (the
+ * helpers already log them) and power-off always runs.
+ */
+void pd_phy_close(void)
+{
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return;
+	}
+
+	if (!pdphy->is_opened) {
+		dev_err(pdphy->dev, "%s: not opened\n", __func__);
+		return;
+	}
+
+	pdphy->is_opened = false;
+	pdphy_enable_irq(pdphy, false);
+
+	/* fail any writer still blocked in pd_phy_write()/pd_phy_signal() */
+	pdphy->tx_status = -ESHUTDOWN;
+
+	wake_up_all(&pdphy->tx_waitq);
+
+	pdphy_reg_write(pdphy, USB_PDPHY_BIST_MODE, 0);
+	pdphy->in_test_data_mode = false;
+
+	pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+	pdphy_reg_write(pdphy, USB_PDPHY_EN_CONTROL, 0);
+
+	pdphy_enable_power(pdphy, false);
+}
+EXPORT_SYMBOL(pd_phy_close);
+
+/*
+ * Shared hard-irq handler for msg-tx, msg-tx-failed and msg-tx-discarded.
+ * Records the outcome in tx_status and wakes the blocked writer.
+ */
+static irqreturn_t pdphy_msg_tx_irq(int irq, void *data)
+{
+	struct usb_pdphy *pdphy = data;
+
+	if (irq == pdphy->msg_tx_irq) {
+		pdphy->msg_tx_cnt++;
+		pdphy->tx_status = 0;
+	} else if (irq == pdphy->msg_tx_discarded_irq) {
+		pdphy->msg_tx_discarded_cnt++;
+		pdphy->tx_status = -EBUSY;
+	} else if (irq == pdphy->msg_tx_failed_irq) {
+		pdphy->msg_tx_failed_cnt++;
+		pdphy->tx_status = -EFAULT;
+	} else {
+		dev_err(pdphy->dev, "spurious irq #%d received\n", irq);
+		return IRQ_NONE;
+	}
+
+	wake_up(&pdphy->tx_waitq);
+
+	return IRQ_HANDLED;
+}
+
+/* Count discarded incoming messages; statistics only. */
+static irqreturn_t pdphy_msg_rx_discarded_irq(int irq, void *data)
+{
+	struct usb_pdphy *pdphy = data;
+
+	pdphy->msg_rx_discarded_cnt++;
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Threaded handler for received signals.  Only Hard Reset is handled:
+ * the frame filter is cleared (re-armed by the next pd_phy_open()) and
+ * the policy engine is notified via signal_cb.
+ */
+static irqreturn_t pdphy_sig_rx_irq_thread(int irq, void *data)
+{
+	u8 rx_status, frame_type;
+	int ret;
+	struct usb_pdphy *pdphy = data;
+
+	pdphy->sig_rx_cnt++;
+
+	ret = pdphy_reg_read(pdphy, &rx_status, USB_PDPHY_RX_STATUS, 1);
+	if (ret)
+		goto done;
+
+	frame_type = rx_status & RX_FRAME_TYPE;
+	if (frame_type != HARD_RESET_SIG) {
+		dev_err(pdphy->dev, "%s:unsupported frame type %d\n",
+			__func__, frame_type);
+		goto done;
+	}
+
+	/* Frame filter is reconfigured in pd_phy_open() */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER, 0);
+
+	if (pdphy->signal_cb)
+		pdphy->signal_cb(pdphy->usbpd, frame_type);
+
+done:
+	return IRQ_HANDLED;
+}
+
+/* Signal transmit complete: mark success and wake pd_phy_signal(). */
+static irqreturn_t pdphy_sig_tx_irq_thread(int irq, void *data)
+{
+	struct usb_pdphy *pdphy = data;
+
+	/* in case of exit from BIST Carrier Mode 2, clear BIST_MODE */
+	pdphy_reg_write(pdphy, USB_PDPHY_BIST_MODE, 0);
+
+	pdphy->sig_tx_cnt++;
+	pdphy->tx_status = 0;
+	wake_up(&pdphy->tx_waitq);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Enter the given BIST mode: clear BIST_MODE, wait briefly, then set the
+ * mode bits with BIST_ENABLE.  The 5us delay is presumably a HW settling
+ * requirement — confirm against the PMIC spec.
+ */
+static int pd_phy_bist_mode(u8 bist_mode)
+{
+	struct usb_pdphy *pdphy = __pdphy;
+
+	dev_dbg(pdphy->dev, "%s: enter BIST mode %d\n", __func__, bist_mode);
+
+	pdphy_reg_write(pdphy, USB_PDPHY_BIST_MODE, 0);
+
+	udelay(5);
+
+	return pdphy_masked_write(pdphy, USB_PDPHY_BIST_MODE,
+			BIST_MODE_MASK | BIST_ENABLE, bist_mode | BIST_ENABLE);
+}
+
+/*
+ * Threaded handler for a received SOP message.  RX_SIZE appears to hold
+ * (message length - 1): the buffer read below fetches size + 1 bytes.
+ * The size check keeps size + 1 within the 32-byte local buffer.
+ * BIST requests are intercepted and handled here; everything else is
+ * forwarded to the policy engine via msg_rx_cb.
+ */
+static irqreturn_t pdphy_msg_rx_irq_thread(int irq, void *data)
+{
+	u8 size, rx_status, frame_type;
+	u8 buf[32];
+	int ret;
+	struct usb_pdphy *pdphy = data;
+
+	pdphy->msg_rx_cnt++;
+
+	ret = pdphy_reg_read(pdphy, &size, USB_PDPHY_RX_SIZE, 1);
+	if (ret)
+		goto done;
+
+	if (!size || size > 31) {
+		dev_err(pdphy->dev, "%s: invalid size %d\n", __func__, size);
+		goto done;
+	}
+
+	ret = pdphy_reg_read(pdphy, &rx_status, USB_PDPHY_RX_STATUS, 1);
+	if (ret)
+		goto done;
+
+	frame_type = rx_status & RX_FRAME_TYPE;
+	if (frame_type != SOP_MSG) {
+		dev_err(pdphy->dev, "%s:unsupported frame type %d\n",
+			__func__, frame_type);
+		goto done;
+	}
+
+	ret = pdphy_reg_read(pdphy, buf, USB_PDPHY_RX_BUFFER, size + 1);
+	if (ret)
+		goto done;
+
+	/* ack to change ownership of rx buffer back to PDPHY RX HW */
+	pdphy_reg_write(pdphy, USB_PDPHY_RX_ACKNOWLEDGE, 0);
+
+	/* buf[0] low nibble is the PD header message type */
+	if (((buf[0] & 0xf) == PD_MSG_BIST) && size >= 5) { /* BIST */
+		u8 mode = buf[5] >> 4; /* [31:28] of 1st data object */
+
+		pd_phy_bist_mode(mode);
+		pdphy_reg_write(pdphy, USB_PDPHY_RX_ACKNOWLEDGE, 0);
+
+		if (mode == PD_BIST_TEST_DATA_MODE) {
+			/* stop receiving until pd_phy_close() resets this */
+			pdphy->in_test_data_mode = true;
+			disable_irq_nosync(irq);
+		}
+		goto done;
+	}
+
+	if (pdphy->msg_rx_cb)
+		pdphy->msg_rx_cb(pdphy->usbpd, frame_type, buf, size + 1);
+
+	print_hex_dump_debug("rx msg:", DUMP_PREFIX_NONE, 32, 4, buf, size + 1,
+		false);
+	pdphy->rx_bytes += size + 1;
+done:
+	return IRQ_HANDLED;
+}
+
+/*
+ * Look up an interrupt by name from the device tree and request it with
+ * the given hard-irq and/or threaded handlers.  The irq is marked
+ * IRQ_NOAUTOEN so pdphy_enable_irq() controls when it may fire.
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes: the original set ret on failure but fell through and returned 0
+ * unconditionally — it even called irq_set_status_flags()/request on a
+ * negative irq number when the DT lookup failed.  Both failure paths now
+ * return.  Also the log strings ended in a literal "irqn"/"%dn" instead
+ * of a newline.
+ */
+static int pdphy_request_irq(struct usb_pdphy *pdphy,
+				struct device_node *node,
+				int *irq_num, const char *irq_name,
+				irqreturn_t (irq_handler)(int irq, void *data),
+				irqreturn_t (thread_fn)(int irq, void *data),
+				int flags)
+{
+	int ret;
+
+	*irq_num = of_irq_get_byname(node, irq_name);
+	if (*irq_num < 0) {
+		dev_err(pdphy->dev, "Unable to get %s irq\n", irq_name);
+		return -ENXIO;
+	}
+
+	irq_set_status_flags(*irq_num, IRQ_NOAUTOEN);
+	ret = devm_request_threaded_irq(pdphy->dev, *irq_num, irq_handler,
+			thread_fn, flags, irq_name, pdphy);
+	if (ret < 0) {
+		dev_err(pdphy->dev, "Unable to request %s irq: %d\n",
+				irq_name, ret);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Probe: map the SPMI peripheral (parent regmap + DT "reg" base), get the
+ * VDD supply, request all seven named irqs (left disabled via NOAUTOEN),
+ * apply the trim sequence, then create the policy engine and debugfs.
+ */
+static int pdphy_probe(struct platform_device *pdev)
+{
+	int ret;
+	unsigned int base;
+	struct usb_pdphy *pdphy;
+
+	pdphy = devm_kzalloc(&pdev->dev, sizeof(*pdphy), GFP_KERNEL);
+	if (!pdphy)
+		return -ENOMEM;
+
+	pdphy->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pdphy->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	dev_set_drvdata(&pdev->dev, pdphy);
+
+	ret = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to get reg base address ret = %d\n",
+			ret);
+		return ret;
+	}
+
+	pdphy->base = base;
+	pdphy->dev = &pdev->dev;
+
+	init_waitqueue_head(&pdphy->tx_waitq);
+
+	pdphy->vdd_pdphy = devm_regulator_get(&pdev->dev, "vdd-pdphy");
+	if (IS_ERR(pdphy->vdd_pdphy)) {
+		dev_err(&pdev->dev, "unable to get vdd-pdphy\n");
+		return PTR_ERR(pdphy->vdd_pdphy);
+	}
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->sig_tx_irq, "sig-tx", NULL,
+		pdphy_sig_tx_irq_thread, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->sig_rx_irq, "sig-rx", NULL,
+		pdphy_sig_rx_irq_thread, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_tx_irq, "msg-tx", pdphy_msg_tx_irq,
+		NULL, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_rx_irq, "msg-rx", NULL,
+		pdphy_msg_rx_irq_thread, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_tx_failed_irq, "msg-tx-failed", pdphy_msg_tx_irq,
+		NULL, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_tx_discarded_irq, "msg-tx-discarded",
+		pdphy_msg_tx_irq, NULL,
+		(IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_rx_discarded_irq, "msg-rx-discarded",
+		pdphy_msg_rx_discarded_irq, NULL,
+		(IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	/* unlock secure register access, then program trim —
+	 * values presumably from HW documentation; confirm.
+	 */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_SEC_ACCESS, 0xA5);
+	if (ret)
+		return ret;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TRIM_3, 0x2);
+	if (ret)
+		return ret;
+
+	/* usbpd_create() could call back to us, so have __pdphy ready */
+	__pdphy = pdphy;
+
+	pdphy->usbpd = usbpd_create(&pdev->dev);
+	if (IS_ERR(pdphy->usbpd)) {
+		dev_err(&pdev->dev, "usbpd_create failed: %ld\n",
+				PTR_ERR(pdphy->usbpd));
+		__pdphy = NULL;
+		return PTR_ERR(pdphy->usbpd);
+	}
+
+	pdphy_create_debugfs_entries(pdphy);
+
+	return 0;
+}
+
+/*
+ * Remove: tear down debugfs and the policy engine, then close the PHY if
+ * still open.  NOTE(review): usbpd_destroy() runs before pd_phy_close() —
+ * confirm the policy engine cannot trigger PHY traffic during destroy.
+ */
+static int pdphy_remove(struct platform_device *pdev)
+{
+	struct usb_pdphy *pdphy = platform_get_drvdata(pdev);
+
+	debugfs_remove_recursive(pdphy->debug_root);
+	usbpd_destroy(pdphy->usbpd);
+
+	if (pdphy->is_opened)
+		pd_phy_close();
+
+	__pdphy = NULL;
+
+	return 0;
+}
+
+/* System shutdown hook: delegate to the policy engine's shutdown_cb. */
+static void pdphy_shutdown(struct platform_device *pdev)
+{
+	struct usb_pdphy *pdphy = platform_get_drvdata(pdev);
+
+	/* let protocol engine shutdown the pdphy synchronously */
+	if (pdphy->shutdown_cb)
+		pdphy->shutdown_cb(pdphy->usbpd);
+}
+
+/* Matches the qcom,qpnp-pdphy device tree node. */
+static const struct of_device_id pdphy_match_table[] = {
+	{
+		.compatible	 = "qcom,qpnp-pdphy",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, pdphy_match_table);
+
+static struct platform_driver pdphy_driver = {
+	 .driver	 = {
+		 .name			= "qpnp-pdphy",
+		 .of_match_table	= pdphy_match_table,
+	 },
+	 .probe		= pdphy_probe,
+	 .remove	= pdphy_remove,
+	 .shutdown	= pdphy_shutdown,
+};
+
+module_platform_driver(pdphy_driver);
+
+MODULE_DESCRIPTION("QPNP PD PHY Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qpnp-pdphy");
diff --git a/drivers/usb/pd/usbpd.h b/drivers/usb/pd/usbpd.h
new file mode 100644
index 0000000..b2663ad
--- /dev/null
+++ b/drivers/usb/pd/usbpd.h
@@ -0,0 +1,113 @@
+/* Copyright (c) 2016-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _USBPD_H
+#define _USBPD_H
+
+#include <linux/device.h>
+
+struct usbpd;
+
+/*
+ * Policy engine lifecycle.  When CONFIG_USB_PD_POLICY is disabled the
+ * stubs make usbpd_create() fail with -ENODEV and destroy a no-op.
+ */
+#if IS_ENABLED(CONFIG_USB_PD_POLICY)
+struct usbpd *usbpd_create(struct device *parent);
+void usbpd_destroy(struct usbpd *pd);
+#else
+static inline struct usbpd *usbpd_create(struct device *parent)
+{
+	return ERR_PTR(-ENODEV);
+}
+static inline void usbpd_destroy(struct usbpd *pd) { }
+#endif
+
+/*
+ * Role and frame-type encodings.  Do not renumber: the PHY driver writes
+ * these values directly into hardware fields (pd_msg_type goes into
+ * TX_CONTROL as (type << 2) and is compared against the RX_STATUS frame
+ * type; pd_spec_rev is written into MSG_CONFIG).
+ */
+enum data_role {
+	DR_NONE = -1,
+	DR_UFP = 0,
+	DR_DFP = 1,
+};
+
+enum power_role {
+	PR_NONE = -1,
+	PR_SINK = 0,
+	PR_SRC = 1,
+};
+
+enum pd_sig_type {
+	HARD_RESET_SIG = 0,
+	CABLE_RESET_SIG,
+};
+
+enum pd_msg_type {
+	SOP_MSG = 0,
+	SOPI_MSG,
+	SOPII_MSG,
+};
+
+enum pd_spec_rev {
+	USBPD_REV_20 = 1,
+	USBPD_REV_30 = 2,
+};
+
+/* enable msg and signal to be received by phy */
+#define FRAME_FILTER_EN_SOP		BIT(0)
+#define FRAME_FILTER_EN_HARD_RESET	BIT(5)
+
+/* Configuration handed to pd_phy_open() by the policy engine. */
+struct pd_phy_params {
+	/* called on received Hard Reset signal */
+	void		(*signal_cb)(struct usbpd *pd, enum pd_sig_type type);
+	/* called with each received SOP message (header + data objects) */
+	void		(*msg_rx_cb)(struct usbpd *pd, enum pd_msg_type type,
+					u8 *buf, size_t len);
+	/* called from the PHY's platform shutdown hook */
+	void		(*shutdown_cb)(struct usbpd *pd);
+	enum data_role	data_role;
+	enum power_role power_role;
+	u8		frame_filter_val;
+	u8		spec_rev;
+};
+
+/*
+ * PD PHY transport API implemented by qpnp-pdphy.c.  All stubs return
+ * -ENODEV (or do nothing) when the PHY driver is not built.
+ */
+#if IS_ENABLED(CONFIG_QPNP_USB_PDPHY)
+int pd_phy_open(struct pd_phy_params *params);
+int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms);
+int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
+	enum pd_msg_type type, unsigned int timeout_ms);
+int pd_phy_update_roles(enum data_role dr, enum power_role pr);
+int pd_phy_update_spec_rev(enum pd_spec_rev rev);
+void pd_phy_close(void);
+#else
+static inline int pd_phy_open(struct pd_phy_params *params)
+{
+	return -ENODEV;
+}
+
+static inline int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms)
+{
+	return -ENODEV;
+}
+
+static inline int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
+	enum pd_msg_type type, unsigned int timeout_ms)
+{
+	return -ENODEV;
+}
+
+static inline int pd_phy_update_roles(enum data_role dr, enum power_role pr)
+{
+	return -ENODEV;
+}
+
+static inline int pd_phy_update_spec_rev(enum pd_spec_rev rev)
+{
+	return -ENODEV;
+}
+
+static inline void pd_phy_close(void)
+{
+}
+#endif
+#endif /* _USBPD_H */
diff --git a/include/dt-bindings/clock/qcom,camcc-skunk.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
similarity index 97%
rename from include/dt-bindings/clock/qcom,camcc-skunk.h
rename to include/dt-bindings/clock/qcom,camcc-sdm845.h
index ea54fab..dbee8901 100644
--- a/include/dt-bindings/clock/qcom,camcc-skunk.h
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_CAM_CC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_CAM_CC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_CAM_CC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_CAM_CC_SDM845_H
 
 #define CAM_CC_BPS_AHB_CLK					0
 #define CAM_CC_BPS_AREG_CLK					1
diff --git a/include/dt-bindings/clock/qcom,cpucc-skunk.h b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
similarity index 90%
rename from include/dt-bindings/clock/qcom,cpucc-skunk.h
rename to include/dt-bindings/clock/qcom,cpucc-sdm845.h
index 2332969..c1ff2a0 100644
--- a/include/dt-bindings/clock/qcom,cpucc-skunk.h
+++ b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_CPU_CC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_CPU_CC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
 
 #define L3_CLUSTER0_VOTE_CLK					0
 #define L3_CLUSTER1_VOTE_CLK					1
diff --git a/include/dt-bindings/clock/qcom,dispcc-skunk.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
similarity index 96%
rename from include/dt-bindings/clock/qcom,dispcc-skunk.h
rename to include/dt-bindings/clock/qcom,dispcc-sdm845.h
index 835ebcb..10530c5 100644
--- a/include/dt-bindings/clock/qcom,dispcc-skunk.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_DISP_CC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_DISP_CC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_DISP_CC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_DISP_CC_SDM845_H
 
 #define DISP_CC_DEBUG_CLK					0
 #define DISP_CC_MDSS_AHB_CLK					1
diff --git a/include/dt-bindings/clock/qcom,gcc-skunk.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
similarity index 98%
rename from include/dt-bindings/clock/qcom,gcc-skunk.h
rename to include/dt-bindings/clock/qcom,gcc-sdm845.h
index 7dfcffc..e409205 100644
--- a/include/dt-bindings/clock/qcom,gcc-skunk.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_GCC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_GCC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
 
 /* Hardware/Dummy/Voter clocks */
 #define GCC_XO							0
diff --git a/include/dt-bindings/clock/qcom,gpucc-skunk.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
similarity index 95%
rename from include/dt-bindings/clock/qcom,gpucc-skunk.h
rename to include/dt-bindings/clock/qcom,gpucc-sdm845.h
index 97a1014..41eb823 100644
--- a/include/dt-bindings/clock/qcom,gpucc-skunk.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_GPU_CC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_GPU_CC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_GPU_CC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_GPU_CC_SDM845_H
 
 #define GPU_CC_ACD_AHB_CLK					0
 #define GPU_CC_ACD_CXO_CLK					1
diff --git a/include/dt-bindings/clock/qcom,videocc-skunk.h b/include/dt-bindings/clock/qcom,videocc-sdm845.h
similarity index 92%
rename from include/dt-bindings/clock/qcom,videocc-skunk.h
rename to include/dt-bindings/clock/qcom,videocc-sdm845.h
index cf654ed..723d2e0 100644
--- a/include/dt-bindings/clock/qcom,videocc-skunk.h
+++ b/include/dt-bindings/clock/qcom,videocc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_VIDEO_CC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_VIDEO_CC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_VIDEO_CC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_VIDEO_CC_SDM845_H
 
 #define VIDEO_CC_APB_CLK					0
 #define VIDEO_CC_AT_CLK						1
diff --git a/include/linux/bluetooth-power.h b/include/linux/bluetooth-power.h
new file mode 100644
index 0000000..ef8b519b
--- /dev/null
+++ b/include/linux/bluetooth-power.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_BLUETOOTH_POWER_H
+#define __LINUX_BLUETOOTH_POWER_H
+
+/*
+ * voltage regulator information required for configuring the
+ * bluetooth chipset
+ */
+struct bt_power_vreg_data {
+	/* voltage regulator handle */
+	struct regulator *reg;
+	/* regulator name */
+	const char *name;
+	/* voltage levels to be set */
+	unsigned int low_vol_level;
+	unsigned int high_vol_level;
+	/* current level to be set */
+	unsigned int load_uA;
+	/*
+	 * is set voltage supported for this regulator?
+	 * false => set voltage is not supported
+	 * true  => set voltage is supported
+	 *
+	 * Some regulators (like gpio-regulators, LVS (low voltage switches)
+	 * PMIC regulators) don't have the capability to call
+	 * regulator_set_voltage or regulator_set_optimum_mode
+	 * Use this variable to indicate if it is such a regulator or not
+	 */
+	bool set_voltage_sup;
+	/* is this regulator enabled? */
+	bool is_enabled;
+};
+
+struct bt_power_clk_data {
+	/* clock regulator handle */
+	struct clk *clk;
+	/* clock name */
+	const char *name;
+	/* is this clock enabled? */
+	bool is_enabled;
+};
+
+/*
+ * Platform data for the bluetooth power driver.
+ */
+struct bluetooth_power_platform_data {
+	/* Bluetooth reset gpio */
+	int bt_gpio_sys_rst;
+	struct device *slim_dev;
+	/* VDDIO voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_io;
+	/* VDD_PA voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_pa;
+	/* VDD_LDOIN voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_ldo;
+	/* VDD_XTAL voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_xtal;
+	/* VDD_CORE voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_core;
+	/* Optional: chip power down gpio-regulator
+	 * chip power down data is required when bluetooth module
+	 * and other modules like wifi co-exist in a single chip and
+	 * share a common gpio to bring the chip out of reset.
+	 */
+	struct bt_power_vreg_data *bt_chip_pwd;
+	/* bluetooth reference clock */
+	struct bt_power_clk_data *bt_chip_clk;
+	/* Optional: Bluetooth power setup function */
+	int (*bt_power_setup)(int);
+};
+
+int bt_register_slimdev(struct device *dev);
+
+#define BT_CMD_SLIM_TEST		0xbfac
+#define BT_CMD_PWR_CTRL			0xbfad
+#endif /* __LINUX_BLUETOOTH_POWER_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 3dbcd5b..31a7f91 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -830,6 +830,9 @@
 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
 			   unsigned long max_rate);
 
+unsigned long clk_aggregate_rate(struct clk_hw *hw,
+					const struct clk_core *parent);
+
 static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
 {
 	dst->clk = src->clk;
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e628459..74de8b6 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -29,7 +29,7 @@
 #include <linux/workqueue.h>
 
 #define UEVENT_HELPER_PATH_LEN		256
-#define UEVENT_NUM_ENVP			32	/* number of env pointers */
+#define UEVENT_NUM_ENVP			64	/* number of env pointers */
 #define UEVENT_BUFFER_SIZE		2048	/* buffer for the variables */
 
 #ifdef CONFIG_UEVENT_HELPER
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index d2c5260..42b590f 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -164,6 +164,7 @@
 	POWER_SUPPLY_PROP_PE_START,
 	POWER_SUPPLY_PROP_SET_SHIP_MODE,
 	POWER_SUPPLY_PROP_BOOST_CURRENT,
+	POWER_SUPPLY_PROP_FORCE_TLIM,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
new file mode 100644
index 0000000..1c13cd2
--- /dev/null
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -0,0 +1,2290 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm Technologies Inc. PMIC QPNP ADC driver header file
+ *
+ */
+
+#ifndef __QPNP_ADC_H
+#define __QPNP_ADC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/consumer.h>
+/**
+ * enum qpnp_vadc_channels - QPNP AMUX arbiter channels
+ */
+enum qpnp_vadc_channels {
+	USBIN = 0,
+	DCIN,
+	VCHG_SNS,
+	SPARE1_03,
+	USB_ID_MV,
+	VCOIN,
+	VBAT_SNS,
+	VSYS,
+	DIE_TEMP,
+	REF_625MV,
+	REF_125V,
+	CHG_TEMP,
+	SPARE1,
+	SPARE2,
+	GND_REF,
+	VDD_VADC,
+	P_MUX1_1_1,
+	P_MUX2_1_1,
+	P_MUX3_1_1,
+	P_MUX4_1_1,
+	P_MUX5_1_1,
+	P_MUX6_1_1,
+	P_MUX7_1_1,
+	P_MUX8_1_1,
+	P_MUX9_1_1,
+	P_MUX10_1_1,
+	P_MUX11_1_1,
+	P_MUX12_1_1,
+	P_MUX13_1_1,
+	P_MUX14_1_1,
+	P_MUX15_1_1,
+	P_MUX16_1_1,
+	P_MUX1_1_3,
+	P_MUX2_1_3,
+	P_MUX3_1_3,
+	P_MUX4_1_3,
+	P_MUX5_1_3,
+	P_MUX6_1_3,
+	P_MUX7_1_3,
+	P_MUX8_1_3,
+	P_MUX9_1_3,
+	P_MUX10_1_3,
+	P_MUX11_1_3,
+	P_MUX12_1_3,
+	P_MUX13_1_3,
+	P_MUX14_1_3,
+	P_MUX15_1_3,
+	P_MUX16_1_3,
+	LR_MUX1_BATT_THERM,
+	LR_MUX2_BAT_ID,
+	LR_MUX3_XO_THERM,
+	LR_MUX4_AMUX_THM1,
+	LR_MUX5_AMUX_THM2,
+	LR_MUX6_AMUX_THM3,
+	LR_MUX7_HW_ID,
+	LR_MUX8_AMUX_THM4,
+	LR_MUX9_AMUX_THM5,
+	LR_MUX10_USB_ID_LV,
+	AMUX_PU1,
+	AMUX_PU2,
+	LR_MUX3_BUF_XO_THERM_BUF,
+	LR_MUX1_PU1_BAT_THERM = 112,
+	LR_MUX2_PU1_BAT_ID = 113,
+	LR_MUX3_PU1_XO_THERM = 114,
+	LR_MUX4_PU1_AMUX_THM1 = 115,
+	LR_MUX5_PU1_AMUX_THM2 = 116,
+	LR_MUX6_PU1_AMUX_THM3 = 117,
+	LR_MUX7_PU1_AMUX_HW_ID = 118,
+	LR_MUX8_PU1_AMUX_THM4 = 119,
+	LR_MUX9_PU1_AMUX_THM5 = 120,
+	LR_MUX10_PU1_AMUX_USB_ID_LV = 121,
+	LR_MUX3_BUF_PU1_XO_THERM_BUF = 124,
+	LR_MUX1_PU2_BAT_THERM = 176,
+	LR_MUX2_PU2_BAT_ID = 177,
+	LR_MUX3_PU2_XO_THERM = 178,
+	LR_MUX4_PU2_AMUX_THM1 = 179,
+	LR_MUX5_PU2_AMUX_THM2 = 180,
+	LR_MUX6_PU2_AMUX_THM3 = 181,
+	LR_MUX7_PU2_AMUX_HW_ID = 182,
+	LR_MUX8_PU2_AMUX_THM4 = 183,
+	LR_MUX9_PU2_AMUX_THM5 = 184,
+	LR_MUX10_PU2_AMUX_USB_ID_LV = 185,
+	LR_MUX3_BUF_PU2_XO_THERM_BUF = 188,
+	LR_MUX1_PU1_PU2_BAT_THERM = 240,
+	LR_MUX2_PU1_PU2_BAT_ID = 241,
+	LR_MUX3_PU1_PU2_XO_THERM = 242,
+	LR_MUX4_PU1_PU2_AMUX_THM1 = 243,
+	LR_MUX5_PU1_PU2_AMUX_THM2 = 244,
+	LR_MUX6_PU1_PU2_AMUX_THM3 = 245,
+	LR_MUX7_PU1_PU2_AMUX_HW_ID = 246,
+	LR_MUX8_PU1_PU2_AMUX_THM4 = 247,
+	LR_MUX9_PU1_PU2_AMUX_THM5 = 248,
+	LR_MUX10_PU1_PU2_AMUX_USB_ID_LV = 249,
+	LR_MUX3_BUF_PU1_PU2_XO_THERM_BUF = 252,
+	ALL_OFF = 255,
+	ADC_MAX_NUM = 0xffff,
+
+	/* Channel listing for refreshed VADC in hex format */
+	VADC_VREF_GND = 0,
+	VADC_CALIB_VREF_1P25 = 1,
+	VADC_CALIB_VREF = 2,
+	VADC_CALIB_VREF_1_DIV_3 = 0x82,
+	VADC_VPH_PWR = 0x83,
+	VADC_VBAT_SNS = 0x84,
+	VADC_VCOIN = 0x85,
+	VADC_DIE_TEMP = 6,
+	VADC_CHG_TEMP = 7,
+	VADC_USB_IN = 8,
+	VADC_IREG_FB = 9,
+	/* External input connection */
+	VADC_BAT_THERM = 0xa,
+	VADC_BAT_ID = 0xb,
+	VADC_XO_THERM = 0xc,
+	VADC_AMUX_THM1 = 0xd,
+	VADC_AMUX_THM2 = 0xe,
+	VADC_AMUX_THM3 = 0xf,
+	VADC_AMUX_THM4 = 0x10,
+	VADC_AMUX_THM5 = 0x11,
+	VADC_AMUX1_GPIO = 0x12,
+	VADC_AMUX2_GPIO = 0x13,
+	VADC_AMUX3_GPIO = 0x14,
+	VADC_AMUX4_GPIO = 0x15,
+	VADC_AMUX5_GPIO = 0x16,
+	VADC_AMUX6_GPIO = 0x17,
+	VADC_AMUX7_GPIO = 0x18,
+	VADC_AMUX8_GPIO = 0x19,
+	VADC_ATEST1 = 0x1a,
+	VADC_ATEST2 = 0x1b,
+	VADC_ATEST3 = 0x1c,
+	VADC_ATEST4 = 0x1d,
+	VADC_OFF = 0xff,
+	/* PU1 is 30K pull up */
+	VADC_BAT_THERM_PU1 = 0x2a,
+	VADC_BAT_ID_PU1 = 0x2b,
+	VADC_XO_THERM_PU1 = 0x2c,
+	VADC_AMUX_THM1_PU1 = 0x2d,
+	VADC_AMUX_THM2_PU1 = 0x2e,
+	VADC_AMUX_THM3_PU1 = 0x2f,
+	VADC_AMUX_THM4_PU1 = 0x30,
+	VADC_AMUX_THM5_PU1 = 0x31,
+	VADC_AMUX1_GPIO_PU1 = 0x32,
+	VADC_AMUX2_GPIO_PU1 = 0x33,
+	VADC_AMUX3_GPIO_PU1 = 0x34,
+	VADC_AMUX4_GPIO_PU1 = 0x35,
+	VADC_AMUX5_GPIO_PU1 = 0x36,
+	VADC_AMUX6_GPIO_PU1 = 0x37,
+	VADC_AMUX7_GPIO_PU1 = 0x38,
+	VADC_AMUX8_GPIO_PU1 = 0x39,
+	/* PU2 is 100K pull up */
+	VADC_BAT_THERM_PU2 = 0x4a,
+	VADC_BAT_ID_PU2 = 0x4b,
+	VADC_XO_THERM_PU2 = 0x4c,
+	VADC_AMUX_THM1_PU2 = 0x4d,
+	VADC_AMUX_THM2_PU2 = 0x4e,
+	VADC_AMUX_THM3_PU2 = 0x4f,
+	VADC_AMUX_THM4_PU2 = 0x50,
+	VADC_AMUX_THM5_PU2 = 0x51,
+	VADC_AMUX1_GPIO_PU2 = 0x52,
+	VADC_AMUX2_GPIO_PU2 = 0x53,
+	VADC_AMUX3_GPIO_PU2 = 0x54,
+	VADC_AMUX4_GPIO_PU2 = 0x55,
+	VADC_AMUX5_GPIO_PU2 = 0x56,
+	VADC_AMUX6_GPIO_PU2 = 0x57,
+	VADC_AMUX7_GPIO_PU2 = 0x58,
+	VADC_AMUX8_GPIO_PU2 = 0x59,
+	/* PU3 is 400K pull up */
+	VADC_BAT_THERM_PU3 = 0x6a,
+	VADC_BAT_ID_PU3 = 0x6b,
+	VADC_XO_THERM_PU3 = 0x6c,
+	VADC_AMUX_THM1_PU3 = 0x6d,
+	VADC_AMUX_THM2_PU3 = 0x6e,
+	VADC_AMUX_THM3_PU3 = 0x6f,
+	VADC_AMUX_THM4_PU3 = 0x70,
+	VADC_AMUX_THM5_PU3 = 0x71,
+	VADC_AMUX1_GPIO_PU3 = 0x72,
+	VADC_AMUX2_GPIO_PU3 = 0x73,
+	VADC_AMUX3_GPIO_PU3 = 0x74,
+	VADC_AMUX4_GPIO_PU3 = 0x75,
+	VADC_AMUX5_GPIO_PU3 = 0x76,
+	VADC_AMUX6_GPIO_PU3 = 0x77,
+	VADC_AMUX7_GPIO_PU3 = 0x78,
+	VADC_AMUX8_GPIO_PU3 = 0x79,
+	/* External input connection with 1/3 div */
+	VADC_AMUX1_GPIO_DIV_3 = 0x92,
+	VADC_AMUX2_GPIO_DIV_3 = 0x93,
+	VADC_AMUX3_GPIO_DIV_3 = 0x94,
+	VADC_AMUX4_GPIO_DIV_3 = 0x95,
+	VADC_AMUX5_GPIO_DIV_3 = 0x96,
+	VADC_AMUX6_GPIO_DIV_3 = 0x97,
+	VADC_AMUX7_GPIO_DIV_3 = 0x98,
+	VADC_AMUX8_GPIO_DIV_3 = 0x99,
+	VADC_ATEST1_DIV_3 = 0x9a,
+	VADC_ATEST2_DIV_3 = 0x9b,
+	VADC_ATEST3_DIV_3 = 0x9c,
+	VADC_ATEST4_DIV_3 = 0x9d,
+	VADC_REFRESH_MAX_NUM = 0xffff,
+};
+
+/**
+ * enum qpnp_iadc_channels - QPNP IADC channel list
+ */
+enum qpnp_iadc_channels {
+	INTERNAL_RSENSE = 0,
+	EXTERNAL_RSENSE,
+	ALT_LEAD_PAIR,
+	GAIN_CALIBRATION_17P857MV,
+	OFFSET_CALIBRATION_SHORT_CADC_LEADS,
+	OFFSET_CALIBRATION_CSP_CSN,
+	OFFSET_CALIBRATION_CSP2_CSN2,
+	IADC_MUX_NUM,
+};
+
+#define QPNP_ADC_625_UV	625000
+#define QPNP_ADC_HWMON_NAME_LENGTH				64
+#define QPNP_MAX_PROP_NAME_LEN					32
+#define QPNP_THERMALNODE_NAME_LENGTH                            25
+#define QPNP_ADC_1P25_UV					1250000
+
+/* Structure device for qpnp vadc */
+struct qpnp_vadc_chip;
+
+/* Structure device for qpnp iadc */
+struct qpnp_iadc_chip;
+
+/* Structure device for qpnp adc tm */
+struct qpnp_adc_tm_chip;
+
+/**
+ * enum qpnp_adc_clk_type - Clock rate supported.
+ * %CLK_TYPE1: 2P4MHZ
+ * %CLK_TYPE2: 4P8MHZ
+ * %CLK_TYPE3: 9P6MHZ
+ * %CLK_TYPE4: 19P2MHZ
+ * %CLK_NONE: Do not use this Clk type.
+ *
+ * The Clock rate is specific to each channel of the QPNP ADC arbiter.
+ */
+enum qpnp_adc_clk_type {
+	CLK_TYPE1 = 0,
+	CLK_TYPE2,
+	CLK_TYPE3,
+	CLK_TYPE4,
+	CLK_NONE,
+};
+
+/**
+ * enum qpnp_adc_decimation_type - Sampling rate supported.
+ * %DECIMATION_TYPE1: 512
+ * %DECIMATION_TYPE2: 1K
+ * %DECIMATION_TYPE3: 2K
+ * %DECIMATION_TYPE4: 4k
+ * %DECIMATION_NONE: Do not use this Sampling type.
+ *
+ * The Sampling rate is specific to each channel of the QPNP ADC arbiter.
+ */
+enum qpnp_adc_decimation_type {
+	DECIMATION_TYPE1 = 0,
+	DECIMATION_TYPE2,
+	DECIMATION_TYPE3,
+	DECIMATION_TYPE4,
+	DECIMATION_NONE = 0xff,
+
+	ADC_HC_DEC_RATIO_256 = 0,
+	ADC_HC_DEC_RATIO_512 = 1,
+	ADC_HC_DEC_RATIO_1024 = 2,
+	ADC_HC_DEC_RATIO_NONE = 0xff,
+};
+
+/**
+ * enum qpnp_adc_calib_type - QPNP ADC Calibration type.
+ * %ADC_CALIB_ABSOLUTE: Use 625mV and 1.25V reference channels.
+ * %ADC_CALIB_RATIOMETRIC: Use reference Voltage/GND.
+ * %ADC_CALIB_CONFIG_NONE: Do not use this calibration type.
+ *
+ * enum qpnp_adc_cal_sel - Selects the calibration type that is applied
+ *			   on the corresponding channel measurement after
+ *			   the ADC data is read.
+ * %ADC_HC_NO_CAL :	To obtain raw, uncalibrated data on qpnp-vadc-hc type.
+ * %ADC_HC_RATIO_CAL :	Applies ratiometric calibration. Note the calibration
+ *			values stored in the CAL peripheral for VADC_VREF and
+ *			VREF_1P25 already have GND_REF value removed. Used
+ *			only with qpnp-vadc-hc type of VADC.
+ * %ADC_HC_ABS_CAL :	Applies absolute calibration. Note the calibration
+ *			values stored in the CAL peripheral for VADC_VREF and
+ *			VREF_1P25 already have GND_REF value removed. Used
+ *			only with qpnp-vadc-hc type of VADC.
+ *
+ * Use the input reference voltage depending on the calibration type
+ * to calculate the offset and gain parameters. The calibration is
+ * specific to each channel of the QPNP ADC.
+ */
+enum qpnp_adc_calib_type {
+	CALIB_ABSOLUTE = 0,
+	CALIB_RATIOMETRIC,
+	CALIB_NONE,
+
+	ADC_HC_NO_CAL = 0,
+	ADC_HC_RATIO_CAL = 1,
+	ADC_HC_ABS_CAL = 2,
+	ADC_HC_CAL_SEL_NONE,
+};
+
+/**
+ * enum qpnp_adc_channel_scaling_param - pre-scaling AMUX ratio.
+ * %CHAN_PATH_SCALING0: ratio of {1, 1}
+ * %CHAN_PATH_SCALING1: ratio of {1, 3}
+ * %CHAN_PATH_SCALING2: ratio of {1, 4}
+ * %CHAN_PATH_SCALING3: ratio of {1, 6}
+ * %CHAN_PATH_SCALING4: ratio of {1, 20}
+ * %CHAN_PATH_SCALING5: ratio of {1, 8}
+ * %CHAN_PATH_SCALING6: ratio of {10, 81} The actual ratio is (1/8.1).
+ * %CHAN_PATH_SCALING7: ratio of {1, 10}
+ * %CHAN_PATH_NONE: Do not use this pre-scaling ratio type.
+ *
+ * The pre-scaling is applied for signals to be within the voltage range
+ * of the ADC.
+ */
+enum qpnp_adc_channel_scaling_param {
+	PATH_SCALING0 = 0,
+	PATH_SCALING1,
+	PATH_SCALING2,
+	PATH_SCALING3,
+	PATH_SCALING4,
+	PATH_SCALING5,
+	PATH_SCALING6,
+	PATH_SCALING7,
+	PATH_SCALING_NONE,
+};
+
+/**
+ * enum qpnp_adc_scale_fn_type - Scaling function for pm8941 pre calibrated
+ *				   digital data relative to ADC reference.
+ * %SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
+ * %SCALE_BATT_THERM: Conversion to temperature(decidegC) based on btm
+ *			parameters.
+ * %SCALE_THERM_100K_PULLUP: Returns temperature in degC.
+ *				 Uses a mapping table with 100K pullup.
+ * %SCALE_PMIC_THERM: Returns result in milli degree's Centigrade.
+ * %SCALE_XOTHERM: Returns XO thermistor voltage in degree's Centigrade.
+ * %SCALE_THERM_150K_PULLUP: Returns temperature in degC.
+ *				 Uses a mapping table with 150K pullup.
+ * %SCALE_QRD_BATT_THERM: Conversion to temperature(decidegC) based on
+ *			btm parameters.
+ * %SCALE_QRD_SKUAA_BATT_THERM: Conversion to temperature(decidegC) based on
+ *          btm parameters for SKUAA.
+ * %SCALE_SMB_BATT_THERM: Conversion to temperature(decidegC) based on
+ *          btm parameters for SMB.
+ * %SCALE_QRD_SKUG_BATT_THERM: Conversion to temperature(decidegC) based on
+ *          btm parameters for SKUG.
+ * %SCALE_QRD_SKUH_BATT_THERM: Conversion to temperature(decidegC) based on
+ *          btm parameters for SKUH
+ * %SCALE_QRD_SKUT1_BATT_THERM: Conversion to temperature(decidegC) based on
+ *          btm parameters for SKUT1
+ * %SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
+ * %SCALE_NONE: Do not use this scaling type.
+ */
+enum qpnp_adc_scale_fn_type {
+	SCALE_DEFAULT = 0,
+	SCALE_BATT_THERM,
+	SCALE_THERM_100K_PULLUP,
+	SCALE_PMIC_THERM,
+	SCALE_XOTHERM,
+	SCALE_THERM_150K_PULLUP,
+	SCALE_QRD_BATT_THERM,
+	SCALE_QRD_SKUAA_BATT_THERM,
+	SCALE_SMB_BATT_THERM,
+	SCALE_QRD_SKUG_BATT_THERM,
+	SCALE_QRD_SKUH_BATT_THERM,
+	SCALE_NCP_03WF683_THERM,
+	SCALE_QRD_SKUT1_BATT_THERM,
+	SCALE_PMI_CHG_TEMP = 16,
+	SCALE_NONE,
+};
+
+/**
+ * enum qpnp_adc_tm_rscale_fn_type - Scaling function used to convert the
+ *	channels input voltage/temperature to corresponding ADC code that is
+ *	applied for thresholds. Check the corresponding channels scaling to
+ *	determine the appropriate temperature/voltage units that are passed
+ *	to the scaling function. Example battery follows the power supply
+ *	framework that needs its units to be in decidegreesC so it passes
+ *	deci-degreesC. PA_THERM clients pass the temperature in degrees.
+ *	The order below should match the one in the driver for
+ *	adc_tm_rscale_fn[].
+ */
+enum qpnp_adc_tm_rscale_fn_type {
+	SCALE_R_VBATT = 0,
+	SCALE_RBATT_THERM,
+	SCALE_R_USB_ID,
+	SCALE_RPMIC_THERM,
+	SCALE_R_SMB_BATT_THERM,
+	SCALE_R_ABSOLUTE,
+	SCALE_QRD_SKUH_RBATT_THERM,
+	SCALE_QRD_SKUT1_RBATT_THERM,
+	SCALE_RSCALE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_rscale_fn_type - Scaling function used to convert the
+ *	channels input voltage/temperature to corresponding ADC code that is
+ *	applied for thresholds. Check the corresponding channels scaling to
+ *	determine the appropriate temperature/voltage units that are passed
+ *	to the scaling function. The order below should match the one in the
+ *	driver for qpnp_adc_scale_fn[].
+ */
+enum qpnp_vadc_rscale_fn_type {
+	SCALE_RVADC_ABSOLUTE = 0,
+	SCALE_RVADC_SCALE_NONE,
+};
+
+/**
+ * enum qpnp_adc_fast_avg_ctl - Provides ability to obtain single result
+ *		from the ADC that is an average of multiple measurement
+ *		samples. Select number of samples for use in fast
+ *		average mode (i.e. 2 ^ value).
+ * %ADC_FAST_AVG_SAMPLE_1:   0x0 = 1
+ * %ADC_FAST_AVG_SAMPLE_2:   0x1 = 2
+ * %ADC_FAST_AVG_SAMPLE_4:   0x2 = 4
+ * %ADC_FAST_AVG_SAMPLE_8:   0x3 = 8
+ * %ADC_FAST_AVG_SAMPLE_16:  0x4 = 16
+ * %ADC_FAST_AVG_SAMPLE_32:  0x5 = 32
+ * %ADC_FAST_AVG_SAMPLE_64:  0x6 = 64
+ * %ADC_FAST_AVG_SAMPLE_128: 0x7 = 128
+ * %ADC_FAST_AVG_SAMPLE_256: 0x8 = 256
+ * %ADC_FAST_AVG_SAMPLE_512: 0x9 = 512
+ */
+enum qpnp_adc_fast_avg_ctl {
+	ADC_FAST_AVG_SAMPLE_1 = 0,
+	ADC_FAST_AVG_SAMPLE_2,
+	ADC_FAST_AVG_SAMPLE_4,
+	ADC_FAST_AVG_SAMPLE_8,
+	ADC_FAST_AVG_SAMPLE_16,
+	ADC_FAST_AVG_SAMPLE_32,
+	ADC_FAST_AVG_SAMPLE_64,
+	ADC_FAST_AVG_SAMPLE_128,
+	ADC_FAST_AVG_SAMPLE_256,
+	ADC_FAST_AVG_SAMPLE_512,
+	ADC_FAST_AVG_SAMPLE_NONE,
+};
+
+/**
+ * enum qpnp_adc_hw_settle_time - Time between AMUX getting configured and
+ *		the ADC starting conversion. Delay = 100us * value for
+ *		value < 11 and 2ms * (value - 10) otherwise.
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_0US:   0us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_100US: 100us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_200US: 200us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_300US: 300us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_400US: 400us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_500US: 500us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_600US: 600us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_700US: 700us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_800US: 800us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_900US: 900us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_1MS:   1ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_2MS:   2ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_4MS:   4ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_6MS:   6ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_8MS:   8ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_10MS:  10ms
+ * %ADC_CHANNEL_HW_SETTLE_NONE
+ */
+enum qpnp_adc_hw_settle_time {
+	ADC_CHANNEL_HW_SETTLE_DELAY_0US = 0,
+	ADC_CHANNEL_HW_SETTLE_DELAY_100US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_2000US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_300US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_400US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_500US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_600US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_700US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_800US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_900US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_1MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_2MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_4MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_6MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_8MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_10MS,
+	ADC_CHANNEL_HW_SETTLE_NONE,
+};
+
+/**
+ * enum qpnp_adc_dec_ratio_sel - Selects the decimation ratio of the ADC.
+ *				 Support values are 256, 512 and 1024.
+ */
+enum qpnp_vadc_dec_ratio_sel {
+	ADC_DEC_RATIO_256 = 0,
+	ADC_DEC_RATIO_512,
+	ADC_DEC_RATIO_1024,
+	ADC_DEC_RATIO_NONE,
+};
+
+/**
+ * enum qpnp_adc_cal_sel - Selects the calibration type that is applied
+ *			   on the corresponding channel measurement after
+ *			   the ADC data is read.
+ * %ADC_NO_CAL :	To obtain raw, uncalibrated data.
+ * %ADC_RATIO_CAL :	Applies ratiometric calibration. Note the calibration
+ *			values stored in the CAL peripheral for VADC_VREF and
+ *			VREF_1P25 already have GND_REF value removed.
+ * %ADC_ABS_CAL :	Applies absolute calibration. Note the calibration
+ *			values stored in the CAL peripheral for VADC_VREF and
+ *			VREF_1P25 already have GND_REF value removed.
+ */
+
+/**
+ * enum qpnp_adc_cal_val - Selects if the calibration values applied
+ *			    are the ones when collected on a timer interval
+ *			    or if an immediate calibration needs to be forced.
+ * %ADC_TIMER_CAL : Uses calibration value collected on the timer interval.
+ * %ADC_NEW_CAL : Forces an immediate calibration. Use only when necessary
+ *		  since it forces 3 calibration measurements in addition to
+ *		  the channel measurement. For most measurement, using
+ *		  calibration based on the timer interval is sufficient.
+ */
+enum qpnp_adc_cal_val {
+	ADC_TIMER_CAL = 0,
+	ADC_NEW_CAL,
+	ADC_CAL_VAL_NONE,
+};
+
+/**
+ * enum qpnp_vadc_mode_sel - Selects the basic mode of operation.
+ *		- The normal mode is used for single measurement.
+ *		- The Conversion sequencer is used to trigger an
+ *		  ADC read when a HW trigger is selected.
+ *		- The measurement interval performs a single or
+ *		  continuous measurement at a specified interval/delay.
+ * %ADC_OP_NORMAL_MODE : Normal mode used for single measurement.
+ * %ADC_OP_CONVERSION_SEQUENCER : Conversion sequencer used to trigger
+ *		  an ADC read on a HW supported trigger.
+ *		  Refer to enum qpnp_vadc_trigger for
+ *		  supported HW triggers.
+ * %ADC_OP_MEASUREMENT_INTERVAL : The measurement interval performs a
+ *		  single or continuous measurement after a specified delay.
+ *		  For delay look at qpnp_adc_meas_timer.
+ */
+enum qpnp_vadc_mode_sel {
+	ADC_OP_NORMAL_MODE = 0,
+	ADC_OP_CONVERSION_SEQUENCER,
+	ADC_OP_MEASUREMENT_INTERVAL,
+	ADC_OP_MODE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_trigger - Select the HW trigger to be used while
+ *		measuring the ADC reading.
+ * %ADC_GSM_PA_ON : GSM power amplifier on.
+ * %ADC_TX_GTR_THRES : Transmit power greater than threshold.
+ * %ADC_CAMERA_FLASH_RAMP : Flash ramp up done.
+ * %ADC_DTEST : DTEST.
+ */
+enum qpnp_vadc_trigger {
+	ADC_GSM_PA_ON = 0,
+	ADC_TX_GTR_THRES,
+	ADC_CAMERA_FLASH_RAMP,
+	ADC_DTEST,
+	ADC_SEQ_NONE,
+};
+
+/**
+ * enum qpnp_vadc_conv_seq_timeout - Select delay (0 to 15ms) from
+ *		conversion request to triggering conversion sequencer
+ *		hold off time.
+ */
+enum qpnp_vadc_conv_seq_timeout {
+	ADC_CONV_SEQ_TIMEOUT_0MS = 0,
+	ADC_CONV_SEQ_TIMEOUT_1MS,
+	ADC_CONV_SEQ_TIMEOUT_2MS,
+	ADC_CONV_SEQ_TIMEOUT_3MS,
+	ADC_CONV_SEQ_TIMEOUT_4MS,
+	ADC_CONV_SEQ_TIMEOUT_5MS,
+	ADC_CONV_SEQ_TIMEOUT_6MS,
+	ADC_CONV_SEQ_TIMEOUT_7MS,
+	ADC_CONV_SEQ_TIMEOUT_8MS,
+	ADC_CONV_SEQ_TIMEOUT_9MS,
+	ADC_CONV_SEQ_TIMEOUT_10MS,
+	ADC_CONV_SEQ_TIMEOUT_11MS,
+	ADC_CONV_SEQ_TIMEOUT_12MS,
+	ADC_CONV_SEQ_TIMEOUT_13MS,
+	ADC_CONV_SEQ_TIMEOUT_14MS,
+	ADC_CONV_SEQ_TIMEOUT_15MS,
+	ADC_CONV_SEQ_TIMEOUT_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_holdoff - Select delay from conversion
+ *		trigger signal (i.e. adc_conv_seq_trig) transition
+ *		to ADC enable. Delay = 25us * (value + 1).
+ */
+enum qpnp_adc_conv_seq_holdoff {
+	ADC_SEQ_HOLD_25US = 0,
+	ADC_SEQ_HOLD_50US,
+	ADC_SEQ_HOLD_75US,
+	ADC_SEQ_HOLD_100US,
+	ADC_SEQ_HOLD_125US,
+	ADC_SEQ_HOLD_150US,
+	ADC_SEQ_HOLD_175US,
+	ADC_SEQ_HOLD_200US,
+	ADC_SEQ_HOLD_225US,
+	ADC_SEQ_HOLD_250US,
+	ADC_SEQ_HOLD_275US,
+	ADC_SEQ_HOLD_300US,
+	ADC_SEQ_HOLD_325US,
+	ADC_SEQ_HOLD_350US,
+	ADC_SEQ_HOLD_375US,
+	ADC_SEQ_HOLD_400US,
+	ADC_SEQ_HOLD_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_state - Conversion sequencer operating state
+ * %ADC_CONV_SEQ_IDLE : Sequencer is in idle.
+ * %ADC_CONV_TRIG_RISE : Waiting for rising edge trigger.
+ * %ADC_CONV_TRIG_HOLDOFF : Waiting for rising trigger hold off time.
+ * %ADC_CONV_MEAS_RISE : Measuring selected ADC signal.
+ * %ADC_CONV_TRIG_FALL : Waiting for falling trigger edge.
+ * %ADC_CONV_FALL_HOLDOFF : Waiting for falling trigger hold off time.
+ * %ADC_CONV_MEAS_FALL : Measuring selected ADC signal.
+ * %ADC_CONV_ERROR : Aberrant Hardware problem.
+ */
+enum qpnp_adc_conv_seq_state {
+	ADC_CONV_SEQ_IDLE = 0,
+	ADC_CONV_TRIG_RISE,
+	ADC_CONV_TRIG_HOLDOFF,
+	ADC_CONV_MEAS_RISE,
+	ADC_CONV_TRIG_FALL,
+	ADC_CONV_FALL_HOLDOFF,
+	ADC_CONV_MEAS_FALL,
+	ADC_CONV_ERROR,
+	ADC_CONV_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_1 - Selects the measurement interval time.
+ *		If value = 0, use 0ms else use 2^(value + 4)/ 32768).
+ * The timer period is used by the USB_ID. Do not set a polling rate
+ * greater than 1 second on PMIC 2.0. The max polling rate on the PMIC 2.0
+ * appears to be limited to 1 second.
+ * %ADC_MEAS_INTERVAL_0MS : 0ms
+ * %ADC_MEAS_INTERVAL_1P0MS : 1ms
+ * %ADC_MEAS_INTERVAL_2P0MS : 2ms
+ * %ADC_MEAS_INTERVAL_3P9MS : 3.9ms
+ * %ADC_MEAS_INTERVAL_7P8MS : 7.8ms
+ * %ADC_MEAS_INTERVAL_15P6MS : 15.6ms
+ * %ADC_MEAS_INTERVAL_31P3MS : 31.3ms
+ * %ADC_MEAS_INTERVAL_62P5MS : 62.5ms
+ * %ADC_MEAS_INTERVAL_125MS : 125ms
+ * %ADC_MEAS_INTERVAL_250MS : 250ms
+ * %ADC_MEAS_INTERVAL_500MS : 500ms
+ * %ADC_MEAS_INTERVAL_1S : 1seconds
+ * %ADC_MEAS_INTERVAL_2S : 2seconds
+ * %ADC_MEAS_INTERVAL_4S : 4seconds
+ * %ADC_MEAS_INTERVAL_8S : 8seconds
+ * %ADC_MEAS_INTERVAL_16S: 16seconds
+ */
+enum qpnp_adc_meas_timer_1 {
+	ADC_MEAS1_INTERVAL_0MS = 0,
+	ADC_MEAS1_INTERVAL_1P0MS,
+	ADC_MEAS1_INTERVAL_2P0MS,
+	ADC_MEAS1_INTERVAL_3P9MS,
+	ADC_MEAS1_INTERVAL_7P8MS,
+	ADC_MEAS1_INTERVAL_15P6MS,
+	ADC_MEAS1_INTERVAL_31P3MS,
+	ADC_MEAS1_INTERVAL_62P5MS,
+	ADC_MEAS1_INTERVAL_125MS,
+	ADC_MEAS1_INTERVAL_250MS,
+	ADC_MEAS1_INTERVAL_500MS,
+	ADC_MEAS1_INTERVAL_1S,
+	ADC_MEAS1_INTERVAL_2S,
+	ADC_MEAS1_INTERVAL_4S,
+	ADC_MEAS1_INTERVAL_8S,
+	ADC_MEAS1_INTERVAL_16S,
+	ADC_MEAS1_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_2 - Selects the measurement interval time.
+ *		If value = 0, use 0ms else use 2^(value + 4)/ 32768).
+ * The timer period is used by the batt_therm. Do not set a polling rate
+ * greater than 1 second on PMIC 2.0. The max polling rate on the PMIC 2.0
+ * appears to be limited to 1 second.
+ * %ADC_MEAS_INTERVAL_0MS : 0ms
+ * %ADC_MEAS_INTERVAL_100MS : 100ms
+ * %ADC_MEAS_INTERVAL_200MS : 200ms
+ * %ADC_MEAS_INTERVAL_300MS : 300ms
+ * %ADC_MEAS_INTERVAL_400MS : 400ms
+ * %ADC_MEAS_INTERVAL_500MS : 500ms
+ * %ADC_MEAS_INTERVAL_600MS : 600ms
+ * %ADC_MEAS_INTERVAL_700MS : 700ms
+ * %ADC_MEAS_INTERVAL_800MS : 800ms
+ * %ADC_MEAS_INTERVAL_900MS : 900ms
+ * %ADC_MEAS_INTERVAL_1S: 1seconds
+ * %ADC_MEAS_INTERVAL_1P1S: 1.1seconds
+ * %ADC_MEAS_INTERVAL_1P2S: 1.2seconds
+ * %ADC_MEAS_INTERVAL_1P3S: 1.3seconds
+ * %ADC_MEAS_INTERVAL_1P4S: 1.4seconds
+ * %ADC_MEAS_INTERVAL_1P5S: 1.5seconds
+ */
+enum qpnp_adc_meas_timer_2 {
+	ADC_MEAS2_INTERVAL_0MS = 0,
+	ADC_MEAS2_INTERVAL_100MS,
+	ADC_MEAS2_INTERVAL_200MS,
+	ADC_MEAS2_INTERVAL_300MS,
+	ADC_MEAS2_INTERVAL_400MS,
+	ADC_MEAS2_INTERVAL_500MS,
+	ADC_MEAS2_INTERVAL_600MS,
+	ADC_MEAS2_INTERVAL_700MS,
+	ADC_MEAS2_INTERVAL_800MS,
+	ADC_MEAS2_INTERVAL_900MS,
+	ADC_MEAS2_INTERVAL_1S,
+	ADC_MEAS2_INTERVAL_1P1S,
+	ADC_MEAS2_INTERVAL_1P2S,
+	ADC_MEAS2_INTERVAL_1P3S,
+	ADC_MEAS2_INTERVAL_1P4S,
+	ADC_MEAS2_INTERVAL_1P5S,
+	ADC_MEAS2_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_3 - Selects the measurement interval time.
+ *		If value = 0, use 0ms else use 2^(value + 4)/ 32768).
+ * Do not set a polling rate greater than 1 second on PMIC 2.0.
+ * The max polling rate on the PMIC 2.0 appears to be limited to 1 second.
+ * %ADC_MEAS_INTERVAL_0MS : 0ms
+ * %ADC_MEAS_INTERVAL_1S : 1seconds
+ * %ADC_MEAS_INTERVAL_2S : 2seconds
+ * %ADC_MEAS_INTERVAL_3S : 3seconds
+ * %ADC_MEAS_INTERVAL_4S : 4seconds
+ * %ADC_MEAS_INTERVAL_5S : 5seconds
+ * %ADC_MEAS_INTERVAL_6S: 6seconds
+ * %ADC_MEAS_INTERVAL_7S : 7seconds
+ * %ADC_MEAS_INTERVAL_8S : 8seconds
+ * %ADC_MEAS_INTERVAL_9S : 9seconds
+ * %ADC_MEAS_INTERVAL_10S : 10seconds
+ * %ADC_MEAS_INTERVAL_11S : 11seconds
+ * %ADC_MEAS_INTERVAL_12S : 12seconds
+ * %ADC_MEAS_INTERVAL_13S : 13seconds
+ * %ADC_MEAS_INTERVAL_14S : 14seconds
+ * %ADC_MEAS_INTERVAL_15S : 15seconds
+ */
+enum qpnp_adc_meas_timer_3 {
+	ADC_MEAS3_INTERVAL_0S = 0,
+	ADC_MEAS3_INTERVAL_1S,
+	ADC_MEAS3_INTERVAL_2S,
+	ADC_MEAS3_INTERVAL_3S,
+	ADC_MEAS3_INTERVAL_4S,
+	ADC_MEAS3_INTERVAL_5S,
+	ADC_MEAS3_INTERVAL_6S,
+	ADC_MEAS3_INTERVAL_7S,
+	ADC_MEAS3_INTERVAL_8S,
+	ADC_MEAS3_INTERVAL_9S,
+	ADC_MEAS3_INTERVAL_10S,
+	ADC_MEAS3_INTERVAL_11S,
+	ADC_MEAS3_INTERVAL_12S,
+	ADC_MEAS3_INTERVAL_13S,
+	ADC_MEAS3_INTERVAL_14S,
+	ADC_MEAS3_INTERVAL_15S,
+	ADC_MEAS3_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_select - Selects the timer for which
+ *	the appropriate polling frequency is set.
+ * %ADC_MEAS_TIMER_SELECT1 - Select this timer for measurement polling interval
+ *				for 1 second.
+ * %ADC_MEAS_TIMER_SELECT2 - Select this timer for 500ms measurement interval.
+ * %ADC_MEAS_TIMER_SELECT3 - Select this timer for 5 second interval.
+ */
+enum qpnp_adc_meas_timer_select {
+	ADC_MEAS_TIMER_SELECT1 = 0,
+	ADC_MEAS_TIMER_SELECT2,
+	ADC_MEAS_TIMER_SELECT3,
+	ADC_MEAS_TIMER_NUM,
+};
+
+/**
+ * enum qpnp_adc_meas_interval_op_ctl - Select operating mode.
+ * %ADC_MEAS_INTERVAL_OP_SINGLE : Conduct single measurement at specified time
+ *			delay.
+ * %ADC_MEAS_INTERVAL_OP_CONTINUOUS : Make measurements at measurement interval
+ *			times.
+ */
+enum qpnp_adc_meas_interval_op_ctl {
+	ADC_MEAS_INTERVAL_OP_SINGLE = 0,
+	ADC_MEAS_INTERVAL_OP_CONTINUOUS,
+	ADC_MEAS_INTERVAL_OP_NONE,
+};
+
+/**
+ * Channel selection registers for each of the configurable measurements
+ * Channels allotment is set at device config for a channel.
+ * The USB_ID, BATT_THERM, PMIC_THERM and VBAT channels are used by the
+ * kernel space USB, Battery and IADC drivers.
+ * The other 3 channels are configurable for use by userspace clients.
+ */
+enum qpnp_adc_tm_channel_select	{
+	QPNP_ADC_TM_M0_ADC_CH_SEL_CTL = 0x48,
+	QPNP_ADC_TM_M1_ADC_CH_SEL_CTL = 0x68,
+	QPNP_ADC_TM_M2_ADC_CH_SEL_CTL = 0x70,
+	QPNP_ADC_TM_M3_ADC_CH_SEL_CTL = 0x78,
+	QPNP_ADC_TM_M4_ADC_CH_SEL_CTL = 0x80,
+	QPNP_ADC_TM_M5_ADC_CH_SEL_CTL = 0x88,
+	QPNP_ADC_TM_M6_ADC_CH_SEL_CTL = 0x90,
+	QPNP_ADC_TM_M7_ADC_CH_SEL_CTL = 0x98,
+	QPNP_ADC_TM_CH_SELECT_NONE
+};
+
+/**
+ * Channel index for the corresponding index to qpnp_adc_tm_channel_select
+ */
+enum qpnp_adc_tm_channel_num {
+	QPNP_ADC_TM_CHAN0 = 0,
+	QPNP_ADC_TM_CHAN1,
+	QPNP_ADC_TM_CHAN2,
+	QPNP_ADC_TM_CHAN3,
+	QPNP_ADC_TM_CHAN4,
+	QPNP_ADC_TM_CHAN5,
+	QPNP_ADC_TM_CHAN6,
+	QPNP_ADC_TM_CHAN7,
+	QPNP_ADC_TM_CHAN_NONE
+};
+
+enum qpnp_comp_scheme_type {
+	COMP_ID_GF = 0,
+	COMP_ID_SMIC,
+	COMP_ID_TSMC,
+	COMP_ID_NUM,
+};
+
+/**
+ * struct qpnp_adc_tm_config - Represent ADC Thermal Monitor configuration.
+ * @channel: ADC channel for which thermal monitoring is requested.
+ * @adc_code: The pre-calibrated digital output of a given ADC relative to the
+ *		ADC reference.
+ * @high_thr_temp: Temperature at which high threshold notification is required.
+ * @low_thr_temp: Temperature at which low threshold notification is required.
+ * @low_thr_voltage : Low threshold voltage ADC code used for reverse
+ *			calibration.
+ * @high_thr_voltage: High threshold voltage ADC code used for reverse
+ *			calibration.
+ */
+struct qpnp_adc_tm_config {
+	int	channel;
+	int	adc_code;
+	int	high_thr_temp;
+	int	low_thr_temp;
+	int64_t	high_thr_voltage;
+	int64_t	low_thr_voltage;
+};
+
+/**
+ * enum qpnp_adc_tm_trip_type - Type for setting high/low temperature/voltage.
+ * %ADC_TM_TRIP_HIGH_WARM: Setting high temperature. Note that high temperature
+ *			corresponds to low voltage. Driver handles this case
+ *			appropriately to set the high/low voltage
+ *			thresholds.
+ * %ADC_TM_TRIP_LOW_COOL: Setting low temperature.
+ */
+enum qpnp_adc_tm_trip_type {
+	ADC_TM_TRIP_HIGH_WARM = 0,
+	ADC_TM_TRIP_LOW_COOL,
+	ADC_TM_TRIP_NUM,
+};
+
+#define ADC_TM_WRITABLE_TRIPS_MASK ((1 << ADC_TM_TRIP_NUM) - 1)
+
+/**
+ * enum qpnp_tm_state - This lets the client know whether the threshold
+ *		that was crossed was high/low.
+ * %ADC_TM_HIGH_STATE: Client is notified of crossing the requested high
+ *			voltage threshold.
+ * %ADC_TM_COOL_STATE: Client is notified of crossing the requested cool
+ *			temperature threshold.
+ * %ADC_TM_LOW_STATE: Client is notified of crossing the requested low
+ *			voltage threshold.
+ * %ADC_TM_WARM_STATE: Client is notified of crossing the requested high
+ *			temperature threshold.
+ */
+enum qpnp_tm_state {
+	ADC_TM_HIGH_STATE = 0,
+	ADC_TM_COOL_STATE = ADC_TM_HIGH_STATE,
+	ADC_TM_LOW_STATE,
+	ADC_TM_WARM_STATE = ADC_TM_LOW_STATE,
+	ADC_TM_STATE_NUM,
+};
+
+/**
+ * enum qpnp_state_request - Request to enable/disable the corresponding
+ *			high/low voltage/temperature thresholds.
+ * %ADC_TM_HIGH_THR_ENABLE: Enable high voltage threshold.
+ * %ADC_TM_COOL_THR_ENABLE: Enable cool temperature threshold
+ *			(alias of %ADC_TM_HIGH_THR_ENABLE).
+ * %ADC_TM_LOW_THR_ENABLE: Enable low voltage/temperature threshold.
+ * %ADC_TM_WARM_THR_ENABLE: Enable warm temperature threshold
+ *			(alias of %ADC_TM_LOW_THR_ENABLE).
+ * %ADC_TM_HIGH_LOW_THR_ENABLE: Enable high and low voltage/temperature
+ *				thresholds.
+ * %ADC_TM_HIGH_THR_DISABLE: Disable high voltage/temperature threshold.
+ * %ADC_TM_COOL_THR_DISABLE: Disable cool temperature threshold
+ *			(alias of %ADC_TM_HIGH_THR_DISABLE).
+ * %ADC_TM_LOW_THR_DISABLE: Disable low voltage/temperature threshold.
+ * %ADC_TM_WARM_THR_DISABLE: Disable warm temperature threshold
+ *			(alias of %ADC_TM_LOW_THR_DISABLE).
+ * %ADC_TM_HIGH_LOW_THR_DISABLE: Disable high and low voltage/temperature
+ *				thresholds.
+ */
+enum qpnp_state_request {
+	ADC_TM_HIGH_THR_ENABLE = 0,
+	ADC_TM_COOL_THR_ENABLE = ADC_TM_HIGH_THR_ENABLE,
+	ADC_TM_LOW_THR_ENABLE,
+	ADC_TM_WARM_THR_ENABLE = ADC_TM_LOW_THR_ENABLE,
+	ADC_TM_HIGH_LOW_THR_ENABLE,
+	ADC_TM_HIGH_THR_DISABLE,
+	ADC_TM_COOL_THR_DISABLE = ADC_TM_HIGH_THR_DISABLE,
+	ADC_TM_LOW_THR_DISABLE,
+	ADC_TM_WARM_THR_DISABLE = ADC_TM_LOW_THR_DISABLE,
+	ADC_TM_HIGH_LOW_THR_DISABLE,
+	ADC_TM_THR_NUM,
+};
+
+/**
+ * struct qpnp_adc_tm_btm_param - Represent Battery temperature threshold
+ *				monitoring configuration.
+ * @high_temp: High temperature threshold for which notification is requested.
+ * @low_temp: Low temperature threshold for which notification is requested.
+ * @high_thr: High voltage threshold for which notification is requested.
+ * @low_thr: Low voltage threshold for which notification is requested.
+ * @gain_num: Numerator of the gain applied on the input channel
+ *		(presumably consumed by the rscaling functions - confirm).
+ * @gain_den: Denominator of the gain applied on the input channel.
+ * @adc_tm_hc: Represents the refreshed BTM register design.
+ * @channel: ADC channel on which the thresholds are monitored.
+ * @state_request: Enable/disable the corresponding high and low temperature
+ *		thresholds.
+ * @timer_interval: Select polling rate from qpnp_adc_meas_timer_1 type.
+ * @timer_interval2: Select polling rate from qpnp_adc_meas_timer_2 type.
+ * @timer_interval3: Select polling rate from qpnp_adc_meas_timer_3 type.
+ * @btm_ctx: A context of void type.
+ * @threshold_notification: Notification callback once thresholds are crossed.
+ * units to be used for High/Low temperature and voltage notification -
+ * This depends on the clients usage. Check the rscaling function
+ * for the appropriate channel nodes.
+ * @Batt therm clients temperature units is decidegrees Centigrade.
+ * @USB_ID inputs the voltage units in milli-volts.
+ * @PA_THERM inputs the units in degC.
+ * @PMIC_THERM inputs the units in millidegC.
+ */
+struct qpnp_adc_tm_btm_param {
+	int32_t					high_temp;
+	int32_t					low_temp;
+	int32_t					high_thr;
+	int32_t					low_thr;
+	int32_t					gain_num;
+	int32_t					gain_den;
+	bool					adc_tm_hc;
+	enum qpnp_vadc_channels			channel;
+	enum qpnp_state_request			state_request;
+	enum qpnp_adc_meas_timer_1		timer_interval;
+	enum qpnp_adc_meas_timer_2		timer_interval2;
+	enum qpnp_adc_meas_timer_3		timer_interval3;
+	void					*btm_ctx;
+	void	(*threshold_notification)(enum qpnp_tm_state state,
+						void *ctx);
+};
+
+/**
+ * struct qpnp_vadc_linear_graph - Represent ADC characteristics.
+ * @dy: Numerator slope to calculate the gain.
+ * @dx: Denominator slope to calculate the gain.
+ * @adc_vref: A/D word of the voltage reference used for the channel.
+ * @adc_gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are computed
+ * to calibrate the device.
+ */
+struct qpnp_vadc_linear_graph {
+	int64_t dy;
+	int64_t dx;
+	int64_t adc_vref;
+	int64_t adc_gnd;
+};
+
+/**
+ * struct qpnp_vadc_map_pt - Map the graph representation for ADC channel
+ * @x: Represent the ADC digitized code.
+ * @y: Represent the physical data which can be temperature, voltage,
+ *     resistance.
+ */
+struct qpnp_vadc_map_pt {
+	int32_t x;
+	int32_t y;
+};
+
+/**
+ * struct qpnp_vadc_scaling_ratio - Represent scaling ratio for adc input.
+ * @num: Numerator scaling parameter.
+ * @den: Denominator scaling parameter.
+ */
+struct qpnp_vadc_scaling_ratio {
+	int32_t num;
+	int32_t den;
+};
+
+/**
+ * struct qpnp_adc_properties - Represent the ADC properties.
+ * @adc_vdd_reference: Reference voltage for QPNP ADC.
+ * @bitresolution: ADC bit resolution for QPNP ADC.
+ * @bipolar: Polarity for QPNP ADC.
+ * @adc_hc: Represents using HC variant of the ADC controller.
+ */
+struct qpnp_adc_properties {
+	uint32_t	adc_vdd_reference;
+	uint32_t	bitresolution;
+	bool		bipolar;
+	bool		adc_hc;
+};
+
+/**
+ * struct qpnp_vadc_chan_properties - Represent channel properties of the ADC.
+ * @offset_gain_numerator: The inverse numerator of the gain applied to the
+ *			   input channel.
+ * @offset_gain_denominator: The inverse denominator of the gain applied to the
+ *			     input channel.
+ * @high_thr: High threshold voltage that is requested to be set.
+ * @low_thr: Low threshold voltage that is requested to be set.
+ * @timer_select: Chosen from one of the 3 timers to set the polling rate for
+ *		  the VADC_BTM channel.
+ * @meas_interval1: Polling rate to set for timer 1.
+ * @meas_interval2: Polling rate to set for timer 2.
+ * @tm_channel_select: BTM channel number for the 5 VADC_BTM channels.
+ * @state_request: User can select either enable or disable high/low or both
+ * activation levels based on the qpnp_state_request type.
+ * @adc_graph: ADC graph for the channel of struct type qpnp_adc_linear_graph.
+ */
+struct qpnp_vadc_chan_properties {
+	uint32_t			offset_gain_numerator;
+	uint32_t			offset_gain_denominator;
+	uint32_t				high_thr;
+	uint32_t				low_thr;
+	enum qpnp_adc_meas_timer_select		timer_select;
+	enum qpnp_adc_meas_timer_1		meas_interval1;
+	enum qpnp_adc_meas_timer_2		meas_interval2;
+	enum qpnp_adc_tm_channel_select		tm_channel_select;
+	enum qpnp_state_request			state_request;
+	enum qpnp_adc_calib_type		calib_type;
+	struct qpnp_vadc_linear_graph	adc_graph[ADC_HC_CAL_SEL_NONE];
+};
+
+/**
+ * struct qpnp_vadc_result - Represent the result of the QPNP ADC.
+ * @chan: The channel number of the requested conversion.
+ * @adc_code: The pre-calibrated digital output of a given ADC relative to
+ *	      the ADC reference.
+ * @measurement: In units specific for a given ADC; most ADC uses reference
+ *		 voltage but some ADC uses reference current. This measurement
+ *		 here is a number relative to a reference of a given ADC.
+ * @physical: The data meaningful for each individual channel whether it is
+ *	      voltage, current, temperature, etc.
+ *	      All voltage units are represented in micro - volts.
+ *	      -Battery temperature units are represented as 0.1 DegC.
+ *	      -PA Therm temperature units are represented as DegC.
+ *	      -PMIC Die temperature units are represented as 0.001 DegC.
+ */
+struct qpnp_vadc_result {
+	uint32_t	chan;
+	int32_t		adc_code;
+	int64_t		measurement;
+	int64_t		physical;
+};
+
+/**
+ * struct qpnp_adc_amux - AMUX properties for individual channel
+ * @name: Channel string name.
+ * @channel_num: Channel in integer used from qpnp_adc_channels.
+ * @chan_path_prescaling: Channel scaling performed on the input signal.
+ * @adc_decimation: Sampling rate desired for the channel.
+ * @adc_scale_fn: Scaling function to convert to the data meaningful for
+ *		 each individual channel whether it is voltage, current,
+ *		 temperature, etc and compensates the channel properties.
+ * @fast_avg_setup: Ability to provide a single result from the ADC that is
+ *		 an average of multiple measurements.
+ * @hw_settle_time: The time between AMUX being configured and the start of
+ *		 conversion.
+ * @calib_type: Calibration type for the channel (absolute/ratiometric).
+ * @cal_val: Whether a fresh calibration value or a timer-updated calibration
+ *		 value is to be used.
+ */
+struct qpnp_adc_amux {
+	char					*name;
+	enum qpnp_vadc_channels			channel_num;
+	enum qpnp_adc_channel_scaling_param	chan_path_prescaling;
+	enum qpnp_adc_decimation_type		adc_decimation;
+	enum qpnp_adc_scale_fn_type		adc_scale_fn;
+	enum qpnp_adc_fast_avg_ctl		fast_avg_setup;
+	enum qpnp_adc_hw_settle_time		hw_settle_time;
+	enum qpnp_adc_calib_type		calib_type;
+	enum qpnp_adc_cal_val			cal_val;
+};
+
+/**
+ * qpnp_vadc_amux_scaling_ratio - Table of pre-scaling ratios (num/den)
+ * applied on the AMUX input (presumably indexed by the channel's
+ * @chan_path_prescaling selection - confirm against the scaling users).
+ */
+static const struct qpnp_vadc_scaling_ratio qpnp_vadc_amux_scaling_ratio[] = {
+	{1, 1},
+	{1, 3},
+	{1, 4},
+	{1, 6},
+	{1, 20},
+	{1, 8},
+	{10, 81},
+	{1, 10}
+};
+
+/**
+ * struct qpnp_vadc_scale_fn - Scaling function prototype
+ * @chan: Function pointer to one of the scaling functions
+ *	which takes the adc properties, channel properties,
+ *	and returns the physical result
+ */
+struct qpnp_vadc_scale_fn {
+	int32_t (*chan)(struct qpnp_vadc_chip *, int32_t,
+		const struct qpnp_adc_properties *,
+		const struct qpnp_vadc_chan_properties *,
+		struct qpnp_vadc_result *);
+};
+
+/**
+ * struct qpnp_adc_tm_reverse_scale_fn - Reverse scaling prototype
+ * @chan: Function pointer to one of the scaling functions
+ *	which takes the adc properties, channel properties,
+ *	and returns the physical result
+ */
+struct qpnp_adc_tm_reverse_scale_fn {
+	int32_t (*chan)(struct qpnp_vadc_chip *,
+		struct qpnp_adc_tm_btm_param *,
+		uint32_t *, uint32_t *);
+};
+
+/**
+ * struct qpnp_vadc_rscale_fn - Scaling function prototype
+ * @chan: Function pointer to one of the scaling functions
+ *	which takes the adc properties, channel properties,
+ *	and returns the physical result
+ */
+struct qpnp_vadc_rscale_fn {
+	int32_t (*chan)(struct qpnp_vadc_chip *,
+		const struct qpnp_vadc_chan_properties *,
+		struct qpnp_adc_tm_btm_param *,
+		uint32_t *, uint32_t *);
+};
+
+/**
+ * struct qpnp_iadc_calib - IADC channel calibration structure.
+ * @channel - Channel for which the historical offset and gain is
+ *	      calculated. Available channels are internal rsense,
+ *	      external rsense and alternate lead pairs.
+ * @offset_raw - raw Offset value for the channel.
+ * @gain_raw - raw Gain of the channel.
+ * @ideal_offset_uv - ideal offset value for the channel.
+ * @ideal_gain_nv - ideal gain for the channel.
+ * @offset_uv - converted value of offset in uV.
+ * @gain_uv - converted value of gain in uV.
+ */
+struct qpnp_iadc_calib {
+	enum qpnp_iadc_channels		channel;
+	uint16_t			offset_raw;
+	uint16_t			gain_raw;
+	uint32_t			ideal_offset_uv;
+	uint32_t			ideal_gain_nv;
+	uint32_t			offset_uv;
+	uint32_t			gain_uv;
+};
+
+/**
+ * struct qpnp_iadc_result - IADC read result structure.
+ * @result_uv - Result of ADC in uV.
+ * @result_ua - Result of ADC in uA.
+ */
+struct qpnp_iadc_result {
+	int32_t				result_uv;
+	int32_t				result_ua;
+};
+
+/**
+ * struct qpnp_adc_drv - QPNP ADC device structure.
+ * @pdev - platform device for the ADC peripheral.
+ * @regmap - regmap handle used for register access to the peripheral.
+ * @slave - slave identifier of the ADC peripheral (presumably the SPMI
+ *	    slave id - confirm).
+ * @offset - base offset for the ADC peripheral.
+ * @adc_prop - ADC properties specific to the ADC peripheral.
+ * @amux_prop - AMUX properties representing the ADC peripheral.
+ * @adc_channels - ADC channel properties for the ADC peripheral.
+ * @adc_irq_eoc - End of Conversion IRQ.
+ * @adc_irq_fifo_not_empty - Conversion sequencer request written
+ *			to FIFO when not empty.
+ * @adc_irq_conv_seq_timeout - Conversion sequencer trigger timeout.
+ * @adc_high_thr_irq - Output higher than high threshold set for measurement.
+ * @adc_low_thr_irq - Output lower than low threshold set for measurement.
+ * @adc_lock - ADC lock for access to the peripheral.
+ * @adc_rslt_completion - ADC result notification after interrupt
+ *			  is received.
+ * @calib - Internal rsens calibration values for gain and offset.
+ * @hkadc_ldo - HKADC LDO regulator handle.
+ * @hkadc_ldo_ok - HKADC LDO-OK regulator handle (presumably a readiness
+ *		   vote for the LDO - confirm).
+ * @adc_hc - Represents using the HC variant of the ADC controller.
+ */
+struct qpnp_adc_drv {
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	uint8_t				slave;
+	uint16_t			offset;
+	struct qpnp_adc_properties	*adc_prop;
+	struct qpnp_adc_amux_properties	*amux_prop;
+	struct qpnp_adc_amux		*adc_channels;
+	int				adc_irq_eoc;
+	int				adc_irq_fifo_not_empty;
+	int				adc_irq_conv_seq_timeout;
+	int				adc_high_thr_irq;
+	int				adc_low_thr_irq;
+	struct mutex			adc_lock;
+	struct completion		adc_rslt_completion;
+	struct qpnp_iadc_calib		calib;
+	struct regulator		*hkadc_ldo;
+	struct regulator		*hkadc_ldo_ok;
+	bool				adc_hc;
+};
+
+/**
+ * struct qpnp_adc_amux_properties - QPNP VADC amux channel property.
+ * @amux_channel - Refer to the qpnp_vadc_channel list.
+ * @decimation - Sampling rate supported for the channel.
+ * @mode_sel - The basic mode of operation.
+ * @hw_settle_time - The time between AMUX being configured and the
+ *			start of conversion.
+ * @fast_avg_setup - Ability to provide single result from the ADC
+ *			that is an average of multiple measurements.
+ * @trigger_channel - HW trigger channel for conversion sequencer.
+ * @calib_type - Used to store the calibration type for the channel
+ *		 absolute/ratiometric.
+ * @cal_val - Used to determine if fresh calibration value or timer
+ *	      updated calibration value is to be used.
+ * @chan_prop - Represent the channel properties of the ADC.
+ */
+struct qpnp_adc_amux_properties {
+	uint32_t				amux_channel;
+	uint32_t				decimation;
+	uint32_t				mode_sel;
+	uint32_t				hw_settle_time;
+	uint32_t				fast_avg_setup;
+	enum qpnp_vadc_trigger			trigger_channel;
+	enum qpnp_adc_calib_type		calib_type;
+	enum qpnp_adc_cal_val			cal_val;
+	struct qpnp_vadc_chan_properties	chan_prop[0];
+};
+
+/* SW index's for PMIC type and version used by QPNP VADC and IADC */
+#define QPNP_REV_ID_8941_3_1	1
+#define QPNP_REV_ID_8026_1_0	2
+#define QPNP_REV_ID_8026_2_0	3
+#define QPNP_REV_ID_8110_1_0	4
+#define QPNP_REV_ID_8026_2_1	5
+#define QPNP_REV_ID_8110_2_0	6
+#define QPNP_REV_ID_8026_2_2	7
+#define QPNP_REV_ID_8941_3_0	8
+#define QPNP_REV_ID_8941_2_0	9
+#define QPNP_REV_ID_8916_1_0	10
+#define QPNP_REV_ID_8916_1_1	11
+#define QPNP_REV_ID_8916_2_0	12
+#define QPNP_REV_ID_8909_1_0	13
+#define QPNP_REV_ID_8909_1_1	14
+#define QPNP_REV_ID_PM8950_1_0	16
+
+/* Public API */
+#if defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE)				\
+			|| defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE_MODULE)
+/**
+ * qpnp_vadc_read() - Performs ADC read on the channel.
+ * @dev:	Structure device for qpnp vadc
+ * @channel:	Input channel to perform the ADC read.
+ * @result:	Structure pointer of type adc_chan_result
+ *		in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_read(struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_vadc_hc_read() - Performs ADC read on the channel.
+ *		It uses the refreshed VADC design from qpnp-vadc-hc.
+ * @dev:	Structure device for qpnp vadc
+ * @channel:	Input channel to perform the ADC read.
+ * @result:	Structure pointer of type adc_chan_result
+ *		in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_hc_read(struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_vadc_conv_seq_request() - Performs ADC read on the conversion
+ *				sequencer channel.
+ * @dev:	Structure device for qpnp vadc
+ * @channel:	Input channel to perform the ADC read.
+ * @result:	Structure pointer of type adc_chan_result
+ *		in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *dev,
+			enum qpnp_vadc_trigger trigger_channel,
+			enum qpnp_vadc_channels channel,
+			struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_adc_get_devicetree_data() - Abstracts the ADC devicetree data.
+ * @pdev:	platform device for the ADC peripheral.
+ * @adc_qpnp:	QPNP ADC device structure to be populated from the devicetree.
+ */
+int32_t qpnp_adc_get_devicetree_data(struct platform_device *pdev,
+					struct qpnp_adc_drv *adc_qpnp);
+
+/**
+ * qpnp_adc_scale_default() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_pmic_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Performs the AMUX out as 2mV/K and returns
+ *		the temperature in milli degC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_pmi_chg_temp() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. The voltage measured by HKADC is related to
+ *		the junction temperature according to
+ *		Tj = -137.67 degC * (V_adc * 2) + 382.04 degC
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_pmi_chg_temp(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_batt_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_batt_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skuaa_batt_therm() - Scales the pre-calibrated digital
+ *		output of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skuaa_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skug_batt_therm() - Scales the pre-calibrated digital
+ *		output of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skug_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skuh_batt_therm() - Scales the pre-calibrated digital
+ *		output of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skuh_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skut1_batt_therm() - Scales the pre-calibrated digital
+ *		output of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skut1_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_smb_batt_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_smb_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_batt_id() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *dev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_tdkntcg_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature of the xo therm in
+ *		millidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *dev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_therm_pu1() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature of the therm in degC.
+ *		It uses a mapping table computed for a 150K pull-up.
+ *		Pull-up1 is an internal pull-up on the AMUX of 150K.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *dev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_therm_pu2() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature of the therm in degC.
+ *		It uses a mapping table computed for a 100K pull-up.
+ *		Pull-up2 is an internal pull-up on the AMUX of 100K.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *dev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_therm_ncp03() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature of the therm in degC.
+ *		It uses a mapping table computed for a NCP03WF683.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_therm_ncp03(struct qpnp_vadc_chip *dev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_get_vadc() - Clients need to register with the vadc using the
+ *		corresponding device instance it wants to read the channels
+ *		from. Read the bindings document on how to pass the phandle
+ *		for the corresponding vadc driver to register with.
+ * @dev:	Clients device structure
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the vadc
+ * @struct qpnp_vadc_chip * - On success returns the vadc device structure
+ *		pointer that needs to be used during an ADC request.
+ */
+struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev, const char *name);
+/**
+ * qpnp_adc_tm_scaler() - Performs reverse calibration.
+ * @tm_config:	Thermal monitoring configuration.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution and
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ *
+ * NOTE(review): this stub unconditionally returns -ENXIO even though it sits
+ * inside the CONFIG_SENSORS_QPNP_ADC_VOLTAGE-enabled branch - confirm this
+ * is intended.
+ */
+static inline int32_t qpnp_adc_tm_scaler(struct qpnp_adc_tm_config *tm_config,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop)
+{ return -ENXIO; }
+/**
+ * qpnp_get_vadc_gain_and_offset() - Obtains the VADC gain and offset
+ *		for absolute and ratiometric calibration.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The result in which the ADC offset and gain values are stored.
+ * @type:	The calibration type whether client needs the absolute or
+ *		ratiometric gain and offset values.
+ */
+int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *dev,
+			struct qpnp_vadc_linear_graph *param,
+			enum qpnp_adc_calib_type calib_type);
+/**
+ * qpnp_adc_scale_millidegc_pmic_voltage_thr() - Performs reverse calibration
+ *		on the low/high temperature threshold values passed by the
+ *		client. The function converts millidegC to voltage threshold
+ *		and accounts for the corresponding channels scaling as (2mV/K).
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high temperature
+ *		values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_btm_scaler() - Performs reverse calibration on the low/high
+ *		temperature threshold values passed by the client.
+ *		The function maps the temperature to voltage and applies
+ *		ratiometric calibration on the voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high temperature
+ *		values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+
+/**
+ * qpnp_adc_qrd_skuh_btm_scaler() - Performs reverse calibration on the low/high
+ *		temperature threshold values passed by the client.
+ *		The function maps the temperature to voltage and applies
+ *		ratiometric calibration on the voltage values for SKUH board.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high temperature
+ *		values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_qrd_skuh_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_qrd_skut1_btm_scaler() - Performs reverse calibration on the
+ *		low/high temperature threshold values passed by the client.
+ *		The function maps the temperature to voltage and applies
+ *		ratiometric calibration on the voltage values for SKUT1 board.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high temperature
+ *		values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_qrd_skut1_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_tm_scale_therm_voltage_pu2() - Performs reverse calibration
+ *		and convert given temperature to voltage on supported
+ *		thermistor channels using 100k pull-up.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_properties:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @param:	The input temperature values.
+ */
+int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *dev,
+		const struct qpnp_adc_properties *adc_properties,
+				struct qpnp_adc_tm_config *param);
+/**
+ * qpnp_adc_tm_scale_voltage_therm_pu2() - Performs reverse calibration
+ *		and converts the given ADC code to temperature for
+ *		thermistor channels using 100k pull-up.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @reg:	The input ADC code.
+ * @result:	The physical measurement temperature on the thermistor.
+ */
+int32_t qpnp_adc_tm_scale_voltage_therm_pu2(struct qpnp_vadc_chip *dev,
+			const struct qpnp_adc_properties *adc_prop,
+				uint32_t reg, int64_t *result);
+/**
+ * qpnp_adc_usb_scaler() - Performs reverse calibration on the low/high
+ *		voltage threshold values passed by the client.
+ *		The function applies ratiometric calibration on the
+ *		voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high voltage
+ *		threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_vbatt_rscaler() - Performs reverse calibration on the low/high
+ *		voltage threshold values passed by the client.
+ *		The function applies ratiometric calibration on the
+ *		voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high voltage
+ *		threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_vadc_absolute_rthr() - Performs reverse calibration on the low/high
+ *		voltage threshold values passed by the client.
+ *		The function applies absolute calibration on the
+ *		voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @param:	The input parameters that contain the low/high voltage
+ *		threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_vadc_absolute_rthr(struct qpnp_vadc_chip *dev,
+		const struct qpnp_vadc_chan_properties *chan_prop,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_absolute_rthr() - Performs reverse calibration on the low/high
+ *		voltage threshold values passed by the client.
+ *		The function applies absolute calibration on the
+ *		voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high voltage
+ *		threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_smb_btm_rscaler() - Performs reverse calibration on the low/high
+ *		temperature threshold values passed by the client.
+ *		The function maps the temperature to voltage and applies
+ *		ratiometric calibration on the voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high temperature
+ *		values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_vadc_iadc_sync_request() - Performs Voltage ADC read and
+ *		locks the peripheral. When performing simultaneous
+ *		voltage and current request the VADC peripheral is
+ *		prepared for conversion and the IADC sync conversion
+ *		is done from the IADC peripheral.
+ * @dev:	Structure device for qpnp vadc
+ * @channel:	Input channel to perform the voltage ADC read.
+ */
+int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel);
+/**
+ * qpnp_vadc_iadc_sync_complete_request() - Reads the ADC result and
+ *		unlocks the peripheral.
+ * @dev:	Structure device for qpnp vadc
+ * @result:	Structure pointer of type adc_chan_result
+ *		in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_iadc_sync_complete_request(struct qpnp_vadc_chip *dev,
+	enum qpnp_vadc_channels channel, struct qpnp_vadc_result *result);
+/**
+ * qpnp_vbat_sns_comp_result() - Compensate vbatt readings based on temperature
+ * @dev:	Structure device for qpnp vadc
+ * @result:	Voltage in uV that needs compensation.
+ * @is_pon_ocv: Whether the reading is from a power on OCV or not
+ */
+int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *dev,
+					int64_t *result, bool is_pon_ocv);
+/**
+ * qpnp_adc_get_revid_version() - Obtain the PMIC number and revision.
+ * @dev:	Structure device node.
+ * returns internal mapped PMIC number and revision id.
+ */
+int qpnp_adc_get_revid_version(struct device *dev);
+/**
+ * qpnp_vadc_channel_monitor() - Configures kernel clients a channel to
+ *		monitor the corresponding ADC channel for threshold detection.
+ *		Driver passes the high/low voltage threshold along
+ *		with the notification callback once the set thresholds
+ *		are crossed.
+ * @param:	Structure pointer of qpnp_adc_tm_btm_param type.
+ *		Clients pass the low/high temperature along with the threshold
+ *		notification callback.
+ */
+int32_t qpnp_vadc_channel_monitor(struct qpnp_vadc_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_vadc_end_channel_monitor() - Disables recurring measurement mode for
+ *		VADC_USR and disables the bank.
+ * @param:	device instance for the VADC
+ */
+int32_t qpnp_vadc_end_channel_monitor(struct qpnp_vadc_chip *chip);
+/**
+ * qpnp_vadc_calib_vref() - Read calibration channel REF_125V/VDD_VADC
+ * @dev:	Structure device for qpnp vadc
+ * @calib_type:	absolute or ratiometric calib type.
+ * returns calibration channel adc code.
+ */
+int32_t qpnp_vadc_calib_vref(struct qpnp_vadc_chip *vadc,
+				enum qpnp_adc_calib_type calib_type,
+				int *calib_data);
+/**
+ * qpnp_vadc_calib_gnd() - Read calibration channel REF_625MV/GND_REF
+ * @dev:	Structure device for qpnp vadc
+ * @calib_type:	absolute or ratiometric calib type.
+ * returns calibration channel adc code.
+ */
+int32_t qpnp_vadc_calib_gnd(struct qpnp_vadc_chip *vadc,
+				enum qpnp_adc_calib_type calib_type,
+				int *calib_data);
+
+/**
+ * qpnp_adc_enable_voltage() - Enable LDO for HKADC
+ * @dev:	Structure device for qpnp vadc
+ * returns result of enabling the regulator interface.
+ */
+int32_t qpnp_adc_enable_voltage(struct qpnp_adc_drv *adc);
+
+/**
+ * qpnp_adc_disable_voltage() - Disable vote for HKADC LDO
+ * @dev:	Structure device for qpnp vadc
+ */
+void qpnp_adc_disable_voltage(struct qpnp_adc_drv *adc);
+
+/**
+ * qpnp_adc_free_voltage_resource() - Puts HKADC LDO
+ * @dev:	Structure device for qpnp vadc
+ */
+void qpnp_adc_free_voltage_resource(struct qpnp_adc_drv *adc);
+
+#else
+static inline int32_t qpnp_vadc_read(struct qpnp_vadc_chip *dev,
+				uint32_t channel,
+				struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_hc_read(struct qpnp_vadc_chip *dev,
+				uint32_t channel,
+				struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *dev,
+			enum qpnp_vadc_trigger trigger_channel,
+			enum qpnp_vadc_channels channel,
+			struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_pmi_chg_temp(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_batt_therm(
+			struct qpnp_vadc_chip *vadc, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skuaa_batt_therm(
+			struct qpnp_vadc_chip *vadc, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skug_batt_therm(
+			struct qpnp_vadc_chip *vadc, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skuh_batt_therm(
+			struct qpnp_vadc_chip *vdev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skut1_batt_therm(
+			struct qpnp_vadc_chip *vdev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_smb_batt_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_therm_ncp03(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev,
+							const char *name)
+{ return ERR_PTR(-ENXIO); }
+static inline int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *dev,
+			struct qpnp_vadc_linear_graph *param,
+			enum qpnp_adc_calib_type calib_type)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_absolute_rthr(struct qpnp_vadc_chip *dev,
+		const struct qpnp_vadc_chan_properties *chan_prop,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_qrd_skuh_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_qrd_skut1_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(
+		struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_scale_therm_voltage_pu2(
+				struct qpnp_vadc_chip *dev,
+			const struct qpnp_adc_properties *adc_properties,
+				struct qpnp_adc_tm_config *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_scale_voltage_therm_pu2(
+				struct qpnp_vadc_chip *dev,
+			const struct qpnp_adc_properties *adc_prop,
+			uint32_t reg, int64_t *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_iadc_sync_complete_request(
+				struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *dev,
+					int64_t *result, bool is_pon_ocv)
+{ return -ENXIO; }
+static inline int qpnp_adc_get_revid_version(struct device *dev)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_channel_monitor(struct qpnp_vadc_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_end_channel_monitor(struct qpnp_vadc_chip *chip)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_calib_vref(struct qpnp_vadc_chip *vadc,
+					enum qpnp_adc_calib_type calib_type,
+					int *calib_data)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_calib_gnd(struct qpnp_vadc_chip *vadc,
+					enum qpnp_adc_calib_type calib_type,
+					int *calib_data)
+{ return -ENXIO; }
+
+static inline int32_t qpnp_adc_enable_voltage(struct qpnp_adc_drv *adc)
+{ return -ENXIO; }
+
+static inline void qpnp_adc_disable_voltage(struct qpnp_adc_drv *adc)
+{ return; }
+
+static inline void qpnp_adc_free_voltage_resource(struct qpnp_adc_drv *adc)
+{ return; }
+
+static inline int32_t qpnp_adc_get_devicetree_data(
+		struct platform_device *pdev, struct qpnp_adc_drv *adc_qpnp)
+{ return -ENXIO; }
+
+#endif
+
+/* Public API */
+#if defined(CONFIG_SENSORS_QPNP_ADC_CURRENT)				\
+			|| defined(CONFIG_SENSORS_QPNP_ADC_CURRENT_MODULE)
+/**
+ * qpnp_iadc_read() - Performs ADC read on the current channel.
+ * @dev:	Structure device for qpnp iadc
+ * @channel:	Input channel to perform the ADC read.
+ * @result:	Current across rsense in mA.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_read(struct qpnp_iadc_chip *dev,
+				enum qpnp_iadc_channels channel,
+				struct qpnp_iadc_result *result);
+/**
+ * qpnp_iadc_get_rsense() - Reads the RDS resistance value from the
+ *		trim registers.
+ * @dev:	Structure device for qpnp iadc
+ * @rsense:	RDS resistance in nOhms.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *dev, int32_t *rsense);
+/**
+ * qpnp_iadc_get_gain_and_offset() - Performs gain calibration
+ *				over 17.8571mV and offset over selected
+ *				channel. Channel can be internal rsense,
+ *				external rsense and alternate lead pair.
+ * @dev:	Structure device for qpnp iadc
+ * @result:	result structure where the gain and offset is stored of
+ *		type qpnp_iadc_calib.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *dev,
+					struct qpnp_iadc_calib *result);
+/**
+ * qpnp_get_iadc() - Clients need to register with the iadc with the
+ *		corresponding device instance it wants to read the channels.
+ *		Read the bindings document on how to pass the phandle for
+ *		the corresponding vadc driver to register with.
+ * @dev:	Clients device structure
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the iadc
+ * @struct qpnp_iadc_chip * - On success returns the iadc device structure
+ *		pointer used everytime client makes an ADC request.
+ */
+struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev, const char *name);
+/**
+ * qpnp_iadc_vadc_sync_read() - Performs synchronous VADC and IADC read.
+ *		The api is to be used only by the BMS to perform
+ *		simultaneous VADC and IADC measurement for battery voltage
+ *		and current.
+ * @dev:	Structure device for qpnp iadc
+ * @i_channel:	Input battery current channel to perform the IADC read.
+ * @i_result:	Current across the rsense in mA.
+ * @v_channel:	Input battery voltage channel to perform VADC read.
+ * @v_result:	Voltage on the vbatt channel with units in mV.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *dev,
+	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
+	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result);
+/**
+ * qpnp_iadc_calibrate_for_trim - Clients can use this API to re-calibrate
+ *		IADC. The offset and gain values are programmed in the trim
+ *		registers. The offset and the gain can be retrieved using
+ *		qpnp_iadc_get_gain_and_offset
+ * @dev:	Structure device for qpnp iadc
+ * @batfet_closed: batfet is opened or closed. The IADC chooses proper
+ *			channel (internal/external) based on batfet status
+ *			for calibration.
+ * RETURNS:	0 on success.
+ */
+int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *dev,
+						bool batfet_closed);
+/**
+ * qpnp_iadc_comp_result() - Compensates the result of the current based on
+ *		the gain and offset co-effients and rsense parameters.
+ * @dev:	Structure device for qpnp iadc
+ * @result:	Current value to perform the compensation.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *dev, int64_t *result);
+/**
+ * qpnp_iadc_skip_calibration() - Clients can use this API to ask the driver
+ *				to skip iadc calibrations
+ * @dev:	Structure device for qpnp iadc
+ * @result:	0 on success and -EPROBE_DEFER when probe for the device
+ *		has not occurred.
+ */
+int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *dev);
+/**
+ * qpnp_iadc_resume_calibration() - Clients can use this API to ask the driver
+ *				to resume iadc calibrations
+ * @dev:	Structure device for qpnp iadc
+ * @result:	0 on success and -EPROBE_DEFER when probe for the device
+ *		has not occurred.
+ */
+int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *dev);
+#else
+static inline int32_t qpnp_iadc_read(struct qpnp_iadc_chip *iadc,
+	enum qpnp_iadc_channels channel, struct qpnp_iadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc,
+							int32_t *rsense)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *iadc,
+				struct qpnp_iadc_calib *result)
+{ return -ENXIO; }
+static inline struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev,
+							const char *name)
+{ return ERR_PTR(-ENXIO); }
+static inline int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
+	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
+	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
+							bool batfet_closed)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *iadc,
+						int64_t *result)
+{ return -ENXIO; }
+static inline int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *iadc)
+{ return -ENXIO; }
+static inline int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *iadc)
+{ return -ENXIO; }
+#endif
+
+/* Public API */
+#if defined(CONFIG_THERMAL_QPNP_ADC_TM)				\
+			|| defined(CONFIG_THERMAL_QPNP_ADC_TM_MODULE)
+/**
+ * qpnp_adc_tm_usbid_configure() - Configures Channel 0 of VADC_BTM to
+ *		monitor USB_ID channel using 100k internal pull-up.
+ *		USB driver passes the high/low voltage threshold along
+ *		with the notification callback once the set thresholds
+ *		are crossed.
+ * @param:	Structure pointer of qpnp_adc_tm_usbid_param type.
+ *		Clients pass the low/high voltage along with the threshold
+ *		notification callback.
+ */
+int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_adc_tm_usbid_end() - Disables the monitoring of channel 0 thats
+ *		assigned for monitoring USB_ID. Disables the low/high
+ *		threshold activation for channel 0 as well.
+ * @param:	none.
+ */
+int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip);
+/**
+ * qpnp_adc_tm_channel_measure() - Configures kernel clients a channel to
+ *		monitor the corresponding ADC channel for threshold detection.
+ *		Driver passes the high/low voltage threshold along
+ *		with the notification callback once the set thresholds
+ *		are crossed.
+ * @param:	Structure pointer of qpnp_adc_tm_btm_param type.
+ *		Clients pass the low/high temperature along with the threshold
+ *		notification callback.
+ */
+int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_adc_tm_disable_chan_meas() - Disables the monitoring of channel thats
+ *		assigned for monitoring kernel clients. Disables the low/high
+ *		threshold activation for the corresponding channel.
+ * @param:	Structure pointer of qpnp_adc_tm_btm_param type.
+ *		This is used to identify the channel for which the corresponding
+ *		channels high/low threshold notification will be disabled.
+ */
+int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_get_adc_tm() - Clients need to register with the adc_tm using the
+ *		corresponding device instance it wants to read the channels
+ *		from. Read the bindings document on how to pass the phandle
+ *		for the corresponding adc_tm driver to register with.
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the vadc
+ * @struct qpnp_adc_tm_chip * - On success returns the vadc device structure
+ *		pointer that needs to be used during an ADC TM request.
+ */
+struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name);
+#else
+static inline int32_t qpnp_adc_tm_usbid_configure(
+			struct qpnp_adc_tm_chip *chip,
+			struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_channel_measure(
+					struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_disable_chan_meas(
+					struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev,
+							const char *name)
+{ return ERR_PTR(-ENXIO); }
+#endif
+
+#endif
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 511182a..8d0210e 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -77,11 +77,19 @@
 	THERMAL_DEVICE_ENABLED,
 };
 
+enum thermal_trip_activation_mode {
+	THERMAL_TRIP_ACTIVATION_DISABLED = 0,
+	THERMAL_TRIP_ACTIVATION_ENABLED,
+};
+
 enum thermal_trip_type {
 	THERMAL_TRIP_ACTIVE = 0,
 	THERMAL_TRIP_PASSIVE,
 	THERMAL_TRIP_HOT,
 	THERMAL_TRIP_CRITICAL,
+	THERMAL_TRIP_CONFIGURABLE_HI,
+	THERMAL_TRIP_CONFIGURABLE_LOW,
+	THERMAL_TRIP_CRITICAL_LOW,
 };
 
 enum thermal_trend {
@@ -122,6 +130,8 @@
 	int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
 	int (*get_crit_temp) (struct thermal_zone_device *, int *);
 	int (*set_emul_temp) (struct thermal_zone_device *, int);
+	int (*activate_trip_type)(struct thermal_zone_device *, int,
+		enum thermal_trip_activation_mode);
 	int (*get_trend) (struct thermal_zone_device *, int,
 			  enum thermal_trend *);
 	int (*notify) (struct thermal_zone_device *, int,
diff --git a/include/linux/usb/usbpd.h b/include/linux/usb/usbpd.h
new file mode 100644
index 0000000..3566a7a
--- /dev/null
+++ b/include/linux/usb/usbpd.h
@@ -0,0 +1,159 @@
+/* Copyright (c) 2016, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_USB_USBPD_H
+#define __LINUX_USB_USBPD_H
+
+#include <linux/list.h>
+
+struct usbpd;
+
+/* Standard IDs */
+#define USBPD_SID			0xff00
+
+/* Structured VDM Command Type */
+enum usbpd_svdm_cmd_type {
+	SVDM_CMD_TYPE_INITIATOR,
+	SVDM_CMD_TYPE_RESP_ACK,
+	SVDM_CMD_TYPE_RESP_NAK,
+	SVDM_CMD_TYPE_RESP_BUSY,
+};
+
+/* Structured VDM Commands */
+#define USBPD_SVDM_DISCOVER_IDENTITY	0x1
+#define USBPD_SVDM_DISCOVER_SVIDS	0x2
+#define USBPD_SVDM_DISCOVER_MODES	0x3
+#define USBPD_SVDM_ENTER_MODE		0x4
+#define USBPD_SVDM_EXIT_MODE		0x5
+#define USBPD_SVDM_ATTENTION		0x6
+
+/*
+ * Implemented by client
+ */
+struct usbpd_svid_handler {
+	u16 svid;
+
+	/* Notified when VDM session established/reset; must be implemented */
+	void (*connect)(struct usbpd_svid_handler *hdlr);
+	void (*disconnect)(struct usbpd_svid_handler *hdlr);
+
+	/* Unstructured VDM */
+	void (*vdm_received)(struct usbpd_svid_handler *hdlr, u32 vdm_hdr,
+			const u32 *vdos, int num_vdos);
+
+	/* Structured VDM */
+	void (*svdm_received)(struct usbpd_svid_handler *hdlr, u8 cmd,
+			enum usbpd_svdm_cmd_type cmd_type, const u32 *vdos,
+			int num_vdos);
+
+	/* client should leave these blank; private members used by PD driver */
+	struct list_head entry;
+	bool discovered;
+};
+
+enum plug_orientation {
+	ORIENTATION_NONE,
+	ORIENTATION_CC1,
+	ORIENTATION_CC2,
+};
+
+#if IS_ENABLED(CONFIG_USB_PD_POLICY)
+/*
+ * Obtains an instance of usbpd from a DT phandle
+ */
+struct usbpd *devm_usbpd_get_by_phandle(struct device *dev,
+		const char *phandle);
+
+/*
+ * Called by client to handle specific SVID messages.
+ * Specify callback functions in the usbpd_svid_handler argument
+ */
+int usbpd_register_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr);
+
+void usbpd_unregister_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr);
+
+/*
+ * Transmit a VDM message.
+ */
+int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos,
+		int num_vdos);
+
+/*
+ * Transmit a Structured VDM message.
+ */
+int usbpd_send_svdm(struct usbpd *pd, u16 svid, u8 cmd,
+		enum usbpd_svdm_cmd_type cmd_type, int obj_pos,
+		const u32 *vdos, int num_vdos);
+
+/*
+ * Get current status of CC pin orientation.
+ *
+ * Return: ORIENTATION_CC1 or ORIENTATION_CC2 if attached,
+ *         otherwise ORIENTATION_NONE if not attached
+ */
+enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd);
+#else
+static inline struct usbpd *devm_usbpd_get_by_phandle(struct device *dev,
+		const char *phandle)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int usbpd_register_svid(struct usbpd *pd,
+		struct usbpd_svid_handler *hdlr)
+{
+	return -EINVAL;
+}
+
+static inline void usbpd_unregister_svid(struct usbpd *pd,
+		struct usbpd_svid_handler *hdlr)
+{
+}
+
+static inline int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos,
+		int num_vdos)
+{
+	return -EINVAL;
+}
+
+static inline int usbpd_send_svdm(struct usbpd *pd, u16 svid, u8 cmd,
+		enum usbpd_svdm_cmd_type cmd_type, int obj_pos,
+		const u32 *vdos, int num_vdos)
+{
+	return -EINVAL;
+}
+
+static inline enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd)
+{
+	return ORIENTATION_NONE;
+}
+#endif /* IS_ENABLED(CONFIG_USB_PD_POLICY) */
+
+/*
+ * Additional helpers for Enter/Exit Mode commands
+ */
+
+static inline int usbpd_enter_mode(struct usbpd *pd, u16 svid, int mode,
+		const u32 *vdo)
+{
+	return usbpd_send_svdm(pd, svid, USBPD_SVDM_ENTER_MODE,
+			SVDM_CMD_TYPE_INITIATOR, mode, vdo, vdo ? 1 : 0);
+}
+
+static inline int usbpd_exit_mode(struct usbpd *pd, u16 svid, int mode,
+		const u32 *vdo)
+{
+	return usbpd_send_svdm(pd, svid, USBPD_SVDM_EXIT_MODE,
+			SVDM_CMD_TYPE_INITIATOR, mode, vdo, vdo ? 1 : 0);
+}
+
+#endif /* __LINUX_USB_USBPD_H */
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 7e2f328..6b567d7 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -104,7 +104,7 @@
 	irqreturn_t (*handler)(int, void *),
 	unsigned long flags, const char *name, void *ctx);
 extern int icnss_get_ce_id(int irq);
-extern int icnss_set_fw_debug_mode(bool enable_fw_log);
+extern int icnss_set_fw_log_mode(uint8_t fw_log_mode);
 extern int icnss_athdiag_read(struct device *dev, uint32_t offset,
 			      uint32_t mem_type, uint32_t data_len,
 			      uint8_t *output);
@@ -124,5 +124,9 @@
 extern int icnss_wlan_set_dfs_nol(const void *info, u16 info_len);
 extern int icnss_wlan_get_dfs_nol(void *info, u16 info_len);
 extern bool icnss_is_qmi_disable(void);
+extern bool icnss_is_fw_ready(void);
+extern int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len);
+extern u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern int icnss_trigger_recovery(struct device *dev);
 
 #endif /* _ICNSS_WLAN_H_ */
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index f5987da..2656d5d 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -96,10 +96,10 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmhamster")
 #define early_machine_is_msmfalcon()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmfalcon")
-#define early_machine_is_msmskunk()	\
-	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmskunk")
-#define early_machine_is_sdmbat()	\
-	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdmbat")
+#define early_machine_is_sdm845()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm845")
+#define early_machine_is_sdm830()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm830")
 #else
 #define of_board_is_sim()		0
 #define of_board_is_rumi()		0
@@ -137,8 +137,8 @@
 #define early_machine_is_apqcobalt()	0
 #define early_machine_is_msmhamster()	0
 #define early_machine_is_msmfalcon()	0
-#define early_machine_is_msmskunk()	0
-#define early_machine_is_sdmbat()	0
+#define early_machine_is_sdm845()	0
+#define early_machine_is_sdm830()	0
 #endif
 
 #define PLATFORM_SUBTYPE_MDM	1
@@ -198,8 +198,8 @@
 	MSM_CPU_COBALT,
 	MSM_CPU_HAMSTER,
 	MSM_CPU_FALCON,
-	MSM_CPU_SKUNK,
-	MSM_CPU_BAT,
+	MSM_CPU_SDM845,
+	MSM_CPU_SDM830,
 };
 
 struct msm_soc_info {
diff --git a/include/soc/qcom/spcom.h b/include/soc/qcom/spcom.h
new file mode 100644
index 0000000..31369a5
--- /dev/null
+++ b/include/soc/qcom/spcom.h
@@ -0,0 +1,223 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SPCOM_H_
+#define _SPCOM_H_
+
+#include <linux/types.h>	/* uint32_t, bool */
+
+/**
+ * @brief - Secure Processor Communication API
+ *
+ * This API should be used by Linux Kernel drivers,
+ * similar API is provided to user space applications
+ * via spcomlib.h API file.
+ * Sending Request and receiving Response is synchronous, only one at a time.
+ * The API is based on Client/Server model.
+ * The API resemble the trustzone QSEECOM API.
+ * In most cases, the Secure Processor side has servers and the HLOS
+ * side has clients. Request is initiated by the client and responded by the
+ * server.
+ */
+
+/*===========================================================================*/
+/*                           defines, enums , types                          */
+/*===========================================================================*/
+
+/* Maximum size (including null) for channel names - match glink */
+#define SPCOM_CHANNEL_NAME_SIZE		32
+
+/**
+ * Request buffer size.
+ * Any large data (multiply of 4KB) is provided by temp buffer in DDR.
+ * Request shall provide the temp buffer physical address (align to 4KB).
+ * Maximum request/response size of 268 is used to accommodate APDU size.
+ * From kernel spcom driver perspective a PAGE_SIZE of 4K
+ * is the actual maximum size for a single read/write file operation.
+ */
+#define SPCOM_MAX_REQUEST_SIZE		268
+#define SPCOM_MAX_RESPONSE_SIZE		268
+
+/**
+ * Abstract spcom handle.
+ * The actual struct definition is internal to the spcom driver.
+ */
+struct spcom_client; /* Forward declaration */
+struct spcom_server; /* Forward declaration */
+
+/**
+ * Client registration info
+ *
+ * @ch_name:	glink logical channel name
+ * @notify_ssr_cb: callback when the remote SP side reset (power down).
+ *      This is likely to happen due to remote subsystem restart (SSR).
+ *      NULL callback means no notification required.
+ *      Upon ssr callback, the user should unregister,
+ *      Poll for link up and then register again.
+ */
+struct spcom_client_info {
+	const char *ch_name;
+	void (*notify_ssr_cb)(void);
+};
+
+/**
+ * Server registration info
+ *
+ * @ch_name:	glink logical channel name
+ * @notify_ssr_cb: callback when the remote SP side reset (power down).
+ *      This is likely to happen due to remote subsystem restart (SSR).
+ *      NULL callback means no notification required.
+ *      Upon ssr callback, the user should unregister,
+ *      Poll for link up and then register again.
+ */
+struct spcom_service_info {
+	const char *ch_name;
+	void (*notify_ssr_cb)(void);
+};
+
+/*===========================================================================*/
+/*                           General API                                     */
+/*===========================================================================*/
+
+/**
+ * spcom_is_sp_subsystem_link_up() - check if SPSS link is up.
+ *
+ * return: true if link is up, false if link is down.
+ */
+bool spcom_is_sp_subsystem_link_up(void);
+
+/*===========================================================================*/
+/*                           Client Send Message                             */
+/*===========================================================================*/
+/**
+ * spcom_register_client() - register client for channel
+ *
+ * Only one client/Server can register on each side of a channel.
+ * Server on remote side is expected to be running and connected,
+ * therefore connection expected within the provided timeout.
+ * Handle is returned even if timeout expired.
+ * use spcom_client_is_server_connected() to check fully connected.
+ *
+ * @info:	Client configuration info (input).
+ *
+ * return: client handle on success, NULL on failure.
+ */
+struct spcom_client *spcom_register_client(struct spcom_client_info *info);
+
+/**
+ * spcom_unregister_client() - unregister client for channel
+ *
+ * @client:	Client Handle.
+ *
+ * return: 0 on success, negative error code on failure (see errno.h)
+ */
+int spcom_unregister_client(struct spcom_client *client);
+
+/**
+ * spcom_client_send_message_sync() - Send a synchronous request and response
+ *
+ * @client:	a pointer to spcom client
+ * @req_ptr:	a pointer to the request C struct representation
+ * @req_size:	size of the request C struct
+ * @resp_ptr:	a pointer to the response C struct representation
+ * @resp_size:  size of the response C struct
+ * @timeout_msec: Timeout in msec between command and response, 0=no timeout.
+ *
+ * return: number of rx bytes on success, negative value on failure.
+ */
+int spcom_client_send_message_sync(struct spcom_client	*client,
+				   void			*req_ptr,
+				   uint32_t		req_size,
+				   void			*resp_ptr,
+				   uint32_t		resp_size,
+				   uint32_t		timeout_msec);
+
+/**
+ * spcom_client_is_server_connected() - Check if remote server connected.
+ *
+ * This API checks that the logical channel is fully connected between
+ * the client and the server.
+ * Normally, the server should be up first and connect first.
+ *
+ * @client:	a pointer to spcom client
+ *
+ * return: true if server connected, false otherwise.
+ */
+bool spcom_client_is_server_connected(struct spcom_client *client);
+
+/*===========================================================================*/
+/*                           Service                                         */
+/*===========================================================================*/
+
+/**
+ * spcom_register_service() - register server for channel
+ *
+ * Only one client/Server can register on each side of a channel.
+ *
+ * @info:	Server configuration info (input).
+ *
+ * return: server handle on success, NULL on failure.
+ */
+struct spcom_server *spcom_register_service(struct spcom_service_info *info);
+
+/**
+ * spcom_unregister_service() - unregister server for channel
+ *
+ * @server:	server Handle.
+ *
+ * return: 0 on success, negative error code on failure (see errno.h)
+ */
+int spcom_unregister_service(struct spcom_server *server);
+
+/**
+ * spcom_server_get_next_request_size() - get the size of the
+ * next request
+ *
+ * This API MUST be called before calling spcom_server_wait_for_request().
+ * The server should allocate the relevant buffer size.
+ *
+ * @server:	a pointer to spcom server
+ *
+ * return: size of request in bytes on success, negative value on failure.
+ */
+int spcom_server_get_next_request_size(struct spcom_server *server);
+
+/**
+ * spcom_server_wait_for_request() - server wait for request
+ *
+ * @server:     a pointer to spcom server
+ * @req_ptr:	a pointer to the request buffer
+ * @req_size:	size of the buffer provided.
+ * The server should provide a buffer of at least the size
+ * returned by spcom_server_get_next_request_size() and up to
+ * SPCOM_MAX_REQUEST_SIZE.
+ *
+ * return: size of request on success, negative value on failure (see errno.h)
+ */
+int spcom_server_wait_for_request(struct spcom_server	*server,
+				  void			*req_ptr,
+				  uint32_t		req_size);
+
+/**
+ * spcom_server_send_response() - Send the response to a request
+ *
+ * @server:	a pointer to spcom server
+ * @resp_ptr:	a pointer to the response C struct representation
+ * @resp_size:  size of the response C struct
+ *
+ * return: sent data size on success, negative value on failure (see errno.h)
+ */
+int spcom_server_send_response(struct spcom_server	*server,
+			       void			*resp_ptr,
+			       uint32_t		resp_size);
+
+#endif /* _SPCOM_H_ */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 3e2449b..856f627 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -270,6 +270,7 @@
 header-y += memfd.h
 header-y += mempolicy.h
 header-y += meye.h
+header-y += mhi.h
 header-y += mic_common.h
 header-y += mic_ioctl.h
 header-y += mii.h
@@ -422,6 +423,7 @@
 header-y += sonypi.h
 header-y += soundcard.h
 header-y += sound.h
+header-y += spcom.h
 header-y += stat.h
 header-y += stddef.h
 header-y += string.h
diff --git a/include/uapi/linux/mhi.h b/include/uapi/linux/mhi.h
new file mode 100644
index 0000000..834c1dc
--- /dev/null
+++ b/include/uapi/linux/mhi.h
@@ -0,0 +1,37 @@
+#ifndef _UAPI_MHI_H
+#define _UAPI_MHI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+enum peripheral_ep_type {
+	DATA_EP_TYPE_RESERVED,
+	DATA_EP_TYPE_HSIC,
+	DATA_EP_TYPE_HSUSB,
+	DATA_EP_TYPE_PCIE,
+	DATA_EP_TYPE_EMBEDDED,
+	DATA_EP_TYPE_BAM_DMUX,
+};
+
+struct peripheral_ep_info {
+	enum peripheral_ep_type		ep_type;
+	__u32				peripheral_iface_id;
+};
+
+struct ipa_ep_pair {
+	__u32				cons_pipe_num;
+	__u32				prod_pipe_num;
+};
+
+struct ep_info {
+	struct peripheral_ep_info	ph_ep_info;
+	struct ipa_ep_pair		ipa_ep_pair;
+
+};
+
+#define MHI_UCI_IOCTL_MAGIC	'm'
+
+#define MHI_UCI_EP_LOOKUP _IOR(MHI_UCI_IOCTL_MAGIC, 2, struct ep_info)
+
+#endif /* _UAPI_MHI_H */
+
diff --git a/include/uapi/linux/spcom.h b/include/uapi/linux/spcom.h
new file mode 100644
index 0000000..9b6b9b7
--- /dev/null
+++ b/include/uapi/linux/spcom.h
@@ -0,0 +1,113 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_SPCOM_H_
+#define _UAPI_SPCOM_H_
+
+#include <linux/types.h>	/* uint32_t, bool */
+#ifndef BIT
+	#define BIT(x) (1 << x)
+#endif
+#ifndef PAGE_SIZE
+	#define PAGE_SIZE 4096
+#endif
+
+/**
+ * @brief - Secure Processor Communication interface to user space spcomlib.
+ *
+ * Sending data and control commands by write() file operation.
+ * Receiving data by read() file operation.
+ * Getting the next request size by read() file operation,
+ * with special size SPCOM_GET_NEXT_REQUEST_SIZE.
+ */
+
+/* Maximum size (including null) for channel names */
+#define SPCOM_CHANNEL_NAME_SIZE		32
+
+/*
+ * file read(fd, buf, size) with this size,
+ * hints the kernel that user space wants to read the next-req-size.
+ * This size is bigger than both SPCOM_MAX_REQUEST_SIZE and
+ * SPCOM_MAX_RESPONSE_SIZE, so it is not a valid data size.
+ */
+#define SPCOM_GET_NEXT_REQUEST_SIZE	(PAGE_SIZE-1)
+
+/* Command Id between spcomlib and spcom driver, on write() */
+enum spcom_cmd_id {
+	SPCOM_CMD_LOAD_APP	= 0x4C4F4144, /* "LOAD" = 0x4C4F4144 */
+	SPCOM_CMD_RESET_SP	= 0x52455354, /* "REST" = 0x52455354 */
+	SPCOM_CMD_SEND		= 0x53454E44, /* "SEND" = 0x53454E44 */
+	SPCOM_CMD_SEND_MODIFIED	= 0x534E444D, /* "SNDM" = 0x534E444D */
+	SPCOM_CMD_LOCK_ION_BUF  = 0x4C4F434B, /* "LOCK" = 0x4C4F434B */
+	SPCOM_CMD_UNLOCK_ION_BUF = 0x554C434B, /* "ULCK" = 0x554C434B */
+	SPCOM_CMD_FSSR		= 0x46535352, /* "FSSR" = 0x46535352 */
+	SPCOM_CMD_CREATE_CHANNEL = 0x43524554, /* "CRET" = 0x43524554 */
+};
+
+/*
+ * @note: Event types that are always implicitly polled:
+ * POLLERR=0x08 | POLLHUP=0x10 | POLLNVAL=0x20
+ * so bits 3,4,5 can't be used
+ */
+enum spcom_poll_events {
+	SPCOM_POLL_LINK_STATE	= BIT(1),
+	SPCOM_POLL_CH_CONNECT	= BIT(2),
+	SPCOM_POLL_READY_FLAG	= BIT(14), /* output */
+	SPCOM_POLL_WAIT_FLAG	= BIT(15), /* if set, wait for the event */
+};
+
+/* Common Command structure between User Space and spcom driver, on write() */
+struct spcom_user_command {
+	enum spcom_cmd_id cmd_id;
+	uint32_t arg;
+} __packed;
+
+/* Command structure between User Space and spcom driver, on write() */
+struct spcom_send_command {
+	enum spcom_cmd_id cmd_id;
+	uint32_t timeout_msec;
+	uint32_t buf_size;
+	char buf[0]; /* Variable buffer size - must be last field */
+} __packed;
+
+/* Command structure between userspace spcomlib and spcom driver, on write() */
+struct spcom_user_create_channel_command {
+	enum spcom_cmd_id cmd_id;
+	char ch_name[SPCOM_CHANNEL_NAME_SIZE];
+} __packed;
+
+/* maximum ION buf for send-modified-command */
+#define SPCOM_MAX_ION_BUF 4
+
+struct spcom_ion_info {
+	int32_t fd; /* ION buffer File Descriptor, set -1 for invalid fd */
+	uint32_t buf_offset; /* virtual address offset in request/response */
+};
+
+/* Pass this FD to unlock all ION buffer for the specific channel */
+#define SPCOM_ION_FD_UNLOCK_ALL	0xFFFF
+
+struct spcom_ion_handle {
+	int32_t fd;		/* File Descriptor associated with the buffer */
+};
+
+/* Command structure between User Space and spcom driver, on write() */
+struct spcom_user_send_modified_command {
+	enum spcom_cmd_id cmd_id;
+	struct spcom_ion_info ion_info[SPCOM_MAX_ION_BUF];
+	uint32_t timeout_msec;
+	uint32_t buf_size;
+	char buf[0]; /* Variable buffer size - must be last field */
+} __packed;
+
+
+#endif /* _UAPI_SPCOM_H_ */
diff --git a/scripts/build-all.py b/scripts/build-all.py
index 0f8babf..d36e96f 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -61,7 +61,7 @@
     """Ensure that PWD is a kernel directory"""
     have_defconfig = any([
         os.path.isfile('arch/arm64/configs/msm_defconfig'),
-        os.path.isfile('arch/arm64/configs/msmskunk_defconfig')])
+        os.path.isfile('arch/arm64/configs/sdm845_defconfig')])
 
     if not all([os.path.isfile('MAINTAINERS'), have_defconfig]):
         fail("This doesn't seem to be an MSM kernel dir")
@@ -305,10 +305,12 @@
         r'[fm]sm[0-9]*_defconfig',
         r'apq*_defconfig',
         r'qsd*_defconfig',
-	r'mpq*_defconfig',
+        r'mpq*_defconfig',
+        r'sdm[0-9]*_defconfig',
         )
     arch64_pats = (
-	r'msm*_defconfig',
+        r'msm*_defconfig',
+        r'sdm[0-9]*_defconfig',
         )
     for p in arch_pats:
         for n in glob.glob('arch/arm/configs/' + p):
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index b3ac439..4a7af76 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -233,8 +233,8 @@
 	 the machine driver and the corresponding
 	 DAI-links
 
-config SND_SOC_MSMSKUNK
-	tristate "SoC Machine driver for MSMSKUNK boards"
+config SND_SOC_SDM845
+	tristate "SoC Machine driver for SDM845 boards"
 	depends on ARCH_QCOM
 	select SND_SOC_COMPRESS
 	select SND_SOC_QDSP6V2
@@ -255,7 +255,7 @@
 	select SND_HWDEP
         select DTS_EAGLE
 	help
-	 To add support for SoC audio on MSMSKUNK.
+	 To add support for SoC audio on SDM845.
 	 This enables sound soc drivers that interfaces
 	 with DSP. This also enables the machine driver
 	 and the corresponding DAI-links.
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index 50ceda4..e0544fc 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -34,6 +34,6 @@
 obj-$(CONFIG_SND_SOC_EXT_CODEC) += snd-soc-msmfalcon-common.o
 obj-$(CONFIG_SND_SOC_EXT_CODEC) += snd-soc-ext-codec.o
 
-# for MSMSKUNK sound card driver
-snd-soc-msmskunk-objs := msmskunk.o
-obj-$(CONFIG_SND_SOC_MSMSKUNK) += snd-soc-msmskunk.o
+# for SDM845 sound card driver
+snd-soc-sdm845-objs := sdm845.o
+obj-$(CONFIG_SND_SOC_SDM845) += snd-soc-sdm845.o
diff --git a/sound/soc/msm/msmskunk.c b/sound/soc/msm/sdm845.c
similarity index 99%
rename from sound/soc/msm/msmskunk.c
rename to sound/soc/msm/sdm845.c
index 4759235..6987949 100644
--- a/sound/soc/msm/msmskunk.c
+++ b/sound/soc/msm/sdm845.c
@@ -38,9 +38,9 @@
 #include "../codecs/wcd934x/wcd934x-mbhc.h"
 #include "../codecs/wsa881x.h"
 
-#define DRV_NAME "msmskunk-asoc-snd"
+#define DRV_NAME "sdm845-asoc-snd"
 
-#define __CHIPSET__ "MSMSKUNK "
+#define __CHIPSET__ "SDM845 "
 #define MSM_DAILINK_NAME(name) (__CHIPSET__#name)
 
 #define SAMPLING_RATE_8KHZ      8000
@@ -3173,7 +3173,7 @@
 	return ret;
 }
 
-static int msmskunk_notifier_service_cb(struct notifier_block *this,
+static int sdm845_notifier_service_cb(struct notifier_block *this,
 					 unsigned long opcode, void *ptr)
 {
 	int ret;
@@ -3231,7 +3231,7 @@
 }
 
 static struct notifier_block service_nb = {
-	.notifier_call  = msmskunk_notifier_service_cb,
+	.notifier_call  = sdm845_notifier_service_cb,
 	.priority = -INT_MAX,
 };
 
@@ -5417,7 +5417,7 @@
 }
 
 struct snd_soc_card snd_soc_card_tavil_msm = {
-	.name		= "msmskunk-tavil-snd-card",
+	.name		= "sdm845-tavil-snd-card",
 	.late_probe	= msm_snd_card_tavil_late_probe,
 };
 
@@ -5647,13 +5647,13 @@
 			 ARRAY_SIZE(msm_stub_be_dai_links)];
 
 struct snd_soc_card snd_soc_card_stub_msm = {
-	.name		= "msmskunk-stub-snd-card",
+	.name		= "sdm845-stub-snd-card",
 };
 
-static const struct of_device_id msmskunk_asoc_machine_of_match[]  = {
-	{ .compatible = "qcom,msmskunk-asoc-snd-tavil",
+static const struct of_device_id sdm845_asoc_machine_of_match[]  = {
+	{ .compatible = "qcom,sdm845-asoc-snd-tavil",
 	  .data = "tavil_codec"},
-	{ .compatible = "qcom,msmskunk-asoc-snd-stub",
+	{ .compatible = "qcom,sdm845-asoc-snd-stub",
 	  .data = "stub_codec"},
 	{},
 };
@@ -5666,7 +5666,7 @@
 	int total_links;
 	const struct of_device_id *match;
 
-	match = of_match_node(msmskunk_asoc_machine_of_match, dev->of_node);
+	match = of_match_node(sdm845_asoc_machine_of_match, dev->of_node);
 	if (!match) {
 		dev_err(dev, "%s: No DT match found for sound card\n",
 			__func__);
@@ -6107,7 +6107,7 @@
 		goto err;
 	}
 
-	match = of_match_node(msmskunk_asoc_machine_of_match,
+	match = of_match_node(sdm845_asoc_machine_of_match,
 			pdev->dev.of_node);
 	if (!match) {
 		dev_err(&pdev->dev, "%s: no matched codec is found.\n",
@@ -6222,7 +6222,7 @@
 	msm_i2s_auxpcm_init(pdev);
 
 	is_initial_boot = true;
-	ret = audio_notifier_register("msmskunk", AUDIO_NOTIFIER_ADSP_DOMAIN,
+	ret = audio_notifier_register("sdm845", AUDIO_NOTIFIER_ADSP_DOMAIN,
 				      &service_nb);
 	if (ret < 0)
 		pr_err("%s: Audio notifier register failed ret = %d\n",
@@ -6250,19 +6250,19 @@
 	return 0;
 }
 
-static struct platform_driver msmskunk_asoc_machine_driver = {
+static struct platform_driver sdm845_asoc_machine_driver = {
 	.driver = {
 		.name = DRV_NAME,
 		.owner = THIS_MODULE,
 		.pm = &snd_soc_pm_ops,
-		.of_match_table = msmskunk_asoc_machine_of_match,
+		.of_match_table = sdm845_asoc_machine_of_match,
 	},
 	.probe = msm_asoc_machine_probe,
 	.remove = msm_asoc_machine_remove,
 };
-module_platform_driver(msmskunk_asoc_machine_driver);
+module_platform_driver(sdm845_asoc_machine_driver);
 
 MODULE_DESCRIPTION("ALSA SoC msm");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, msmskunk_asoc_machine_of_match);
+MODULE_DEVICE_TABLE(of, sdm845_asoc_machine_of_match);