Merge "msm: vidc: Add NULL check for bus vote data allocation"
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index b570448..2782428 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -366,6 +366,7 @@
 				control register. Number of offsets defined should
 				match the number of xin-ids defined in
 				property: qcom,sde-inline-rot-xin
+- #power-domain-cells:		Number of cells in a power-domain specifier; must be set to 0.
 
 Bus Scaling Subnodes:
 - qcom,sde-reg-bus:		Property to provide Bus scaling for register access for
@@ -446,6 +447,7 @@
     interrupt-controller;
     #interrupt-cells = <1>;
     iommus = <&mdp_smmu 0>;
+    #power-domain-cells = <0>;
 
     qcom,sde-off = <0x1000>;
     qcom,sde-ctl-off = <0x00002000 0x00002200 0x00002400
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
index 933ad85..83a58df 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
@@ -278,6 +278,46 @@
   Value type: <u32>
   Definition: should specify the power on sequence delay time in ms.
 
+- spiop-read
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI read operation related data.
+
+- spiop-readseq
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI read sequence operation related data.
+
+- spiop-queryid
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI query eeprom id operation related data.
+
+- spiop-pprog
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI page program operation related data.
+
+- spiop-wenable
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI write enable operation related data.
+
+- spiop-readst
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI read status operation related data.
+
+- spiop-erase
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI erase operation related data.
+
+- eeprom-idx
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides eeprom id related data.
+
 - xxxx-supply
   Usage: required
   Value type: <phandle>
@@ -385,6 +425,10 @@
 		cell-index = <0>;
 		reg = <0x0>;
 		qcom,eeprom-name = "msm_eeprom";
+		eeprom-id0 = <0xF8 0x15>;
+		eeprom-id1 = <0xEF 0x15>;
+		eeprom-id2 = <0xC2 0x36>;
+		eeprom-id3 = <0xC8 0x15>;
 		compatible = "qcom,eeprom";
 		qcom,slave-addr = <0x60>;
 		qcom,num-blocks = <2>;
@@ -400,6 +444,13 @@
 		qcom,cmm-data-compressed;
 		qcom,cmm-data-offset = <0>;
 		qcom,cmm-data-size = <0>;
+		spiop-read = <0x03 3 0 0 0>;
+		spiop-readseq = <0x03 3 0 0 0>;
+		spiop-queryid = <0x90 3 0 0 0>;
+		spiop-pprog = <0x02 3 0 3 100>;
+		spiop-wenable = <0x06 0 0 0 0>;
+		spiop-readst = <0x05 0 0 0 0>;
+		spiop-erase = <0x20 3 0 10 100>;
 		qcom,cam-power-seq-type = "sensor_vreg",
 			"sensor_vreg", "sensor_clk",
 			"sensor_gpio", "sensor_gpio";
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
index 72e2ab4..728c5f9 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
@@ -56,6 +56,11 @@
   Value type: <string>
   Definition: Should specify a string label to identify the context bank.
 
+- qcom,secure-cb
+  Usage: optional
+  Value type: boolean
+  Definition: Specifies whether the context bank is a secure context bank.
+
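+  A minimal sketch of a context bank node using this property (the node
+  name, iommus values and label below are illustrative only):
+
+	qcom,iommu_cb1 {
+		compatible = "qcom,msm-cam-smmu-cb";
+		iommus = <&apps_smmu 0x1020 0x0>;
+		label = "secure_cb";
+		qcom,secure-cb;
+	};
+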
 =============================================
 Third Level Node - CAM SMMU memory map device
 =============================================
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index 46649af..db34047 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -123,6 +123,7 @@
 				swizzle configuration value.
 - qcom,rot-reg-bus:		Property to provide Bus scaling for register
 				access for rotator blocks.
+- power-domains:		A phandle to the respective power domain node.
 
 Subnode properties:
 - compatible:		Compatible name used in smmu v2.
@@ -150,6 +151,8 @@
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
 
+		power-domains = <&mdss_mdp>;
+
 		qcom,mdss-mdp-reg-offset = <0x00001000>;
 
 		rot-vdd-supply = <&gdsc_mdss>;
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index a3fd951..d205b0b 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -309,6 +309,13 @@
 		    is specified to make it fully functional. Value has no
 		    unit. Allowed range is 0 to 62200 in micro units.
 
+- qcom,ki-coeff-full-dischg
+	Usage:	    optional
+	Value type: <u32>
+	Definition: Ki coefficient full SOC value that will be applied during
+		    discharging. If not specified, a value of 0 will be set.
+		    Allowed range is from 245 to 62256.
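+		    For example, an illustrative value within the allowed
+		    range: qcom,ki-coeff-full-dischg = <733>;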
+
 - qcom,fg-rconn-mohms
 	Usage:      optional
 	Value type: <u32>
diff --git a/Documentation/devicetree/bindings/serial/msm_serial_hs.txt b/Documentation/devicetree/bindings/serial/msm_serial_hs.txt
new file mode 100644
index 0000000..031be45
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/msm_serial_hs.txt
@@ -0,0 +1,121 @@
+* Qualcomm MSM HSUART
+
+Required properties:
+- compatible :
+	- "qcom,msm-hsuart-v14" to be used for UARTDM Core v1.4
+- reg : offset and length of the register sets for both blocks, the
+	uart core and the bam core
+- reg-names :
+	- "core_mem" to be used as name of the uart core
+	- "bam_mem" to be used as name of the bam core
+- interrupts : interrupts for both blocks, the uart core and the bam core
+- interrupt-names :
+	- "core_irq" to be used as uart irq
+	- "bam_irq" to be used as bam irq
+- #interrupt-cells: Specifies the number of cells needed to encode an interrupt
+		source. The type shall be a <u32> and the value shall be 1
+- #address-cells: Specifies the number of cells needed to encode an address.
+		The type shall be <u32> and the value shall be 0
+- interrupt-parent : Needed for interrupt mapping
+- bam-tx-ep-pipe-index : BAM TX Endpoint Pipe Index for HSUART
+- bam-rx-ep-pipe-index : BAM RX Endpoint Pipe Index for HSUART
+
+BLSP has a static pipe allocation and assumes a pipe pair for each uart core.
+Pipes [2*i : 2*i+1] are allocated to UART core i, where i = [0 : 5].
+Hence, the minimum and maximum permitted endpoint pipe index values to be
+used with a uart core are 0 and 11, respectively.
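+
+For example, a uart core wired to pipe pair i = 3 (an illustrative choice;
+the actual pair depends on the BLSP wiring of the target), following the
+tx/rx ordering used in the examples below, would set:
+
+	qcom,bam-tx-ep-pipe-index = <6>;
+	qcom,bam-rx-ep-pipe-index = <7>;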
+
+There is one HSUART block used in MSM devices, "qcom,msm-hsuart-v14".
+The msm-serial-hs driver is able to handle it and matches against
+"qcom,msm-hsuart-v14" as the compatible string.
+
+The reg property for the "qcom,msm-hsuart-v14" device must specify both
+register blocks, the uart core and the bam core.
+
+Example:
+
+	uart7: uart@f995d000 {
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xf995d000 0x1000>,
+		      <0xf9944000 0x5000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&uart7>;
+		interrupts = <0 1>;
+		#interrupt-cells = <1>;
+		interrupt-map = <0 &intc 0 113 0
+				1 &intc 0 239 0>;
+		qcom,bam-tx-ep-pipe-index = <0>;
+		qcom,bam-rx-ep-pipe-index = <1>;
+	};
+
+Optional properties:
+- qcom,<gpio-name>-gpio : handle to the GPIO node, see "gpios property" in
+Documentation/devicetree/bindings/gpio/gpio.txt.
+"gpio-name" can be "tx", "rx", "cts" and "rfr", depending on the number of
+UART GPIOs that need to be configured.
+The GPIOs are optional if they should not be configured by the UART driver,
+or when nothing is connected and the internal loopback mode of the uart is
+to be used.
+- qcom,wakeup_irq : UART RX GPIO IRQ line to be configured as a wakeup source.
+- qcom,inject-rx-on-wakeup : enables a feature where, on receiving an
+interrupt on the UART RX GPIO IRQ line (i.e. the wakeup_irq property above),
+the HSUART driver injects the character provided by the
+qcom,rx-char-to-inject property.
+- qcom,rx-char-to-inject : The character to be injected on wakeup.
+- qcom,no-suspend-delay : Decides whether the system goes to suspend
+immediately or not.
+
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
+below optional properties:
+    - qcom,msm_bus,name
+    - qcom,msm_bus,num_cases
+    - qcom,msm_bus,active_only
+    - qcom,msm_bus,num_paths
+    - qcom,msm_bus,vectors
+
+Aliases :
+An alias may optionally be used to bind the UART device to a TTY device
+(ttyHS<alias_num>) with a given alias number. Aliases are of the form
+uart<n>, where <n> is an integer representing the alias number to use;
+the alias value should be from 0 to 255. On systems with multiple UART
+devices, an alias may be defined for each such device.
+
+Example:
+
+	aliases {
+		uart4 = &uart7; // This device will be enumerated as ttyHS4
+	};
+
+	uart7: uart@f995d000 {
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xf995d000 0x1000>,
+		      <0xf9944000 0x5000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&uart7>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 113 0
+				1 &intc 0 239 0
+				2 &msmgpio 42 0>;
+		qcom,tx-gpio = <&msmgpio 41 0x00>;
+		qcom,rx-gpio = <&msmgpio 42 0x00>;
+		qcom,cts-gpio = <&msmgpio 43 0x00>;
+		qcom,rfr-gpio = <&msmgpio 44 0x00>;
+		qcom,inject-rx-on-wakeup = <1>;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <0>;
+		qcom,bam-rx-ep-pipe-index = <1>;
+
+		qcom,msm-bus,name = "uart7";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<84 512 0 0>,
+				<84 512 500 800>;
+	};
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
index 800508a..45e309c 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
@@ -10,6 +10,10 @@
  - interrupts:  Interrupt number
  - reg: Should be address and size of EUD register space
  - reg-names: Should be "eud_base"
+ - clocks: a list of phandles to the PHY clocks. Use as per
+   Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - clock-names: Names of the clocks in 1-1 correspondence with
+   the "clocks" property.
 
 Driver notifies clients via extcon for VBUS spoof attach/detach
 and charger enable/disable events. Clients registered for these
@@ -23,6 +27,8 @@
 		interrupts = <GIC_SPI 492 IRQ_TYPE_LEVEL_HIGH>;
 		reg = <0x88e0000 0x4000>;
 		reg-names = "eud_base";
+		clocks = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+		clock-names = "cfg_ahb_clk";
 	};
 
 An example for EUD extcon client:
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 609d853..6838afd 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -58,6 +58,7 @@
 	gating. Default it is enabled.
  - snps,xhci-imod-value: Interrupt moderation interval for host mode
 	(in increments of 250nsec).
+ - usb-core-id: Identifier used to differentiate between multiple DWC3
+	controllers present on a device.
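+	For example, a device with two controllers might set usb-core-id = <0>
+	in the first controller node and usb-core-id = <1> in the second (the
+	values are illustrative).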
 
 This is usually a subnode to DWC3 glue to which it is connected.
 
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index 6109fad..d23cb46 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -159,6 +159,7 @@
    "efuse_addr": EFUSE address to read and update analog tune parameter.
    "emu_phy_base" : phy base address used for programming emulation target phy.
    "ref_clk_addr" : ref_clk bcr address used for on/off ref_clk before reset.
+   "eud_base" : EUD device register address space, used for the EUD pet functionality.
  - clocks: a list of phandles to the PHY clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
  - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 1826b6f..2765e20 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -195,6 +195,7 @@
 CONFIG_INPUT_GPIO=m
 CONFIG_SERIO_LIBPS2=y
 # CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_HS=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_I2C=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index ce61464..1062175 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -189,6 +189,7 @@
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_I2C=y
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 276e09c..aefdb52 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -153,6 +153,11 @@
 	qcs605-external-codec-mtp.dtb
 endif
 
+ifneq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+dtb-$(CONFIG_ARCH_MSM8953) += msm8953-mtp.dtb
+endif
+
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
 clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cpu.dtsi b/arch/arm64/boot/dts/qcom/msm8953-cpu.dtsi
new file mode 100644
index 0000000..1b78fdd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-cpu.dtsi
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+				core1 {
+					cpu = <&CPU1>;
+				};
+				core2 {
+					cpu = <&CPU2>;
+				};
+				core3 {
+					cpu = <&CPU3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&CPU4>;
+				};
+				core1 {
+					cpu = <&CPU5>;
+				};
+				core2 {
+					cpu = <&CPU6>;
+				};
+				core3 {
+					cpu = <&CPU7>;
+				};
+			};
+		};
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53","arm,armv8";
+			reg = <0x0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-level = <2>;
+			      /* A53 L2 dump not supported */
+			      qcom,dump-size = <0x0>;
+			};
+			L1_I_0: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x8800>;
+			};
+			L1_D_0: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53","arm,armv8";
+			enable-method = "psci";
+			reg = <0x1>;
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			L1_I_1: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x8800>;
+			};
+			L1_D_1: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53","arm,armv8";
+			enable-method = "psci";
+			reg = <0x2>;
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			L1_I_2: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x8800>;
+			};
+			L1_D_2: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53","arm,armv8";
+			enable-method = "psci";
+			reg = <0x3>;
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			L1_I_3: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x8800>;
+			};
+			L1_D_3: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU4: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53","arm,armv8";
+			enable-method = "psci";
+			reg = <0x100>;
+			efficiency = <1126>;
+			next-level-cache = <&L2_1>;
+			L2_1: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-level = <2>;
+			      /* A53 L2 dump not supported */
+			      qcom,dump-size = <0x0>;
+			};
+			L1_I_100: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x8800>;
+			};
+			L1_D_100: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU5: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53","arm,armv8";
+			enable-method = "psci";
+			reg = <0x101>;
+			efficiency = <1126>;
+			next-level-cache = <&L2_1>;
+			L1_I_101: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x8800>;
+			};
+			L1_D_101: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU6: cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53","arm,armv8";
+			enable-method = "psci";
+			reg = <0x102>;
+			efficiency = <1126>;
+			next-level-cache = <&L2_1>;
+			L1_I_102: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x8800>;
+			};
+			L1_D_102: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU7: cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53","arm,armv8";
+			enable-method = "psci";
+			reg = <0x103>;
+			efficiency = <1126>;
+			next-level-cache = <&L2_1>;
+			L1_I_103: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x8800>;
+			};
+			L1_D_103: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9000>;
+			};
+		};
+	};
+};
+
+&soc {
+	cpuss_dump {
+		compatible = "qcom,cpuss-dump";
+		qcom,l2_dump0 {
+			/* L2 cache dump for A53 cluster */
+			qcom,dump-node = <&L2_0>;
+			qcom,dump-id = <0xC0>;
+		};
+		qcom,l2_dump1 {
+			/* L2 cache dump for A53 cluster */
+			qcom,dump-node = <&L2_1>;
+			qcom,dump-id = <0xC1>;
+		};
+		qcom,l1_i_cache0 {
+			qcom,dump-node = <&L1_I_0>;
+			qcom,dump-id = <0x60>;
+		};
+		qcom,l1_i_cache1 {
+			qcom,dump-node = <&L1_I_1>;
+			qcom,dump-id = <0x61>;
+		};
+		qcom,l1_i_cache2 {
+			qcom,dump-node = <&L1_I_2>;
+			qcom,dump-id = <0x62>;
+		};
+		qcom,l1_i_cache3 {
+			qcom,dump-node = <&L1_I_3>;
+			qcom,dump-id = <0x63>;
+		};
+		qcom,l1_i_cache100 {
+			qcom,dump-node = <&L1_I_100>;
+			qcom,dump-id = <0x64>;
+		};
+		qcom,l1_i_cache101 {
+			qcom,dump-node = <&L1_I_101>;
+			qcom,dump-id = <0x65>;
+		};
+		qcom,l1_i_cache102 {
+			qcom,dump-node = <&L1_I_102>;
+			qcom,dump-id = <0x66>;
+		};
+		qcom,l1_i_cache103 {
+			qcom,dump-node = <&L1_I_103>;
+			qcom,dump-id = <0x67>;
+		};
+		qcom,l1_d_cache0 {
+			qcom,dump-node = <&L1_D_0>;
+			qcom,dump-id = <0x80>;
+		};
+		qcom,l1_d_cache1 {
+			qcom,dump-node = <&L1_D_1>;
+			qcom,dump-id = <0x81>;
+		};
+		qcom,l1_d_cache2 {
+			qcom,dump-node = <&L1_D_2>;
+			qcom,dump-id = <0x82>;
+		};
+		qcom,l1_d_cache3 {
+			qcom,dump-node = <&L1_D_3>;
+			qcom,dump-id = <0x83>;
+		};
+		qcom,l1_d_cache100 {
+			qcom,dump-node = <&L1_D_100>;
+			qcom,dump-id = <0x84>;
+		};
+		qcom,l1_d_cache101 {
+			qcom,dump-node = <&L1_D_101>;
+			qcom,dump-id = <0x85>;
+		};
+		qcom,l1_d_cache102 {
+			qcom,dump-node = <&L1_D_102>;
+			qcom,dump-id = <0x86>;
+		};
+		qcom,l1_d_cache103 {
+			qcom,dump-node = <&L1_D_103>;
+			qcom,dump-id = <0x87>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
new file mode 100644
index 0000000..1e8b0f0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 MTP";
+	compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+	qcom,board-id= <8 0>;
+	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
similarity index 72%
copy from arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi
copy to arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
index bc431f2..243aaf5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,4 +10,9 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-#include "sdm670-audio-overlay.dtsi"
+
+&blsp1_uart0 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
new file mode 100644
index 0000000..e3ada39
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
@@ -0,0 +1,1506 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	tlmm: pinctrl@1000000 {
+		compatible = "qcom,msm8953-pinctrl";
+		reg = <0x1000000 0x300000>;
+		interrupts = <0 208 0>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		pmx-uartconsole {
+			uart_console_active: uart_console_active {
+				mux {
+					pins = "gpio4", "gpio5";
+					function = "blsp_uart2";
+				};
+
+				config {
+					pins = "gpio4", "gpio5";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			uart_console_sleep: uart_console_sleep {
+				mux {
+					pins = "gpio4", "gpio5";
+					function = "blsp_uart2";
+				};
+
+				config {
+					pins = "gpio4", "gpio5";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+
+		};
+		cci {
+			cci0_active: cci0_active {
+				/* cci0 active state */
+				mux {
+					/* CLK, DATA */
+					pins = "gpio29", "gpio30";
+					function = "cci_i2c";
+				};
+
+				config {
+					pins = "gpio29", "gpio30";
+					drive-strength = <2>; /* 2 MA */
+					bias-disable; /* No PULL */
+				};
+			};
+
+			cci0_suspend: cci0_suspend {
+				/* cci0 suspended state */
+				mux {
+					/* CLK, DATA */
+					pins = "gpio29", "gpio30";
+					function = "cci_i2c";
+				};
+
+				config {
+					pins = "gpio29", "gpio30";
+					drive-strength = <2>; /* 2 MA */
+					bias-disable; /* No PULL */
+				};
+			};
+
+			cci1_active: cci1_active {
+				/* cci1 active state */
+				mux {
+					/* CLK, DATA */
+					pins = "gpio31", "gpio32";
+					function = "cci_i2c";
+				};
+
+				config {
+					pins = "gpio31", "gpio32";
+					drive-strength = <2>; /* 2 MA */
+					bias-disable; /* No PULL */
+				};
+			};
+
+			cci1_suspend: cci1_suspend {
+				/* cci1 suspended state */
+				mux {
+					/* CLK, DATA */
+					pins = "gpio31", "gpio32";
+					function = "cci_i2c";
+				};
+
+				config {
+					pins = "gpio31", "gpio32";
+					drive-strength = <2>; /* 2 MA */
+					bias-disable; /* No PULL */
+				};
+			};
+		};
+
+		/*sensors */
+		cam_sensor_mclk0_default: cam_sensor_mclk0_default {
+			/* MCLK0 */
+			mux {
+				/* CLK, DATA */
+				pins = "gpio26";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio26";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_mclk0_sleep: cam_sensor_mclk0_sleep {
+			/* MCLK0 */
+			mux {
+				/* CLK, DATA */
+				pins = "gpio26";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio26";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_rear_default: cam_sensor_rear_default {
+			/* RESET, STANDBY */
+			mux {
+				pins = "gpio40", "gpio39";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio40","gpio39";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_rear_sleep: cam_sensor_rear_sleep {
+			/* RESET, STANDBY */
+			mux {
+				pins = "gpio40","gpio39";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio40","gpio39";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_rear_vana: cam_sensor_rear_vdig {
+			/* VDIG */
+			mux {
+				pins = "gpio134";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio134";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_rear_vana_sleep: cam_sensor_rear_vdig_sleep {
+			/* VDIG */
+			mux {
+				pins = "gpio134";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio134";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_mclk1_default: cam_sensor_mclk1_default {
+			/* MCLK1 */
+			mux {
+				/* CLK, DATA */
+				pins = "gpio27";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio27";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_mclk1_sleep: cam_sensor_mclk1_sleep {
+			/* MCLK1 */
+			mux {
+				/* CLK, DATA */
+				pins = "gpio27";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio27";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_front_default: cam_sensor_front_default {
+			/* RESET, STANDBY */
+			mux {
+				pins = "gpio131","gpio132";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio131","gpio132";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_front_sleep: cam_sensor_front_sleep {
+			/* RESET, STANDBY */
+			mux {
+				pins = "gpio131","gpio132";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio131","gpio132";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_mclk2_default: cam_sensor_mclk2_default {
+			/* MCLK2 */
+			mux {
+				/* CLK, DATA */
+				pins = "gpio28";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio28";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_mclk2_sleep: cam_sensor_mclk2_sleep {
+			/* MCLK2 */
+			mux {
+				/* CLK, DATA */
+				pins = "gpio28";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio28";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_front1_default: cam_sensor_front1_default {
+			/* RESET, STANDBY */
+			mux {
+				pins = "gpio129", "gpio130";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio129", "gpio130";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_front1_sleep: cam_sensor_front1_sleep {
+			/* RESET, STANDBY */
+			mux {
+				pins = "gpio129", "gpio130";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio129", "gpio130";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		pmx_adv7533_int: pmx_adv7533_int {
+			adv7533_int_active: adv7533_int_active {
+				mux {
+					pins = "gpio90";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio90";
+					drive-strength = <16>;
+					bias-disable;
+				};
+			};
+
+			adv7533_int_suspend: adv7533_int_suspend {
+				mux {
+					pins = "gpio90";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio90";
+					drive-strength = <16>;
+					bias-disable;
+				};
+			};
+
+		};
+
+		pmx_mdss: pmx_mdss {
+			mdss_dsi_active: mdss_dsi_active {
+				mux {
+					pins = "gpio61", "gpio59";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio61", "gpio59";
+					drive-strength = <8>; /* 8 mA */
+					bias-disable = <0>; /* no pull */
+					output-high;
+				};
+			};
+
+			mdss_dsi_suspend: mdss_dsi_suspend {
+				mux {
+					pins = "gpio61", "gpio59";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio61", "gpio59";
+					drive-strength = <2>; /* 2 mA */
+					bias-pull-down; /* pull down */
+				};
+			};
+		};
+
+		pmx_mdss_te {
+			mdss_te_active: mdss_te_active {
+				mux {
+					pins = "gpio24";
+					function = "mdp_vsync";
+				};
+				config {
+					pins = "gpio24";
+					drive-strength = <2>; /* 2 mA */
+					bias-pull-down; /* pull down*/
+				};
+			};
+
+			mdss_te_suspend: mdss_te_suspend {
+				mux {
+					pins = "gpio24";
+					function = "mdp_vsync";
+				};
+				config {
+					pins = "gpio24";
+					drive-strength = <2>; /* 2 mA */
+					bias-pull-down; /* pull down */
+				};
+			};
+		};
+
+		hsuart_active: default {
+			mux {
+				pins = "gpio12", "gpio13", "gpio14", "gpio15";
+				function = "blsp_uart4";
+			};
+
+			config {
+				pins = "gpio12", "gpio13", "gpio14", "gpio15";
+				drive-strength = <16>;
+				bias-disable;
+			};
+		};
+
+		hsuart_sleep: sleep {
+			mux {
+				pins = "gpio12", "gpio13", "gpio14", "gpio15";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio12", "gpio13", "gpio14", "gpio15";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		/* SDC pin type */
+		sdc1_clk_on: sdc1_clk_on {
+			config {
+				pins = "sdc1_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc1_clk_off: sdc1_clk_off {
+			config {
+				pins = "sdc1_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_cmd_on: sdc1_cmd_on {
+			config {
+				pins = "sdc1_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc1_cmd_off: sdc1_cmd_off {
+			config {
+				pins = "sdc1_cmd";
+				num-grp-pins = <1>;
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_data_on: sdc1_data_on {
+			config {
+				pins = "sdc1_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc1_data_off: sdc1_data_off {
+			config {
+				pins = "sdc1_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_rclk_on: sdc1_rclk_on {
+			config {
+				pins = "sdc1_rclk";
+				bias-pull-down; /* pull down */
+			};
+		};
+
+		sdc1_rclk_off: sdc1_rclk_off {
+			config {
+				pins = "sdc1_rclk";
+				bias-pull-down; /* pull down */
+			};
+		};
+
+		sdc2_clk_on: sdc2_clk_on {
+			config {
+				pins = "sdc2_clk";
+				drive-strength = <16>; /* 16 MA */
+				bias-disable; /* NO pull */
+			};
+		};
+
+		sdc2_clk_off: sdc2_clk_off {
+			config {
+				pins = "sdc2_clk";
+				bias-disable; /* NO pull */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		sdc2_cmd_on: sdc2_cmd_on {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up; /* pull up */
+				drive-strength = <10>; /* 10 MA */
+			};
+		};
+
+		sdc2_cmd_off: sdc2_cmd_off {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up; /* pull up */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		sdc2_data_on: sdc2_data_on {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up; /* pull up */
+				drive-strength = <10>; /* 10 MA */
+			};
+		};
+
+		sdc2_data_off: sdc2_data_off {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up; /* pull up */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		sdc2_cd_on: cd_on {
+			mux {
+				pins = "gpio133";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio133";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		sdc2_cd_off: cd_off {
+			mux {
+				pins = "gpio133";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio133";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		i2c_2 {
+			i2c_2_active: i2c_2_active {
+				/* active state */
+				mux {
+					pins = "gpio6", "gpio7";
+					function = "blsp_i2c2";
+				};
+
+				config {
+					pins = "gpio6", "gpio7";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_2_sleep: i2c_2_sleep {
+				/* suspended state */
+				mux {
+					pins = "gpio6", "gpio7";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio6", "gpio7";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		i2c_3 {
+			i2c_3_active: i2c_3_active {
+				/* active state */
+				mux {
+					pins = "gpio10", "gpio11";
+					function = "blsp_i2c3";
+				};
+
+				config {
+					pins = "gpio10", "gpio11";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_3_sleep: i2c_3_sleep {
+				/* suspended state */
+				mux {
+					pins = "gpio10", "gpio11";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio10", "gpio11";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		i2c_5 {
+			i2c_5_active: i2c_5_active {
+				/* active state */
+				mux {
+					pins = "gpio18", "gpio19";
+					function = "blsp_i2c5";
+				};
+
+				config {
+					pins = "gpio18", "gpio19";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_5_sleep: i2c_5_sleep {
+				/* suspended state */
+				mux {
+					pins = "gpio18", "gpio19";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio18", "gpio19";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		pmx_rd_nfc_int {
+			/*qcom,pins = <&gp 17>;*/
+			pins = "gpio17";
+			qcom,pin-func = <0>;
+			qcom,num-grp-pins = <1>;
+			label = "pmx_nfc_int";
+
+			nfc_int_active: active {
+				drive-strength = <6>;
+				bias-pull-up;
+			};
+
+			nfc_int_suspend: suspend {
+				drive-strength = <6>;
+				bias-pull-up;
+			};
+		};
+
+		pmx_nfc_reset {
+			/*qcom,pins = <&gp 16>;*/
+			pins = "gpio16";
+			qcom,pin-func = <0>;
+			qcom,num-grp-pins = <1>;
+			label = "pmx_nfc_disable";
+
+			nfc_disable_active: active {
+				drive-strength = <6>;
+				bias-pull-up;
+			};
+
+			nfc_disable_suspend: suspend {
+				drive-strength = <6>;
+				bias-disable;
+			};
+		};
+
+		wcnss_pmux_5wire {
+			/* Active configuration of bus pins */
+			wcnss_default: wcnss_default {
+				wcss_wlan2 {
+					pins = "gpio76";
+					function = "wcss_wlan2";
+				};
+				wcss_wlan1 {
+					pins = "gpio77";
+					function = "wcss_wlan1";
+				};
+				wcss_wlan0 {
+					pins = "gpio78";
+					function = "wcss_wlan0";
+				};
+				wcss_wlan {
+					pins = "gpio79", "gpio80";
+					function = "wcss_wlan";
+				};
+
+				config {
+					pins = "gpio76", "gpio77",
+						"gpio78", "gpio79",
+						"gpio80";
+					drive-strength = <6>; /* 6 MA */
+					bias-pull-up; /* PULL UP */
+				};
+			};
+
+			wcnss_sleep: wcnss_sleep {
+				wcss_wlan2 {
+					pins = "gpio76";
+					function = "wcss_wlan2";
+				};
+				wcss_wlan1 {
+					pins = "gpio77";
+					function = "wcss_wlan1";
+				};
+				wcss_wlan0 {
+					pins = "gpio78";
+					function = "wcss_wlan0";
+				};
+				wcss_wlan {
+					pins = "gpio79", "gpio80";
+					function = "wcss_wlan";
+				};
+
+				config {
+					pins = "gpio76", "gpio77",
+						"gpio78", "gpio79",
+						"gpio80";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-down; /* PULL Down */
+				};
+			};
+		};
+
+		wcnss_pmux_gpio: wcnss_pmux_gpio {
+			wcnss_gpio_default: wcnss_gpio_default {
+				/* Active configuration of bus pins */
+				mux {
+					/* Uses general purpose pins */
+					pins = "gpio76", "gpio77",
+					"gpio78", "gpio79",
+					"gpio80";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio76", "gpio77",
+						"gpio78", "gpio79",
+						"gpio80";
+					drive-strength = <6>; /* 6 MA */
+					bias-pull-up; /* PULL UP */
+				};
+			};
+		};
+
+		wcd9xxx_intr {
+			wcd_intr_default: wcd_intr_default {
+				mux {
+					pins = "gpio73";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio73";
+					drive-strength = <2>; /* 2 mA */
+					bias-pull-down; /* pull down */
+					input-enable;
+				};
+			};
+		};
+
+		cdc_reset_ctrl {
+			cdc_reset_sleep: cdc_reset_sleep {
+				mux {
+					pins = "gpio67";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio67";
+					drive-strength = <16>;
+					bias-disable;
+					output-low;
+				};
+			};
+			cdc_reset_active: cdc_reset_active {
+				mux {
+					pins = "gpio67";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio67";
+					drive-strength = <16>;
+					bias-pull-down;
+					output-high;
+				};
+			};
+		};
+
+		cdc_mclk2_pin {
+			cdc_mclk2_sleep: cdc_mclk2_sleep {
+				mux {
+					pins = "gpio66";
+					function = "pri_mi2s";
+				};
+				config {
+					pins = "gpio66";
+					drive-strength = <2>; /* 2 mA */
+					bias-pull-down;       /* PULL DOWN */
+				};
+			};
+			cdc_mclk2_active: cdc_mclk2_active {
+				mux {
+					pins = "gpio66";
+					function = "pri_mi2s";
+				};
+				config {
+					pins = "gpio66";
+					drive-strength = <8>; /* 8 mA */
+					bias-disable;         /* NO PULL */
+				};
+			};
+		};
+
+		cdc-pdm-2-lines {
+			cdc_pdm_lines_2_act: pdm_lines_2_on {
+				mux {
+					pins = "gpio70", "gpio71", "gpio72";
+					function = "cdc_pdm0";
+				};
+
+				config {
+					pins = "gpio70", "gpio71", "gpio72";
+					drive-strength = <8>;
+				};
+			};
+
+			cdc_pdm_lines_2_sus: pdm_lines_2_off {
+				mux {
+					pins = "gpio70", "gpio71", "gpio72";
+					function = "cdc_pdm0";
+				};
+
+				config {
+					pins = "gpio70", "gpio71", "gpio72";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		cdc-pdm-lines {
+			cdc_pdm_lines_act: pdm_lines_on {
+				mux {
+					pins = "gpio69", "gpio73", "gpio74";
+					function = "cdc_pdm0";
+				};
+
+				config {
+					pins = "gpio69", "gpio73", "gpio74";
+					drive-strength = <8>;
+				};
+			};
+			cdc_pdm_lines_sus: pdm_lines_off {
+				mux {
+					pins = "gpio69", "gpio73", "gpio74";
+					function = "cdc_pdm0";
+				};
+
+				config {
+					pins = "gpio69", "gpio73", "gpio74";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		cdc-pdm-comp-lines {
+			cdc_pdm_comp_lines_act: pdm_comp_lines_on {
+				mux {
+					pins = "gpio67", "gpio68";
+					function = "cdc_pdm0";
+				};
+
+				config {
+					pins = "gpio67", "gpio68";
+					drive-strength = <8>;
+				};
+			};
+
+			cdc_pdm_comp_lines_sus: pdm_comp_lines_off {
+				mux {
+					pins = "gpio67", "gpio68";
+					function = "cdc_pdm0";
+				};
+
+				config {
+					pins = "gpio67", "gpio68";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		cross-conn-det {
+			cross_conn_det_act: lines_on {
+				mux {
+					pins = "gpio63";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <8>;
+					output-low;
+					bias-pull-down;
+				};
+			};
+
+			cross_conn_det_sus: lines_off {
+				mux {
+					pins = "gpio63";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		/* WSA VI sense */
+		wsa-vi {
+			wsa_vi_on: wsa_vi_on {
+				mux {
+					pins = "gpio94", "gpio95";
+					function = "wsa_io";
+				};
+
+				config {
+					pins = "gpio94", "gpio95";
+					drive-strength = <8>; /* 8 MA */
+					bias-disable; /* NO pull */
+				};
+			};
+
+			wsa_vi_off: wsa_vi_off {
+				mux {
+					pins = "gpio94", "gpio95";
+					function = "wsa_io";
+				};
+
+				config {
+					pins = "gpio94", "gpio95";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-down;
+				};
+			};
+		};
+
+		/* WSA Reset */
+		wsa_reset {
+			wsa_reset_on: wsa_reset_on {
+				mux {
+					pins = "gpio96";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio96";
+					drive-strength = <2>; /* 2 MA */
+					output-high;
+				};
+			};
+
+			wsa_reset_off: wsa_reset_off {
+				mux {
+					pins = "gpio96";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio96";
+					drive-strength = <2>; /* 2 MA */
+					output-low;
+				};
+			};
+		};
+
+		/* WSA CLK */
+		wsa_clk {
+			wsa_clk_on: wsa_clk_on {
+				mux {
+					pins = "gpio25";
+					function = "pri_mi2s_mclk_a";
+				};
+
+				config {
+					pins = "gpio25";
+					drive-strength = <8>; /* 8 MA */
+					output-high;
+				};
+			};
+
+			wsa_clk_off: wsa_clk_off {
+				mux {
+					pins = "gpio25";
+					function = "pri_mi2s_mclk_a";
+				};
+
+				config {
+					pins = "gpio25";
+					drive-strength = <2>; /* 2 MA */
+					output-low;
+					bias-pull-down;
+				};
+			};
+		};
+
+		pri-tlmm-lines {
+			pri_tlmm_lines_act: pri_tlmm_lines_act {
+				mux {
+					pins = "gpio91", "gpio93";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio91", "gpio93";
+					drive-strength = <8>;
+				};
+			};
+
+			pri_tlmm_lines_sus: pri_tlmm_lines_sus {
+				mux {
+					pins = "gpio91", "gpio93";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio91", "gpio93";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		pri-tlmm-ws-lines {
+			pri_tlmm_ws_act: pri_tlmm_ws_act {
+				mux {
+					pins = "gpio92";
+					function = "pri_mi2s_ws";
+				};
+
+				config {
+					pins = "gpio92";
+					drive-strength = <8>;
+				};
+			};
+
+			pri_tlmm_ws_sus: pri_tlmm_ws_sus {
+				mux {
+					pins = "gpio92";
+					function = "pri_mi2s_ws";
+				};
+
+				config {
+					pins = "gpio92";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		spi3 {
+			spi3_default: spi3_default {
+				/* active state */
+				mux {
+					/* MOSI, MISO, CLK */
+					pins = "gpio8", "gpio9", "gpio11";
+					function = "blsp_spi3";
+				};
+
+				config {
+					pins = "gpio8", "gpio9", "gpio11";
+					drive-strength = <12>; /* 12 MA */
+					bias-disable = <0>; /* No PULL */
+				};
+			};
+
+			spi3_sleep: spi3_sleep {
+				/* suspended state */
+				mux {
+					/* MOSI, MISO, CLK */
+					pins = "gpio8", "gpio9", "gpio11";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio8", "gpio9", "gpio11";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-down; /* PULL Down */
+				};
+			};
+
+			spi3_cs0_active: cs0_active {
+				/* CS */
+				mux {
+					pins = "gpio10";
+					function = "blsp_spi3";
+				};
+
+				config {
+					pins = "gpio10";
+					drive-strength = <2>;
+					bias-disable = <0>;
+				};
+			};
+
+			spi3_cs0_sleep: cs0_sleep {
+				/* CS */
+				mux {
+					pins = "gpio10";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio10";
+					drive-strength = <2>;
+					bias-disable = <0>;
+				};
+			};
+		};
+
+		/* add pingrp for touchscreen */
+		pmx_ts_int_active {
+			ts_int_active: ts_int_active {
+				mux {
+					pins = "gpio65";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <8>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		pmx_ts_int_suspend {
+			ts_int_suspend: ts_int_suspend {
+				mux {
+					pins = "gpio65";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		pmx_ts_reset_active {
+			ts_reset_active: ts_reset_active {
+				mux {
+					pins = "gpio64";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio64";
+					drive-strength = <8>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		pmx_ts_reset_suspend {
+			ts_reset_suspend: ts_reset_suspend {
+				mux {
+					pins = "gpio64";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio64";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		pmx_ts_release {
+			ts_release: ts_release {
+				mux {
+					pins = "gpio65", "gpio64";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65", "gpio64";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		tlmm_gpio_key {
+			gpio_key_active: gpio_key_active {
+				mux {
+					pins = "gpio85", "gpio86", "gpio87";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio85", "gpio86", "gpio87";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+
+			gpio_key_suspend: gpio_key_suspend {
+				mux {
+					pins = "gpio85", "gpio86", "gpio87";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio85", "gpio86", "gpio87";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+		pmx_qdsd_clk {
+			qdsd_clk_sdcard: clk_sdcard {
+				config {
+					pins = "qdsd_clk";
+					bias-disable;/* NO pull */
+					drive-strength = <16>; /* 16 MA */
+				};
+			};
+			qdsd_clk_trace: clk_trace {
+				config {
+					pins = "qdsd_clk";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_clk_swdtrc: clk_swdtrc {
+				config {
+					pins = "qdsd_clk";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_clk_spmi: clk_spmi {
+				config {
+					pins = "qdsd_clk";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+		};
+
+		pmx_qdsd_cmd {
+			qdsd_cmd_sdcard: cmd_sdcard {
+				config {
+					pins = "qdsd_cmd";
+					bias-pull-down; /* pull down */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+			qdsd_cmd_trace: cmd_trace {
+				config {
+					pins = "qdsd_cmd";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_cmd_swduart: cmd_uart {
+				config {
+					pins = "qdsd_cmd";
+					bias-pull-up; /* pull up */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_cmd_swdtrc: cmd_swdtrc {
+				config {
+					pins = "qdsd_cmd";
+					bias-pull-up; /* pull up */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_cmd_jtag: cmd_jtag {
+				config {
+					pins = "qdsd_cmd";
+					bias-disable; /* NO pull */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+			qdsd_cmd_spmi: cmd_spmi {
+				config {
+					pins = "qdsd_cmd";
+					bias-pull-down; /* pull down */
+					drive-strength = <10>; /* 10 MA */
+				};
+			};
+		};
+
+		pmx_qdsd_data0 {
+			qdsd_data0_sdcard: data0_sdcard {
+				config {
+					pins = "qdsd_data0";
+					bias-pull-down; /* pull down */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+			qdsd_data0_trace: data0_trace {
+				config {
+					pins = "qdsd_data0";
+					bias-pull-down; /* pull down */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+			qdsd_data0_swduart: data0_uart {
+				config {
+					pins = "qdsd_data0";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_data0_swdtrc: data0_swdtrc {
+				config {
+					pins = "qdsd_data0";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_data0_jtag: data0_jtag {
+				config {
+					pins = "qdsd_data0";
+					bias-pull-up; /* pull up */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_data0_spmi: data0_spmi {
+				config {
+					pins = "qdsd_data0";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+		};
+
+		pmx_qdsd_data1 {
+			qdsd_data1_sdcard: data1_sdcard {
+				config {
+					pins = "qdsd_data1";
+					bias-pull-down; /* pull down */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+			qdsd_data1_trace: data1_trace {
+				config {
+					pins = "qdsd_data1";
+					bias-pull-down; /* pull down */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+			qdsd_data1_swduart: data1_uart {
+				config {
+					pins = "qdsd_data1";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_data1_swdtrc: data1_swdtrc {
+				config {
+					pins = "qdsd_data1";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_data1_jtag: data1_jtag {
+				config {
+					pins = "qdsd_data1";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+		};
+
+		pmx_qdsd_data2 {
+			qdsd_data2_sdcard: data2_sdcard {
+				config {
+					pins = "qdsd_data2";
+					bias-pull-down; /* pull down */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+			qdsd_data2_trace: data2_trace {
+				config {
+					pins = "qdsd_data2";
+					bias-pull-down; /* pull down */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+			qdsd_data2_swduart: data2_uart {
+				config {
+					pins = "qdsd_data2";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_data2_swdtrc: data2_swdtrc {
+				config {
+					pins = "qdsd_data2";
+					bias-pull-down; /* pull down */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_data2_jtag: data2_jtag {
+				config {
+					pins = "qdsd_data2";
+					bias-pull-up; /* pull up */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+		};
+
+		pmx_qdsd_data3 {
+			qdsd_data3_sdcard: data3_sdcard {
+				config {
+					pins = "qdsd_data3";
+					bias-pull-down; /* pull down */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+			qdsd_data3_trace: data3_trace {
+				config {
+					pins = "qdsd_data3";
+					bias-pull-down; /* pull down */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+			qdsd_data3_swduart: data3_uart {
+				config {
+					pins = "qdsd_data3";
+					bias-pull-up; /* pull up */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_data3_swdtrc: data3_swdtrc {
+				config {
+					pins = "qdsd_data3";
+					bias-pull-up; /* pull up */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_data3_jtag: data3_jtag {
+				config {
+					pins = "qdsd_data3";
+					bias-pull-up; /* pull up */
+					drive-strength = <2>; /* 2 MA */
+				};
+			};
+			qdsd_data3_spmi: data3_spmi {
+				config {
+					pins = "qdsd_data3";
+					bias-pull-down; /* pull down */
+					drive-strength = <8>; /* 8 MA */
+				};
+			};
+		};
+
+		typec_ssmux_config: typec_ssmux_config {
+			mux {
+				pins = "gpio139";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio139";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
new file mode 100644
index 0000000..f17ac32
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -0,0 +1,605 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "skeleton64.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM 8953";
+	compatible = "qcom,msm8953";
+	qcom,msm-id = <293 0x0>;
+	interrupt-parent = <&intc>;
+
+	chosen {
+		bootargs = "sched_enable_hmp=1 sched_enable_power_aware=1";
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		other_ext_mem: other_ext_region@0 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0x0 0x85b00000 0x0 0xd00000>;
+		};
+
+		modem_mem: modem_region@0 {
+			compatible = "removed-dma-pool";
+			no-map-fixup;
+			reg = <0x0 0x86c00000 0x0 0x6a00000>;
+		};
+
+		adsp_fw_mem: adsp_fw_region@0 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0x0 0x8d600000 0x0 0x1100000>;
+		};
+
+		wcnss_fw_mem: wcnss_fw_region@0 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0x0 0x8e700000 0x0 0x700000>;
+		};
+
+		venus_mem: venus_region@0 {
+			compatible = "shared-dma-pool";
+			reusable;
+			alloc-ranges = <0x0 0x80000000 0x0 0x10000000>;
+			alignment = <0 0x400000>;
+			size = <0 0x0800000>;
+		};
+
+		secure_mem: secure_region@0 {
+			compatible = "shared-dma-pool";
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x09800000>;
+		};
+
+		qseecom_mem: qseecom_region@0 {
+			compatible = "shared-dma-pool";
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x1000000>;
+		};
+
+		adsp_mem: adsp_region@0 {
+			compatible = "shared-dma-pool";
+			reusable;
+			size = <0 0x400000>;
+		};
+
+		dfps_data_mem: dfps_data_mem@90000000 {
+		       reg = <0 0x90000000 0 0x1000>;
+		       label = "dfps_data_mem";
+		};
+
+		cont_splash_mem: splash_region@0x90001000 {
+			reg = <0x0 0x90001000 0x0 0x13ff000>;
+			label = "cont_splash_mem";
+		};
+
+		gpu_mem: gpu_region@0 {
+			compatible = "shared-dma-pool";
+			reusable;
+			alloc-ranges = <0x0 0x80000000 0x0 0x10000000>;
+			alignment = <0 0x400000>;
+			size = <0 0x800000>;
+		};
+	};
+
+	aliases {
+		/* smdtty devices */
+		sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
+		sdhc2 = &sdhc_2; /* SDC2 for SD card */
+	};
+
+	soc: soc { };
+
+};
+
+#include "msm8953-pinctrl.dtsi"
+#include "msm8953-cpu.dtsi"
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	ranges = <0 0 0 0xffffffff>;
+	compatible = "simple-bus";
+
+	apc_apm: apm@b111000 {
+		compatible = "qcom,msm8953-apm";
+		reg = <0xb111000 0x1000>;
+		reg-names = "pm-apcc-glb";
+		qcom,apm-post-halt-delay = <0x2>;
+		qcom,apm-halt-clk-delay = <0x11>;
+		qcom,apm-resume-clk-delay = <0x10>;
+		qcom,apm-sel-switch-delay = <0x01>;
+	};
+
+	intc: interrupt-controller@b000000 {
+		compatible = "qcom,msm-qgic2";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		reg = <0x0b000000 0x1000>,
+		      <0x0b002000 0x1000>;
+	};
+
+	qcom,msm-gladiator@b1c0000 {
+		compatible = "qcom,msm-gladiator";
+		reg = <0x0b1c0000 0x4000>;
+		reg-names = "gladiator_base";
+		interrupts = <0 22 0>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <1 2 0xff08>,
+			     <1 3 0xff08>,
+			     <1 4 0xff08>,
+			     <1 1 0xff08>;
+		clock-frequency = <19200000>;
+	};
+
+	timer@b120000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		compatible = "arm,armv7-timer-mem";
+		reg = <0xb120000 0x1000>;
+		clock-frequency = <19200000>;
+
+		frame@b121000 {
+			frame-number = <0>;
+			interrupts = <0 8 0x4>,
+				     <0 7 0x4>;
+			reg = <0xb121000 0x1000>,
+			      <0xb122000 0x1000>;
+		};
+
+		frame@b123000 {
+			frame-number = <1>;
+			interrupts = <0 9 0x4>;
+			reg = <0xb123000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@b124000 {
+			frame-number = <2>;
+			interrupts = <0 10 0x4>;
+			reg = <0xb124000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@b125000 {
+			frame-number = <3>;
+			interrupts = <0 11 0x4>;
+			reg = <0xb125000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@b126000 {
+			frame-number = <4>;
+			interrupts = <0 12 0x4>;
+			reg = <0xb126000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@b127000 {
+			frame-number = <5>;
+			interrupts = <0 13 0x4>;
+			reg = <0xb127000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@b128000 {
+			frame-number = <6>;
+			interrupts = <0 14 0x4>;
+			reg = <0xb128000 0x1000>;
+			status = "disabled";
+		};
+	};
+
+	qcom,rmtfs_sharedmem@00000000 {
+		compatible = "qcom,sharedmem-uio";
+		reg = <0x00000000 0x00180000>;
+		reg-names = "rmtfs";
+		qcom,client-id = <0x00000001>;
+	};
+
+	restart@4ab000 {
+		compatible = "qcom,pshold";
+		reg = <0x4ab000 0x4>,
+			<0x193d100 0x4>;
+		reg-names = "pshold-base", "tcsr-boot-misc-detect";
+	};
+
+	qcom,mpm2-sleep-counter@4a3000 {
+		compatible = "qcom,mpm2-sleep-counter";
+		reg = <0x4a3000 0x1000>;
+		clock-frequency = <32768>;
+	};
+
+	cpu-pmu {
+		compatible = "arm,armv8-pmuv3";
+		interrupts = <1 7 0xff00>;
+	};
+
+	qcom,sps {
+		compatible = "qcom,msm_sps_4k";
+		qcom,pipe-attr-ee;
+	};
+
+	blsp1_uart0: serial@78af000 {
+		compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+		reg = <0x78af000 0x200>;
+		interrupts = <0 107 0>;
+		status = "disabled";
+	};
+
+	dma_blsp1: qcom,sps-dma@7884000 { /* BLSP1 */
+		#dma-cells = <4>;
+		compatible = "qcom,sps-dma";
+		reg = <0x7884000 0x1f000>;
+		interrupts = <0 238 0>;
+		qcom,summing-threshold = <10>;
+	};
+
+	dma_blsp2: qcom,sps-dma@7ac4000 { /* BLSP2 */
+		#dma-cells = <4>;
+		compatible = "qcom,sps-dma";
+		reg = <0x7ac4000 0x1f000>;
+		interrupts = <0 239 0>;
+		qcom,summing-threshold = <10>;
+	};
+
+	slim_msm: slim@c140000 {
+		cell-index = <1>;
+		compatible = "qcom,slim-ngd";
+		reg = <0xc140000 0x2c000>,
+		      <0xc104000 0x2a000>;
+		reg-names = "slimbus_physical", "slimbus_bam_physical";
+		interrupts = <0 163 0>, <0 180 0>;
+		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+		qcom,apps-ch-pipes = <0x600000>;
+		qcom,ea-pc = <0x200>;
+		status = "disabled";
+	};
+
+	cpubw: qcom,cpubw {
+		compatible = "qcom,devbw";
+		governor = "cpufreq";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<   769 /*  100.8 MHz */ >,
+			<  1611 /*  211.2 MHz */ >,     /*Low SVS*/
+			<  2124 /*  278.4 MHz */ >,
+			<  2929 /*  384   MHz */ >,
+			<  3221 /*  422.4 MHz */ >,	/* SVS */
+			<  4248 /*  556.8 MHz */ >,
+			<  5126 /*  672   MHz */ >,
+			<  5859 /*  768   MHz */ >,     /* SVS+  */
+			<  6152 /*  806.4 MHz */ >,
+			<  6445 /*  844.8 MHz */ >,     /* NOM   */
+			<  7104 /*  931.2 MHz */ >;     /* TURBO */
+	};
+
+	mincpubw: qcom,mincpubw {
+		compatible = "qcom,devbw";
+		governor = "cpufreq";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<   769 /*  100.8 MHz */ >,
+			<  1611 /*  211.2 MHz */ >,     /*Low SVS*/
+			<  2124 /*  278.4 MHz */ >,
+			<  2929 /*  384   MHz */ >,
+			<  3221 /*  422.4 MHz */ >,	/* SVS */
+			<  4248 /*  556.8 MHz */ >,
+			<  5126 /*  672   MHz */ >,
+			<  5859 /*  768   MHz */ >,     /* SVS+  */
+			<  6152 /*  806.4 MHz */ >,
+			<  6445 /*  844.8 MHz */ >,     /* NOM   */
+			<  7104 /*  931.2 MHz */ >;     /* TURBO */
+	};
+
+	qcom,cpu-bwmon {
+		compatible = "qcom,bimc-bwmon2";
+		reg = <0x408000 0x300>, <0x401000 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <0 183 4>;
+		qcom,mport = <0>;
+		qcom,target-dev = <&cpubw>;
+	};
+
+	devfreq-cpufreq {
+		cpubw-cpufreq {
+			target-dev = <&cpubw>;
+			cpu-to-dev-map =
+				<  652800 1611 >,
+				< 1036800 3221 >,
+				< 1401600 5859 >,
+				< 1689600 6445 >,
+				< 1804800 7104 >,
+				< 1958400 7104 >,
+				< 2208000 7104 >;
+		};
+
+		mincpubw-cpufreq {
+			target-dev = <&mincpubw>;
+			cpu-to-dev-map =
+				<  652800 1611 >,
+				< 1401600 3221 >,
+				< 2208000 5859 >;
+		};
+	};
+
+	qcom,ipc-spinlock@1905000 {
+		compatible = "qcom,ipc-spinlock-sfpb";
+		reg = <0x1905000 0x8000>;
+		qcom,num-locks = <8>;
+	};
+
+	qcom,smem@86300000 {
+		compatible = "qcom,smem";
+		reg = <0x86300000 0x100000>,
+			<0x0b011008 0x4>,
+			<0x60000 0x8000>,
+			<0x193d000 0x8>;
+		reg-names = "smem", "irq-reg-base",
+				"aux-mem1", "smem_targ_info_reg";
+		qcom,mpu-enabled;
+
+		qcom,smd-modem {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <0>;
+			qcom,smd-irq-offset = <0x0>;
+			qcom,smd-irq-bitmask = <0x1000>;
+			interrupts = <0 25 1>;
+			label = "modem";
+			qcom,not-loadable;
+		};
+
+		qcom,smsm-modem {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <0>;
+			qcom,smsm-irq-offset = <0x0>;
+			qcom,smsm-irq-bitmask = <0x2000>;
+			interrupts = <0 26 1>;
+		};
+
+		qcom,smd-wcnss {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <6>;
+			qcom,smd-irq-offset = <0x0>;
+			qcom,smd-irq-bitmask = <0x20000>;
+			interrupts = <0 142 1>;
+			label = "wcnss";
+		};
+
+		qcom,smsm-wcnss {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <6>;
+			qcom,smsm-irq-offset = <0x0>;
+			qcom,smsm-irq-bitmask = <0x80000>;
+			interrupts = <0 144 1>;
+		};
+
+		qcom,smd-adsp {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <1>;
+			qcom,smd-irq-offset = <0x0>;
+			qcom,smd-irq-bitmask = <0x100>;
+			interrupts = <0 289 1>;
+			label = "adsp";
+		};
+
+		qcom,smsm-adsp {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <1>;
+			qcom,smsm-irq-offset = <0x0>;
+			qcom,smsm-irq-bitmask = <0x200>;
+			interrupts = <0 290 1>;
+		};
+
+		qcom,smd-rpm {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <15>;
+			qcom,smd-irq-offset = <0x0>;
+			qcom,smd-irq-bitmask = <0x1>;
+			interrupts = <0 168 1>;
+			label = "rpm";
+			qcom,irq-no-suspend;
+			qcom,not-loadable;
+		};
+	};
+
+	qcom,wdt@b017000 {
+		compatible = "qcom,msm-watchdog";
+		reg = <0xb017000 0x1000>;
+		reg-names = "wdt-base";
+		interrupts = <0 3 0>, <0 4 0>;
+		qcom,bark-time = <11000>;
+		qcom,pet-time = <10000>;
+		qcom,ipi-ping;
+		qcom,wakeup-enable;
+	};
+
+	qcom,chd {
+		compatible = "qcom,core-hang-detect";
+		qcom,threshold-arr = <0xb1880b0 0xb1980b0 0xb1a80b0
+			0xb1b80b0 0xb0880b0 0xb0980b0 0xb0a80b0 0xb0b80b0>;
+		qcom,config-arr = <0xb1880b8 0xb1980b8 0xb1a80b8
+			0xb1b80b8 0xb0880b8 0xb0980b8 0xb0a80b8 0xb0b80b8>;
+	};
+
+	qcom,msm-rtb {
+		compatible = "qcom,msm-rtb";
+		qcom,rtb-size = <0x100000>;
+	};
+
+	qcom,msm-imem@8600000 {
+		compatible = "qcom,msm-imem";
+		reg = <0x08600000 0x1000>;
+		ranges = <0x0 0x08600000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		mem_dump_table@10 {
+			compatible = "qcom,msm-imem-mem_dump_table";
+			reg = <0x10 8>;
+		};
+
+		restart_reason@65c {
+			compatible = "qcom,msm-imem-restart_reason";
+			reg = <0x65c 4>;
+		};
+
+		boot_stats@6b0 {
+			compatible = "qcom,msm-imem-boot_stats";
+			reg = <0x6b0 32>;
+		};
+
+		pil@94c {
+			compatible = "qcom,msm-imem-pil";
+			reg = <0x94c 200>;
+		};
+	};
+
+	qcom,memshare {
+		compatible = "qcom,memshare";
+
+		qcom,client_1 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x200000>;
+			qcom,client-id = <0>;
+			qcom,allocate-boot-time;
+			label = "modem";
+		};
+
+		qcom,client_2 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x300000>;
+			qcom,client-id = <2>;
+			label = "modem";
+		};
+
+		mem_client_3_size: qcom,client_3 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x0>;
+			qcom,client-id = <1>;
+			label = "modem";
+		};
+	};
+
+	sdcc1_ice: sdcc1ice@7803000 {
+		compatible = "qcom,ice";
+		reg = <0x7803000 0x8000>;
+		interrupt-names = "sdcc_ice_nonsec_level_irq",
+				  "sdcc_ice_sec_level_irq";
+		interrupts = <0 312 0>, <0 313 0>;
+		qcom,enable-ice-clk;
+		qcom,op-freq-hz = <270000000>, <0>, <0>, <0>;
+		qcom,msm-bus,name = "sdcc_ice_noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<78 512 0 0>,    /* No vote */
+			<78 512 1000 0>; /* Max. bandwidth */
+		qcom,bus-vector-names = "MIN", "MAX";
+		qcom,instance-type = "sdcc";
+	};
+
+	sdhc_1: sdhci@7824900 {
+		compatible = "qcom,sdhci-msm";
+		reg = <0x7824900 0x500>, <0x7824000 0x800>, <0x7824e00 0x200>;
+		reg-names = "hc_mem", "core_mem", "cmdq_mem";
+
+		interrupts = <0 123 0>, <0 138 0>;
+		interrupt-names = "hc_irq", "pwr_irq";
+
+		sdhc-msm-crypto = <&sdcc1_ice>;
+		qcom,bus-width = <8>;
+
+		qcom,devfreq,freq-table = <50000000 200000000>;
+
+		qcom,pm-qos-irq-type = "affine_irq";
+		qcom,pm-qos-irq-latency = <2 213>;
+
+		qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
+		qcom,pm-qos-cmdq-latency-us = <2 213>, <2 213>;
+
+		qcom,pm-qos-legacy-latency-us = <2 213>, <2 213>;
+
+		qcom,msm-bus,name = "sdhc1";
+		qcom,msm-bus,num-cases = <9>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
+			<78 512 1046 3200>,    /* 400 KB/s*/
+			<78 512 52286 160000>, /* 20 MB/s */
+			<78 512 65360 200000>, /* 25 MB/s */
+			<78 512 130718 400000>, /* 50 MB/s */
+			<78 512 130718 400000>, /* 100 MB/s */
+			<78 512 261438 800000>, /* 200 MB/s */
+			<78 512 261438 800000>, /* 400 MB/s */
+			<78 512 1338562 4096000>; /* Max. bandwidth */
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+			100000000 200000000 400000000 4294967295>;
+
+		qcom,ice-clk-rates = <270000000 160000000>;
+		qcom,large-address-bus;
+
+		status = "disabled";
+	};
+
+	sdhc_2: sdhci@7864900 {
+		compatible = "qcom,sdhci-msm";
+		reg = <0x7864900 0x500>, <0x7864000 0x800>;
+		reg-names = "hc_mem", "core_mem";
+
+		interrupts = <0 125 0>, <0 221 0>;
+		interrupt-names = "hc_irq", "pwr_irq";
+
+		qcom,bus-width = <4>;
+
+		qcom,pm-qos-irq-type = "affine_irq";
+		qcom,pm-qos-irq-latency = <2 213>;
+
+		qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
+		qcom,pm-qos-legacy-latency-us = <2 213>, <2 213>;
+
+		qcom,devfreq,freq-table = <50000000 200000000>;
+
+		qcom,msm-bus,name = "sdhc2";
+		qcom,msm-bus,num-cases = <8>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
+			<81 512 1046 3200>,    /* 400 KB/s*/
+			<81 512 52286 160000>, /* 20 MB/s */
+			<81 512 65360 200000>, /* 25 MB/s */
+			<81 512 130718 400000>, /* 50 MB/s */
+			<81 512 261438 800000>, /* 100 MB/s */
+			<81 512 261438 800000>, /* 200 MB/s */
+			<81 512 1338562 4096000>; /* Max. bandwidth */
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+			100000000 200000000 4294967295>;
+
+		qcom,large-address-bus;
+		status = "disabled";
+	};
+
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
index b7737a9..dfb8142 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
@@ -50,7 +50,6 @@
 	qcom,hph-en0-gpio = <&tavil_hph_en0>;
 	qcom,hph-en1-gpio = <&tavil_hph_en1>;
 	qcom,msm-mclk-freq = <9600000>;
-	qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
 	asoc-codec = <&stub_codec>;
 	asoc-codec-names = "msm-stub-codec.1";
 	qcom,wsa-max-devs = <2>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
index c29f7c9..ef92cdd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
@@ -100,7 +100,7 @@
 	int_codec: sound {
 		status = "okay";
 		compatible = "qcom,sdm670-asoc-snd";
-		qcom,model = "sdm670-snd-card";
+		qcom,model = "sdm670-snd-card-mtp";
 		qcom,wcn-btfm;
 		qcom,mi2s-audio-intf;
 		qcom,auxpcm-audio-intf;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-cdp-overlay.dts
index 9feb5b4..2b5ed1a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp-overlay.dts
@@ -20,6 +20,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-cdp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-cdp.dts
index 7aa87a2..5a1b945 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dts
@@ -15,7 +15,7 @@
 
 #include "sdm670.dtsi"
 #include "sdm670-cdp.dtsi"
-#include "sdm670-int-codec-audio-overlay.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index 257698a..fa06779 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -29,6 +29,7 @@
 	vdd-hba-supply = <&ufs_phy_gdsc>;
 	vdd-hba-fixed-regulator;
 	vcc-supply = <&pm660l_l4>;
+	vcc-voltage-level = <2960000 2960000>;
 	vccq2-supply = <&pm660_l8>;
 	vcc-max-microamp = <600000>;
 	vccq2-max-microamp = <600000>;
@@ -91,8 +92,19 @@
 	qcom,vdd-io-current-level = <200 22000>;
 
 	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 0 204 0
+			1 &intc 0 0 222 0
+			2 &tlmm 96 0>;
+	interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+	cd-gpios = <&tlmm 96 0x1>;
 
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
index 6cbfb57..8b79d8b 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
@@ -746,6 +746,7 @@
 		arm,primecell-periphid = <0x0003b968>;
 		reg = <0x6860000 0x1000>;
 		reg-names = "tpdm-base";
+		status = "disabled";
 
 		coresight-name = "coresight-tpdm-turing";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ext-cdc-usbc-audio.dtsi
similarity index 71%
copy from arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi
copy to arch/arm64/boot/dts/qcom/sdm670-ext-cdc-usbc-audio.dtsi
index bc431f2..cd113b3 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-ext-cdc-usbc-audio.dtsi
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,4 +10,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-#include "sdm670-audio-overlay.dtsi"
+
+&tavil_snd {
+	qcom,msm-mbhc-usbc-audio-supported = <1>;
+	qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
index 7718bca..89dee0c 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
@@ -64,7 +64,7 @@
 
 		qcom,highest-bank-bit = <14>;
 
-		qcom,min-access-length = <64>;
+		qcom,min-access-length = <32>;
 
 		qcom,ubwc-mode = <2>;
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
similarity index 82%
rename from arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi
rename to arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
index bc431f2..df10e7d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
@@ -10,3 +10,8 @@
  * GNU General Public License for more details.
  */
 #include "sdm670-audio-overlay.dtsi"
+
+&int_codec {
+	qcom,msm-mbhc-usbc-audio-supported = <1>;
+	qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-mtp-overlay.dts
index 65c16c1..ac254fd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp-overlay.dts
@@ -20,6 +20,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-mtp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-mtp.dts
index f12be2ae..1241a20 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dts
@@ -15,7 +15,7 @@
 
 #include "sdm670.dtsi"
 #include "sdm670-mtp.dtsi"
-#include "sdm670-int-codec-audio-overlay.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index fa76a2e..466062b 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -29,6 +29,7 @@
 	vdd-hba-supply = <&ufs_phy_gdsc>;
 	vdd-hba-fixed-regulator;
 	vcc-supply = <&pm660l_l4>;
+	vcc-voltage-level = <2960000 2960000>;
 	vccq2-supply = <&pm660_l8>;
 	vcc-max-microamp = <600000>;
 	vccq2-max-microamp = <600000>;
@@ -91,8 +92,19 @@
 	qcom,vdd-io-current-level = <200 22000>;
 
 	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 0 204 0
+			1 &intc 0 0 222 0
+			2 &tlmm 96 0>;
+	interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+	cd-gpios = <&tlmm 96 0x1>;
 
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index e747185..177813f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1258,6 +1258,32 @@
 			};
 		};
 
+		sdc2_cd_on: cd_on {
+			mux {
+				pins = "gpio96";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio96";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		sdc2_cd_off: cd_off {
+			mux {
+				pins = "gpio96";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio96";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
 		/* USB C analog configuration */
 		wcd_usbc_analog_en1 {
 			wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
index b3d2357..5b67765 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
@@ -21,6 +21,7 @@
 
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
index 95642f9..26f5e78 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
@@ -16,7 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
-#include "sdm670-int-codec-audio-overlay.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp-overlay.dts
index ff3270d..1550661 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp-overlay.dts
@@ -21,6 +21,7 @@
 
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp.dts
index 5592ce4..14f48a0 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp.dts
@@ -16,7 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
-#include "sdm670-int-codec-audio-overlay.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dts b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
index e2a2df7..e137705 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
@@ -16,7 +16,7 @@
 
 #include "sdm670.dtsi"
 #include "sdm670-rumi.dtsi"
-#include "sdm670-int-codec-audio-overlay.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 RUMI";
 	compatible = "qcom,sdm670-rumi", "qcom,sdm670", "qcom,rumi";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
index 4ba16e4..a50d9b6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
@@ -188,3 +188,31 @@
 &usb_qmp_dp_phy {
 	status = "disabled";
 };
+
+&mdss_mdp {
+	status = "disabled";
+};
+
+&sde_rscc {
+	status = "disabled";
+};
+
+&mdss_rotator {
+	status = "disabled";
+};
+
+&mdss_dsi0 {
+	status = "disabled";
+};
+
+&mdss_dsi1 {
+	status = "disabled";
+};
+
+&mdss_dsi_phy0 {
+	status = "disabled";
+};
+
+&mdss_dsi_phy1 {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp-overlay.dts
index 190b32f..e4e1db5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp-overlay.dts
@@ -20,6 +20,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-cdp.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L, USB-C Audio, CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp.dts
index 45ff83f..80a8423 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp.dts
@@ -15,7 +15,7 @@
 
 #include "sdm670.dtsi"
 #include "sdm670-cdp.dtsi"
-
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L, USB-C Audio, CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp-overlay.dts
index 226c2ae..c5bab55 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp-overlay.dts
@@ -21,6 +21,7 @@
 
 #include "sdm670-cdp.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660+PM660L, USB-C Audio, Ext. Audio Codec CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp.dts
index 8928f80..2c53334 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp.dts
@@ -16,6 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-cdp.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM 670 PM660+PM660L, USB-C Audio, Ext. Audio Codec CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp-overlay.dts
index 78d1dfa..09ba184 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp-overlay.dts
@@ -21,7 +21,7 @@
 
 #include "sdm670-mtp.dtsi"
 #include "sdm670-external-codec.dtsi"
-
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660+PM660L, USB-C Audio, Ext. Audio Codec MTP";
 	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp.dts
index 5628e92..7a19819 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp.dts
@@ -16,6 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-mtp.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM 670 PM660+PM660L, USB-C Audio, Ext. Audio Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp-overlay.dts
index c1891a4..71db0f7 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp-overlay.dts
@@ -22,6 +22,7 @@
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660+PM660A, USB-C Audio, Ext. Audio Codec CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp.dts
index c059113..ff641e6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp.dts
@@ -17,6 +17,7 @@
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM 670 PM660+PM660A, USB-C Audio, Ext. Audio Codec CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp-overlay.dts
index c54e299..c2e6f58 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp-overlay.dts
@@ -22,6 +22,7 @@
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660+PM660A, USB-C Audio, Ext. Audio Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp.dts
index d09b5e5..2cd68f1 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp.dts
@@ -17,6 +17,7 @@
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM 670 PM660+PM660A, USB-C Audio, Ext. Audio Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp-overlay.dts
index d8c4102..3d5c04e 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp-overlay.dts
@@ -20,6 +20,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-mtp.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L, USB-C Audio, MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp.dts
index b9bb3ef..8449625 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp.dts
@@ -15,6 +15,7 @@
 
 #include "sdm670.dtsi"
 #include "sdm670-mtp.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L, USB-C Audio, MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp-overlay.dts
index 95df620..6a26d95 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp-overlay.dts
@@ -21,6 +21,7 @@
 
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A, USB-C Audio, CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp.dts
index 5e33308..1871b45 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp.dts
@@ -16,6 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A, USB-C Audio, CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp-overlay.dts
index 226a46b..d565cdd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp-overlay.dts
@@ -21,6 +21,7 @@
 
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A, USB-C Audio, MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp.dts
index ca99736..b288569 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp.dts
@@ -16,6 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A, USB-C Audio, MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index f5d9319..9aafe03 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -23,6 +23,8 @@
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 #include <dt-bindings/clock/qcom,aop-qmp.h>
 
+#define MHZ_TO_MBPS(mhz, w) ((mhz * 1000000 * w) / (1024 * 1024))
+
 / {
 	model = "Qualcomm Technologies, Inc. SDM670";
 	compatible = "qcom,sdm670";
@@ -40,6 +42,10 @@
 		hsuart0 = &qupv3_se6_4uart;
 	};
 
+	chosen {
+		bootargs = "rcupdate.rcu_expedited=1 core_ctl_disable_cpumask=6-7";
+	};
+
 	cpus {
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -536,24 +542,6 @@
 			reg = <0 0x93d00000 0 0x1e00000>;
 		};
 
-		pil_ipa_fw_mem: pil_ipa_fw_region@95b00000 {
-			compatible = "removed-dma-pool";
-			no-map;
-			reg = <0 0x95b00000 0 0x10000>;
-		};
-
-		pil_ipa_gsi_mem: pil_ipa_gsi_region@95b10000 {
-			compatible = "removed-dma-pool";
-			no-map;
-			reg = <0 0x95b10000 0 0x5000>;
-		};
-
-		pil_gpu_mem: pil_gpu_region@95b15000 {
-			compatible = "removed-dma-pool";
-			no-map;
-			reg = <0 0x95b15000 0 0x1000>;
-		};
-
 		adsp_mem: adsp_region {
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0 0x00000000 0 0xffffffff>;
@@ -805,6 +793,7 @@
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pm660l_s3_level>;
 		vdd_mx-supply = <&pm660l_s1_level>;
+		qcom,gpu_cc_gmu_clk_src-opp-handle = <&gmu>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -814,6 +803,7 @@
 		reg = <0x5090000 0x9000>;
 		reg-names = "cc_base";
 		vdd_gfx-supply = <&pm660l_s2_level>;
+		qcom,gpu_cc_gx_gfx3d_clk_src-opp-handle = <&msm_gpu>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -1947,6 +1937,46 @@
 
 		qcom,devfreq,freq-table = <50000000 200000000>;
 
+		qcom,msm-bus,name = "sdhc1";
+		qcom,msm-bus,num-cases = <9>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+			/* No vote */
+			<78 512 0 0>, <1 606 0 0>,
+			/* 400 KB/s*/
+			<78 512 1046 1600>,
+			<1 606 1600 1600>,
+			/* 20 MB/s */
+			<78 512 52286 80000>,
+			<1 606 80000 80000>,
+			/* 25 MB/s */
+			<78 512 65360 100000>,
+			<1 606 100000 100000>,
+			/* 50 MB/s */
+			<78 512 130718 200000>,
+			<1 606 133320 133320>,
+			/* 100 MB/s */
+			<78 512 130718 200000>,
+			<1 606 150000 150000>,
+			/* 200 MB/s */
+			<78 512 261438 400000>,
+			<1 606 300000 300000>,
+			/* 400 MB/s */
+			<78 512 261438 400000>,
+			<1 606 300000 300000>,
+			/* Max. bandwidth */
+			<78 512 1338562 4096000>,
+			<1 606 1338562 4096000>;
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+			100000000 200000000 400000000 4294967295>;
+
+		/* PM QoS */
+		qcom,pm-qos-irq-type = "affine_irq";
+		qcom,pm-qos-irq-latency = <70 70>;
+		qcom,pm-qos-cpu-groups = <0x3f 0xc0>;
+		qcom,pm-qos-cmdq-latency-us = <70 70>, <70 70>;
+		qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>;
+
 		clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>,
 			<&clock_gcc GCC_SDCC1_APPS_CLK>;
 		clock-names = "iface_clk", "core_clk";
@@ -1974,6 +2004,43 @@
 				      "SDR104";
 
 		qcom,devfreq,freq-table = <50000000 201500000>;
+
+		qcom,msm-bus,name = "sdhc2";
+		qcom,msm-bus,num-cases = <8>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+			/* No vote */
+			<81 512 0 0>, <1 608 0 0>,
+			/* 400 KB/s*/
+			<81 512 1046 1600>,
+			<1 608 1600 1600>,
+			/* 20 MB/s */
+			<81 512 52286 80000>,
+			<1 608 80000 80000>,
+			/* 25 MB/s */
+			<81 512 65360 100000>,
+			<1 608 100000 100000>,
+			/* 50 MB/s */
+			<81 512 130718 200000>,
+			<1 608 133320 133320>,
+			/* 100 MB/s */
+			<81 512 261438 200000>,
+			<1 608 150000 150000>,
+			/* 200 MB/s */
+			<81 512 261438 400000>,
+			<1 608 300000 300000>,
+			/* Max. bandwidth */
+			<81 512 1338562 4096000>,
+			<1 608 1338562 4096000>;
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+			100000000 200000000 4294967295>;
+
+		/* PM QoS */
+		qcom,pm-qos-irq-type = "affine_irq";
+		qcom,pm-qos-irq-latency = <70 70>;
+		qcom,pm-qos-cpu-groups = <0x3f 0xc0>;
+		qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>;
+
 		clocks = <&clock_gcc GCC_SDCC2_AHB_CLK>,
 			<&clock_gcc GCC_SDCC2_APPS_CLK>;
 		clock-names = "iface_clk", "core_clk";
@@ -2091,12 +2158,12 @@
 			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_LLCC>;
 		qcom,active-only;
 		qcom,bw-tbl =
-			<  1144 /* 150 MHz */ >,
-			<  2288 /* 300 MHz */ >,
-			<  3555 /* 466 MHz */ >,
-			<  4577 /* 600 MHz */ >,
-			<  6149 /* 806 MHz */ >,
-			<  7118 /* 933 MHz */ >;
+			< MHZ_TO_MBPS(150, 16) >, /*  2288 MB/s */
+			< MHZ_TO_MBPS(300, 16) >, /*  4577 MB/s */
+			< MHZ_TO_MBPS(466, 16) >, /*  7110 MB/s */
+			< MHZ_TO_MBPS(600, 16) >, /*  9155 MB/s */
+			< MHZ_TO_MBPS(806, 16) >, /* 12298 MB/s */
+			< MHZ_TO_MBPS(933, 16) >; /* 14236 MB/s */
 	};
 
 	bwmon: qcom,cpu-bwmon {
@@ -2116,17 +2183,17 @@
 			<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0>;
 		qcom,active-only;
 		qcom,bw-tbl =
-			<  381 /*  100 MHz */ >,
-			<  762 /*  200 MHz */ >,
-			< 1144 /*  300 MHz */ >,
-			< 1720 /*  451 MHz */ >,
-			< 2086 /*  547 MHz */ >,
-			< 2597 /*  681 MHz */ >,
-			< 2929 /*  768 MHz */ >,
-			< 3879 /* 1017 MHz */ >,
-			< 5161 /* 1353 MHz */ >,
-			< 5931 /* 1555 MHz */ >,
-			< 6881 /* 1804 MHz */ >;
+			< MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+			< MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+			< MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+			< MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+			< MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+			< MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+			< MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+			< MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+			< MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+			< MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+			< MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
 	};
 
 	llcc_bwmon: qcom,llcc-bwmon {
@@ -2147,17 +2214,17 @@
 		qcom,src-dst-ports = <1 512>;
 		qcom,active-only;
 		qcom,bw-tbl =
-			<  381 /*  100 MHz */ >,
-			<  762 /*  200 MHz */ >,
-			< 1144 /*  300 MHz */ >,
-			< 1720 /*  451 MHz */ >,
-			< 2086 /*  547 MHz */ >,
-			< 2597 /*  681 MHz */ >,
-			< 2929 /*  768 MHz */ >,
-			< 3879 /* 1017 MHz */ >,
-			< 5161 /* 1353 MHz */ >,
-			< 5931 /* 1555 MHz */ >,
-			< 6881 /* 1804 MHz */ >;
+			< MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+			< MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+			< MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+			< MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+			< MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+			< MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+			< MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+			< MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+			< MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+			< MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+			< MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
 	};
 
 	memlat_cpu4: qcom,memlat-cpu4 {
@@ -2167,17 +2234,17 @@
 		qcom,active-only;
 		status = "ok";
 		qcom,bw-tbl =
-			<  381 /*  100 MHz */ >,
-			<  762 /*  200 MHz */ >,
-			< 1144 /*  300 MHz */ >,
-			< 1720 /*  451 MHz */ >,
-			< 2086 /*  547 MHz */ >,
-			< 2597 /*  681 MHz */ >,
-			< 2929 /*  768 MHz */ >,
-			< 3879 /* 1017 MHz */ >,
-			< 5161 /* 1353 MHz */ >,
-			< 5931 /* 1555 MHz */ >,
-			< 6881 /* 1804 MHz */ >;
+			< MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+			< MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+			< MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+			< MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+			< MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+			< MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+			< MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+			< MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+			< MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+			< MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+			< MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
 	};
 
 	devfreq_memlat_0: qcom,cpu0-memlat-mon {
@@ -2186,11 +2253,11 @@
 		qcom,target-dev = <&memlat_cpu0>;
 		qcom,cachemiss-ev = <0x24>;
 		qcom,core-dev-table =
-			<  748800 1144 >,
-			<  998400 1720 >,
-			< 1209600 2086 >,
-			< 1497600 2929 >,
-			< 1728000 3879 >;
+			<  748800 MHZ_TO_MBPS( 300, 4) >,
+			<  998400 MHZ_TO_MBPS( 451, 4) >,
+			< 1209600 MHZ_TO_MBPS( 547, 4) >,
+			< 1497600 MHZ_TO_MBPS( 768, 4) >,
+			< 1728000 MHZ_TO_MBPS(1017, 4) >;
 	};
 
 	devfreq_memlat_4: qcom,cpu4-memlat-mon {
@@ -2199,11 +2266,11 @@
 		qcom,target-dev = <&memlat_cpu4>;
 		qcom,cachemiss-ev = <0x24>;
 		qcom,core-dev-table =
-			<  787200 1144 >,
-			< 1113600 2086 >,
-			< 1344000 3879 >,
-			< 1900800 5931 >,
-			< 2438400 6881 >;
+			<  787200 MHZ_TO_MBPS( 300, 4) >,
+			< 1113600 MHZ_TO_MBPS( 547, 4) >,
+			< 1344000 MHZ_TO_MBPS(1017, 4) >,
+			< 1900800 MHZ_TO_MBPS(1555, 4) >,
+			< 2438400 MHZ_TO_MBPS(1804, 4) >;
 	};
 
 	l3_cpu0: qcom,l3-cpu0 {
@@ -2253,33 +2320,33 @@
 		qcom,src-dst-ports = <1 512>;
 		qcom,active-only;
 		qcom,bw-tbl =
-			<  381 /*  100 MHz */ >,
-			<  762 /*  200 MHz */ >,
-			< 1144 /*  300 MHz */ >,
-			< 1720 /*  451 MHz */ >,
-			< 2086 /*  547 MHz */ >,
-			< 2597 /*  681 MHz */ >,
-			< 2929 /*  768 MHz */ >,
-			< 3879 /* 1017 MHz */ >,
-			< 5161 /* 1353 MHz */ >,
-			< 5931 /* 1555 MHz */ >,
-			< 6881 /* 1804 MHz */ >;
+			< MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+			< MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+			< MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+			< MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+			< MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+			< MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+			< MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+			< MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+			< MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+			< MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+			< MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
 	};
 
 	devfreq-cpufreq {
 		mincpubw-cpufreq {
 			target-dev = <&mincpubw>;
 			cpu-to-dev-map-0 =
-				<  748800 1144 >,
-				< 1209600 1720 >,
-				< 1612000 2086 >,
-				< 1728000 2929 >;
+				<  748800 MHZ_TO_MBPS( 300, 4) >,
+				< 1209600 MHZ_TO_MBPS( 451, 4) >,
+				< 1612000 MHZ_TO_MBPS( 547, 4) >,
+				< 1728000 MHZ_TO_MBPS( 768, 4) >;
 			cpu-to-dev-map-4 =
-				< 1113600 1144 >,
-				< 1344000 2086 >,
-				< 1728000 2929 >,
-				< 1900800 3879 >,
-				< 2438400 6881 >;
+				< 1113600 MHZ_TO_MBPS( 300, 4) >,
+				< 1344000 MHZ_TO_MBPS( 547, 4) >,
+				< 1728000 MHZ_TO_MBPS( 768, 4) >,
+				< 1900800 MHZ_TO_MBPS(1017, 4) >,
+				< 2438400 MHZ_TO_MBPS(1804, 4) >;
 		};
 	};
 
@@ -2391,6 +2458,38 @@
 	status = "ok";
 };
 
+&mdss_dsi0 {
+	qcom,core-supply-entries {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,core-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "refgen";
+			qcom,supply-min-voltage = <0>;
+			qcom,supply-max-voltage = <0>;
+			qcom,supply-enable-load = <0>;
+			qcom,supply-disable-load = <0>;
+		};
+	};
+};
+
+&mdss_dsi1 {
+	qcom,core-supply-entries {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,core-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "refgen";
+			qcom,supply-min-voltage = <0>;
+			qcom,supply-max-voltage = <0>;
+			qcom,supply-enable-load = <0>;
+			qcom,supply-disable-load = <0>;
+		};
+	};
+};
+
 #include "sdm670-audio.dtsi"
 #include "sdm670-usb.dtsi"
 #include "sdm670-gpu.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 8df879a..e4f768f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -325,18 +325,8 @@
 
 		msm_cam_smmu_secure {
 			compatible = "qcom,msm-cam-smmu-cb";
-			iommus = <&apps_smmu 0x1001 0x0>;
 			label = "cam-secure";
-			cam_secure_iova_mem_map: iova-mem-map {
-				/* Secure IO region is approximately 3.4 GB */
-				iova-mem-region-io {
-					iova-region-name = "io";
-					iova-region-start = <0x7400000>;
-					iova-region-len = <0xd8c00000>;
-					iova-region-id = <0x3>;
-					status = "ok";
-				};
-			};
+			qcom,secure-cb;
 		};
 
 		msm_cam_smmu_fd {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index fe19658..81ce1e5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -121,45 +121,6 @@
 	status = "ok";
 };
 
-&extcon_storage_cd {
-	gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
-	debounce-ms = <200>;
-	irq-flags = <IRQ_TYPE_EDGE_BOTH>;
-
-	pinctrl-names = "default";
-	pinctrl-0 = <&storage_cd>;
-
-	status = "ok";
-};
-
-&ufsphy_card {
-	compatible = "qcom,ufs-phy-qmp-v3";
-
-	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
-	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
-	vdda-phy-max-microamp = <62900>;
-	vdda-pll-max-microamp = <18300>;
-
-	status = "ok";
-};
-
-&ufshc_card {
-	vdd-hba-supply = <&ufs_card_gdsc>;
-	vdd-hba-fixed-regulator;
-	vcc-supply = <&pm8998_l21>;
-	vcc-voltage-level = <2950000 2960000>;
-	vccq2-supply = <&pm8998_s4>;
-	vcc-max-microamp = <300000>;
-	vccq2-max-microamp = <300000>;
-
-	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
-	qcom,vddp-ref-clk-max-microamp = <100>;
-
-	extcon = <&extcon_storage_cd>;
-
-	status = "ok";
-};
-
 &sdhc_2 {
 	vdd-supply = <&pm8998_l21>;
 	qcom,vdd-voltage-level = <2950000 2960000>;
@@ -170,10 +131,10 @@
 	qcom,vdd-io-current-level = <200 22000>;
 
 	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
 
-	extcon = <&extcon_storage_cd>;
+	cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
 
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index ec0b328..a61d96e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -1556,7 +1556,7 @@
 		reg = <0x69e1000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti0-ddr0";
+		coresight-name = "coresight-cti-DDR_DL_0_CTI";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1568,7 +1568,7 @@
 		reg = <0x69e4000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti0-ddr1";
+		coresight-name = "coresight-cti-DDR_DL_1_CTI0";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1580,7 +1580,7 @@
 		reg = <0x69e5000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti1-ddr1";
+		coresight-name = "coresight-cti-DDR_DL_1_CTI1";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1592,7 +1592,7 @@
 		reg = <0x6c09000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti0-dlmm";
+		coresight-name = "coresight-cti-DLMM_CTI0";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1604,7 +1604,43 @@
 		reg = <0x6c0a000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti1-dlmm";
+		coresight-name = "coresight-cti-DLMM_CTI1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti0_apss: cti@78e0000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x78e0000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-APSS_CTI0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti1_apss: cti@78f0000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x78f0000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-APSS_CTI1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti2_apss: cti@7900000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x7900000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-APSS_CTI2";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1932,7 +1968,7 @@
 		reg = <0x6b04000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti0-swao";
+		coresight-name = "coresight-cti-SWAO_CTI0";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index f800b4e..11b6a4d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -329,9 +329,10 @@
 
 			compatible = "qcom,gmu-pwrlevels";
 
+			/* GMU power levels must go from lowest to highest */
 			qcom,gmu-pwrlevel@0 {
 				reg = <0>;
-				qcom,gmu-freq = <400000000>;
+				qcom,gmu-freq = <0>;
 			};
 
 			qcom,gmu-pwrlevel@1 {
@@ -341,7 +342,7 @@
 
 			qcom,gmu-pwrlevel@2 {
 				reg = <2>;
-				qcom,gmu-freq = <0>;
+				qcom,gmu-freq = <400000000>;
 			};
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
index 5578ece..73cd794 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
@@ -84,17 +84,6 @@
 	/delete-property/ qcom,vddp-ref-clk-supply;
 };
 
-&ufsphy_card {
-	/delete-property/ vdda-phy-supply;
-	/delete-property/ vdda-pll-supply;
-};
-
-&ufshc_card {
-	/delete-property/ vcc-supply;
-	/delete-property/ vccq2-supply;
-	/delete-property/ qcom,vddp-ref-clk-supply;
-};
-
 &sdhc_2 {
 	/delete-property/ vdd-supply;
 	/delete-property/ vdd-io-supply;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 3756197..f0d16ec 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -209,45 +209,6 @@
 	status = "ok";
 };
 
-&extcon_storage_cd {
-	gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
-	debounce-ms = <200>;
-	irq-flags = <IRQ_TYPE_EDGE_BOTH>;
-
-	pinctrl-names = "default";
-	pinctrl-0 = <&storage_cd>;
-
-	status = "ok";
-};
-
-&ufsphy_card {
-	compatible = "qcom,ufs-phy-qmp-v3";
-
-	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
-	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
-	vdda-phy-max-microamp = <62900>;
-	vdda-pll-max-microamp = <18300>;
-
-	status = "ok";
-};
-
-&ufshc_card {
-	vdd-hba-supply = <&ufs_card_gdsc>;
-	vdd-hba-fixed-regulator;
-	vcc-supply = <&pm8998_l21>;
-	vcc-voltage-level = <2950000 2960000>;
-	vccq2-supply = <&pm8998_s4>;
-	vcc-max-microamp = <300000>;
-	vccq2-max-microamp = <300000>;
-
-	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
-	qcom,vddp-ref-clk-max-microamp = <100>;
-
-	extcon = <&extcon_storage_cd>;
-
-	status = "ok";
-};
-
 &sdhc_2 {
 	vdd-supply = <&pm8998_l21>;
 	qcom,vdd-voltage-level = <2950000 2960000>;
@@ -258,10 +219,10 @@
 	qcom,vdd-io-current-level = <200 22000>;
 
 	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
 
-	extcon = <&extcon_storage_cd>;
+	cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
 
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index be76635..f73bf3a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -210,7 +210,7 @@
 				config {
 					pins = "gpio37";
 					drive-strength = <2>;
-					bias-pull-down;
+					bias-pull-up;
 				};
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 02f30fd..6dae069 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -126,45 +126,6 @@
 	status = "ok";
 };
 
-&extcon_storage_cd {
-	gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
-	debounce-ms = <200>;
-	irq-flags = <IRQ_TYPE_EDGE_BOTH>;
-
-	pinctrl-names = "default";
-	pinctrl-0 = <&storage_cd>;
-
-	status = "ok";
-};
-
-&ufsphy_card {
-	compatible = "qcom,ufs-phy-qmp-v3";
-
-	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
-	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
-	vdda-phy-max-microamp = <62900>;
-	vdda-pll-max-microamp = <18300>;
-
-	status = "ok";
-};
-
-&ufshc_card {
-	vdd-hba-supply = <&ufs_card_gdsc>;
-	vdd-hba-fixed-regulator;
-	vcc-supply = <&pm8998_l21>;
-	vcc-voltage-level = <2950000 2960000>;
-	vccq2-supply = <&pm8998_s4>;
-	vcc-max-microamp = <300000>;
-	vccq2-max-microamp = <300000>;
-
-	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
-	qcom,vddp-ref-clk-max-microamp = <100>;
-
-	extcon = <&extcon_storage_cd>;
-
-	status = "ok";
-};
-
 &sdhc_2 {
 	vdd-supply = <&pm8998_l21>;
 	qcom,vdd-voltage-level = <2950000 2960000>;
@@ -175,10 +136,10 @@
 	qcom,vdd-io-current-level = <200 22000>;
 
 	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
 
-	extcon = <&extcon_storage_cd>;
+	cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
 
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index 1fa6e26..a805e2e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -221,6 +221,9 @@
 		interrupts = <GIC_SPI 601 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 0 1 64 0>,
+			<&gpi_dma0 1 0 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -240,6 +243,9 @@
 		interrupts = <GIC_SPI 602 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 1 1 64 0>,
+			<&gpi_dma0 1 1 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -259,6 +265,9 @@
 		interrupts = <GIC_SPI 603 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 2 1 64 0>,
+			<&gpi_dma0 1 2 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -278,6 +287,9 @@
 		interrupts = <GIC_SPI 604 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 3 1 64 0>,
+			<&gpi_dma0 1 3 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -297,6 +309,9 @@
 		interrupts = <GIC_SPI 605 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 4 1 64 0>,
+			<&gpi_dma0 1 4 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -316,6 +331,9 @@
 		interrupts = <GIC_SPI 606 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 5 1 64 0>,
+			<&gpi_dma0 1 5 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -335,6 +353,9 @@
 		interrupts = <GIC_SPI 607 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 6 1 64 0>,
+			<&gpi_dma0 1 6 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -354,6 +375,9 @@
 		interrupts = <GIC_SPI 608 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 7 1 64 0>,
+			<&gpi_dma0 1 7 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -561,6 +585,9 @@
 		interrupts = <GIC_SPI 353 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 0 1 64 0>,
+			<&gpi_dma1 1 0 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -580,6 +607,9 @@
 		interrupts = <GIC_SPI 354 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 1 1 64 0>,
+			<&gpi_dma1 1 1 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -599,6 +629,9 @@
 		interrupts = <GIC_SPI 355 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 2 1 64 0>,
+			<&gpi_dma1 1 2 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -618,6 +651,9 @@
 		interrupts = <GIC_SPI 356 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 3 1 64 0>,
+			<&gpi_dma1 1 3 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -637,6 +673,9 @@
 		interrupts = <GIC_SPI 357 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 4 1 64 0>,
+			<&gpi_dma1 1 4 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -656,6 +695,9 @@
 		interrupts = <GIC_SPI 358 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 5 1 64 0>,
+			<&gpi_dma1 1 5 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -675,6 +717,9 @@
 		interrupts = <GIC_SPI 359 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 6 1 64 0>,
+			<&gpi_dma1 1 6 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -694,6 +739,9 @@
 		interrupts = <GIC_SPI 360 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 7 1 64 0>,
+			<&gpi_dma1 1 7 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index 7bc4d89..d89722f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -12,11 +12,69 @@
 
 #include "sdm845-pmic-overlay.dtsi"
 #include "sdm845-pinctrl-overlay.dtsi"
+#include "smb1355.dtsi"
 
 &pmi8998_pdphy {
 	vbus-supply = <&smb2_vbus>;
 };
 
+&pmi8998_fg {
+	qcom,fg-bmd-en-delay-ms = <300>;
+};
+
+&qupv3_se10_i2c {
+	status = "ok";
+};
+
+&smb1355_charger_0 {
+	status = "ok";
+};
+
+&smb1355_charger_1 {
+	status = "ok";
+};
+
+&soc {
+	qcom,qbt1000 {
+		status = "disabled";
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_vol_up_default
+			     &key_home_default>;
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		home {
+			label = "home";
+			gpios = <&pm8998_gpios 5 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <102>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
+
+&pmi8998_haptics {
+	qcom,vmax-mv = <1800>;
+	qcom,wave-play-rate-us = <4255>;
+	qcom,lra-auto-mode;
+	status = "okay";
+};
+
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
@@ -41,3 +99,21 @@
 
 	status = "ok";
 };
+
+&sdhc_2 {
+	vdd-supply = <&pm8998_l21>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8998_l13>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &storage_cd>;
+
+	cd-gpios = <&tlmm 126 GPIO_ACTIVE_HIGH>;
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
index 99004bf..d607f75 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
@@ -140,38 +140,6 @@
 	qcom,cpr-ignore-invalid-fuses;
 };
 
-&ufsphy_card {
-	compatible = "qcom,ufs-phy-qrbtc-sdm845";
-
-	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
-	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
-	vdda-phy-max-microamp = <62900>;
-	vdda-pll-max-microamp = <18300>;
-
-	status = "ok";
-};
-
-&ufshc_card {
-	limit-tx-hs-gear = <1>;
-	limit-rx-hs-gear = <1>;
-
-	vdd-hba-supply = <&ufs_card_gdsc>;
-	vdd-hba-fixed-regulator;
-	vcc-supply = <&pm8998_l21>;
-	vcc-voltage-level = <2950000 2960000>;
-	vccq2-supply = <&pm8998_s4>;
-	vcc-max-microamp = <300000>;
-	vccq2-max-microamp = <300000>;
-
-	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
-	qcom,vddp-ref-clk-max-microamp = <100>;
-
-	qcom,disable-lpm;
-	rpm-level = <0>;
-	spm-level = <0>;
-	status = "ok";
-};
-
 &pmi8998_charger {
 	qcom,suspend-input;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 04edfa9..4337da7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -435,6 +435,11 @@
 &dsi_dual_nt35597_truly_video {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
+	qcom,mdss-dsi-min-refresh-rate = <53>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -478,6 +483,11 @@
 &dsi_nt35597_truly_dsc_video {
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
+	qcom,mdss-dsi-min-refresh-rate = <53>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 3f255ac..e7a946c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -46,6 +46,8 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
+		#power-domain-cells = <0>;
+
 		/* hw blocks */
 		qcom,sde-off = <0x1000>;
 		qcom,sde-len = <0x45C>;
@@ -137,8 +139,8 @@
 		qcom,sde-has-dest-scaler;
 		qcom,sde-max-dest-scaler-input-linewidth = <2048>;
 		qcom,sde-max-dest-scaler-output-linewidth = <2560>;
-		qcom,sde-max-bw-low-kbps = <9600000>;
-		qcom,sde-max-bw-high-kbps = <9600000>;
+		qcom,sde-max-bw-low-kbps = <6800000>;
+		qcom,sde-max-bw-high-kbps = <6800000>;
 		qcom,sde-min-core-ib-kbps = <2400000>;
 		qcom,sde-min-llcc-ib-kbps = <800000>;
 		qcom,sde-min-dram-ib-kbps = <800000>;
@@ -209,6 +211,7 @@
 			qcom,sde-dspp-gamut = <0x1000 0x00040000>;
 			qcom,sde-dspp-pcc = <0x1700 0x00040000>;
 			qcom,sde-dspp-gc = <0x17c0 0x00010008>;
+			qcom,sde-dspp-hist = <0x800 0x00010007>;
 		};
 
 		qcom,platform-supply-entries {
@@ -365,6 +368,8 @@
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
 
+		power-domains = <&mdss_mdp>;
+
 		/* Offline rotator QoS setting */
 		qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>;
 		qcom,mdss-rot-vbif-memtype = <3 3>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 16d0988..09f4efa 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -78,6 +78,7 @@
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
+			usb-core-id = <0>;
 		};
 
 		qcom,usbbam@a704000 {
@@ -116,8 +117,9 @@
 	qusb_phy0: qusb@88e2000 {
 		compatible = "qcom,qusb2phy-v2";
 		reg = <0x088e2000 0x400>,
-			<0x007801e8 0x4>;
-		reg-names = "qusb_phy_base", "efuse_addr";
+			<0x007801e8 0x4>,
+			<0x088e0000 0x2000>;
+		reg-names = "qusb_phy_base", "efuse_addr", "eud_base";
 
 		qcom,efuse-bit-pos = <25>;
 		qcom,efuse-num-bits = <3>;
@@ -385,6 +387,7 @@
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
+			usb-core-id = <1>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index c42a7be..c070ed6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -243,18 +243,8 @@
 
 		msm_cam_smmu_secure {
 			compatible = "qcom,msm-cam-smmu-cb";
-			iommus = <&apps_smmu 0x1001 0x0>;
 			label = "cam-secure";
-			cam_secure_iova_mem_map: iova-mem-map {
-				/* Secure IO region is approximately 3.4 GB */
-				iova-mem-region-io {
-					iova-region-name = "io";
-					iova-region-start = <0x7400000>;
-					iova-region-len = <0xd8c00000>;
-					iova-region-id = <0x3>;
-					status = "ok";
-				};
-			};
+			qcom,secure-cb;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index 4dd09a7..944ab24 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -1394,7 +1394,7 @@
 &msm_gpu {
 	/* Updated chip ID */
 	qcom,chipid = <0x06030001>;
-	qcom,initial-pwrlevel = <5>;
+	qcom,initial-pwrlevel = <6>;
 
 	qcom,gpu-pwrlevels {
 		#address-cells = <1>;
@@ -1404,54 +1404,62 @@
 
 		qcom,gpu-pwrlevel@0 {
 			reg = <0>;
+			qcom,gpu-freq = <710000000>;
+			qcom,bus-freq = <12>;
+			qcom,bus-min = <12>;
+			qcom,bus-max = <12>;
+		};
+
+		qcom,gpu-pwrlevel@1 {
+			reg = <1>;
 			qcom,gpu-freq = <675000000>;
 			qcom,bus-freq = <12>;
 			qcom,bus-min = <10>;
 			qcom,bus-max = <12>;
 		};
 
-		qcom,gpu-pwrlevel@1 {
-			reg = <1>;
+		qcom,gpu-pwrlevel@2 {
+			reg = <2>;
 			qcom,gpu-freq = <596000000>;
 			qcom,bus-freq = <10>;
 			qcom,bus-min = <9>;
-			qcom,bus-max = <11>;
+			qcom,bus-max = <12>;
 		};
 
-		qcom,gpu-pwrlevel@2 {
-			reg = <2>;
+		qcom,gpu-pwrlevel@3 {
+			reg = <3>;
 			qcom,gpu-freq = <520000000>;
 			qcom,bus-freq = <9>;
 			qcom,bus-min = <8>;
 			qcom,bus-max = <10>;
 		};
 
-		qcom,gpu-pwrlevel@3 {
-			reg = <3>;
+		qcom,gpu-pwrlevel@4 {
+			reg = <4>;
 			qcom,gpu-freq = <414000000>;
 			qcom,bus-freq = <8>;
 			qcom,bus-min = <7>;
 			qcom,bus-max = <9>;
 		};
 
-		qcom,gpu-pwrlevel@4 {
-			reg = <4>;
+		qcom,gpu-pwrlevel@5 {
+			reg = <5>;
 			qcom,gpu-freq = <342000000>;
 			qcom,bus-freq = <6>;
 			qcom,bus-min = <5>;
 			qcom,bus-max = <7>;
 		};
 
-		qcom,gpu-pwrlevel@5 {
-			reg = <5>;
+		qcom,gpu-pwrlevel@6 {
+			reg = <6>;
 			qcom,gpu-freq = <257000000>;
 			qcom,bus-freq = <4>;
 			qcom,bus-min = <3>;
 			qcom,bus-max = <5>;
 		};
 
-		qcom,gpu-pwrlevel@6 {
-			reg = <6>;
+		qcom,gpu-pwrlevel@7 {
+			reg = <7>;
 			qcom,gpu-freq = <0>;
 			qcom,bus-freq = <0>;
 			qcom,bus-min = <0>;
@@ -1467,9 +1475,10 @@
 
 		compatible = "qcom,gmu-pwrlevels";
 
+		/* GMU power levels must go from lowest to highest */
 		qcom,gmu-pwrlevel@0 {
 			reg = <0>;
-			qcom,gmu-freq = <500000000>;
+			qcom,gmu-freq = <0>;
 		};
 
 		qcom,gmu-pwrlevel@1 {
@@ -1479,7 +1488,30 @@
 
 		qcom,gmu-pwrlevel@2 {
 			reg = <2>;
-			qcom,gmu-freq = <0>;
+			qcom,gmu-freq = <500000000>;
 		};
 	};
 };
+
+&qusb_phy0 {
+		qcom,qusb-phy-init-seq =
+			/* <value reg_offset> */
+			   <0x23 0x210 /* PWR_CTRL1 */
+			    0x03 0x04  /* PLL_ANALOG_CONTROLS_TWO */
+			    0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+			    0x80 0x2c  /* PLL_CMODE */
+			    0x0a 0x184 /* PLL_LOCK_DELAY */
+			    0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+			    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+			    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+			    0x21 0x214 /* PWR_CTRL2 */
+			    0x07 0x220 /* IMP_CTRL1 */
+			    0x58 0x224 /* IMP_CTRL2 */
+			    0x45 0x240 /* TUNE1 */
+			    0x29 0x244 /* TUNE2 */
+			    0xca 0x248 /* TUNE3 */
+			    0x04 0x24c /* TUNE4 */
+			    0x03 0x250 /* TUNE5 */
+			    0x00 0x23c /* CHG_CTRL2 */
+			    0x22 0x210>; /* PWR_CTRL1 */
+};
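/*
 * Illustrative sketch, not part of the patch: one way a PHY driver can
 * consume a <value reg_offset> pair list such as qcom,qusb-phy-init-seq
 * above. The helper name, "np" (the PHY's device_node) and "base" (its
 * mapped register space) are assumptions made for the example only.
 */
#include <linux/io.h>
#include <linux/of.h>

static void qusb_phy_write_init_seq_sketch(struct device_node *np,
					   void __iomem *base)
{
	int i, cnt;
	u32 val, off;

	cnt = of_property_count_u32_elems(np, "qcom,qusb-phy-init-seq");
	if (cnt <= 0 || cnt % 2)
		return;

	for (i = 0; i < cnt; i += 2) {
		of_property_read_u32_index(np, "qcom,qusb-phy-init-seq",
					   i, &val);
		of_property_read_u32_index(np, "qcom,qusb-phy-init-seq",
					   i + 1, &off);
		writel_relaxed(val, base + off);
	}
	wmb();	/* make sure all init writes land before PHY power-on */
}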
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index d85d178..ac2a550 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -36,7 +36,6 @@
 
 	aliases {
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
-		ufshc2 = &ufshc_card; /* Removable UFS slot */
 		pci-domain0 = &pcie0;
 		pci-domain1 = &pcie1;
 		sdhc2 = &sdhc_2; /* SDC2 SD card slot */
@@ -649,6 +648,94 @@
 	ranges = <0 0 0 0xffffffff>;
 	compatible = "simple-bus";
 
+	jtag_mm0: jtagmm@7040000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7040000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU0>;
+	};
+
+	jtag_mm1: jtagmm@7140000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7140000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU1>;
+	};
+
+	jtag_mm2: jtagmm@7240000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7240000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU2>;
+	};
+
+	jtag_mm3: jtagmm@7340000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7340000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU3>;
+	};
+
+	jtag_mm4: jtagmm@7440000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7440000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU4>;
+	};
+
+	jtag_mm5: jtagmm@7540000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7540000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU5>;
+	};
+
+	jtag_mm6: jtagmm@7640000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7640000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU6>;
+	};
+
+	jtag_mm7: jtagmm@7740000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7740000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU7>;
+	};
+
 	intc: interrupt-controller@17a00000 {
 		compatible = "arm,gic-v3";
 		#interrupt-cells = <3>;
@@ -847,7 +934,7 @@
 
 	llccbw: qcom,llccbw {
 		compatible = "qcom,devbw";
-		governor = "powersave";
+		governor = "performance";
 		qcom,src-dst-ports =
 			<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0>;
 		qcom,active-only;
@@ -1546,102 +1633,6 @@
 		status = "disabled";
 	};
 
-	extcon_storage_cd: extcon_storage_cd {
-		compatible = "extcon-gpio";
-		extcon-id = <62>; /* EXTCON_MECHANICAL */
-		status = "disabled";
-	};
-
-	ufsphy_card: ufsphy_card@1da7000 {
-		reg = <0x1da7000 0xda8>; /* PHY regs */
-		reg-names = "phy_mem";
-		#phy-cells = <0>;
-
-		lanes-per-direction = <1>;
-
-		clock-names = "ref_clk_src",
-			"ref_clk",
-			"ref_aux_clk";
-		clocks = <&clock_rpmh RPMH_CXO_CLK>,
-			<&clock_gcc GCC_UFS_CARD_CLKREF_CLK>,
-			<&clock_gcc GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK>;
-
-		status = "disabled";
-	};
-
-	ufshc_card: ufshc_card@1da4000 {
-		compatible = "qcom,ufshc";
-		reg = <0x1da4000 0x2500>;
-		interrupts = <0 125 0>;
-		phys = <&ufsphy_card>;
-		phy-names = "ufsphy";
-
-		lanes-per-direction = <1>;
-		dev-ref-clk-freq = <0>; /* 19.2 MHz */
-
-		clock-names =
-			"core_clk",
-			"bus_aggr_clk",
-			"iface_clk",
-			"core_clk_unipro",
-			"core_clk_ice",
-			"ref_clk",
-			"tx_lane0_sync_clk",
-			"rx_lane0_sync_clk";
-		clocks =
-			<&clock_gcc GCC_UFS_CARD_AXI_HW_CTL_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK>,
-			<&clock_gcc GCC_UFS_CARD_AHB_CLK>,
-			<&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK>,
-			<&clock_gcc GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK>,
-			<&clock_rpmh RPMH_CXO_CLK>,
-			<&clock_gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>,
-			<&clock_gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>;
-		freq-table-hz =
-			<50000000 200000000>,
-			<0 0>,
-			<0 0>,
-			<37500000 150000000>,
-			<75000000 300000000>,
-			<0 0>,
-			<0 0>,
-			<0 0>;
-
-		qcom,msm-bus,name = "ufshc_card";
-		qcom,msm-bus,num-cases = <9>;
-		qcom,msm-bus,num-paths = <2>;
-		qcom,msm-bus,vectors-KBps =
-		<122 512 0 0>, <1 756 0 0>,          /* No vote */
-		<122 512 922 0>, <1 756 1000 0>,     /* PWM G1 */
-		<122 512 127796 0>, <1 756 1000 0>,  /* HS G1 RA */
-		<122 512 255591 0>, <1 756 1000 0>,  /* HS G2 RA */
-		<122 512 2097152 0>, <1 756 102400 0>,  /* HS G3 RA */
-		<122 512 149422 0>, <1 756 1000 0>,  /* HS G1 RB */
-		<122 512 298189 0>, <1 756 1000 0>,  /* HS G2 RB */
-		<122 512 2097152 0>, <1 756 102400 0>,  /* HS G3 RB */
-		<122 512 7643136 0>, <1 756 307200 0>; /* Max. bandwidth */
-		qcom,bus-vector-names = "MIN",
-		"PWM_G1_L1",
-		"HS_RA_G1_L1", "HS_RA_G2_L1", "HS_RA_G3_L1",
-		"HS_RB_G1_L1", "HS_RB_G2_L1", "HS_RB_G3_L1",
-		"MAX";
-
-		/* PM QoS */
-		qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
-		qcom,pm-qos-cpu-group-latency-us = <70 70>;
-		qcom,pm-qos-default-cpu = <0>;
-
-		/*
-		 * Note: this instance doesn't have control over UFS device
-		 * reset
-		 */
-
-		resets = <&clock_gcc GCC_UFS_CARD_BCR>;
-		reset-names = "core_reset";
-
-		status = "disabled";
-	};
-
 	sdhc_2: sdhci@8804000 {
 		compatible = "qcom,sdhci-msm-v5";
 		reg = <0x8804000 0x1000>;
@@ -1890,6 +1881,8 @@
 		interrupts = <GIC_SPI 492 IRQ_TYPE_LEVEL_HIGH>;
 		reg = <0x88e0000 0x2000>;
 		reg-names = "eud_base";
+		clocks = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+		clock-names = "cfg_ahb_clk";
 		status = "ok";
 	};
 
@@ -2298,19 +2291,19 @@
 		};
 
 		LLCC_1: llcc_1_dcache {
-			qcom,dump-size = <0x114100>;
+			qcom,dump-size = <0x1141c0>;
 		};
 
 		LLCC_2: llcc_2_dcache {
-			qcom,dump-size = <0x114100>;
+			qcom,dump-size = <0x1141c0>;
 		};
 
 		LLCC_3: llcc_3_dcache {
-			qcom,dump-size = <0x114100>;
+			qcom,dump-size = <0x1141c0>;
 		};
 
 		LLCC_4: llcc_4_dcache {
-			qcom,dump-size = <0x114100>;
+			qcom,dump-size = <0x1141c0>;
 		};
 	};
 
@@ -2563,7 +2556,7 @@
 	qcom,qsee_ipc_irq_bridge {
 		compatible = "qcom,qsee-ipc-irq-bridge";
 
-		qcom,qsee-ipq-irq-spss {
+		qcom,qsee-ipc-irq-spss {
 			qcom,rx-irq-clr = <0x1888008 0x4>;
 			qcom,rx-irq-clr-mask = <0x1>;
 			qcom,dev-name = "qsee_ipc_irq_spss";
@@ -2969,6 +2962,7 @@
 		compatible = "qcom,pil-tz-generic";
 		qcom,pas-id = <0xf>;
 		qcom,firmware-name = "ipa_fws";
+		qcom,pil-force-shutdown;
 	};
 
 	qcom,chd_sliver {
@@ -3724,6 +3718,10 @@
 			 <&clock_rpmh RPMH_RF_CLK3_A>;
 		clock-names = "rf_clk3_clk", "rf_clk3_pin_clk";
 		qcom,smmu-support;
+		qcom,smmu-mapping = <0x20000000 0xe0000000>;
+		qcom,smmu-s1-en;
+		qcom,smmu-fast-map;
+		qcom,smmu-coherent;
 		qcom,keep-radio-on-during-sleep;
 		status = "disabled";
 	};
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index cc7f169..e05f395 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -7,6 +7,9 @@
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IRQ_TIME_ACCOUNTING=y
 CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_NOCB_CPU=y
@@ -127,6 +130,7 @@
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -202,6 +206,8 @@
 CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_CLS_FW=y
 CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
 CONFIG_NET_EMATCH_NBYTE=y
@@ -230,6 +236,7 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
 CONFIG_MEMORY_STATE_TIME=y
 CONFIG_QPNP_MISC=y
 CONFIG_SCSI=y
@@ -261,9 +268,15 @@
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
 CONFIG_PPPOLAC=y
 CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
 CONFIG_USB_USBNET=y
 CONFIG_WIL6210=m
 # CONFIG_WIL6210_TRACING is not set
@@ -506,18 +519,22 @@
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_ICNSS=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_MSM_QBT1000=y
 CONFIG_APSS_CORE_EA=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MSM_REMOTEQDSS=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_QCOMCCI_HWMON=y
@@ -568,13 +585,16 @@
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 6ab5d54..cae87d3 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -7,9 +7,6 @@
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IRQ_TIME_ACCOUNTING=y
 CONFIG_SCHED_WALT=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_NOCB_CPU=y
@@ -237,7 +234,6 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_QSEECOM=y
-CONFIG_UID_SYS_STATS=y
 CONFIG_MEMORY_STATE_TIME=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -300,6 +296,7 @@
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
 CONFIG_MSM_ADSPRPC=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index edf67c7..0d89dc7 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -304,6 +304,7 @@
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
 CONFIG_MSM_ADSPRPC=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 6ebd2c3..f1ace59 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -21,6 +21,7 @@
 #include <asm/sysreg.h>
 
 #define ICC_EOIR1_EL1			sys_reg(3, 0, 12, 12, 1)
+#define ICC_HPPIR1_EL1			sys_reg(3, 0, 12, 12, 2)
 #define ICC_DIR_EL1			sys_reg(3, 0, 12, 11, 1)
 #define ICC_IAR1_EL1			sys_reg(3, 0, 12, 12, 0)
 #define ICC_SGI1R_EL1			sys_reg(3, 0, 12, 11, 5)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 1ac8008..fcf85be 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2743,6 +2743,48 @@
 	return true;
 }
 
+/**
+ * binder_get_node_refs_for_txn() - Get required refs on node for txn
+ * @node:         struct binder_node for which to get refs
+ * @procp:        returns @node->proc if valid
+ * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
+ *
+ * User-space normally keeps the node alive when creating a transaction
+ * since it has a reference to the target. The local strong ref keeps it
+ * alive if the sending process dies before the target process processes
+ * the transaction. If the source process is malicious or has a reference
+ * counting bug, relying on the local strong ref can fail.
+ *
+ * Since user-space can cause the local strong ref to go away, we also take
+ * a tmpref on the node to ensure it survives while we are constructing
+ * the transaction. We also need a tmpref on the proc for the same
+ * period, so we take that here as well.
+ *
+ * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
+ * Also sets @procp if valid. If @node->proc is NULL, indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY.
+ */
+static struct binder_node *binder_get_node_refs_for_txn(
+		struct binder_node *node,
+		struct binder_proc **procp,
+		uint32_t *error)
+{
+	struct binder_node *target_node = NULL;
+
+	binder_node_inner_lock(node);
+	if (node->proc) {
+		target_node = node;
+		binder_inc_node_nilocked(node, 1, 0, NULL);
+		binder_inc_node_tmpref_ilocked(node);
+		node->proc->tmp_ref++;
+		*procp = node->proc;
+	} else
+		*error = BR_DEAD_REPLY;
+	binder_node_inner_unlock(node);
+
+	return target_node;
+}
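/*
 * Illustrative caller sketch, not part of the patch itself: a successful
 * binder_get_node_refs_for_txn() call leaves the caller holding a strong
 * node reference, a node tmpref and a proc tmpref. The tmprefs are
 * dropped once the transaction has been constructed; the strong node
 * reference travels with the transaction and is released when its buffer
 * is freed. Variable names follow binder_transaction() as updated below.
 */
	target_node = binder_get_node_refs_for_txn(node, &target_proc,
						   &return_error);
	if (!target_node) {
		/* return_error was set to BR_DEAD_REPLY by the helper */
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_dead_binder;
	}

	/* ... build and queue the transaction using target_proc ... */

	binder_proc_dec_tmpref(target_proc);	/* drop the proc tmpref */
	binder_dec_node_tmpref(target_node);	/* drop the node tmpref */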
+
 static void binder_transaction(struct binder_proc *proc,
 			       struct binder_thread *thread,
 			       struct binder_transaction_data *tr, int reply,
@@ -2845,43 +2887,35 @@
 			ref = binder_get_ref_olocked(proc, tr->target.handle,
 						     true);
 			if (ref) {
-				binder_inc_node(ref->node, 1, 0, NULL);
-				target_node = ref->node;
+				target_node = binder_get_node_refs_for_txn(
+						ref->node, &target_proc,
+						&return_error);
+			} else {
+				binder_user_error("%d:%d got transaction to invalid handle\n",
+						  proc->pid, thread->pid);
+				return_error = BR_FAILED_REPLY;
 			}
 			binder_proc_unlock(proc);
-			if (target_node == NULL) {
-				binder_user_error("%d:%d got transaction to invalid handle\n",
-					proc->pid, thread->pid);
-				return_error = BR_FAILED_REPLY;
-				return_error_param = -EINVAL;
-				return_error_line = __LINE__;
-				goto err_invalid_target_handle;
-			}
 		} else {
 			mutex_lock(&context->context_mgr_node_lock);
 			target_node = context->binder_context_mgr_node;
-			if (target_node == NULL) {
+			if (target_node)
+				target_node = binder_get_node_refs_for_txn(
+						target_node, &target_proc,
+						&return_error);
+			else
 				return_error = BR_DEAD_REPLY;
-				mutex_unlock(&context->context_mgr_node_lock);
-				return_error_line = __LINE__;
-				goto err_no_context_mgr_node;
-			}
-			binder_inc_node(target_node, 1, 0, NULL);
 			mutex_unlock(&context->context_mgr_node_lock);
 		}
-		e->to_node = target_node->debug_id;
-		binder_node_lock(target_node);
-		target_proc = target_node->proc;
-		if (target_proc == NULL) {
-			binder_node_unlock(target_node);
-			return_error = BR_DEAD_REPLY;
+		if (!target_node) {
+			/*
+			 * return_error is set above
+			 */
+			return_error_param = -EINVAL;
 			return_error_line = __LINE__;
 			goto err_dead_binder;
 		}
-		binder_inner_proc_lock(target_proc);
-		target_proc->tmp_ref++;
-		binder_inner_proc_unlock(target_proc);
-		binder_node_unlock(target_node);
+		e->to_node = target_node->debug_id;
 		if (security_binder_transaction(proc->tsk,
 						target_proc->tsk) < 0) {
 			return_error = BR_FAILED_REPLY;
@@ -3240,6 +3274,8 @@
 	if (target_thread)
 		binder_thread_dec_tmpref(target_thread);
 	binder_proc_dec_tmpref(target_proc);
+	if (target_node)
+		binder_dec_node_tmpref(target_node);
 	/*
 	 * write barrier to synchronize with initialization
 	 * of log entry
@@ -3259,6 +3295,8 @@
 err_copy_data_failed:
 	trace_binder_transaction_failed_buffer_release(t->buffer);
 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
+	if (target_node)
+		binder_dec_node_tmpref(target_node);
 	target_node = NULL;
 	t->buffer->transaction = NULL;
 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@@ -3273,13 +3311,14 @@
 err_empty_call_stack:
 err_dead_binder:
 err_invalid_target_handle:
-err_no_context_mgr_node:
 	if (target_thread)
 		binder_thread_dec_tmpref(target_thread);
 	if (target_proc)
 		binder_proc_dec_tmpref(target_proc);
-	if (target_node)
+	if (target_node) {
 		binder_dec_node(target_node, 1, 0);
+		binder_dec_node_tmpref(target_node);
+	}
 
 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index d734e29..ee76d39 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -1982,7 +1982,8 @@
 
 void diag_send_updates_peripheral(uint8_t peripheral)
 {
-	diag_send_feature_mask_update(peripheral);
+	if (!driver->feature[peripheral].sent_feature_mask)
+		diag_send_feature_mask_update(peripheral);
 	/*
 	 * Masks (F3, logs and events) will be sent to
 	 * peripheral immediately following feature mask update only
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 18f941d..543f0a2 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -2368,7 +2368,9 @@
 		mutex_unlock(&driver->dci_mutex);
 		break;
 	case DIAG_IOCTL_DCI_EVENT_STATUS:
+		mutex_lock(&driver->dci_mutex);
 		result = diag_ioctl_dci_event_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
 		break;
 	case DIAG_IOCTL_DCI_CLEAR_LOGS:
 		mutex_lock(&driver->dci_mutex);
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 354c6a0..710271e 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -765,21 +765,17 @@
 			fwd_info_data->diagid_root = ctrl_pkt.diag_id;
 		} else {
 			i = fwd_info_cmd->num_pd - 2;
-			if (i >= 0)
+			if (i >= 0 && i < MAX_PERIPHERAL_UPD)
 				fwd_info_cmd->diagid_user[i] =
 				ctrl_pkt.diag_id;
 
 			i = fwd_info_data->num_pd - 2;
-			if (i >= 0)
+			if (i >= 0 && i < MAX_PERIPHERAL_UPD)
 				fwd_info_data->diagid_user[i] =
 				ctrl_pkt.diag_id;
 		}
 	}
 
-	if (root_str)
-		driver->diag_id_sent[peripheral] = 0;
-
-
 	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 		"diag: peripheral = %d: diag_id string = %s,diag_id = %d\n",
 		peripheral, process_name, ctrl_pkt.diag_id);
@@ -796,22 +792,24 @@
 		pr_err("diag: Unable to send diag id ctrl packet to peripheral %d, err: %d\n",
 		       peripheral, err);
 	} else {
-	/*
-	 * Masks (F3, logs and events) will be sent to
-	 * peripheral immediately following feature mask update only
-	 * if diag_id support is not present or
-	 * diag_id support is present and diag_id has been sent to
-	 * peripheral.
-	 * With diag_id being sent now, mask will be updated
-	 * to peripherals.
-	 */
-		driver->diag_id_sent[peripheral] = 1;
+		/*
+		 * Masks (F3, logs and events) are sent to a peripheral
+		 * immediately after the feature mask update only if
+		 * diag_id support is absent, or if diag_id support is
+		 * present and the diag_id has already been sent to the
+		 * peripheral.
+		 * Now that the diag_id has been sent, the masks will be
+		 * updated on the peripheral.
+		 */
+		if (root_str) {
+			driver->diag_id_sent[peripheral] = 1;
+			diag_send_updates_peripheral(peripheral);
+		}
+		diagfwd_buffers_init(fwd_info_data);
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 		"diag: diag_id sent = %d to peripheral = %d with diag_id = %d for %s :\n",
 			driver->diag_id_sent[peripheral], peripheral,
 			ctrl_pkt.diag_id, process_name);
-		diag_send_updates_peripheral(peripheral);
-		diagfwd_buffers_init(fwd_info_data);
 	}
 }
 
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 62ae8c5..5caa975 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1973,6 +1973,7 @@
 		&cam_cc_csi3phytimer_clk_src.clkr;
 	cam_cc_cphy_rx_clk_src.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src_sdm845_v2;
 	cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 384000000;
+	cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 384000000;
 	cam_cc_fd_core_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_sdm845_v2;
 	cam_cc_fd_core_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 384000000;
 	cam_cc_icp_clk_src.freq_tbl = ftbl_cam_cc_icp_clk_src_sdm845_v2;
@@ -1985,6 +1986,9 @@
 	cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] = 320000000;
 	cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] = 400000000;
 	cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 80000000;
+	cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 80000000;
+	cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		80000000;
 }
 
 static void cam_cc_sdm845_fixup_sdm670(void)
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 258a6f2..93a08db 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -2060,7 +2060,7 @@
 			last_entry = true;
 		}
 	}
-	fmax_temp[k] = abs_fmax;
+	fmax_temp[k++] = abs_fmax;
 
 	osm_clks_init[c->cluster_num].rate_max = devm_kzalloc(&pdev->dev,
 						 k * sizeof(unsigned long),
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 7f56fb6..60758b4 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -23,8 +23,6 @@
 	u8 pre_div;
 	u16 m;
 	u16 n;
-	unsigned long src_freq;
-#define FIXED_FREQ_SRC   0
 };
 
 /**
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 7382cfa..8d5e527 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -281,10 +281,9 @@
 		const struct freq_tbl *f, struct clk_rate_request *req)
 {
 	unsigned long clk_flags, rate = req->rate;
-	struct clk_rate_request parent_req = { };
 	struct clk_hw *p;
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-	int index, ret = 0;
+	int index;
 
 	f = qcom_find_freq(f, rate);
 	if (!f)
@@ -315,21 +314,6 @@
 	req->best_parent_rate = rate;
 	req->rate = f->freq;
 
-	if (f->src_freq != FIXED_FREQ_SRC) {
-		rate = parent_req.rate = f->src_freq;
-		parent_req.best_parent_hw = p;
-		ret = __clk_determine_rate(p, &parent_req);
-		if (ret)
-			return ret;
-
-		ret = clk_set_rate(p->clk, parent_req.rate);
-		if (ret) {
-			pr_err("Failed set rate(%lu) on parent for non-fixed source\n",
-							parent_req.rate);
-			return ret;
-		}
-	}
-
 	return 0;
 }
 
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
index ef1da5c..be84b46 100644
--- a/drivers/clk/qcom/debugcc-sdm845.c
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -117,6 +117,7 @@
 	"gcc_aggre_ufs_phy_axi_clk",
 	"gcc_aggre_usb3_prim_axi_clk",
 	"gcc_aggre_usb3_sec_axi_clk",
+	"gcc_apc_vs_clk",
 	"gcc_boot_rom_ahb_clk",
 	"gcc_camera_ahb_clk",
 	"gcc_camera_axi_clk",
@@ -144,12 +145,14 @@
 	"gcc_gpu_gpll0_div_clk_src",
 	"gcc_gpu_memnoc_gfx_clk",
 	"gcc_gpu_snoc_dvm_gfx_clk",
+	"gcc_gpu_vs_clk",
 	"gcc_mss_axis2_clk",
 	"gcc_mss_cfg_ahb_clk",
 	"gcc_mss_gpll0_div_clk_src",
 	"gcc_mss_mfab_axis_clk",
 	"gcc_mss_q6_memnoc_axi_clk",
 	"gcc_mss_snoc_axi_clk",
+	"gcc_mss_vs_clk",
 	"gcc_pcie_0_aux_clk",
 	"gcc_pcie_0_cfg_ahb_clk",
 	"gcc_pcie_0_mstr_axi_clk",
@@ -232,9 +235,14 @@
 	"gcc_usb3_sec_phy_com_aux_clk",
 	"gcc_usb3_sec_phy_pipe_clk",
 	"gcc_usb_phy_cfg_ahb2phy_clk",
+	"gcc_vdda_vs_clk",
+	"gcc_vddcx_vs_clk",
+	"gcc_vddmx_vs_clk",
 	"gcc_video_ahb_clk",
 	"gcc_video_axi_clk",
 	"gcc_video_xo_clk",
+	"gcc_vs_ctrl_ahb_clk",
+	"gcc_vs_ctrl_clk",
 	"gcc_sdcc1_ahb_clk",
 	"gcc_sdcc1_apps_clk",
 	"gcc_sdcc1_ice_core_clk",
@@ -452,6 +460,8 @@
 			0x11B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_aggre_usb3_sec_axi_clk", 0x11C, 4, GCC,
 			0x11C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_apc_vs_clk", 0x113, 4, GCC,
+			0x113, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_boot_rom_ahb_clk", 0x94, 4, GCC,
 			0x94, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_camera_ahb_clk", 0x3A, 4, GCC,
@@ -506,6 +516,8 @@
 			0x145, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_gpu_snoc_dvm_gfx_clk", 0x147, 4, GCC,
 			0x147, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_gpu_vs_clk", 0x112, 4, GCC,
+			0x112, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_mss_axis2_clk", 0x12F, 4, GCC,
 			0x12F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_mss_cfg_ahb_clk", 0x12D, 4, GCC,
@@ -518,6 +530,8 @@
 			0x135, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_mss_snoc_axi_clk", 0x134, 4, GCC,
 			0x134, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_mss_vs_clk", 0x111, 4, GCC,
+			0x111, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_pcie_0_aux_clk", 0xE5, 4, GCC,
 			0xE5, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_pcie_0_cfg_ahb_clk", 0xE4, 4, GCC,
@@ -682,12 +696,22 @@
 			0x6A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_usb_phy_cfg_ahb2phy_clk", 0x6F, 4, GCC,
 			0x6F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_vdda_vs_clk", 0x10E, 4, GCC,
+			0x10E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_vddcx_vs_clk", 0x10C, 4, GCC,
+			0x10C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_vddmx_vs_clk", 0x10D, 4, GCC,
+			0x10D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_video_ahb_clk", 0x39, 4, GCC,
 			0x39, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_video_axi_clk", 0x3F, 4, GCC,
 			0x3F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_video_xo_clk", 0x42, 4, GCC,
 			0x42, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_vs_ctrl_ahb_clk", 0x110, 4, GCC,
+			0x110, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_vs_ctrl_clk", 0x10F, 4, GCC,
+			0x10F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_sdcc1_ahb_clk", 0x15C, 4, GCC,
 			0x42, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_sdcc1_apps_clk", 0x15B, 4, GCC,
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 7a2969a..3b13c9b 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -40,8 +40,6 @@
 #define DISP_CC_MISC_CMD	0x8000
 
 #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-#define F_SLEW(f, s, h, m, n, src_freq) { (f), (s), (2 * (h) - 1), (m), (n), \
-					(src_freq) }
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
 
@@ -1036,19 +1034,19 @@
 static void disp_cc_sdm845_fixup_sdm845v2(struct regmap *regmap)
 {
 	clk_fabia_pll_configure(&disp_cc_pll0, regmap,
-					&disp_cc_pll0_config_v2);
+		&disp_cc_pll0_config_v2);
 	disp_cc_mdss_byte0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
 		180000000;
 	disp_cc_mdss_byte0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
 		275000000;
 	disp_cc_mdss_byte0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
-		358000000;
+		328580000;
 	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
 		180000000;
 	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
 		275000000;
 	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
-		358000000;
+		328580000;
 	disp_cc_mdss_dp_pixel1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
 		337500;
 	disp_cc_mdss_dp_pixel_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
@@ -1065,10 +1063,14 @@
 		280000000;
 	disp_cc_mdss_pclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
 		430000000;
+	disp_cc_mdss_pclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		430000000;
 	disp_cc_mdss_pclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
 		280000000;
 	disp_cc_mdss_pclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
 		430000000;
+	disp_cc_mdss_pclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		430000000;
 	disp_cc_mdss_rot_clk_src.freq_tbl =
 		ftbl_disp_cc_mdss_rot_clk_src_sdm845_v2;
 	disp_cc_mdss_rot_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
@@ -1085,6 +1087,10 @@
 
 	disp_cc_mdss_mdp_clk_src.freq_tbl =
 		ftbl_disp_cc_mdss_mdp_clk_src_sdm670;
+	disp_cc_mdss_byte0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		358000000;
+	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		358000000;
 }
 
 static int disp_cc_sdm845_fixup(struct platform_device *pdev,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index a363235..4142dd5 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -4180,6 +4180,7 @@
 		240000000;
 	gcc_ufs_phy_axi_clk_src.freq_tbl =
 		ftbl_gcc_ufs_card_axi_clk_src_sdm845_v2;
+	gcc_vsensor_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 600000000;
 }
 
 static void gcc_sdm845_fixup_sdm670(void)
@@ -4250,16 +4251,14 @@
 
 	gcc_cpuss_rbcpr_clk_src.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src_sdm670;
 	gcc_cpuss_rbcpr_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
-					50000000;
-	gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
-					50000000;
+		50000000;
+	gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 50000000;
 	gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
-					100000000;
+		100000000;
 	gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
-					201500000;
+		201500000;
 	gcc_sdcc4_apps_clk_src.freq_tbl = ftbl_gcc_sdcc4_apps_clk_src_sdm670;
-	gcc_sdcc4_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
-					33333333;
+	gcc_sdcc4_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 33333333;
 }
 
 static int gcc_sdm845_fixup(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index db0dad1..35a23f7 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -631,7 +631,7 @@
 	clk_fabia_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
 
 	gpu_cc_gmu_clk_src.freq_tbl = ftbl_gpu_cc_gmu_clk_src_sdm670;
-	gpu_cc_gmu_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 0;
+	gpu_cc_gmu_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 200000000;
 }
 
 static void gpu_cc_gfx_sdm845_fixup_sdm845v2(void)
@@ -640,39 +640,36 @@
 				ftbl_gpu_cc_gx_gfx3d_clk_src_sdm845_v2;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_MIN] = 180000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOWER] =
-				257000000;
+		257000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW] = 342000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW_L1] =
-				414000000;
+		414000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_NOMINAL] =
-				520000000;
+		520000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_NOMINAL_L1] =
-				596000000;
+		596000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH] = 675000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH_L1] =
-				710000000;
+		710000000;
 }
 
 static void gpu_cc_gfx_sdm845_fixup_sdm670(void)
 {
 	gpu_cc_gx_gfx3d_clk_src.freq_tbl =
 				ftbl_gpu_cc_gx_gfx3d_clk_src_sdm670;
-	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_MIN] =
-				180000000;
+	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_MIN] = 180000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOWER] =
-				267000000;
-	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW] =
-				355000000;
+		267000000;
+	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW] = 355000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW_L1] =
-				430000000;
+		430000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_NOMINAL] =
-				565000000;
+		565000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_NOMINAL_L1] =
-				650000000;
-	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH] =
-				750000000;
+		650000000;
+	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH] = 750000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH_L1] =
-				780000000;
+		780000000;
 }
 
 static int gpu_cc_gfx_sdm845_fixup(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 1cd6c9c..89ed5cd 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -289,6 +289,14 @@
 
 	(void)mdss_pll_resource_enable(rsc, false);
 
+	/*
+	 * Cache the current parent index for cases where the parent is
+	 * not changing but the rate is. In that case the clock framework
+	 * won't call set_parent and hence the dsiclk_sel bit won't be
+	 * programmed, e.g. the dfps update use case.
+	 */
+	rsc->cached_cfg1 = val;
+
 	return rc;
 }
 
@@ -633,6 +641,12 @@
 	if (rsc->slave)
 		dsi_pll_enable_pll_bias(rsc->slave);
 
+	phy_reg_update_bits_sub(rsc, PHY_CMN_CLK_CFG1, 0x03, rsc->cached_cfg1);
+	if (rsc->slave)
+		phy_reg_update_bits_sub(rsc->slave, PHY_CMN_CLK_CFG1,
+				0x03, rsc->cached_cfg1);
+	wmb(); /* ensure dsiclk_sel is always programmed before pll start */
+
 	/* Start PLL */
 	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01);
 
@@ -728,7 +742,6 @@
 		return;
 	}
 	pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
 	pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
 	pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
 			pll->cached_cfg1, pll->cached_outdiv);
@@ -770,8 +783,6 @@
 			pll->cached_cfg1);
 		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
 					pll->cached_cfg0);
-		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1,
-					pll->cached_cfg1);
 		MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
 					pll->cached_outdiv);
 	}
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 3311e9f..f8fdf3f 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -114,6 +114,17 @@
 	{ }
 };
 
+static const struct freq_tbl ftbl_video_cc_venus_clk_src_sdm670[] = {
+	F(100000000, P_VIDEO_PLL0_OUT_MAIN, 4, 0, 0),
+	F(200000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+	F(330000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+	F(364800000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+	F(404000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	F(444000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	F(533000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 video_cc_venus_clk_src = {
 	.cmd_rcgr = 0x7f0,
 	.mnd_width = 0,
@@ -343,8 +354,10 @@
 
 static void video_cc_sdm845_fixup_sdm670(void)
 {
-	video_cc_sdm845_fixup_sdm845v2();
-
+	video_cc_venus_clk_src.freq_tbl = ftbl_video_cc_venus_clk_src_sdm670;
+	video_cc_venus_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 330000000;
+	video_cc_venus_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		404000000;
 }
 
 static int video_cc_sdm845_fixup(struct platform_device *pdev)
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
index 5802c21..f4f503e 100644
--- a/drivers/devfreq/arm-memlat-mon.c
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -55,8 +55,7 @@
 struct cpu_grp_info {
 	cpumask_t cpus;
 	cpumask_t inited_cpus;
-	unsigned long cache_miss_event;
-	unsigned long inst_event;
+	unsigned int event_ids[NUM_EVENTS];
 	struct cpu_pmu_stats *cpustats;
 	struct memlat_hwmon hw;
 	struct notifier_block arm_memlat_cpu_notif;
@@ -97,13 +96,8 @@
 	u64 total, enabled, running;
 
 	total = perf_event_read_value(event->pevent, &enabled, &running);
-	if (total >= event->prev_count)
-		ev_count = total - event->prev_count;
-	else
-		ev_count = (MAX_COUNT_LIM - event->prev_count) + total;
-
+	ev_count = total - event->prev_count;
 	event->prev_count = total;
-
 	return ev_count;
 }
 
@@ -134,7 +128,7 @@
 {
 	int i;
 
-	for (i = 0; i < NUM_EVENTS; i++) {
+	for (i = 0; i < ARRAY_SIZE(cpustats->events); i++) {
 		cpustats->events[i].prev_count = 0;
 		perf_event_release_kernel(cpustats->events[i].pevent);
 	}
@@ -187,7 +181,7 @@
 {
 	struct perf_event *pevent;
 	struct perf_event_attr *attr;
-	int err;
+	int err, i;
 	struct cpu_pmu_stats *cpustats = to_cpustats(cpu_grp, cpu);
 
 	/* Allocate an attribute for event initialization */
@@ -195,26 +189,15 @@
 	if (!attr)
 		return -ENOMEM;
 
-	attr->config = cpu_grp->inst_event;
-	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
-	if (IS_ERR(pevent))
-		goto err_out;
-	cpustats->events[INST_IDX].pevent = pevent;
-	perf_event_enable(cpustats->events[INST_IDX].pevent);
-
-	attr->config = cpu_grp->cache_miss_event;
-	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
-	if (IS_ERR(pevent))
-		goto err_out;
-	cpustats->events[CM_IDX].pevent = pevent;
-	perf_event_enable(cpustats->events[CM_IDX].pevent);
-
-	attr->config = CYC_EV;
-	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
-	if (IS_ERR(pevent))
-		goto err_out;
-	cpustats->events[CYC_IDX].pevent = pevent;
-	perf_event_enable(cpustats->events[CYC_IDX].pevent);
+	for (i = 0; i < ARRAY_SIZE(cpustats->events); i++) {
+		attr->config = cpu_grp->event_ids[i];
+		pevent = perf_event_create_kernel_counter(attr, cpu, NULL,
+							  NULL, NULL);
+		if (IS_ERR(pevent))
+			goto err_out;
+		cpustats->events[i].pevent = pevent;
+		perf_event_enable(pevent);
+	}
 
 	kfree(attr);
 	return 0;
@@ -315,7 +298,7 @@
 	struct memlat_hwmon *hw;
 	struct cpu_grp_info *cpu_grp;
 	int cpu, ret;
-	u32 cachemiss_ev, inst_ev;
+	u32 event_id;
 
 	cpu_grp = devm_kzalloc(dev, sizeof(*cpu_grp), GFP_KERNEL);
 	if (!cpu_grp)
@@ -346,22 +329,24 @@
 	if (!cpu_grp->cpustats)
 		return -ENOMEM;
 
+	cpu_grp->event_ids[CYC_IDX] = CYC_EV;
+
 	ret = of_property_read_u32(dev->of_node, "qcom,cachemiss-ev",
-			&cachemiss_ev);
+				   &event_id);
 	if (ret) {
 		dev_dbg(dev, "Cache Miss event not specified. Using def:0x%x\n",
-				L2DM_EV);
-		cachemiss_ev = L2DM_EV;
+			L2DM_EV);
+		event_id = L2DM_EV;
 	}
-	cpu_grp->cache_miss_event = cachemiss_ev;
+	cpu_grp->event_ids[CM_IDX] = event_id;
 
-	ret = of_property_read_u32(dev->of_node, "qcom,inst-ev", &inst_ev);
+	ret = of_property_read_u32(dev->of_node, "qcom,inst-ev", &event_id);
 	if (ret) {
 		dev_dbg(dev, "Inst event not specified. Using def:0x%x\n",
-				INST_EV);
-		inst_ev = INST_EV;
+			INST_EV);
+		event_id = INST_EV;
 	}
-	cpu_grp->inst_event = inst_ev;
+	cpu_grp->event_ids[INST_IDX] = event_id;
 
 	for_each_cpu(cpu, &cpu_grp->cpus)
 		to_devstats(cpu_grp, cpu)->id = cpu;
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index ffe60de..f9b758f 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -575,7 +575,9 @@
 		count = readl_relaxed(MON2_ZONE_MAX(m, zone)) + 1;
 		break;
 	case MON3:
-		count = readl_relaxed(MON3_ZONE_MAX(m, zone)) + 1;
+		count = readl_relaxed(MON3_ZONE_MAX(m, zone));
+		if (count)
+			count++;
 		break;
 	}
 
diff --git a/drivers/devfreq/governor_memlat.c b/drivers/devfreq/governor_memlat.c
index 81d98d1..1a8ef1f 100644
--- a/drivers/devfreq/governor_memlat.c
+++ b/drivers/devfreq/governor_memlat.c
@@ -36,6 +36,7 @@
 struct memlat_node {
 	unsigned int ratio_ceil;
 	bool mon_started;
+	bool already_zero;
 	struct list_head list;
 	void *orig_data;
 	struct memlat_hwmon *hw;
@@ -224,7 +225,7 @@
 static int devfreq_memlat_get_freq(struct devfreq *df,
 					unsigned long *freq)
 {
-	int i, lat_dev;
+	int i, lat_dev = 0;
 	struct memlat_node *node = df->data;
 	struct memlat_hwmon *hw = node->hw;
 	unsigned long max_freq = 0;
@@ -238,16 +239,16 @@
 		if (hw->core_stats[i].mem_count)
 			ratio /= hw->core_stats[i].mem_count;
 
+		if (!hw->core_stats[i].inst_count
+		    || !hw->core_stats[i].freq)
+			continue;
+
 		trace_memlat_dev_meas(dev_name(df->dev.parent),
 					hw->core_stats[i].id,
 					hw->core_stats[i].inst_count,
 					hw->core_stats[i].mem_count,
 					hw->core_stats[i].freq, ratio);
 
-		if (!hw->core_stats[i].inst_count
-		    || !hw->core_stats[i].freq)
-			continue;
-
 		if (ratio <= node->ratio_ceil
 		    && hw->core_stats[i].freq > max_freq) {
 			lat_dev = i;
@@ -255,8 +256,10 @@
 		}
 	}
 
-	if (max_freq) {
+	if (max_freq)
 		max_freq = core_to_dev_freq(node, max_freq);
+
+	if (max_freq || !node->already_zero) {
 		trace_memlat_dev_update(dev_name(df->dev.parent),
 					hw->core_stats[lat_dev].id,
 					hw->core_stats[lat_dev].inst_count,
@@ -265,6 +268,8 @@
 					max_freq);
 	}
 
+	node->already_zero = !max_freq;
+
 	*freq = max_freq;
 	return 0;
 }
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 5a9166a..2f34a01 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -285,7 +285,7 @@
 	struct sync_file *sync_file = container_of(kref, struct sync_file,
 						     kref);
 
-	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+	if (test_bit(POLL_ENABLED, &sync_file->flags))
 		fence_remove_callback(sync_file->fence, &sync_file->cb);
 	fence_put(sync_file->fence);
 	kfree(sync_file);
@@ -305,7 +305,7 @@
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (!test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
 		if (fence_add_callback(sync_file->fence, &sync_file->cb,
 					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 7426fc4..acbaec4 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -107,6 +107,7 @@
 	}
 
 	aux->catalog->clear_trans(aux->catalog, false);
+	aux->catalog->clear_hw_interrupts(aux->catalog);
 
 	reg = 0; /* Transaction number == 1 */
 	if (!aux->native) { /* i2c */
@@ -203,6 +204,10 @@
 		aux->aux_error_num = DP_AUX_ERR_TOUT;
 	if (isr & DP_INTR_NACK_DEFER)
 		aux->aux_error_num = DP_AUX_ERR_NACK;
+	if (isr & DP_INTR_AUX_ERROR) {
+		aux->aux_error_num = DP_AUX_ERR_PHY;
+		aux->catalog->clear_hw_interrupts(aux->catalog);
+	}
 
 	complete(&aux->comp);
 }
@@ -227,6 +232,10 @@
 			aux->aux_error_num = DP_AUX_ERR_NACK;
 		if (isr & DP_INTR_I2C_DEFER)
 			aux->aux_error_num = DP_AUX_ERR_DEFER;
+		if (isr & DP_INTR_AUX_ERROR) {
+			aux->aux_error_num = DP_AUX_ERR_PHY;
+			aux->catalog->clear_hw_interrupts(aux->catalog);
+		}
 	}
 
 	complete(&aux->comp);
@@ -454,11 +463,11 @@
 
 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+	dp_aux_reset_phy_config_indices(aux_cfg);
+	aux->catalog->setup(aux->catalog, aux_cfg);
 	aux->catalog->reset(aux->catalog);
 	aux->catalog->enable(aux->catalog, true);
 	aux->retry_cnt = 0;
-	dp_aux_reset_phy_config_indices(aux_cfg);
-	aux->catalog->setup(aux->catalog, aux_cfg);
 }
 
 static void dp_aux_deinit(struct dp_aux *dp_aux)
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 5d96fd9..85761ce 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -25,6 +25,7 @@
 	DP_AUX_ERR_NACK	= -3,
 	DP_AUX_ERR_DEFER	= -4,
 	DP_AUX_ERR_NACK_DEFER	= -5,
+	DP_AUX_ERR_PHY	= -6,
 };
 
 struct dp_aux {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 8b17aed..2894e82 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -158,6 +158,31 @@
 	return rc;
 }
 
+static void dp_catalog_aux_clear_hw_interrupts(struct dp_catalog_aux *aux)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *phy_base;
+	u32 data = 0;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+	phy_base = catalog->io->phy_io.base;
+
+	data = dp_read(phy_base + DP_PHY_AUX_INTERRUPT_STATUS);
+	pr_debug("PHY_AUX_INTERRUPT_STATUS=0x%08x\n", data);
+
+	dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+	wmb(); /* make sure 0x1f is written before next write */
+	dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+	wmb(); /* make sure 0x9f is written before next write */
+	dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+	wmb(); /* make sure register is cleared */
+}
+
 static void dp_catalog_aux_reset(struct dp_catalog_aux *aux)
 {
 	u32 aux_ctrl;
@@ -180,6 +205,7 @@
 
 	aux_ctrl &= ~BIT(1);
 	dp_write(base + DP_AUX_CTRL, aux_ctrl);
+	wmb(); /* make sure AUX reset is done here */
 }
 
 static void dp_catalog_aux_enable(struct dp_catalog_aux *aux, bool enable)
@@ -199,14 +225,15 @@
 	aux_ctrl = dp_read(base + DP_AUX_CTRL);
 
 	if (enable) {
+		aux_ctrl |= BIT(0);
+		dp_write(base + DP_AUX_CTRL, aux_ctrl);
+		wmb(); /* make sure AUX module is enabled */
 		dp_write(base + DP_TIMEOUT_COUNT, 0xffff);
 		dp_write(base + DP_AUX_LIMITS, 0xffff);
-		aux_ctrl |= BIT(0);
 	} else {
 		aux_ctrl &= ~BIT(0);
+		dp_write(base + DP_AUX_CTRL, aux_ctrl);
 	}
-
-	dp_write(base + DP_AUX_CTRL, aux_ctrl);
 }
 
 static void dp_catalog_aux_update_cfg(struct dp_catalog_aux *aux,
@@ -263,6 +290,7 @@
 	}
 
 	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_INTERRUPT_MASK, 0x1F);
+	wmb(); /* make sure AUX configuration is done before enabling it */
 }
 
 static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy)
@@ -1242,6 +1270,7 @@
 		.enable        = dp_catalog_aux_enable,
 		.setup         = dp_catalog_aux_setup,
 		.get_irq       = dp_catalog_aux_get_irq,
+		.clear_hw_interrupts = dp_catalog_aux_clear_hw_interrupts,
 	};
 	struct dp_catalog_ctrl ctrl = {
 		.state_ctrl     = dp_catalog_ctrl_state_ctrl,
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index c1b7a7e..aca2f18 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -75,6 +75,7 @@
 	void (*setup)(struct dp_catalog_aux *aux,
 			struct dp_aux_cfg *aux_cfg);
 	void (*get_irq)(struct dp_catalog_aux *aux, bool cmd_busy);
+	void (*clear_hw_interrupts)(struct dp_catalog_aux *aux);
 };
 
 struct dp_catalog_ctrl {
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index d0512e6..25407c4 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -107,6 +107,40 @@
 	return len;
 }
 
+static ssize_t dp_debug_bw_code_write(struct file *file,
+		const char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	char buf[SZ_8];
+	size_t len = 0;
+	u32 max_bw_code = 0;
+
+	if (!debug)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	/* Leave room for termination char */
+	len = min_t(size_t, count, SZ_8 - 1);
+	if (copy_from_user(buf, user_buff, len))
+		return 0;
+
+	buf[len] = '\0';
+
+	if (kstrtoint(buf, 10, &max_bw_code) != 0)
+		return 0;
+
+	if (!is_link_rate_valid(max_bw_code)) {
+		pr_err("Unsupported bw code %d\n", max_bw_code);
+		return len;
+	}
+	debug->panel->max_bw_code = max_bw_code;
+	pr_debug("max_bw_code: %d\n", max_bw_code);
+
+	return len;
+}
+
 static ssize_t dp_debug_read_connected(struct file *file,
 		char __user *user_buff, size_t count, loff_t *ppos)
 {
@@ -349,6 +383,36 @@
 	return -EINVAL;
 }
 
+static ssize_t dp_debug_bw_code_read(struct file *file,
+	char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!debug)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += snprintf(buf + len, (SZ_4K - len),
+			"max_bw_code = %d\n", debug->panel->max_bw_code);
+
+	if (copy_to_user(user_buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+	kfree(buf);
+	return len;
+}
+
 static const struct file_operations dp_debug_fops = {
 	.open = simple_open,
 	.read = dp_debug_read_info,
@@ -370,6 +434,12 @@
 	.read = dp_debug_read_connected,
 };
 
+static const struct file_operations bw_code_fops = {
+	.open = simple_open,
+	.read = dp_debug_bw_code_read,
+	.write = dp_debug_bw_code_write,
+};
+
 static int dp_debug_init(struct dp_debug *dp_debug)
 {
 	int rc = 0;
@@ -377,6 +447,7 @@
 		struct dp_debug_private, dp_debug);
 	struct dentry *dir, *file, *edid_modes;
 	struct dentry *hpd, *connected;
+	struct dentry *max_bw_code;
 	struct dentry *root = debug->root;
 
 	dir = debugfs_create_dir(DEBUG_NAME, NULL);
@@ -423,6 +494,15 @@
 		goto error_remove_dir;
 	}
 
+	max_bw_code = debugfs_create_file("max_bw_code", 0644, dir,
+			debug, &bw_code_fops);
+	if (IS_ERR_OR_NULL(max_bw_code)) {
+		rc = PTR_ERR(max_bw_code);
+		pr_err("[%s] debugfs max_bw_code failed, rc=%d\n",
+		       DEBUG_NAME, rc);
+		goto error_remove_dir;
+	}
+
 	root = dir;
 	return rc;
 error_remove_dir:
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a0b6cef..ea07d15 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -498,7 +498,7 @@
 	reinit_completion(&dp->notification_comp);
 	dp_display_send_hpd_event(&dp->dp_display);
 
-	if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 2)) {
+	if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 5)) {
 		pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 2b27b3e..fc3fb56 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -182,7 +182,9 @@
 	rc = dp_panel_read_dpcd(dp_panel);
 	if (rc || !is_link_rate_valid(drm_dp_link_rate_to_bw_code(
 		dp_panel->link_info.rate)) || !is_lane_count_valid(
-		dp_panel->link_info.num_lanes)) {
+		dp_panel->link_info.num_lanes) ||
+		((drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate)) >
+		dp_panel->max_bw_code)) {
 		pr_err("panel dpcd read failed/incorrect, set default params\n");
 		dp_panel_set_default_link_params(dp_panel);
 	}
@@ -501,6 +503,7 @@
 
 	dp_panel = &panel->dp_panel;
 	panel->aux_cfg_update_done = false;
+	dp_panel->max_bw_code = DP_LINK_BW_8_1;
 
 	dp_panel->sde_edid_register = dp_panel_edid_register;
 	dp_panel->sde_edid_deregister = dp_panel_edid_deregister;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 4486a84..01a978a 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -71,6 +71,9 @@
 	u32 vic;
 	u32 max_pclk_khz;
 
+	/* debug */
+	u32 max_bw_code;
+
 	int (*sde_edid_register)(struct dp_panel *dp_panel);
 	void (*sde_edid_deregister)(struct dp_panel *dp_panel);
 	int (*init_info)(struct dp_panel *dp_panel);
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 5aaad24..25d035d 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -179,6 +179,7 @@
 #define DP_PHY_AUX_CFG9                         (0x00000044)
 #define DP_PHY_AUX_INTERRUPT_MASK               (0x00000048)
 #define DP_PHY_AUX_INTERRUPT_CLEAR              (0x0000004C)
+#define DP_PHY_AUX_INTERRUPT_STATUS             (0x000000BC)
 
 #define DP_PHY_SPARE0				(0x00AC)
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index 38a6e47..5d9d21f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -60,6 +60,8 @@
 	ctrl->ops.setup_misr = dsi_ctrl_hw_cmn_setup_misr;
 	ctrl->ops.collect_misr = dsi_ctrl_hw_cmn_collect_misr;
 	ctrl->ops.debug_bus = dsi_ctrl_hw_cmn_debug_bus;
+	ctrl->ops.get_cmd_read_data = dsi_ctrl_hw_cmn_get_cmd_read_data;
+	ctrl->ops.clear_rdbk_register = dsi_ctrl_hw_cmn_clear_rdbk_reg;
 
 	switch (version) {
 	case DSI_CTRL_VERSION_1_4:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 84448ec..186a5b5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -170,6 +170,12 @@
 			bool enable);
 void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl,
 			bool enable);
+u32 dsi_ctrl_hw_cmn_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
+				     u8 *rd_buf,
+				     u32 read_offset,
+				     u32 rx_byte,
+				     u32 pkt_size, u32 *hw_read_cnt);
+void dsi_ctrl_hw_cmn_clear_rdbk_reg(struct dsi_ctrl_hw *ctrl);
 
 /* Definitions specific to 1.4 DSI controller hardware */
 int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 4be5069..790ee22 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -38,6 +38,8 @@
 #define DSI_CTRL_TX_TO_MS     200
 
 #define TO_ON_OFF(x) ((x) ? "ON" : "OFF")
+
+#define CEIL(x, y)              (((x) + ((y)-1)) / (y))
 /**
  * enum dsi_ctrl_driver_ops - controller driver ops
  */
@@ -884,12 +886,49 @@
 		buf[3] |= BIT(6);
 
 	buf[3] |= BIT(7);
+
+	/* send embedded BTA for read commands */
+	if ((buf[2] & 0x3f) == MIPI_DSI_DCS_READ)
+		buf[3] |= BIT(5);
+
 	*buffer = buf;
 	*size = len;
 
 	return rc;
 }
 
+static void dsi_ctrl_wait_for_video_done(struct dsi_ctrl *dsi_ctrl)
+{
+	u32 v_total = 0, v_blank = 0, sleep_ms = 0, fps = 0, ret;
+	struct dsi_mode_info *timing;
+
+	if (dsi_ctrl->host_config.panel_mode != DSI_OP_VIDEO_MODE)
+		return;
+
+	dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
+				DSI_VIDEO_MODE_FRAME_DONE);
+
+	dsi_ctrl_enable_status_interrupt(dsi_ctrl,
+				DSI_SINT_VIDEO_MODE_FRAME_DONE, NULL);
+	reinit_completion(&dsi_ctrl->irq_info.vid_frame_done);
+	ret = wait_for_completion_timeout(
+			&dsi_ctrl->irq_info.vid_frame_done,
+			msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
+	if (!ret)
+		pr_debug("wait for video done failed\n");
+	dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+				DSI_SINT_VIDEO_MODE_FRAME_DONE);
+
+	timing = &(dsi_ctrl->host_config.video_timing);
+	v_total = timing->v_sync_width + timing->v_back_porch +
+			timing->v_front_porch + timing->v_active;
+	v_blank = timing->v_sync_width + timing->v_back_porch;
+	fps = timing->refresh_rate;
+
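+	/*
+	 * The delay approximates the vertical blanking time, i.e.
+	 * v_blank / v_total of one frame period, rounded up plus 1ms of
+	 * margin. Illustrative numbers only: v_blank = 50, v_total = 2000,
+	 * fps = 60 gives CEIL(50 * 1000, 2000 * 60) + 1 = 2ms.
+	 */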
+	sleep_ms = CEIL((v_blank * 1000), (v_total * fps)) + 1;
+	udelay(sleep_ms * 1000);
+}
+
 static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
 			  const struct mipi_dsi_msg *msg,
 			  u32 flags)
@@ -971,6 +1010,7 @@
 	}
 
 	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
+		dsi_ctrl_wait_for_video_done(dsi_ctrl);
 		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
 					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
 		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
@@ -1026,6 +1066,7 @@
 {
 	int rc = 0;
 	u8 tx[2] = { (u8)(size & 0xFF), (u8)(size >> 8) };
+	u32 flags = DSI_CTRL_CMD_FETCH_MEMORY;
 	struct mipi_dsi_msg msg = {
 		.channel = rx_msg->channel,
 		.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
@@ -1033,24 +1074,77 @@
 		.tx_buf = tx,
 	};
 
-	rc = dsi_message_tx(dsi_ctrl, &msg, 0x0);
+	rc = dsi_message_tx(dsi_ctrl, &msg, flags);
 	if (rc)
 		pr_err("failed to send max return size packet, rc=%d\n", rc);
 
 	return rc;
 }
 
+/* Helper functions to support DCS read operation */
+static int dsi_parse_short_read1_resp(const struct mipi_dsi_msg *msg,
+		unsigned char *buff)
+{
+	u8 *data = msg->rx_buf;
+	int read_len = 1;
+
+	if (!data)
+		return 0;
+
+	/* remove dcs type */
+	if (msg->rx_len >= 1)
+		data[0] = buff[1];
+	else
+		read_len = 0;
+
+	return read_len;
+}
+
+static int dsi_parse_short_read2_resp(const struct mipi_dsi_msg *msg,
+		unsigned char *buff)
+{
+	u8 *data = msg->rx_buf;
+	int read_len = 2;
+
+	if (!data)
+		return 0;
+
+	/* remove dcs type */
+	if (msg->rx_len >= 2) {
+		data[0] = buff[1];
+		data[1] = buff[2];
+	} else {
+		read_len = 0;
+	}
+
+	return read_len;
+}
+
+static int dsi_parse_long_read_resp(const struct mipi_dsi_msg *msg,
+		unsigned char *buff)
+{
+	if (!msg->rx_buf)
+		return 0;
+
+	/* remove dcs type */
+	if (msg->rx_buf && msg->rx_len)
+		memcpy(msg->rx_buf, buff + 4, msg->rx_len);
+
+	return msg->rx_len;
+}
+
 static int dsi_message_rx(struct dsi_ctrl *dsi_ctrl,
 			  const struct mipi_dsi_msg *msg,
 			  u32 flags)
 {
 	int rc = 0;
-	u32 rd_pkt_size;
-	u32 total_read_len;
-	u32 bytes_read = 0, tot_bytes_read = 0;
-	u32 current_read_len;
+	u32 rd_pkt_size, total_read_len, hw_read_cnt;
+	u32 current_read_len = 0, total_bytes_read = 0;
 	bool short_resp = false;
 	bool read_done = false;
+	u32 dlen, diff, rlen = msg->rx_len;
+	unsigned char *buff;
+	char cmd;
 
 	if (msg->rx_len <= 2) {
 		short_resp = true;
@@ -1066,6 +1160,7 @@
 
 		total_read_len = current_read_len + 6;
 	}
+	buff = msg->rx_buf;
 
 	while (!read_done) {
 		rc = dsi_set_max_return_size(dsi_ctrl, msg, rd_pkt_size);
@@ -1075,24 +1170,79 @@
 			goto error;
 		}
 
+		/* clear RDBK_DATA registers before proceeding */
+		dsi_ctrl->hw.ops.clear_rdbk_register(&dsi_ctrl->hw);
+
 		rc = dsi_message_tx(dsi_ctrl, msg, flags);
 		if (rc) {
 			pr_err("Message transmission failed, rc=%d\n", rc);
 			goto error;
 		}
 
+		dlen = dsi_ctrl->hw.ops.get_cmd_read_data(&dsi_ctrl->hw,
+					buff, total_bytes_read,
+					total_read_len, rd_pkt_size,
+					&hw_read_cnt);
+		if (!dlen)
+			goto error;
 
-		tot_bytes_read += bytes_read;
 		if (short_resp)
+			break;
+
+		if (rlen <= current_read_len) {
+			diff = current_read_len - rlen;
 			read_done = true;
-		else if (msg->rx_len <= tot_bytes_read)
-			read_done = true;
+		} else {
+			diff = 0;
+			rlen -= current_read_len;
+		}
+
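+		/*
+		 * diff is the number of bytes in this chunk beyond the
+		 * requested length; drop it (along with the CRC) before
+		 * advancing the destination pointer and byte count.
+		 */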
+		dlen -= 2; /* 2 bytes of CRC */
+		dlen -= diff;
+		buff += dlen;
+		total_bytes_read += dlen;
+		if (!read_done) {
+			current_read_len = 14; /* Not first read */
+			if (rlen < current_read_len)
+				rd_pkt_size += rlen;
+			else
+				rd_pkt_size += current_read_len;
+		}
 	}
+
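+	/*
+	 * When fewer than 16 bytes were latched for a long response, the
+	 * valid data sits at the tail of the 16-byte read-back window, so
+	 * skip the stale leading bytes before parsing.
+	 */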
+	if (hw_read_cnt < 16 && !short_resp)
+		buff = msg->rx_buf + (16 - hw_read_cnt);
+	else
+		buff = msg->rx_buf;
+
+	/* parse the data read from panel */
+	cmd = buff[0];
+	switch (cmd) {
+	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+		pr_err("Rx ACK_ERROR\n");
+		rc = 0;
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+		rc = dsi_parse_short_read1_resp(msg, buff);
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+		rc = dsi_parse_short_read2_resp(msg, buff);
+		break;
+	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+		rc = dsi_parse_long_read_resp(msg, buff);
+		break;
+	default:
+		pr_warn("Invalid response\n");
+		rc = 0;
+	}
+
 error:
 	return rc;
 }
 
-
 static int dsi_enable_ulps(struct dsi_ctrl *dsi_ctrl)
 {
 	int rc = 0;
@@ -2300,8 +2450,8 @@
 
 	if (flags & DSI_CTRL_CMD_READ) {
 		rc = dsi_message_rx(dsi_ctrl, msg, flags);
-		if (rc)
-			pr_err("read message failed, rc=%d\n", rc);
+		if (rc <= 0)
+			pr_err("read message failed, rc=%d\n", rc);
 	} else {
 		rc = dsi_message_tx(dsi_ctrl, msg, flags);
 		if (rc)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 57bccfb..714a450 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -520,11 +520,17 @@
 	 * get_cmd_read_data() - get data read from the peripheral
 	 * @ctrl:           Pointer to the controller host hardware.
 	 * @rd_buf:         Buffer where data will be read into.
-	 * @total_read_len: Number of bytes to read.
+	 * @read_offset:    Offset from where to read.
+	 * @rx_byte:        Number of bytes to be read.
+	 * @pkt_size:        Size of response expected.
+	 * @hw_read_cnt:    Actual number of bytes read by HW.
 	 */
 	u32 (*get_cmd_read_data)(struct dsi_ctrl_hw *ctrl,
 				 u8 *rd_buf,
-				 u32 total_read_len);
+				 u32 read_offset,
+				 u32 rx_byte,
+				 u32 pkt_size,
+				 u32 *hw_read_cnt);
 
 	/**
 	 * wait_for_lane_idle() - wait for DSI lanes to go to idle state
@@ -710,6 +716,11 @@
 	 */
 	void (*set_timing_db)(struct dsi_ctrl_hw *ctrl,
 				 bool enable);
+	/**
+	 * clear_rdbk_register() - Clear and reset read back register
+	 * @ctrl:         Pointer to the controller host hardware.
+	 */
+	void (*clear_rdbk_register)(struct dsi_ctrl_hw *ctrl);
 };
 
 /*
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 8278ada..2959e94 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -16,6 +16,7 @@
 #include <linux/delay.h>
 #include <linux/iopoll.h>
 
+#include "dsi_catalog.h"
 #include "dsi_ctrl_hw.h"
 #include "dsi_ctrl_reg.h"
 #include "dsi_hw.h"
@@ -709,6 +710,22 @@
 }
 
 /**
+ * clear_rdbk_reg() - clear previously read panel data.
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * This function is called before sending DSI Rx command to
+ * panel in order to clear if any stale data remaining from
+ * previous read operation.
+ */
+void dsi_ctrl_hw_cmn_clear_rdbk_reg(struct dsi_ctrl_hw *ctrl)
+{
+	DSI_W32(ctrl, DSI_RDBK_DATA_CTRL, 0x1);
+	wmb(); /* ensure read back register is reset */
+	DSI_W32(ctrl, DSI_RDBK_DATA_CTRL, 0x0);
+	wmb(); /* ensure read back register is cleared */
+}
+
+/**
  * get_cmd_read_data() - get data read from the peripheral
  * @ctrl:           Pointer to the controller host hardware.
  * @rd_buf:         Buffer where data will be read into.
@@ -719,16 +736,16 @@
 u32 dsi_ctrl_hw_cmn_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
 				     u8 *rd_buf,
 				     u32 read_offset,
-				     u32 total_read_len)
+				     u32 rx_byte,
+				     u32 pkt_size,
+				     u32 *hw_read_cnt)
 {
 	u32 *lp, *temp, data;
-	int i, j = 0, cnt;
+	int i, j = 0, cnt, off;
 	u32 read_cnt;
-	u32 rx_byte = 0;
 	u32 repeated_bytes = 0;
 	u8 reg[16] = {0};
-	u32 pkt_size = 0;
-	int buf_offset = read_offset;
+	bool ack_err = false;
 
 	lp = (u32 *)rd_buf;
 	temp = (u32 *)reg;
@@ -737,28 +754,49 @@
 	if (cnt > 4)
 		cnt = 4;
 
-	if (rx_byte == 4)
-		read_cnt = 4;
-	else
-		read_cnt = pkt_size + 6;
+	read_cnt = (DSI_R32(ctrl, DSI_RDBK_DATA_CTRL) >> 16);
+	ack_err = (rx_byte == 4) ? (read_cnt == 8) :
+			((read_cnt - 4) == (pkt_size + 6));
+
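+	/*
+	 * read_cnt is the byte count latched by the controller (upper half
+	 * of DSI_RDBK_DATA_CTRL). If the panel also returned a 4-byte
+	 * ACK/error report ahead of the response, it is included in that
+	 * count, so strip it here.
+	 */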
+	if (ack_err)
+		read_cnt -= 4;
+	if (!read_cnt) {
+		pr_err("Panel detected error, no data read\n");
+		return 0;
+	}
 
 	if (read_cnt > 16) {
-		int bytes_shifted;
+		int bytes_shifted, data_lost = 0, rem_header = 0;
 
-		bytes_shifted = read_cnt - 16;
-		repeated_bytes = buf_offset - bytes_shifted;
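+		/*
+		 * For responses spanning multiple chunks the FIFO only keeps
+		 * the last 16 bytes, so the start of this chunk repeats data
+		 * already copied in the previous iteration; repeated_bytes
+		 * counts those duplicates so they can be skipped below.
+		 */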
+		bytes_shifted = read_cnt - rx_byte;
+		if (bytes_shifted >= 4)
+			data_lost = bytes_shifted - 4; /* remove DCS header */
+		else
+			rem_header = 4 - bytes_shifted; /* remaining header */
+
+		repeated_bytes = (read_offset - 4) - data_lost + rem_header;
 	}
 
-	for (i = cnt - 1; i >= 0; i--) {
-		data = DSI_R32(ctrl, DSI_RDBK_DATA0 + i*4);
-		*temp++ = ntohl(data);
+	off = DSI_RDBK_DATA0;
+	off += ((cnt - 1) * 4);
+
+	for (i = 0; i < cnt; i++) {
+		data = DSI_R32(ctrl, off);
+		if (!repeated_bytes)
+			*lp++ = ntohl(data);
+		else
+			*temp++ = ntohl(data);
+		off -= 4;
 	}
 
-	for (i = repeated_bytes; i < 16; i++)
-		rd_buf[j++] = reg[i];
+	if (repeated_bytes) {
+		for (i = repeated_bytes; i < 16; i++)
+			rd_buf[j++] = reg[i];
+	}
 
-	pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, j);
-	return j;
+	*hw_read_cnt = read_cnt;
+	pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, rx_byte);
+	return rx_byte;
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 117fdc1..d71a5f21 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -125,6 +125,282 @@
 	return rc;
 }
 
+static int dsi_display_cmd_engine_enable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	if (display->cmd_engine_refcount > 0) {
+		display->cmd_engine_refcount++;
+		return 0;
+	}
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
+	if (rc) {
+		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
+						   DSI_CTRL_ENGINE_ON);
+		if (rc) {
+			pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_master;
+		}
+	}
+
+	display->cmd_engine_refcount++;
+	return rc;
+error_disable_master:
+	(void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+error:
+	return rc;
+}
+
+static int dsi_display_cmd_engine_disable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	if (display->cmd_engine_refcount == 0) {
+		pr_err("[%s] Invalid refcount\n", display->name);
+		return 0;
+	} else if (display->cmd_engine_refcount > 1) {
+		display->cmd_engine_refcount--;
+		return 0;
+	}
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
+						   DSI_CTRL_ENGINE_OFF);
+		if (rc)
+			pr_err("[%s] failed to disable cmd engine, rc=%d\n",
+			       display->name, rc);
+	}
+
+	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+	if (rc) {
+		pr_err("[%s] failed to disable cmd engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+error:
+	display->cmd_engine_refcount = 0;
+	return rc;
+}
+
+static bool dsi_display_validate_reg_read(struct dsi_panel *panel)
+{
+	int i, j = 0;
+	int len = 0, *lenp;
+	int group = 0, count = 0;
+	struct dsi_display_mode *mode;
+	struct drm_panel_esd_config *config;
+
+	if (!panel)
+		return false;
+
+	config = &(panel->esd_config);
+
+	lenp = config->status_valid_params ?: config->status_cmds_rlen;
+	mode = panel->cur_mode;
+	count = mode->priv_info->cmd_sets[DSI_CMD_SET_PANEL_STATUS].count;
+
+	for (i = 0; i < count; i++)
+		len += lenp[i];
+
+
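+	/*
+	 * return_buf holds the concatenated replies of all status commands;
+	 * the panel is considered alive if they fully match any one of the
+	 * acceptable value groups.
+	 */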
+	for (j = 0; j < config->groups; ++j) {
+		for (i = 0; i < len; ++i) {
+			if (config->return_buf[i] !=
+				config->status_value[group + i])
+				break;
+		}
+
+		if (i == len)
+			return true;
+		group += len;
+	}
+
+	return false;
+}
+
+static int dsi_display_read_status(struct dsi_display_ctrl *ctrl,
+		struct dsi_panel *panel)
+{
+	int i, rc = 0, count = 0, start = 0, *lenp;
+	struct drm_panel_esd_config *config;
+	struct dsi_cmd_desc *cmds;
+	u32 flags = 0;
+
+	if (!panel)
+		return -EINVAL;
+
+	config = &(panel->esd_config);
+	lenp = config->status_valid_params ?: config->status_cmds_rlen;
+	count = config->status_cmd.count;
+	cmds = config->status_cmd.cmds;
+	flags = (DSI_CTRL_CMD_FETCH_MEMORY | DSI_CTRL_CMD_READ);
+
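+	/*
+	 * Read each status register and pack the replies back to back in
+	 * return_buf for comparison against the expected value groups.
+	 */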
+	for (i = 0; i < count; ++i) {
+		memset(config->status_buf, 0x0, SZ_4K);
+		cmds[i].msg.rx_buf = config->status_buf;
+		cmds[i].msg.rx_len = config->status_cmds_rlen[i];
+		rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, &cmds[i].msg, flags);
+		if (rc <= 0) {
+			pr_err("rx cmd transfer failed rc=%d\n", rc);
+			goto error;
+		}
+
+		memcpy(config->return_buf + start,
+			config->status_buf, lenp[i]);
+		start += lenp[i];
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_display_validate_status(struct dsi_display_ctrl *ctrl,
+		struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	rc = dsi_display_read_status(ctrl, panel);
+	if (rc <= 0) {
+		goto exit;
+	} else {
+		/*
+		 * panel status read successfully.
+		 * check for validity of the data read back.
+		 */
+		rc = dsi_display_validate_reg_read(panel);
+		if (!rc) {
+			rc = -EINVAL;
+			goto exit;
+		}
+	}
+
+exit:
+	return rc;
+}
+
+static int dsi_display_status_reg_read(struct dsi_display *display)
+{
+	int rc = 0, i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	pr_debug(" ++\n");
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+	rc = dsi_display_cmd_engine_enable(display);
+	if (rc) {
+		pr_err("cmd engine enable failed\n");
+		return -EPERM;
+	}
+
+	rc = dsi_display_validate_status(m_ctrl, display->panel);
+	if (rc <= 0) {
+		pr_err("[%s] read status failed on master,rc=%d\n",
+		       display->name, rc);
+		goto exit;
+	}
+
+	if (!display->panel->sync_broadcast_en)
+		goto exit;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (ctrl == m_ctrl)
+			continue;
+
+		rc = dsi_display_validate_status(ctrl, display->panel);
+		if (rc <= 0) {
+			pr_err("[%s] read status failed on slave,rc=%d\n",
+			       display->name, rc);
+			goto exit;
+		}
+	}
+
+exit:
+	dsi_display_cmd_engine_disable(display);
+	return rc;
+}
+
+static int dsi_display_status_bta_request(struct dsi_display *display)
+{
+	int rc = 0;
+
+	pr_debug(" ++\n");
+	/* TODO: trigger SW BTA and wait for acknowledgment */
+
+	return rc;
+}
+
+static int dsi_display_status_check_te(struct dsi_display *display)
+{
+	int rc = 0;
+
+	pr_debug(" ++\n");
+	/* TODO: wait for TE interrupt from panel */
+
+	return rc;
+}
+
+int dsi_display_check_status(void *display)
+{
+	struct dsi_display *dsi_display = display;
+	struct dsi_panel *panel;
+	u32 status_mode;
+	int rc = 0;
+
+	if (dsi_display == NULL)
+		return -EINVAL;
+
+	panel = dsi_display->panel;
+
+	status_mode = panel->esd_config.status_mode;
+
+	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
+		DSI_ALL_CLKS, DSI_CLK_ON);
+
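+	/*
+	 * A positive return value indicates the panel responded and is
+	 * alive; zero or a negative value lets the caller report
+	 * DRM_EVENT_PANEL_DEAD and start ESD recovery.
+	 */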
+	if (status_mode == ESD_MODE_REG_READ) {
+		rc = dsi_display_status_reg_read(dsi_display);
+	} else if (status_mode == ESD_MODE_SW_BTA) {
+		rc = dsi_display_status_bta_request(dsi_display);
+	} else if (status_mode == ESD_MODE_PANEL_TE) {
+		rc = dsi_display_status_check_te(dsi_display);
+	} else {
+		pr_warn("unsupported check status mode\n");
+		panel->esd_config.esd_enabled = false;
+	}
+
+	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
+		DSI_ALL_CLKS, DSI_CLK_OFF);
+
+	return rc;
+}
+
 int dsi_display_soft_reset(void *display)
 {
 	struct dsi_display *dsi_display;
@@ -1211,87 +1487,6 @@
 	return rc;
 }
 
-static int dsi_display_cmd_engine_enable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	if (display->cmd_engine_refcount > 0) {
-		display->cmd_engine_refcount++;
-		return 0;
-	}
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-
-	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	for (i = 0; i < display->ctrl_count; i++) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
-						   DSI_CTRL_ENGINE_ON);
-		if (rc) {
-			pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-			       display->name, rc);
-			goto error_disable_master;
-		}
-	}
-
-	display->cmd_engine_refcount++;
-	return rc;
-error_disable_master:
-	(void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-error:
-	return rc;
-}
-
-static int dsi_display_cmd_engine_disable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	if (display->cmd_engine_refcount == 0) {
-		pr_err("[%s] Invalid refcount\n", display->name);
-		return 0;
-	} else if (display->cmd_engine_refcount > 1) {
-		display->cmd_engine_refcount--;
-		return 0;
-	}
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	for (i = 0; i < display->ctrl_count; i++) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
-						   DSI_CTRL_ENGINE_OFF);
-		if (rc)
-			pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-			       display->name, rc);
-	}
-
-	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-	if (rc) {
-		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-error:
-	display->cmd_engine_refcount = 0;
-	return rc;
-}
-
 static int dsi_display_ctrl_host_enable(struct dsi_display *display)
 {
 	int rc = 0;
@@ -3418,6 +3613,9 @@
 		break;
 	}
 
+	if (display->panel->esd_config.esd_enabled)
+		info->capabilities |= MSM_DISPLAY_ESD_ENABLED;
+
 	memcpy(&info->roi_caps, &display->panel->roi_caps,
 			sizeof(info->roi_caps));
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 4d7a0b8..da4f5eb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -489,6 +489,12 @@
 int dsi_display_set_backlight(void *display, u32 bl_lvl);
 
 /**
+ * dsi_display_check_status() - check if panel is dead or alive
+ * @display:            Handle to display.
+ */
+int dsi_display_check_status(void *display);
+
+/**
  * dsi_display_soft_reset() - perform a soft reset on DSI controller
  * @display:         Handle to display
  *
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 693295f..6718156 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -2495,6 +2495,7 @@
 
 static void dsi_panel_esd_config_deinit(struct drm_panel_esd_config *esd_config)
 {
+	kfree(esd_config->status_buf);
 	kfree(esd_config->return_buf);
 	kfree(esd_config->status_value);
 	kfree(esd_config->status_valid_params);
@@ -2621,6 +2622,10 @@
 		goto error3;
 	}
 
+	esd_config->status_buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!esd_config->status_buf)
+		goto error4;
+
 	rc = of_property_read_u32_array(of_node,
 		"qcom,mdss-dsi-panel-status-value",
 		esd_config->status_value, esd_config->groups * status_len);
@@ -2632,6 +2637,8 @@
 
 	return 0;
 
+error4:
+	kfree(esd_config->return_buf);
 error3:
 	kfree(esd_config->status_value);
 error2:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index f80dea2..f63fd27 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -135,7 +135,8 @@
 	u32 *status_cmds_rlen;
 	u32 *status_valid_params;
 	u32 *status_value;
-	unsigned char *return_buf;
+	u8 *return_buf;
+	u8 *status_buf;
 	u32 groups;
 };
 
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0f48db6..2f9571b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1369,7 +1369,9 @@
 			continue;
 		len = event->length + sizeof(struct drm_msm_event_resp);
 		if (node->base.file_priv->event_space < len) {
-			DRM_ERROR("Insufficient space to notify\n");
+			DRM_ERROR("Insufficient space %d for event %x len %d\n",
+				node->base.file_priv->event_space, event->type,
+				len);
 			continue;
 		}
 		notify = kzalloc(len, GFP_ATOMIC);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index a5cc6474..e19d1db 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -212,12 +212,14 @@
  * @MSM_DISPLAY_CAP_CMD_MODE:           Command mode supported
  * @MSM_DISPLAY_CAP_HOT_PLUG:           Hot plug detection supported
  * @MSM_DISPLAY_CAP_EDID:               EDID supported
+ * @MSM_DISPLAY_ESD_ENABLED:            ESD feature enabled
  */
 enum msm_display_caps {
 	MSM_DISPLAY_CAP_VID_MODE	= BIT(0),
 	MSM_DISPLAY_CAP_CMD_MODE	= BIT(1),
 	MSM_DISPLAY_CAP_HOT_PLUG	= BIT(2),
 	MSM_DISPLAY_CAP_EDID		= BIT(3),
+	MSM_DISPLAY_ESD_ENABLED		= BIT(4),
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index faef4ce..e9ffb96 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -62,6 +62,8 @@
 
 static void dspp_igc_install_property(struct drm_crtc *crtc);
 
+static void dspp_hist_install_property(struct drm_crtc *crtc);
+
 typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc);
 
 static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
@@ -77,6 +79,8 @@
 static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
 		enum ad_property ad_prop);
 
+static void sde_cp_notify_hist_event(struct drm_crtc *crtc_drm, void *arg);
+
 #define setup_dspp_prop_install_funcs(func) \
 do { \
 	func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
@@ -86,6 +90,7 @@
 	func[SDE_DSPP_GAMUT] = dspp_gamut_install_property; \
 	func[SDE_DSPP_GC] = dspp_gc_install_property; \
 	func[SDE_DSPP_IGC] = dspp_igc_install_property; \
+	func[SDE_DSPP_HIST] = dspp_hist_install_property; \
 } while (0)
 
 typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc);
@@ -111,7 +116,8 @@
 	SDE_CP_CRTC_DSPP_SIXZONE,
 	SDE_CP_CRTC_DSPP_GAMUT,
 	SDE_CP_CRTC_DSPP_DITHER,
-	SDE_CP_CRTC_DSPP_HIST,
+	SDE_CP_CRTC_DSPP_HIST_CTRL,
+	SDE_CP_CRTC_DSPP_HIST_IRQ,
 	SDE_CP_CRTC_DSPP_AD,
 	SDE_CP_CRTC_DSPP_VLUT,
 	SDE_CP_CRTC_DSPP_AD_MODE,
@@ -365,6 +371,13 @@
 		return;
 	}
 
+	/* create blob to store histogram data */
+	sde_crtc->hist_blob = drm_property_create_blob(crtc->dev,
+				sizeof(struct drm_msm_hist), NULL);
+	if (IS_ERR(sde_crtc->hist_blob))
+		sde_crtc->hist_blob = NULL;
+
+	mutex_init(&sde_crtc->crtc_cp_lock);
 	INIT_LIST_HEAD(&sde_crtc->active_list);
 	INIT_LIST_HEAD(&sde_crtc->dirty_list);
 	INIT_LIST_HEAD(&sde_crtc->feature_list);
@@ -531,6 +544,77 @@
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
 
+static struct sde_crtc_irq_info *_sde_cp_get_intr_node(u32 event,
+				struct sde_crtc *sde_crtc)
+{
+	bool found = false;
+	struct sde_crtc_irq_info *node = NULL;
+
+	list_for_each_entry(node, &sde_crtc->user_event_list, list) {
+		if (node->event == event) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		node = NULL;
+
+	return node;
+}
+
+static void _sde_cp_crtc_enable_hist_irq(struct sde_crtc *sde_crtc)
+{
+	struct drm_crtc *crtc_drm = &sde_crtc->base;
+	struct sde_kms *kms = NULL;
+	struct sde_hw_mixer *hw_lm;
+	struct sde_hw_dspp *hw_dspp = NULL;
+	struct sde_crtc_irq_info *node = NULL;
+	int i, irq_idx, ret = 0;
+	unsigned long flags;
+
+	if (!crtc_drm) {
+		DRM_ERROR("invalid crtc %pK\n", crtc_drm);
+		return;
+	}
+
+	kms = get_kms(crtc_drm);
+
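+	/* the histogram done interrupt is looked up on the left mixer's DSPP */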
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		hw_lm = sde_crtc->mixers[i].hw_lm;
+		hw_dspp = sde_crtc->mixers[i].hw_dspp;
+		if (!hw_lm->cfg.right_mixer)
+			break;
+	}
+
+	if (!hw_dspp) {
+		DRM_ERROR("invalid dspp\n");
+		return;
+	}
+
+	irq_idx = sde_core_irq_idx_lookup(kms, SDE_IRQ_TYPE_HIST_DSPP_DONE,
+					hw_dspp->idx);
+	if (irq_idx < 0) {
+		DRM_ERROR("failed to get irq idx\n");
+		return;
+	}
+
+	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+	node = _sde_cp_get_intr_node(DRM_EVENT_HISTOGRAM, sde_crtc);
+	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
+	if (!node)
+		return;
+
+	if (node->state == IRQ_DISABLED) {
+		ret = sde_core_irq_enable(kms, &irq_idx, 1);
+		if (ret)
+			DRM_ERROR("failed to enable irq %d\n", irq_idx);
+		else
+			node->state = IRQ_ENABLED;
+	}
+}
+
 static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
 				   struct sde_crtc *sde_crtc)
 {
@@ -639,6 +723,21 @@
 			}
 			hw_lm->ops.setup_gc(hw_lm, &hw_cfg);
 			break;
+		case SDE_CP_CRTC_DSPP_HIST_CTRL:
+			if (!hw_dspp || !hw_dspp->ops.setup_histogram) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_histogram(hw_dspp, &feature_enabled);
+			break;
+		case SDE_CP_CRTC_DSPP_HIST_IRQ:
+			if (!hw_dspp || !hw_lm) {
+				ret = -EINVAL;
+				continue;
+			}
+			if (!hw_lm->cfg.right_mixer)
+				_sde_cp_crtc_enable_hist_irq(sde_crtc);
+			break;
 		case SDE_CP_CRTC_DSPP_AD_MODE:
 			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
 				ret = -EINVAL;
@@ -746,6 +845,8 @@
 		return;
 	}
 
+	mutex_lock(&sde_crtc->crtc_cp_lock);
+
 	/* Check if dirty lists are empty and ad features are disabled for
 	 * early return. If ad properties are active then we need to issue
 	 * dspp flush.
@@ -754,7 +855,7 @@
 		list_empty(&sde_crtc->ad_dirty)) {
 		if (list_empty(&sde_crtc->ad_active)) {
 			DRM_DEBUG_DRIVER("Dirty list is empty\n");
-			return;
+			goto exit;
 		}
 		sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESET);
 		set_dspp_flush = true;
@@ -794,6 +895,8 @@
 			ctl->ops.update_pending_flush(ctl, flush_mask);
 		}
 	}
+exit:
+	mutex_unlock(&sde_crtc->crtc_cp_lock);
 }
 
 void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
@@ -824,13 +927,15 @@
 		return;
 	}
 
+	mutex_lock(&sde_crtc->crtc_cp_lock);
+
 	/**
 	 * Function can be called during the atomic_check with test_only flag
 	 * and actual commit. Allocate properties only if feature list is
 	 * empty during the atomic_check with test_only flag.
 	 */
 	if (!list_empty(&sde_crtc->feature_list))
-		return;
+		goto exit;
 
 	catalog = kms->catalog;
 	priv = crtc->dev->dev_private;
@@ -846,7 +951,7 @@
 		setup_lm_prop_install_funcs(lm_prop_install_func);
 	}
 	if (!priv->cp_property)
-		return;
+		goto exit;
 
 	if (!catalog->dspp_count)
 		goto lm_property;
@@ -862,7 +967,7 @@
 
 lm_property:
 	if (!catalog->mixer_count)
-		return;
+		goto exit;
 
 	/* Check for all the LM properties and attach it to CRTC */
 	features = catalog->mixer[0].features;
@@ -872,6 +977,9 @@
 		if (lm_prop_install_func[i])
 			lm_prop_install_func[i](crtc);
 	}
+exit:
+	mutex_unlock(&sde_crtc->crtc_cp_lock);
+
 }
 
 int sde_cp_crtc_set_property(struct drm_crtc *crtc,
@@ -894,6 +1002,7 @@
 		return -EINVAL;
 	}
 
+	mutex_lock(&sde_crtc->crtc_cp_lock);
 	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
 		if (property->base.id == prop_node->property_id) {
 			found = 1;
@@ -902,7 +1011,8 @@
 	}
 
 	if (!found)
-		return 0;
+		goto exit;
+
 	/**
 	 * sde_crtc is virtual ensure that hardware has been attached to the
 	 * crtc. Check LM and dspp counts based on whether feature is a
@@ -912,7 +1022,8 @@
 	    sde_crtc->num_mixers > ARRAY_SIZE(sde_crtc->mixers)) {
 		DRM_ERROR("Invalid mixer config act cnt %d max cnt %ld\n",
 			sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	}
 
 	dspp_cnt = 0;
@@ -927,17 +1038,19 @@
 	if (prop_node->is_dspp_feature && dspp_cnt < sde_crtc->num_mixers) {
 		DRM_ERROR("invalid dspp cnt %d mixer cnt %d\n", dspp_cnt,
 			sde_crtc->num_mixers);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	} else if (lm_cnt < sde_crtc->num_mixers) {
 		DRM_ERROR("invalid lm cnt %d mixer cnt %d\n", lm_cnt,
 			sde_crtc->num_mixers);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	}
 
 	ret = sde_cp_ad_validate_prop(prop_node, sde_crtc);
 	if (ret) {
 		DRM_ERROR("ad property validation failed ret %d\n", ret);
-		return ret;
+		goto exit;
 	}
 
 	/* remove the property from dirty list */
@@ -955,6 +1068,8 @@
 		/* Mark the feature as dirty */
 		sde_cp_update_list(prop_node, sde_crtc, true);
 	}
+exit:
+	mutex_unlock(&sde_crtc->crtc_cp_lock);
 	return ret;
 }
 
@@ -977,12 +1092,14 @@
 	}
 	/* Return 0 if property is not supported */
 	*val = 0;
+	mutex_lock(&sde_crtc->crtc_cp_lock);
 	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
 		if (property->base.id == prop_node->property_id) {
 			*val = prop_node->prop_val;
 			break;
 		}
 	}
+	mutex_unlock(&sde_crtc->crtc_cp_lock);
 	return 0;
 }
 
@@ -1015,6 +1132,10 @@
 		kfree(prop_node);
 	}
 
+	if (sde_crtc->hist_blob)
+		drm_property_unreference_blob(sde_crtc->hist_blob);
+
+	mutex_destroy(&sde_crtc->crtc_cp_lock);
 	INIT_LIST_HEAD(&sde_crtc->active_list);
 	INIT_LIST_HEAD(&sde_crtc->dirty_list);
 	INIT_LIST_HEAD(&sde_crtc->feature_list);
@@ -1035,6 +1156,7 @@
 		return;
 	}
 
+	mutex_lock(&sde_crtc->crtc_cp_lock);
 	list_for_each_entry_safe(prop_node, n, &sde_crtc->active_list,
 				 active_list) {
 		sde_cp_update_list(prop_node, sde_crtc, true);
@@ -1046,6 +1168,7 @@
 		sde_cp_update_list(prop_node, sde_crtc, true);
 		list_del_init(&prop_node->active_list);
 	}
+	mutex_unlock(&sde_crtc->crtc_cp_lock);
 }
 
 void sde_cp_crtc_resume(struct drm_crtc *crtc)
@@ -1273,6 +1396,30 @@
 	}
 }
 
+static void dspp_hist_install_property(struct drm_crtc *crtc)
+{
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+
+	version = catalog->dspp[0].sblk->hist.version >> 16;
+	switch (version) {
+	case 1:
+		sde_cp_crtc_install_enum_property(crtc,
+			SDE_CP_CRTC_DSPP_HIST_CTRL, sde_hist_modes,
+			ARRAY_SIZE(sde_hist_modes), "SDE_DSPP_HIST_CTRL_V1");
+		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_HIST_IRQ_V1",
+			SDE_CP_CRTC_DSPP_HIST_IRQ, 0, U16_MAX, 0);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
 static void sde_cp_update_list(struct sde_cp_node *prop_node,
 		struct sde_crtc *crtc, bool dirty_list)
 {
@@ -1394,6 +1541,7 @@
 	int i;
 	int irq_idx, ret;
 	struct sde_cp_node prop_node;
+	struct sde_crtc_irq_info *node = NULL;
 
 	if (!crtc_drm || !ad_irq) {
 		DRM_ERROR("invalid crtc %pK irq %pK\n", crtc_drm, ad_irq);
@@ -1438,8 +1586,23 @@
 		goto exit;
 	}
 
+	node = _sde_cp_get_intr_node(DRM_EVENT_AD_BACKLIGHT, crtc);
+
 	if (!en) {
-		sde_core_irq_disable(kms, &irq_idx, 1);
+		if (node) {
+			if (node->state == IRQ_ENABLED) {
+				ret = sde_core_irq_disable(kms, &irq_idx, 1);
+				if (ret)
+					DRM_ERROR("disable irq %d error %d\n",
+						irq_idx, ret);
+				else
+					node->state = IRQ_NOINIT;
+			} else {
+				node->state = IRQ_NOINIT;
+			}
+		} else {
+			DRM_ERROR("failed to get node from crtc event list\n");
+		}
 		sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
 		ret = 0;
 		goto exit;
@@ -1452,10 +1615,30 @@
 		DRM_ERROR("failed to register the callback ret %d\n", ret);
 		goto exit;
 	}
-	ret = sde_core_irq_enable(kms, &irq_idx, 1);
-	if (ret) {
-		DRM_ERROR("failed to enable irq ret %d\n", ret);
-		sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
+
+	if (node) {
+		/* device resume or resume from IPC cases */
+		if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
+			ret = sde_core_irq_enable(kms, &irq_idx, 1);
+			if (ret) {
+				DRM_ERROR("enable irq %d error %d\n",
+					irq_idx, ret);
+				sde_core_irq_unregister_callback(kms,
+					irq_idx, ad_irq);
+			} else {
+				node->state = IRQ_ENABLED;
+			}
+		}
+	} else {
+		/* request from userspace to register the event
+		 * in this case, node has not been added into the event list
+		 */
+		ret = sde_core_irq_enable(kms, &irq_idx, 1);
+		if (ret) {
+			DRM_ERROR("failed to enable irq ret %d\n", ret);
+			sde_core_irq_unregister_callback(kms,
+				irq_idx, ad_irq);
+		}
 	}
 exit:
 	return ret;
@@ -1518,3 +1701,201 @@
 
 	sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESUME);
 }
+
+static void sde_cp_hist_interrupt_cb(void *arg, int irq_idx)
+{
+	struct sde_crtc *crtc = arg;
+	struct drm_crtc *crtc_drm = &crtc->base;
+	struct sde_hw_dspp *hw_dspp;
+	struct sde_kms *kms;
+	struct sde_crtc_irq_info *node = NULL;
+	u32 i;
+	int ret = 0;
+	unsigned long flags;
+
+	/* disable histogram irq */
+	kms = get_kms(crtc_drm);
+	spin_lock_irqsave(&crtc->spin_lock, flags);
+	node = _sde_cp_get_intr_node(DRM_EVENT_HISTOGRAM, crtc);
+	spin_unlock_irqrestore(&crtc->spin_lock, flags);
+
+	if (!node) {
+		DRM_ERROR("cannot find histogram event node in crtc\n");
+		return;
+	}
+
+	if (node->state == IRQ_ENABLED) {
+		ret = sde_core_irq_disable_nolock(kms, irq_idx);
+		if (ret) {
+			DRM_ERROR("failed to disable irq %d, ret %d\n",
+				irq_idx, ret);
+			return;
+		}
+		node->state = IRQ_DISABLED;
+	}
+
+	/* lock histogram buffer */
+	for (i = 0; i < crtc->num_mixers; i++) {
+		hw_dspp = crtc->mixers[i].hw_dspp;
+		if (hw_dspp && hw_dspp->ops.lock_histogram)
+			hw_dspp->ops.lock_histogram(hw_dspp, NULL);
+	}
+
+	/* notify histogram event */
+	sde_crtc_event_queue(crtc_drm, sde_cp_notify_hist_event, NULL);
+}
+
+static void sde_cp_notify_hist_event(struct drm_crtc *crtc_drm, void *arg)
+{
+	struct sde_hw_dspp *hw_dspp = NULL;
+	struct sde_crtc *crtc;
+	struct drm_event event;
+	struct drm_msm_hist *hist_data;
+	struct drm_msm_hist tmp_hist_data;
+	u32 i, j;
+
+	if (!crtc_drm) {
+		DRM_ERROR("invalid crtc %pK\n", crtc_drm);
+		return;
+	}
+
+	crtc = to_sde_crtc(crtc_drm);
+	if (!crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", crtc);
+		return;
+	}
+
+	if (!crtc->hist_blob)
+		return;
+
+	/* read histogram data into blob */
+	hist_data = (struct drm_msm_hist *)crtc->hist_blob->data;
+	for (i = 0; i < crtc->num_mixers; i++) {
+		hw_dspp = crtc->mixers[i].hw_dspp;
+		if (!hw_dspp || !hw_dspp->ops.read_histogram) {
+			DRM_ERROR("invalid dspp %pK or read_histogram func\n",
+				hw_dspp);
+			return;
+		}
+		if (!i) {
+			hw_dspp->ops.read_histogram(hw_dspp, hist_data);
+		} else {
+			/* Merge hist data for DSPP0 and DSPP1 */
+			hw_dspp->ops.read_histogram(hw_dspp, &tmp_hist_data);
+			for (j = 0; j < HIST_V_SIZE; j++)
+				hist_data->data[j] += tmp_hist_data.data[j];
+		}
+	}
+
+	/* send histogram event with blob id */
+	event.length = sizeof(u32);
+	event.type = DRM_EVENT_HISTOGRAM;
+	msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
+			&event, (u8 *)(&crtc->hist_blob->base.id));
+}
+
+int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
+	struct sde_irq_callback *hist_irq)
+{
+	struct sde_kms *kms = NULL;
+	u32 num_mixers;
+	struct sde_hw_mixer *hw_lm;
+	struct sde_hw_dspp *hw_dspp = NULL;
+	struct sde_crtc *crtc;
+	struct sde_crtc_irq_info *node = NULL;
+	int i, irq_idx, ret = 0;
+
+	if (!crtc_drm || !hist_irq) {
+		DRM_ERROR("invalid crtc %pK irq %pK\n", crtc_drm, hist_irq);
+		return -EINVAL;
+	}
+
+	crtc = to_sde_crtc(crtc_drm);
+	if (!crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", crtc);
+		return -EINVAL;
+	}
+
+	kms = get_kms(crtc_drm);
+	num_mixers = crtc->num_mixers;
+
+	for (i = 0; i < num_mixers; i++) {
+		hw_lm = crtc->mixers[i].hw_lm;
+		hw_dspp = crtc->mixers[i].hw_dspp;
+		if (!hw_lm->cfg.right_mixer)
+			break;
+	}
+
+	if (!hw_dspp) {
+		DRM_ERROR("invalid dspp\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	irq_idx = sde_core_irq_idx_lookup(kms, SDE_IRQ_TYPE_HIST_DSPP_DONE,
+			hw_dspp->idx);
+	if (irq_idx < 0) {
+		DRM_ERROR("failed to get the irq idx ret %d\n", irq_idx);
+		ret = irq_idx;
+		goto exit;
+	}
+
+	node = _sde_cp_get_intr_node(DRM_EVENT_HISTOGRAM, crtc);
+
+	/* deregister histogram irq */
+	if (!en) {
+		if (node) {
+			/* device suspend case or suspend to IPC cases */
+			if (node->state == IRQ_ENABLED) {
+				ret = sde_core_irq_disable(kms, &irq_idx, 1);
+				if (ret)
+					DRM_ERROR("disable irq %d error %d\n",
+						irq_idx, ret);
+				else
+					node->state = IRQ_NOINIT;
+			} else {
+				node->state = IRQ_NOINIT;
+			}
+		} else {
+			DRM_ERROR("failed to get node from crtc event list\n");
+		}
+
+		sde_core_irq_unregister_callback(kms, irq_idx, hist_irq);
+		goto exit;
+	}
+
+	/* register histogram irq */
+	hist_irq->arg = crtc;
+	hist_irq->func = sde_cp_hist_interrupt_cb;
+	ret = sde_core_irq_register_callback(kms, irq_idx, hist_irq);
+	if (ret) {
+		DRM_ERROR("failed to register the callback ret %d\n", ret);
+		goto exit;
+	}
+
+	if (node) {
+		/* device resume or resume from IPC cases */
+		if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
+			ret = sde_core_irq_enable(kms, &irq_idx, 1);
+			if (ret) {
+				DRM_ERROR("enable irq %d error %d\n",
+					irq_idx, ret);
+				sde_core_irq_unregister_callback(kms,
+					irq_idx, hist_irq);
+			} else {
+				node->state = IRQ_ENABLED;
+			}
+		}
+	} else {
+		/* request from userspace to register the event
+		 * in this case, node has not been added into the event list
+		 */
+		ret = sde_core_irq_enable(kms, &irq_idx, 1);
+		if (ret) {
+			DRM_ERROR("failed to enable irq ret %d\n", ret);
+			sde_core_irq_unregister_callback(kms,
+				irq_idx, hist_irq);
+		}
+	}
+exit:
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
index 08e345d..aff07ef 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.h
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -29,6 +29,25 @@
 	MEMCOLOR_FOLIAGE
 };
 
+/*
+ * PA HISTOGRAM modes
+ * @HIST_DISABLED          Histogram disabled
+ * @HIST_ENABLED           Histogram enabled
+ */
+enum sde_hist_modes {
+	HIST_DISABLED,
+	HIST_ENABLED
+};
+
+/**
+ * sde_hist_modes - drm enum list used to create the histogram control
+ *                  property and enumerate its values
+ */
+static const struct drm_prop_enum_list sde_hist_modes[] = {
+	{HIST_DISABLED, "hist_off"},
+	{HIST_ENABLED, "hist_on"},
+};
+
 /**
  * sde_cp_crtc_init(): Initialize color processing lists for a crtc.
  *                     Should be called during crtc initialization.
@@ -117,4 +136,13 @@
  * @crtc: Pointer to crtc.
  */
 void sde_cp_crtc_post_ipc(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_hist_interrupt: Api to enable/disable histogram interrupt
+ * @crtc_drm: Pointer to drm crtc.
+ * @en: Variable to enable/disable interrupt.
+ * @hist_irq: Pointer to irq callback
+ */
+int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
+	struct sde_irq_callback *hist_irq);
 #endif /*_SDE_COLOR_PROCESSING_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 69ee2be..d83f476 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -19,6 +19,7 @@
 #include <linux/backlight.h>
 #include "dsi_drm.h"
 #include "dsi_display.h"
+#include "sde_crtc.h"
 
 #define BL_NODE_NAME_SIZE 32
 
@@ -366,6 +367,29 @@
 	return c_conn->ops.get_info(info, c_conn->display);
 }
 
+void sde_connector_schedule_status_work(struct drm_connector *connector,
+		bool en)
+{
+	struct sde_connector *c_conn;
+	struct msm_display_info info;
+
+	c_conn = to_sde_connector(connector);
+	if (!c_conn)
+		return;
+
+	sde_connector_get_info(connector, &info);
+	if (c_conn->ops.check_status &&
+		(info.capabilities & MSM_DISPLAY_ESD_ENABLED)) {
+		if (en)
+			/* Schedule ESD status check */
+			schedule_delayed_work(&c_conn->status_work,
+				msecs_to_jiffies(STATUS_CHECK_INTERVAL_MS));
+		else
+			/* Cancel any pending ESD status check */
+			cancel_delayed_work_sync(&c_conn->status_work);
+	}
+}
+
 static int _sde_connector_update_power_locked(struct sde_connector *c_conn)
 {
 	struct drm_connector *connector;
@@ -411,6 +435,9 @@
 	}
 	c_conn->last_panel_power_mode = mode;
 
+	if (mode != SDE_MODE_DPMS_ON)
+		sde_connector_schedule_status_work(connector, false);
+
 	return rc;
 }
 
@@ -492,6 +519,9 @@
 
 	c_conn = to_sde_connector(connector);
 
+	/* cancel if any pending esd work */
+	sde_connector_schedule_status_work(connector, false);
+
 	if (c_conn->ops.put_modes)
 		c_conn->ops.put_modes(connector, c_conn->display);
 
@@ -559,6 +589,8 @@
 	if (c_state->out_fb)
 		_sde_connector_destroy_fb(c_conn, c_state);
 
+	__drm_atomic_helper_connector_destroy_state(&c_state->base);
+
 	if (!c_conn) {
 		kfree(c_state);
 	} else {
@@ -580,6 +612,12 @@
 
 	c_conn = to_sde_connector(connector);
 
+	if (connector->state &&
+			!sde_crtc_is_reset_required(connector->state->crtc)) {
+		SDE_DEBUG_CONN(c_conn, "avoid reset for connector\n");
+		return;
+	}
+
 	if (connector->state) {
 		sde_connector_atomic_destroy_state(connector, connector->state);
 		connector->state = 0;
@@ -596,8 +634,7 @@
 			&c_state->property_state,
 			c_state->property_values);
 
-	c_state->base.connector = connector;
-	connector->state = &c_state->base;
+	__drm_atomic_helper_connector_reset(connector, &c_state->base);
 }
 
 static struct drm_connector_state *
@@ -624,6 +661,9 @@
 			c_oldstate, c_state,
 			&c_state->property_state, c_state->property_values);
 
+	__drm_atomic_helper_connector_duplicate_state(connector,
+			&c_state->base);
+
 	/* additional handling for drm framebuffer objects */
 	if (c_state->out_fb)
 		drm_framebuffer_reference(c_state->out_fb);
@@ -1056,6 +1096,7 @@
 static int sde_connector_init_debugfs(struct drm_connector *connector)
 {
 	struct sde_connector *sde_connector;
+	struct msm_display_info info;
 
 	if (!connector || !connector->debugfs_entry) {
 		SDE_ERROR("invalid connector\n");
@@ -1064,6 +1105,13 @@
 
 	sde_connector = to_sde_connector(connector);
 
+	sde_connector_get_info(connector, &info);
+	if (sde_connector->ops.check_status &&
+		(info.capabilities & MSM_DISPLAY_ESD_ENABLED))
+		debugfs_create_u32("force_panel_dead", 0600,
+				connector->debugfs_entry,
+				&sde_connector->force_panel_dead);
+
 	if (!debugfs_create_bool("fb_kmap", 0600, connector->debugfs_entry,
 			&sde_connector->fb_kmap)) {
 		SDE_ERROR("failed to create connector fb_kmap\n");
@@ -1160,6 +1208,56 @@
 	return c_conn->encoder;
 }
 
+static void sde_connector_check_status_work(struct work_struct *work)
+{
+	struct sde_connector *conn;
+	struct drm_event event;
+	int rc = 0;
+	bool panel_dead = false;
+
+	conn = container_of(to_delayed_work(work),
+			struct sde_connector, status_work);
+	if (!conn) {
+		SDE_ERROR("not able to get connector object\n");
+		return;
+	}
+
+	mutex_lock(&conn->lock);
+	if (!conn->ops.check_status ||
+			(conn->dpms_mode != DRM_MODE_DPMS_ON)) {
+		SDE_DEBUG("dpms mode: %d\n", conn->dpms_mode);
+		mutex_unlock(&conn->lock);
+		return;
+	}
+
+	rc = conn->ops.check_status(conn->display);
+	mutex_unlock(&conn->lock);
+
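+	/*
+	 * force_panel_dead (settable via debugfs) counts down one per status
+	 * check; once it reaches zero a PANEL_DEAD notification is forced so
+	 * ESD recovery can be exercised without a real panel failure.
+	 */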
+	if (conn->force_panel_dead) {
+		conn->force_panel_dead--;
+		if (!conn->force_panel_dead)
+			goto status_dead;
+	}
+
+	if (rc > 0) {
+		SDE_DEBUG("esd check status success conn_id: %d enc_id: %d\n",
+				conn->base.base.id, conn->encoder->base.id);
+		schedule_delayed_work(&conn->status_work,
+			msecs_to_jiffies(STATUS_CHECK_INTERVAL_MS));
+		return;
+	}
+
+status_dead:
+	SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+	SDE_ERROR("esd check failed report PANEL_DEAD conn_id: %d enc_id: %d\n",
+			conn->base.base.id, conn->encoder->base.id);
+	panel_dead = true;
+	event.type = DRM_EVENT_PANEL_DEAD;
+	event.length = sizeof(u32);
+	msm_mode_object_event_notify(&conn->base.base,
+		conn->base.dev, &event, (u8 *)&panel_dead);
+}
+
 static const struct drm_connector_helper_funcs sde_connector_helper_ops = {
 	.get_modes =    sde_connector_get_modes,
 	.mode_valid =   sde_connector_mode_valid,
@@ -1368,6 +1466,9 @@
 
 	priv->connectors[priv->num_connectors++] = &c_conn->base;
 
+	INIT_DELAYED_WORK(&c_conn->status_work,
+			sde_connector_check_status_work);
+
 	return &c_conn->base;
 
 error_destroy_property:
@@ -1399,6 +1500,9 @@
 	case DRM_EVENT_SYS_BACKLIGHT:
 		ret = 0;
 		break;
+	case DRM_EVENT_PANEL_DEAD:
+		ret = 0;
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 24b547b..4018441 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -212,6 +212,13 @@
 	 * @display: Pointer to private display structure
 	 */
 	void (*send_hpd_event)(void *display);
+
+	/**
+	 * check_status - check status of connected display panel
+	 * @display: Pointer to private display handle
+	 * Returns: positive value for success, negative or zero for failure
+	 */
+	int (*check_status)(void *display);
 };
 
 /**
@@ -261,6 +268,8 @@
  * @event_table: Array of registered events
  * @event_lock: Lock object for event_table
  * @bl_device: backlight device node
+ * @status_work: work object to perform status checks
+ * @force_panel_dead: variable to trigger forced ESD recovery
  */
 struct sde_connector {
 	struct drm_connector base;
@@ -293,6 +302,8 @@
 	spinlock_t event_lock;
 
 	struct backlight_device *bl_device;
+	struct delayed_work status_work;
+	u32 force_panel_dead;
 };
 
 /**
@@ -580,4 +591,11 @@
 int sde_connector_get_dither_cfg(struct drm_connector *conn,
 		struct drm_connector_state *state, void **cfg, size_t *len);
 
+/**
+ * sde_connector_schedule_status_work - manage ESD thread
+ * @conn: Pointer to drm_connector struct
+ * @en: flag to start/stop ESD thread
+ */
+void sde_connector_schedule_status_work(struct drm_connector *conn, bool en);
+
 #endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index 1402fdd..a0846ff 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -31,14 +31,17 @@
 	struct sde_irq *irq_obj = &sde_kms->irq_obj;
 	struct sde_irq_callback *cb;
 	unsigned long irq_flags;
+	bool cb_tbl_error = false;
+	int enable_counts = 0;
 
 	pr_debug("irq_idx=%d\n", irq_idx);
 
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
 	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
-		SDE_ERROR("irq_idx=%d has no registered callback\n", irq_idx);
-		SDE_EVT32_IRQ(irq_idx, atomic_read(
-				&sde_kms->irq_obj.enable_counts[irq_idx]),
-				SDE_EVTLOG_ERROR);
+		/* print error outside lock */
+		cb_tbl_error = true;
+		enable_counts = atomic_read(
+				&sde_kms->irq_obj.enable_counts[irq_idx]);
 	}
 
 	atomic_inc(&irq_obj->irq_counts[irq_idx]);
@@ -46,12 +49,17 @@
 	/*
 	 * Perform registered function callback
 	 */
-	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
 	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
 		if (cb->func)
 			cb->func(cb->arg, irq_idx);
 	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
 
+	if (cb_tbl_error) {
+		SDE_ERROR("irq has no registered callback, idx %d enables %d\n",
+				irq_idx, enable_counts);
+		SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR);
+	}
+
 	/*
 	 * Clear pending interrupt status in HW.
 	 * NOTE: sde_core_irq_callback_handler is protected by top-level
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 7b0e1b5..1ce76eb 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -48,14 +48,6 @@
 #define MEM_PROTECT_SD_CTRL_SWITCH 0x18
 #define MDP_DEVICE_ID            0x1A
 
-struct sde_crtc_irq_info {
-	struct sde_irq_callback irq;
-	u32 event;
-	int (*func)(struct drm_crtc *crtc, bool en,
-			struct sde_irq_callback *irq);
-	struct list_head list;
-};
-
 struct sde_crtc_custom_events {
 	u32 event;
 	int (*func)(struct drm_crtc *crtc, bool en,
@@ -70,7 +62,8 @@
 static struct sde_crtc_custom_events custom_events[] = {
 	{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt},
 	{DRM_EVENT_CRTC_POWER, sde_crtc_power_interrupt_handler},
-	{DRM_EVENT_IDLE_NOTIFY, sde_crtc_idle_interrupt_handler}
+	{DRM_EVENT_IDLE_NOTIFY, sde_crtc_idle_interrupt_handler},
+	{DRM_EVENT_HISTOGRAM, sde_cp_hist_interrupt},
 };
 
 /* default input fence timeout, in ms */
@@ -1504,6 +1497,7 @@
 
 		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
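+		/*
+		 * Snapshot the SSPP flush bits before the mixer bit is OR'd
+		 * in; _sde_crtc_remove_pipe_flush uses this to back staged
+		 * pipes out of the flush mask if the kickoff fails.
+		 */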
 
+		mixer[i].pipe_mask = mixer[i].flush_mask;
 		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
 			mixer[i].hw_lm->idx);
 
@@ -3255,17 +3249,17 @@
 	return rc;
 }
 
-static void _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
+static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
 		struct sde_crtc_state *cstate)
 {
 	struct drm_plane *plane;
 	struct sde_crtc *sde_crtc;
 	struct sde_hw_ctl *ctl, *master_ctl;
 	u32 flush_mask;
-	int i;
+	int i, rc = 0;
 
 	if (!crtc || !cstate)
-		return;
+		return -EINVAL;
 
 	sde_crtc = to_sde_crtc(crtc);
 
@@ -3279,7 +3273,7 @@
 	 * the hardware out of sbuf mode.
 	 */
 	if (!sde_crtc->sbuf_flush_mask_old && !sde_crtc->sbuf_flush_mask)
-		return;
+		return 0;
 
 	flush_mask = sde_crtc->sbuf_flush_mask_old | sde_crtc->sbuf_flush_mask;
 	sde_crtc->sbuf_flush_mask_old = sde_crtc->sbuf_flush_mask;
@@ -3288,29 +3282,75 @@
 
 	if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE) {
 		drm_atomic_crtc_for_each_plane(plane, crtc) {
-			sde_plane_kickoff(plane);
+			rc = sde_plane_kickoff_rot(plane);
+			if (rc) {
+				SDE_ERROR("crtc%d cancelling inline rotation\n",
+						crtc->base.id);
+				SDE_EVT32(DRMID(crtc), SDE_EVTLOG_ERROR);
+
+				/* revert to offline on errors */
+				cstate->sbuf_cfg.rot_op_mode =
+					SDE_CTL_ROT_OP_MODE_OFFLINE;
+				break;
+			}
 		}
 	}
 
 	master_ctl = NULL;
 	for (i = 0; i < sde_crtc->num_mixers; i++) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
-		if (!ctl || !ctl->ops.setup_sbuf_cfg ||
-				!ctl->ops.update_pending_flush)
+		if (!ctl)
 			continue;
 
 		if (!master_ctl || master_ctl->idx > ctl->idx)
 			master_ctl = ctl;
-
-		ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
-		ctl->ops.update_pending_flush(ctl, flush_mask);
 	}
 
-	if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_INLINE_ASYNC &&
-			master_ctl && master_ctl->ops.trigger_rot_start)
-		master_ctl->ops.trigger_rot_start(master_ctl);
+	/* only update sbuf_cfg and flush for master ctl */
+	if (master_ctl && master_ctl->ops.setup_sbuf_cfg &&
+			master_ctl->ops.update_pending_flush) {
+		master_ctl->ops.setup_sbuf_cfg(master_ctl, &cstate->sbuf_cfg);
+		master_ctl->ops.update_pending_flush(master_ctl, flush_mask);
+
+		/* explicitly trigger rotator for async modes */
+		if (cstate->sbuf_cfg.rot_op_mode ==
+				SDE_CTL_ROT_OP_MODE_INLINE_ASYNC &&
+				master_ctl->ops.trigger_rot_start) {
+			master_ctl->ops.trigger_rot_start(master_ctl);
+			SDE_EVT32(DRMID(crtc), master_ctl->idx - CTL_0);
+		}
+	}
 
 	SDE_ATRACE_END("crtc_kickoff_rot");
+	return rc;
+}
+
+/**
+ * _sde_crtc_remove_pipe_flush - remove staged pipes from flush mask
+ * @sde_crtc: Pointer to sde crtc structure
+ */
+static void _sde_crtc_remove_pipe_flush(struct sde_crtc *sde_crtc)
+{
+	struct sde_crtc_mixer *mixer;
+	struct sde_hw_ctl *ctl;
+	u32 i, flush_mask;
+
+	if (!sde_crtc)
+		return;
+
+	mixer = sde_crtc->mixers;
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		ctl = mixer[i].hw_ctl;
+		if (!ctl || !ctl->ops.get_pending_flush ||
+				!ctl->ops.clear_pending_flush ||
+				!ctl->ops.update_pending_flush)
+			continue;
+
+		flush_mask = ctl->ops.get_pending_flush(ctl);
+		flush_mask &= ~mixer[i].pipe_mask;
+		ctl->ops.clear_pending_flush(ctl);
+		ctl->ops.update_pending_flush(ctl, flush_mask);
+	}
 }
 
 void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
@@ -3321,6 +3361,7 @@
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
 	struct sde_crtc_state *cstate;
+	bool is_error;
 	int ret;
 
 	if (!crtc) {
@@ -3330,6 +3371,7 @@
 	dev = crtc->dev;
 	sde_crtc = to_sde_crtc(crtc);
 	sde_kms = _sde_crtc_get_kms(crtc);
+	is_error = false;
 
 	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
 		SDE_ERROR("invalid argument\n");
@@ -3386,7 +3428,8 @@
 	 * can start as soon as it's ready.
 	 */
 	if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_INLINE_ASYNC)
-		_sde_crtc_commit_kickoff_rot(crtc, cstate);
+		if (_sde_crtc_commit_kickoff_rot(crtc, cstate))
+			is_error = true;
 
 	/* wait for frame_event_done completion */
 	SDE_ATRACE_BEGIN("wait_for_frame_done_event");
@@ -3396,7 +3439,11 @@
 		SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
 				crtc->base.id,
 				atomic_read(&sde_crtc->frame_pending));
-		goto end;
+
+		is_error = true;
+
+		/* force offline rotation mode since the commit has no pipes */
+		cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
 	}
 
 	if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
@@ -3419,18 +3466,21 @@
 	 * offline mode.
 	 */
 	if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_INLINE_ASYNC)
-		_sde_crtc_commit_kickoff_rot(crtc, cstate);
+		if (_sde_crtc_commit_kickoff_rot(crtc, cstate))
+			is_error = true;
 
 	sde_vbif_clear_errors(sde_kms);
 
+	if (is_error)
+		_sde_crtc_remove_pipe_flush(sde_crtc);
+
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc != crtc)
 			continue;
 
-		sde_encoder_kickoff(encoder);
+		sde_encoder_kickoff(encoder, is_error);
 	}
 
-end:
 	reinit_completion(&sde_crtc->frame_done_comp);
 	SDE_ATRACE_END("crtc_commit");
 	return;
@@ -3605,9 +3655,16 @@
 	}
 
 	/* revert suspend actions, if necessary */
-	if (sde_kms_is_suspend_state(crtc->dev))
+	if (sde_kms_is_suspend_state(crtc->dev)) {
 		_sde_crtc_set_suspend(crtc, false);
 
+		if (!sde_crtc_is_reset_required(crtc)) {
+			SDE_DEBUG("avoiding reset for crtc:%d\n",
+					crtc->base.id);
+			return;
+		}
+	}
+
 	/* remove previous state, if present */
 	if (crtc->state) {
 		sde_crtc_destroy_state(crtc, crtc->state);
@@ -3756,6 +3813,10 @@
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
+	for (i = 0; i < cstate->num_connectors; i++)
+		sde_connector_schedule_status_work(cstate->connectors[i],
+							false);
+
 	if (sde_kms_is_suspend_state(crtc->dev))
 		_sde_crtc_set_suspend(crtc, true);
 
@@ -3848,13 +3909,15 @@
 	struct sde_crtc_irq_info *node = NULL;
 	struct drm_event event;
 	u32 power_on;
-	int ret;
+	int ret, i;
+	struct sde_crtc_state *cstate;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	priv = crtc->dev->dev_private;
+	cstate = to_sde_crtc_state(crtc->state);
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 	SDE_EVT32_VERBOSE(DRMID(crtc));
@@ -3905,6 +3968,9 @@
 		SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE |
 		SDE_POWER_EVENT_PRE_DISABLE,
 		sde_crtc_handle_power_event, crtc, sde_crtc->name);
+
+	for (i = 0; i < cstate->num_connectors; i++)
+		sde_connector_schedule_status_work(cstate->connectors[i], true);
 }
 
 struct plane_state {
@@ -4473,7 +4539,7 @@
 			CRTC_PROP_ROT_CLK);
 
 	msm_property_install_range(&sde_crtc->property_info,
-		"idle_timeout", IDLE_TIMEOUT, 0, U64_MAX, 0,
+		"idle_time", IDLE_TIMEOUT, 0, U64_MAX, 0,
 		CRTC_PROP_IDLE_TIMEOUT);
 
 	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
@@ -5398,6 +5464,8 @@
 
 	if (!ret) {
 		spin_lock_irqsave(&crtc->spin_lock, flags);
+		/* irq is registered and enabled; record the state */
+		node->state = IRQ_ENABLED;
 		list_add_tail(&node->list, &crtc->user_event_list);
 		spin_unlock_irqrestore(&crtc->spin_lock, flags);
 	} else {
@@ -5421,7 +5489,6 @@
 	spin_lock_irqsave(&crtc->spin_lock, flags);
 	list_for_each_entry(node, &crtc->user_event_list, list) {
 		if (node->event == event) {
-			list_del(&node->list);
 			found = true;
 			break;
 		}
@@ -5437,12 +5504,15 @@
 	 * no need to disable/de-register.
 	 */
 	if (!crtc_drm->enabled) {
+		list_del(&node->list);
 		kfree(node);
 		return 0;
 	}
 	priv = kms->dev->dev_private;
 	sde_power_resource_enable(&priv->phandle, kms->core_client, true);
 	ret = node->func(crtc_drm, false, &node->irq);
+	list_del(&node->list);
+	kfree(node);
 	sde_power_resource_enable(&priv->phandle, kms->core_client, false);
 	return ret;
 }
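
The error handling above relies on simple bitmask bookkeeping: pipe_mask snapshots the SSPP flush bits before the LM bit is OR'd in, so an aborted commit can strip only the pipe bits back out of the pending flush. A standalone sketch of that bookkeeping, with invented bit positions:

#include <stdint.h>
#include <stdio.h>

/* illustrative flush-bit bookkeeping; the bit positions are made up */
int main(void)
{
	uint32_t pipe_bits = (1u << 0) | (1u << 3);	/* staged SSPP flush bits */
	uint32_t mixer_bit = 1u << 6;			/* LM flush bit */
	uint32_t pipe_mask, flush_mask;

	pipe_mask = pipe_bits;			/* snapshot before adding the LM bit */
	flush_mask = pipe_bits | mixer_bit;

	flush_mask &= ~pipe_mask;		/* aborted commit: drop only the pipes */

	printf("flush mask without pipes: 0x%x\n", (unsigned)flush_mask);
	return 0;
}
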
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index dd18b63..59bfc47 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -111,6 +111,7 @@
  * @encoder:	Encoder attached to this lm & ctl
  * @mixer_op_mode:	mixer blending operation mode
  * @flush_mask:	mixer flush mask for ctl, mixer and pipe
+ * @pipe_mask:	mixer flush mask for pipe
  */
 struct sde_crtc_mixer {
 	struct sde_hw_mixer *hw_lm;
@@ -120,6 +121,7 @@
 	struct drm_encoder *encoder;
 	u32 mixer_op_mode;
 	u32 flush_mask;
+	u32 pipe_mask;
 };
 
 /**
@@ -254,6 +256,7 @@
 	struct list_head user_event_list;
 
 	struct mutex crtc_lock;
+	struct mutex crtc_cp_lock;
 
 	atomic_t frame_pending;
 	struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
@@ -282,6 +285,9 @@
 	struct list_head rp_head;
 
 	struct sde_crtc_smmu_state_data smmu_state;
+
+	/* blob for histogram data */
+	struct drm_property_blob *hist_blob;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -404,6 +410,29 @@
 	struct sde_crtc_respool rp;
 };
 
+enum sde_crtc_irq_state {
+	IRQ_NOINIT,
+	IRQ_ENABLED,
+	IRQ_DISABLED,
+};
+
+/**
+ * sde_crtc_irq_info - crtc interrupt info
+ * @irq: interrupt callback
+ * @event: event type of the interrupt
+ * @func: function pointer to enable/disable the interrupt
+ * @list: list of user customized event in crtc
+ * @ref_count: reference count for the interrupt
+ */
+struct sde_crtc_irq_info {
+	struct sde_irq_callback irq;
+	u32 event;
+	int (*func)(struct drm_crtc *crtc, bool en,
+			struct sde_irq_callback *irq);
+	struct list_head list;
+	enum sde_crtc_irq_state state;
+};
+
 #define to_sde_crtc_state(x) \
 	container_of(x, struct sde_crtc_state, base)
 
@@ -572,6 +601,30 @@
 }
 
 /**
+ * sde_crtc_is_reset_required - validate the reset request based on the
+ *	pm_suspend state and the crtc's active status. CRTCs are left
+ *	active on pm_suspend during LP1/LP2 states, as the display stays
+ *	ON. Avoid the reset on the subsequent pm_resume in such cases.
+ * @crtc: Pointer to crtc
+ * return: false if in suspend state and crtc active, true otherwise
+ */
+static inline bool sde_crtc_is_reset_required(struct drm_crtc *crtc)
+{
+	/*
+	 * reset is required even when there is no crtc_state as it is required
+	 * to create the initial state object
+	 */
+	if (!crtc || !crtc->state)
+		return true;
+
+	/* reset not required if crtc is active during suspend state */
+	if (sde_kms_is_suspend_state(crtc->dev) && crtc->state->active)
+		return false;
+
+	return true;
+}
+
+/**
  * sde_crtc_event_queue - request event callback
  * @crtc: Pointer to drm crtc structure
  * @func: Pointer to callback function
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 213388d..7c600ca 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -179,6 +179,7 @@
  * @crtc_frame_event_cb_data:	callback handler private data
  * @frame_done_timeout:		frame done timeout in Hz
  * @frame_done_timer:		watchdog timer for frame done event
+ * @vsync_event_timer:		vsync timer
  * @rsc_client:			rsc client pointer
  * @rsc_state_init:		boolean to indicate rsc config init
  * @disp_info:			local copy of msm_display_info struct
@@ -191,6 +192,7 @@
  * @rc_state:			resource controller state
  * @delayed_off_work:		delayed worker to schedule disabling of
  *				clks and resources after IDLE_TIMEOUT time.
+ * @vsync_event_work:		worker to handle vsync event for autorefresh
  * @topology:                   topology of the display
  * @mode_set_complete:          flag to indicate modeset completion
  * @rsc_config:			rsc configuration for display vtotal, fps, etc.
@@ -224,6 +226,7 @@
 
 	atomic_t frame_done_timeout;
 	struct timer_list frame_done_timer;
+	struct timer_list vsync_event_timer;
 
 	struct sde_rsc_client *rsc_client;
 	bool rsc_state_init;
@@ -236,6 +239,7 @@
 	struct mutex rc_lock;
 	enum sde_enc_rc_states rc_state;
 	struct kthread_delayed_work delayed_off_work;
+	struct kthread_work vsync_event_work;
 	struct msm_display_topology topology;
 	bool mode_set_complete;
 
@@ -2844,6 +2848,165 @@
 		phys->hw_pp->ops.setup_dither(phys->hw_pp, dither_cfg, len);
 }
 
+static u32 _sde_encoder_calculate_linetime(struct sde_encoder_virt *sde_enc,
+		struct drm_display_mode *mode)
+{
+	u64 pclk_rate;
+	u32 pclk_period;
+	u32 line_time;
+
+	/*
+	 * For linetime calculation, only operate on master encoder.
+	 */
+	if (!sde_enc->cur_master)
+		return 0;
+
+	if (!sde_enc->cur_master->ops.get_line_count) {
+		SDE_ERROR("get_line_count function not defined\n");
+		return 0;
+	}
+
+	pclk_rate = mode->clock; /* pixel clock in kHz */
+	if (pclk_rate == 0) {
+		SDE_ERROR("pclk is 0, cannot calculate line time\n");
+		return 0;
+	}
+
+	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
+	if (pclk_period == 0) {
+		SDE_ERROR("pclk period is 0\n");
+		return 0;
+	}
+
+	/*
+	 * Line time = pixel clock period (ps) * HTOTAL / 1000.
+	 * Final unit is in ns.
+	 */
+	line_time = (pclk_period * mode->htotal) / 1000;
+	if (line_time == 0) {
+		SDE_ERROR("line time calculation is 0\n");
+		return 0;
+	}
+
+	SDE_DEBUG_ENC(sde_enc,
+			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
+			pclk_rate, pclk_period, line_time);
+
+	return line_time;
+}
+
+static int _sde_encoder_wakeup_time(struct drm_encoder *drm_enc,
+		ktime_t *wakeup_time)
+{
+	struct drm_display_mode *mode;
+	struct sde_encoder_virt *sde_enc;
+	u32 cur_line;
+	u32 line_time;
+	u32 vtotal, time_to_vsync;
+	ktime_t cur_time;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (!drm_enc->crtc || !drm_enc->crtc->state) {
+		SDE_ERROR("crtc/crtc state object is NULL\n");
+		return -EINVAL;
+	}
+	mode = &drm_enc->crtc->state->adjusted_mode;
+
+	line_time = _sde_encoder_calculate_linetime(sde_enc, mode);
+	if (!line_time)
+		return -EINVAL;
+
+	cur_line = sde_enc->cur_master->ops.get_line_count(sde_enc->cur_master);
+
+	vtotal = mode->vtotal;
+	if (cur_line >= vtotal)
+		time_to_vsync = line_time * vtotal;
+	else
+		time_to_vsync = line_time * (vtotal - cur_line);
+
+	if (time_to_vsync == 0) {
+		SDE_ERROR("time to vsync should not be zero, vtotal=%d\n",
+				vtotal);
+		return -EINVAL;
+	}
+
+	cur_time = ktime_get();
+	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
+
+	SDE_DEBUG_ENC(sde_enc,
+			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
+			cur_line, vtotal, time_to_vsync,
+			ktime_to_ms(cur_time),
+			ktime_to_ms(*wakeup_time));
+	return 0;
+}
+
+static void sde_encoder_vsync_event_handler(unsigned long data)
+{
+	struct drm_encoder *drm_enc = (struct drm_encoder *) data;
+	struct sde_encoder_virt *sde_enc;
+	struct msm_drm_private *priv;
+	struct msm_drm_thread *event_thread;
+	bool autorefresh_enabled = false;
+
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
+			!drm_enc->crtc) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	priv = drm_enc->dev->dev_private;
+
+	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+		SDE_ERROR("invalid crtc index\n");
+		return;
+	}
+	event_thread = &priv->event_thread[drm_enc->crtc->index];
+	if (!event_thread) {
+		SDE_ERROR("event_thread not found for crtc:%d\n",
+				drm_enc->crtc->index);
+		return;
+	}
+
+	if (sde_enc->cur_master &&
+		sde_enc->cur_master->ops.is_autorefresh_enabled)
+		autorefresh_enabled =
+			sde_enc->cur_master->ops.is_autorefresh_enabled(
+						sde_enc->cur_master);
+
+	/*
+	 * Queue work to update the vsync event timer
+	 * if autorefresh is enabled.
+	 */
+	SDE_EVT32_VERBOSE(autorefresh_enabled);
+	if (autorefresh_enabled)
+		kthread_queue_work(&event_thread->worker,
+				&sde_enc->vsync_event_work);
+	else
+		del_timer(&sde_enc->vsync_event_timer);
+}
+
+static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
+{
+	struct sde_encoder_virt *sde_enc = container_of(work,
+			struct sde_encoder_virt, vsync_event_work);
+	ktime_t wakeup_time;
+
+	if (!sde_enc) {
+		SDE_ERROR("invalid sde encoder\n");
+		return;
+	}
+
+	if (_sde_encoder_wakeup_time(&sde_enc->base, &wakeup_time))
+		return;
+
+	SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
+	mod_timer(&sde_enc->vsync_event_timer,
+			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+}
+
 void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -2907,11 +3070,51 @@
 	}
 }
 
-void sde_encoder_kickoff(struct drm_encoder *drm_enc)
+/**
+ * _sde_encoder_reset_ctl_hw - reset the h/w configuration of all CTLs
+ *	associated with the specified encoder and unstage all pipes from them
+ * @drm_enc:	encoder pointer
+ * Returns: 0 on success
+ */
+static int _sde_encoder_reset_ctl_hw(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_encoder_phys *phys;
 	unsigned int i;
+	int rc = 0;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	SDE_ATRACE_BEGIN("encoder_release_lm");
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+		if (!phys)
+			continue;
+
+		SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0);
+
+		rc = sde_encoder_helper_reset_mixers(phys, NULL);
+		if (rc)
+			SDE_EVT32(DRMID(drm_enc), rc, SDE_EVTLOG_ERROR);
+	}
+
+	SDE_ATRACE_END("encoder_release_lm");
+	return rc;
+}
+
+void sde_encoder_kickoff(struct drm_encoder *drm_enc, bool is_error)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	ktime_t wakeup_time;
+	unsigned int i;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -2928,6 +3131,10 @@
 	mod_timer(&sde_enc->frame_done_timer, jiffies +
 		((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000));
 
+	/* create a 'no pipes' commit to release buffers on errors */
+	if (is_error)
+		_sde_encoder_reset_ctl_hw(drm_enc);
+
 	/* All phys encs are ready to go, trigger the kickoff */
 	_sde_encoder_kickoff_phys(sde_enc);
 
@@ -2937,10 +3144,18 @@
 		if (phys && phys->ops.handle_post_kickoff)
 			phys->ops.handle_post_kickoff(phys);
 	}
+
+	if (sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+			!_sde_encoder_wakeup_time(drm_enc, &wakeup_time)) {
+		SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
+		mod_timer(&sde_enc->vsync_event_timer,
+				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+	}
+
 	SDE_ATRACE_END("encoder_kickoff");
 }
 
-int sde_encoder_helper_hw_release(struct sde_encoder_phys *phys_enc,
+int sde_encoder_helper_reset_mixers(struct sde_encoder_phys *phys_enc,
 		struct drm_framebuffer *fb)
 {
 	struct drm_encoder *drm_enc;
@@ -2957,8 +3172,6 @@
 	memset(&mixer, 0, sizeof(mixer));
 
 	/* reset associated CTL/LMs */
-	if (phys_enc->hw_ctl->ops.clear_pending_flush)
-		phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
 	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
 		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
 
@@ -2998,7 +3211,7 @@
 	}
 
 	if (!lm_valid) {
-		SDE_DEBUG_ENC(to_sde_encoder_virt(drm_enc), "lm not found\n");
+		SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc), "lm not found\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -3555,6 +3768,12 @@
 	setup_timer(&sde_enc->frame_done_timer, sde_encoder_frame_done_timeout,
 			(unsigned long) sde_enc);
 
+	if ((disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) &&
+			disp_info->is_primary)
+		setup_timer(&sde_enc->vsync_event_timer,
+				sde_encoder_vsync_event_handler,
+				(unsigned long)sde_enc);
+
 	snprintf(name, SDE_NAME_SIZE, "rsc_enc%u", drm_enc->base.id);
 	sde_enc->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, name,
 					disp_info->is_primary);
@@ -3568,6 +3787,10 @@
 	kthread_init_delayed_work(&sde_enc->delayed_off_work,
 			sde_encoder_off_work);
 	sde_enc->idle_timeout = IDLE_TIMEOUT;
+
+	kthread_init_work(&sde_enc->vsync_event_work,
+			sde_encoder_vsync_event_work_handler);
+
 	memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info));
 
 	SDE_DEBUG_ENC(sde_enc, "created\n");
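
The linetime and wakeup-time helpers above mix three units: the mode clock is in kHz, the derived pixel clock period is in ps, and the resulting line time is in ns. A standalone rerun of the same arithmetic with an assumed 1080p60 timing (all values are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t pclk_khz = 148500;		/* assumed 1080p60 pixel clock */
	uint32_t htotal = 2200, vtotal = 1125, cur_line = 400;
	uint32_t pclk_period_ps, line_time_ns, time_to_vsync_ns;

	pclk_period_ps = DIV_ROUND_UP(1000000000ull, pclk_khz);	/* 6735 ps */
	line_time_ns = (pclk_period_ps * htotal) / 1000;	/* ~14817 ns */
	time_to_vsync_ns = line_time_ns * (vtotal - cur_line);	/* ~10.7 ms */

	printf("line=%u ns, to-vsync=%u ns\n",
		(unsigned)line_time_ns, (unsigned)time_to_vsync_ns);
	return 0;
}
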
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index bb7f31d..baf59b4 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -128,8 +128,10 @@
  * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
  *	(i.e. ctl flush and start) immediately.
  * @encoder:	encoder pointer
+ * @is_error:	whether the current commit needs to be aborted and replaced
+ *		with a 'safe' commit
  */
-void sde_encoder_kickoff(struct drm_encoder *encoder);
+void sde_encoder_kickoff(struct drm_encoder *encoder, bool is_error);
 
 /**
  * sde_encoder_wait_for_event - Waits for encoder events
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index f4ac1ea..8813fd2 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -130,6 +130,7 @@
  * @restore:			Restore all the encoder configs.
  * @is_autorefresh_enabled:	provides the autorefresh current
  *                              enable/disable state.
+ * @get_line_count:		Obtain current vertical line count
  */
 
 struct sde_encoder_phys_ops {
@@ -172,6 +173,7 @@
 	void (*prepare_idle_pc)(struct sde_encoder_phys *phys_enc);
 	void (*restore)(struct sde_encoder_phys *phys);
 	bool (*is_autorefresh_enabled)(struct sde_encoder_phys *phys);
+	int (*get_line_count)(struct sde_encoder_phys *phys);
 };
 
 /**
@@ -520,12 +522,12 @@
 		enum sde_intf interface);
 
 /**
- * sde_encoder_helper_hw_release - prepare for h/w reset during disable
+ * sde_encoder_helper_reset_mixers - reset mixers associated with phys enc
  * @phys_enc: Pointer to physical encoder structure
  * @fb: Optional fb for specifying new mixer output resolution, may be NULL
  * Return: Zero on success
  */
-int sde_encoder_helper_hw_release(struct sde_encoder_phys *phys_enc,
+int sde_encoder_helper_reset_mixers(struct sde_encoder_phys *phys_enc,
 		struct drm_framebuffer *fb);
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index fbd3df1..4291098 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -900,6 +900,24 @@
 	_sde_encoder_phys_cmd_connect_te(phys_enc, false);
 }
 
+static int sde_encoder_phys_cmd_get_line_count(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_hw_pingpong *hw_pp;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return -EINVAL;
+
+	if (!sde_encoder_phys_cmd_is_master(phys_enc))
+		return -EINVAL;
+
+	hw_pp = phys_enc->hw_pp;
+	if (!hw_pp->ops.get_line_count)
+		return -EINVAL;
+
+	return hw_pp->ops.get_line_count(hw_pp);
+}
+
 static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
@@ -1246,6 +1264,7 @@
 	ops->is_autorefresh_enabled =
 			sde_encoder_phys_cmd_is_autorefresh_enabled;
 	ops->handle_post_kickoff = sde_encoder_phys_cmd_handle_post_kickoff;
+	ops->get_line_count = sde_encoder_phys_cmd_get_line_count;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_cmd_init(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index cc90267..6a4348ba 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -905,6 +905,24 @@
 		vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
 }
 
+static int sde_encoder_phys_vid_get_line_count(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	if (!sde_encoder_phys_vid_is_master(phys_enc))
+		return -EINVAL;
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+		return -EINVAL;
+
+	return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+}
+
 static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
 {
 	ops->is_master = sde_encoder_phys_vid_is_master;
@@ -925,6 +943,7 @@
 	ops->setup_misr = sde_encoder_phys_vid_setup_misr;
 	ops->collect_misr = sde_encoder_phys_vid_collect_misr;
 	ops->hw_reset = sde_encoder_helper_hw_reset;
+	ops->get_line_count = sde_encoder_phys_vid_get_line_count;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_vid_init(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 240e521..12115756 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -1139,7 +1139,9 @@
 	}
 
 	/* reset h/w before final flush */
-	if (sde_encoder_helper_hw_release(phys_enc, wb_enc->fb_disable))
+	if (phys_enc->hw_ctl->ops.clear_pending_flush)
+		phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
+	if (sde_encoder_helper_reset_mixers(phys_enc, wb_enc->fb_disable))
 		goto exit;
 
 	phys_enc->enable_state = SDE_ENC_DISABLING;
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index c013fef..816339b 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -206,9 +206,9 @@
 	/* create fd */
 	fd = get_unused_fd_flags(0);
 	if (fd < 0) {
-		fence_put(&sde_fence->base);
 		SDE_ERROR("failed to get_unused_fd_flags(), %s\n",
 							sde_fence->name);
+		fence_put(&sde_fence->base);
 		goto exit;
 	}
 
@@ -217,8 +217,8 @@
 	if (sync_file == NULL) {
 		put_unused_fd(fd);
 		fd = -EINVAL;
-		fence_put(&sde_fence->base);
 		SDE_ERROR("couldn't create fence, %s\n", sde_fence->name);
+		fence_put(&sde_fence->base);
 		goto exit;
 	}
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
index c8e732a..8e54a2a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -24,6 +24,9 @@
 #define PA_VAL_DSPP_OFF		0x240
 #define PA_CONT_DSPP_OFF	0x244
 
+#define PA_HIST_CTRL_DSPP_OFF	0x4
+#define PA_HIST_DATA_DSPP_OFF	0x400
+
 #define PA_LUTV_DSPP_OFF	0x1400
 #define PA_LUT_SWAP_OFF		0x234
 
@@ -70,6 +73,7 @@
 #define DSPP_OP_PA_CONT_EN	BIT(28)
 #define DSPP_OP_PA_EN		BIT(20)
 #define DSPP_OP_PA_LUTV_EN	BIT(19)
+#define DSPP_OP_PA_HIST_EN	BIT(16)
 #define DSPP_OP_PA_SKIN_EN	BIT(5)
 #define DSPP_OP_PA_FOL_EN	BIT(6)
 #define DSPP_OP_PA_SKY_EN	BIT(7)
@@ -563,3 +567,69 @@
 	i = BIT(0) | ((payload->flags & PGC_8B_ROUND) ? BIT(1) : 0);
 	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gc.base, i);
 }
+
+void sde_setup_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	u32 base, offset;
+	u32 op_mode;
+	bool feature_enabled;
+
+	if (!ctx || !cfg) {
+		DRM_ERROR("invalid parameters ctx %pK cfg %pK", ctx, cfg);
+		return;
+	}
+
+	feature_enabled = *(bool *)cfg;
+	base = ctx->cap->sblk->hist.base;
+	offset = base + PA_HIST_CTRL_DSPP_OFF;
+
+	op_mode = SDE_REG_READ(&ctx->hw, base);
+	if (!feature_enabled) {
+		op_mode &= ~DSPP_OP_PA_HIST_EN;
+		if (PA_DSPP_DISABLE_REQUIRED(op_mode))
+			op_mode &= ~DSPP_OP_PA_EN;
+	} else {
+		op_mode |= DSPP_OP_PA_HIST_EN | DSPP_OP_PA_EN;
+	}
+
+	SDE_REG_WRITE(&ctx->hw, offset, 0);
+	SDE_REG_WRITE(&ctx->hw, base, op_mode);
+}
+
+void sde_read_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct drm_msm_hist *hist_data;
+	u32 offset, offset_ctl;
+	u32 i;
+
+	if (!ctx || !cfg) {
+		DRM_ERROR("invalid parameters ctx %pK cfg %pK", ctx, cfg);
+		return;
+	}
+
+	hist_data = (struct drm_msm_hist *)cfg;
+	offset = ctx->cap->sblk->hist.base + PA_HIST_DATA_DSPP_OFF;
+	offset_ctl = ctx->cap->sblk->hist.base + PA_HIST_CTRL_DSPP_OFF;
+
+	for (i = 0; i < HIST_V_SIZE; i++)
+		hist_data->data[i] = SDE_REG_READ(&ctx->hw, offset + i * 4) &
+					REG_MASK(24);
+
+	/* unlock hist buffer */
+	SDE_REG_WRITE(&ctx->hw, offset_ctl, 0);
+}
+
+void sde_lock_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	u32 offset_ctl;
+
+	if (!ctx) {
+		DRM_ERROR("invalid parameters ctx %pK", ctx);
+		return;
+	}
+
+	offset_ctl = ctx->cap->sblk->hist.base + PA_HIST_CTRL_DSPP_OFF;
+
+	/* lock hist buffer */
+	SDE_REG_WRITE(&ctx->hw, offset_ctl, 1);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
index 4cd2e5a..74018a3 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
@@ -89,4 +89,23 @@
  */
 void sde_setup_dspp_gc_v1_7(struct sde_hw_dspp *ctx, void *cfg);
 
+/**
+ * sde_setup_dspp_hist_v1_7 - setup DSPP histogram feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to histogram control data
+ */
+void sde_setup_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_read_dspp_hist_v1_7 - read DSPP histogram data in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to histogram data
+ */
+void sde_read_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_lock_dspp_hist_v1_7 - lock DSPP histogram buffer in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ */
+void sde_lock_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg);
 #endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index d30c0ae..36e30b7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -90,6 +90,17 @@
 					sde_setup_dspp_pa_vlut_v1_8;
 			}
 			break;
+		case SDE_DSPP_HIST:
+			if (c->cap->sblk->hist.version ==
+				(SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
+				c->ops.setup_histogram =
+				    sde_setup_dspp_hist_v1_7;
+				c->ops.read_histogram =
+				    sde_read_dspp_hist_v1_7;
+				c->ops.lock_histogram =
+				    sde_lock_dspp_hist_v1_7;
+			}
+			break;
 		case SDE_DSPP_GAMUT:
 			if (c->cap->sblk->gamut.version ==
 					SDE_COLOR_PROCESS_VER(0x4, 0)) {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 44b3831..4878fc6 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -38,6 +38,13 @@
 	void (*read_histogram)(struct sde_hw_dspp *ctx, void *cfg);
 
 	/**
+	 * lock_histogram - lock dspp histogram buffer
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*lock_histogram)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
 	 * setup_igc - update dspp igc
 	 * @ctx: Pointer to dspp context
 	 * @cfg: Pointer to configuration
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index 35f1800..fd06c12 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -289,6 +289,18 @@
 	return SDE_REG_READ(c, INTF_MISR_SIGNATURE);
 }
 
+static u32 sde_hw_intf_get_line_count(struct sde_hw_intf *intf)
+{
+	struct sde_hw_blk_reg_map *c;
+
+	if (!intf)
+		return 0;
+
+	c = &intf->hw;
+
+	return SDE_REG_READ(c, INTF_LINE_COUNT);
+}
+
 static void _setup_intf_ops(struct sde_hw_intf_ops *ops,
 		unsigned long cap)
 {
@@ -298,6 +310,7 @@
 	ops->enable_timing = sde_hw_intf_enable_timing_engine;
 	ops->setup_misr = sde_hw_intf_setup_misr;
 	ops->collect_misr = sde_hw_intf_collect_misr;
+	ops->get_line_count = sde_hw_intf_get_line_count;
 	if (cap & BIT(SDE_INTF_ROT_START))
 		ops->setup_rot_start = sde_hw_intf_setup_rot_start;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
index 83e206d..89068bc 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
@@ -62,6 +62,7 @@
  * @ get_status: returns if timing engine is enabled or not
  * @ setup_misr: enables/disables MISR in HW register
  * @ collect_misr: reads and stores MISR data from HW register
+ * @ get_line_count: reads current vertical line counter
  */
 struct sde_hw_intf_ops {
 	void (*setup_timing_gen)(struct sde_hw_intf *intf,
@@ -84,6 +85,8 @@
 			bool enable, u32 frame_count);
 
 	u32 (*collect_misr)(struct sde_hw_intf *intf);
+
+	u32 (*get_line_count)(struct sde_hw_intf *intf);
 };
 
 struct sde_hw_intf {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
index d65e8d0..d8f79f1 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -302,6 +302,33 @@
 	return 0;
 }
 
+static u32 sde_hw_pp_get_line_count(struct sde_hw_pingpong *pp)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 height, init;
+	u32 line = 0xFFFF;
+
+	if (!pp)
+		return 0;
+	c = &pp->hw;
+
+	init = SDE_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+	height = SDE_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+	if (height < init)
+		goto line_count_exit;
+
+	line = SDE_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
+	if (line < init)
+		line += (0xFFFF - init);
+	else
+		line -= init;
+
+line_count_exit:
+	return line;
+}
+
 static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops,
 	const struct sde_pingpong_cfg *hw_cap)
 {
@@ -317,6 +344,7 @@
 	ops->disable_dsc = sde_hw_pp_dsc_disable;
 	ops->get_autorefresh = sde_hw_pp_get_autorefresh_config;
 	ops->poll_timeout_wr_ptr = sde_hw_pp_poll_timeout_wr_ptr;
+	ops->get_line_count = sde_hw_pp_get_line_count;
 
 	version = SDE_COLOR_PROCESS_MAJOR(hw_cap->sblk->dither.version);
 	switch (version) {
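
sde_hw_pp_get_line_count reports the line position relative to the programmed vsync init value, wrapping within the 16-bit counter when the raw count has not yet passed init. A standalone illustration of that normalization with assumed register snapshots:

#include <stdint.h>
#include <stdio.h>

/* mirror of the wraparound handling, with made-up register values */
static uint32_t normalize_line(uint32_t raw, uint32_t init)
{
	if (raw < init)
		return raw + (0xFFFF - init);
	return raw - init;
}

int main(void)
{
	/* init=2400: raw=2500 -> line 100; raw=100 -> line 63235 */
	printf("%u %u\n", (unsigned)normalize_line(2500, 2400),
			(unsigned)normalize_line(100, 2400));
	return 0;
}
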
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
index f0a2054..389b2d2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -64,6 +64,7 @@
  *  @enable_dsc : enables DSC encoder
  *  @disable_dsc : disables DSC encoder
  *  @setup_dither : function to program the dither hw block
+ *  @get_line_count: obtain current vertical line counter
  */
 struct sde_hw_pingpong_ops {
 	/**
@@ -130,6 +131,11 @@
 	 * Program the dither hw block
 	 */
 	int (*setup_dither)(struct sde_hw_pingpong *pp, void *cfg, size_t len);
+
+	/**
+	 * Obtain current vertical line counter
+	 */
+	u32 (*get_line_count)(struct sde_hw_pingpong *pp);
 };
 
 struct sde_hw_pingpong {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index e5f6471..acecf1a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -85,6 +85,7 @@
 #define SSPP_TRAFFIC_SHAPER                0x130
 #define SSPP_CDP_CNTL                      0x134
 #define SSPP_UBWC_ERROR_STATUS             0x138
+#define SSPP_CDP_CNTL_REC1                 0x13c
 #define SSPP_TRAFFIC_SHAPER_PREFILL        0x150
 #define SSPP_TRAFFIC_SHAPER_REC1_PREFILL   0x154
 #define SSPP_TRAFFIC_SHAPER_REC1           0x158
@@ -873,10 +874,12 @@
 }
 
 static void sde_hw_sspp_setup_cdp(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_cdp_cfg *cfg)
+		struct sde_hw_pipe_cdp_cfg *cfg,
+		enum sde_sspp_multirect_index index)
 {
 	u32 idx;
 	u32 cdp_cntl = 0;
+	u32 cdp_cntl_offset = 0;
 
 	if (!ctx || !cfg)
 		return;
@@ -884,6 +887,13 @@
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
 		return;
 
+	if (index == SDE_SSPP_RECT_0)
+		cdp_cntl_offset = SSPP_CDP_CNTL;
+	else if (index == SDE_SSPP_RECT_1)
+		cdp_cntl_offset = SSPP_CDP_CNTL_REC1;
+	else
+		return;
+
 	if (cfg->enable)
 		cdp_cntl |= BIT(0);
 	if (cfg->ubwc_meta_enable)
@@ -893,7 +903,7 @@
 	if (cfg->preload_ahead == SDE_SSPP_CDP_PRELOAD_AHEAD_64)
 		cdp_cntl |= BIT(3);
 
-	SDE_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+	SDE_REG_WRITE(&ctx->hw, cdp_cntl_offset, cdp_cntl);
 }
 
 static void _setup_layer_ops(struct sde_hw_pipe *c,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 6e03ab1..d32c9d8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -502,9 +502,11 @@
 	 * setup_cdp - setup client driven prefetch
 	 * @ctx: Pointer to pipe context
 	 * @cfg: Pointer to cdp configuration
+	 * @index: rectangle index in multirect
 	 */
 	void (*setup_cdp)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pipe_cdp_cfg *cfg);
+			struct sde_hw_pipe_cdp_cfg *cfg,
+			enum sde_sspp_multirect_index index);
 
 	/**
 	 * setup_secure_address - setup secureity status of the source address
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index 7864b9f..76f89f4 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -21,6 +21,28 @@
 
 static uint32_t g_sde_irq_status;
 
+void sde_irq_update(struct msm_kms *msm_kms, bool enable)
+{
+	int irq_num;
+	struct sde_kms *sde_kms;
+
+	if (!msm_kms) {
+		SDE_ERROR("invalid kms arguments\n");
+		return;
+	}
+	sde_kms = to_sde_kms(msm_kms);
+	irq_num = platform_get_irq(sde_kms->dev->platformdev, 0);
+	if (irq_num < 0) {
+		SDE_ERROR("invalid irq number\n");
+		return;
+	}
+
+	if (enable)
+		enable_irq(irq_num);
+	else
+		disable_irq(irq_num);
+}
+
 irqreturn_t sde_irq(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms = to_sde_kms(kms);
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.h b/drivers/gpu/drm/msm/sde/sde_irq.h
index e1090071..5bb299a 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_irq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,4 +56,11 @@
  */
 irqreturn_t sde_irq(struct msm_kms *kms);
 
+/**
+ * sde_irq_update - enable/disable IRQ line
+ * @kms:		pointer to kms context
+ * @enable:		enable:true, disable:false
+ */
+void sde_irq_update(struct msm_kms *kms, bool enable);
+
 #endif /* __SDE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index ec8c346..bf06dfb 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -773,7 +773,8 @@
 		.set_power = dsi_display_set_power,
 		.get_mode_info = dsi_conn_get_mode_info,
 		.get_dst_format = dsi_display_get_dst_format,
-		.post_kickoff = dsi_conn_post_kickoff
+		.post_kickoff = dsi_conn_post_kickoff,
+		.check_status = dsi_display_check_status,
 	};
 	static const struct sde_connector_ops wb_ops = {
 		.post_init =    sde_wb_connector_post_init,
@@ -783,7 +784,8 @@
 		.get_info =     sde_wb_get_info,
 		.soft_reset =   NULL,
 		.get_mode_info = sde_wb_get_mode_info,
-		.get_dst_format = NULL
+		.get_dst_format = NULL,
+		.check_status = NULL,
 	};
 	static const struct sde_connector_ops dp_ops = {
 		.post_init  = dp_connector_post_init,
@@ -793,6 +795,7 @@
 		.get_info   = dp_connector_get_info,
 		.get_mode_info  = dp_connector_get_mode_info,
 		.send_hpd_event = dp_connector_send_hpd_event,
+		.check_status = NULL,
 	};
 	struct msm_display_info info;
 	struct drm_encoder *encoder;
@@ -1420,6 +1423,12 @@
 	if (!priv)
 		return;
 
+	if (sde_kms->genpd_init) {
+		sde_kms->genpd_init = false;
+		pm_genpd_remove(&sde_kms->genpd);
+		of_genpd_del_provider(pdev->dev.of_node);
+	}
+
 	if (sde_kms->hw_intr)
 		sde_hw_intr_destroy(sde_kms->hw_intr);
 	sde_kms->hw_intr = NULL;
@@ -1993,12 +2002,71 @@
 static void sde_kms_handle_power_event(u32 event_type, void *usr)
 {
 	struct sde_kms *sde_kms = usr;
+	struct msm_kms *msm_kms;
 
 	if (!sde_kms)
 		return;
+	msm_kms = &sde_kms->base;
 
-	if (event_type == SDE_POWER_EVENT_POST_ENABLE)
+	SDE_DEBUG("event_type:%d\n", event_type);
+	SDE_EVT32_VERBOSE(event_type);
+
+	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
+		sde_irq_update(msm_kms, true);
 		sde_vbif_init_memtypes(sde_kms);
+	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
+		sde_irq_update(msm_kms, false);
+	}
+}
+
+#define genpd_to_sde_kms(domain) container_of(domain, struct sde_kms, genpd)
+
+static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
+{
+	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int rc;
+
+	SDE_DEBUG("\n");
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return -EINVAL;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return -EINVAL;
+
+	SDE_EVT32(genpd->device_count);
+
+	rc = sde_power_resource_enable(&priv->phandle, priv->pclient, true);
+
+	return rc;
+}
+
+static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
+{
+	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int rc;
+
+	SDE_DEBUG("\n");
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return -EINVAL;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return -EINVAL;
+
+	SDE_EVT32(genpd->device_count);
+
+	rc = sde_power_resource_enable(&priv->phandle, priv->pclient, false);
+
+	return rc;
 }
 
 static int sde_kms_hw_init(struct msm_kms *kms)
@@ -2226,12 +2294,41 @@
 	 */
 	sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
 	sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
-			SDE_POWER_EVENT_POST_ENABLE,
+			SDE_POWER_EVENT_POST_ENABLE |
+			SDE_POWER_EVENT_PRE_DISABLE,
 			sde_kms_handle_power_event, sde_kms, "kms");
 
+	/* initialize power domain if defined */
+	if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) {
+		sde_kms->genpd.name = dev->unique;
+		sde_kms->genpd.power_off = sde_kms_pd_disable;
+		sde_kms->genpd.power_on = sde_kms_pd_enable;
+
+		rc = pm_genpd_init(&sde_kms->genpd, NULL, true);
+		if (rc < 0) {
+			SDE_ERROR("failed to init genpd provider %s: %d\n",
+					sde_kms->genpd.name, rc);
+			goto genpd_err;
+		}
+
+		rc = of_genpd_add_provider_simple(dev->dev->of_node,
+				&sde_kms->genpd);
+		if (rc < 0) {
+			SDE_ERROR("failed to add genpd provider %s: %d\n",
+					sde_kms->genpd.name, rc);
+			pm_genpd_remove(&sde_kms->genpd);
+			goto genpd_err;
+		}
+
+		sde_kms->genpd_init = true;
+		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
+	}
+
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
 	return 0;
 
+genpd_err:
 drm_obj_init_err:
 	sde_core_perf_destroy(&sde_kms->perf);
 hw_intr_init_err:
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 0ddfb30..aacff78 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -20,6 +20,7 @@
 #define __SDE_KMS_H__
 
 #include <linux/msm_ion.h>
+#include <linux/pm_domain.h>
 
 #include "msm_drv.h"
 #include "msm_kms.h"
@@ -107,6 +108,9 @@
 #define SDE_KMS_OPS_CLEANUP_PLANE_FB                       BIT(2)
 #define SDE_KMS_OPS_PREPARE_PLANE_FB                       BIT(3)
 
+/* ESD status check interval in milliseconds */
+#define STATUS_CHECK_INTERVAL_MS 5000
+
 /*
  * struct sde_irq_callback - IRQ callback handlers
  * @list: list to callback
@@ -173,6 +177,9 @@
 	int core_rev;
 	struct sde_mdss_cfg *catalog;
 
+	struct generic_pm_domain genpd;
+	bool genpd_init;
+
 	struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
 	struct sde_power_client *core_client;
 
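
STATUS_CHECK_INTERVAL_MS feeds the connector status work that the crtc enable/disable paths schedule and cancel; the actual panel check (the connector check_status op) lives outside this hunk. A minimal sketch of the polling pattern, assuming an ordinary delayed work and hypothetical check/recovery callbacks:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define STATUS_CHECK_INTERVAL_MS 5000	/* mirrors the sde_kms.h definition */

/* hypothetical container; the real driver hangs this off the connector */
struct esd_ctx {
	struct delayed_work status_work;
	bool (*check_status)(struct esd_ctx *ctx);	/* assumed signature */
	void (*trigger_recovery)(struct esd_ctx *ctx);
};

static void esd_status_work_fn(struct work_struct *work)
{
	struct esd_ctx *ctx = container_of(to_delayed_work(work),
			struct esd_ctx, status_work);

	if (ctx->check_status && !ctx->check_status(ctx)) {
		ctx->trigger_recovery(ctx);	/* panel unresponsive */
		return;
	}

	/* panel healthy: poll again after the configured interval */
	schedule_delayed_work(&ctx->status_work,
			msecs_to_jiffies(STATUS_CHECK_INTERVAL_MS));
}
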
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index f85f2c6..f4672b8 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -2467,21 +2467,21 @@
 	msm_framebuffer_cleanup(state->fb, pstate->aspace);
 }
 
-void sde_plane_kickoff(struct drm_plane *plane)
+int sde_plane_kickoff_rot(struct drm_plane *plane)
 {
 	struct sde_plane_state *pstate;
 
 	if (!plane || !plane->state) {
 		SDE_ERROR("invalid plane\n");
-		return;
+		return -EINVAL;
 	}
 
 	pstate = to_sde_plane_state(plane->state);
 
 	if (!pstate->rot.rot_hw || !pstate->rot.rot_hw->ops.commit)
-		return;
+		return 0;
 
-	pstate->rot.rot_hw->ops.commit(pstate->rot.rot_hw,
+	return pstate->rot.rot_hw->ops.commit(pstate->rot.rot_hw,
 			&pstate->rot.rot_cmd,
 			SDE_HW_ROT_CMD_START);
 }
@@ -3603,7 +3603,8 @@
 					SDE_FORMAT_IS_TILE(fmt);
 			cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
 
-			psde->pipe_hw->ops.setup_cdp(psde->pipe_hw, cdp_cfg);
+			psde->pipe_hw->ops.setup_cdp(psde->pipe_hw, cdp_cfg,
+					pstate->multirect_index);
 		}
 
 		if (psde->pipe_hw->ops.setup_sys_cache) {
@@ -4386,6 +4387,11 @@
 	psde = to_sde_plane(plane);
 	SDE_DEBUG_PLANE(psde, "\n");
 
+	if (plane->state && !sde_crtc_is_reset_required(plane->state->crtc)) {
+		SDE_DEBUG_PLANE(psde, "avoid reset for plane\n");
+		return;
+	}
+
 	/* remove previous state, if present */
 	if (plane->state) {
 		sde_plane_destroy_state(plane, plane->state);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 2e8adfe..d6c5876 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -228,10 +228,11 @@
 void sde_plane_flush(struct drm_plane *plane);
 
 /**
- * sde_plane_kickoff - final plane operations before commit kickoff
+ * sde_plane_kickoff_rot - final plane rotator operations before commit kickoff
  * @plane: Pointer to drm plane structure
+ * Returns: Zero on success
  */
-void sde_plane_kickoff(struct drm_plane *plane);
+int sde_plane_kickoff_rot(struct drm_plane *plane);
 
 /**
  * sde_plane_set_error: enable/disable error condition
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index b8fbcf7..dc16ab1 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -2664,6 +2664,7 @@
 				dump_dbgbus_sde;
 		sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
 				dump_dbgbus_vbif_rt;
+		sde_dbg_base.dump_all = dump_all;
 		schedule_work(&sde_dbg_base.dump_work);
 	} else {
 		_sde_dump_array(blk_arr, blk_len, do_panic, name,
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
index 7951c23..3673d125 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_1x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c
@@ -363,12 +363,15 @@
 			if (bytes_read != read_size) {
 				pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
 					offset, read_size, bytes_read);
+				rc = -EIO;
 				break;
 			}
 
 			buf += read_size;
-			offset += read_size;
 			size -= read_size;
+
+			if (!realign)
+				offset += read_size;
 		} while (size > 0);
 	}
 
@@ -393,6 +396,7 @@
 			if (bytes_written != write_size) {
 				pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
 					offset, write_size, bytes_written);
+				rc = -EIO;
 				break;
 			}
 
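
The change above stops advancing the DDC offset when realign is set, which matches FIFO-style reads where every chunk is fetched from the same register offset, while plain reads walk a linear address range. A standalone sketch of chunked reads under both behaviours, with a stubbed transfer function and illustrative offsets:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stub standing in for the DDC transfer; always succeeds here */
static int ddc_read(uint32_t offset, uint8_t *buf, uint32_t len)
{
	printf("read %u bytes at offset 0x%x\n", (unsigned)len, (unsigned)offset);
	return (int)len;
}

static void chunked_read(uint32_t offset, uint8_t *buf, uint32_t size,
		bool realign, uint32_t chunk)
{
	while (size > 0) {
		uint32_t n = size < chunk ? size : chunk;

		if (ddc_read(offset, buf, n) != (int)n)
			break;

		buf += n;
		size -= n;

		/* only linear reads advance; realigned reads reuse the offset */
		if (!realign)
			offset += n;
	}
}

int main(void)
{
	uint8_t buf[40];

	chunked_read(0x43, buf, sizeof(buf), true, 16);	/* offsets illustrative */
	chunked_read(0x00, buf, sizeof(buf), false, 16);
	return 0;
}
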
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index e112fd1..c2eb9ea 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -764,6 +764,29 @@
 #define A6XX_VBIF_PERF_PWR_CNT_HIGH1            0x3119
 #define A6XX_VBIF_PERF_PWR_CNT_HIGH2            0x311a
 
+/* GBIF registers */
+#define A6XX_GBIF_HALT                    0x3c45
+#define A6XX_GBIF_HALT_ACK                0x3c46
+#define A6XX_GBIF_HALT_MASK               0x1
+
+#define A6XX_GBIF_PERF_PWR_CNT_EN         0x3cc0
+#define A6XX_GBIF_PERF_CNT_SEL            0x3cc2
+#define A6XX_GBIF_PERF_CNT_LOW0           0x3cc4
+#define A6XX_GBIF_PERF_CNT_LOW1           0x3cc5
+#define A6XX_GBIF_PERF_CNT_LOW2           0x3cc6
+#define A6XX_GBIF_PERF_CNT_LOW3           0x3cc7
+#define A6XX_GBIF_PERF_CNT_HIGH0          0x3cc8
+#define A6XX_GBIF_PERF_CNT_HIGH1          0x3cc9
+#define A6XX_GBIF_PERF_CNT_HIGH2          0x3cca
+#define A6XX_GBIF_PERF_CNT_HIGH3          0x3ccb
+#define A6XX_GBIF_PWR_CNT_LOW0            0x3ccc
+#define A6XX_GBIF_PWR_CNT_LOW1            0x3ccd
+#define A6XX_GBIF_PWR_CNT_LOW2            0x3cce
+#define A6XX_GBIF_PWR_CNT_HIGH0           0x3ccf
+#define A6XX_GBIF_PWR_CNT_HIGH1           0x3cd0
+#define A6XX_GBIF_PWR_CNT_HIGH2           0x3cd1
+
+
 /* CX_DBGC_CFG registers */
 #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A                   0x18400
 #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_B                   0x18401
@@ -935,6 +958,7 @@
 #define A6XX_GMU_AO_SPARE_CNTL			0x23B16
 
 /* GMU RSC control registers */
+#define A6XX_GPU_RSCC_RSC_STATUS0_DRV0		0x23404
 #define A6XX_GMU_RSCC_CONTROL_REQ		0x23B07
 #define A6XX_GMU_RSCC_CONTROL_ACK		0x23B08
 
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 7943745..844142a 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -554,7 +554,13 @@
 	return 0;
 }
 
-
+/**
+ * adreno_irqctrl() - Enables/disables the RBBM interrupt mask
+ * @adreno_dev: Pointer to an adreno_device
+ * @state: 1 for masked or 0 for unmasked
+ * Power: The caller of this function must use OOBs to guarantee
+ * that the GPU is powered on when this function runs
+ */
 void adreno_irqctrl(struct adreno_device *adreno_dev, int state)
 {
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
@@ -599,7 +605,7 @@
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct adreno_irq *irq_params = gpudev->irq;
 	irqreturn_t ret = IRQ_NONE;
-	unsigned int status = 0, tmp, int_bit;
+	unsigned int status = 0, fence = 0, tmp, int_bit;
 	int i;
 
 	atomic_inc(&adreno_dev->pending_irq_refcnt);
@@ -614,6 +620,17 @@
 	if (gpudev->gpu_keepalive)
 		gpudev->gpu_keepalive(adreno_dev, true);
 
+	/*
+	 * If the AHB fence is not in ALLOW mode when we receive an RBBM
+	 * interrupt, something went wrong: GPU register access is blocked,
+	 * so bail out of the handler instead of touching the IRQ registers.
+	 */
+	adreno_readreg(adreno_dev, ADRENO_REG_GMU_AO_AHB_FENCE_CTRL, &fence);
+	if (fence != 0) {
+		KGSL_DRV_CRIT_RATELIMIT(device, "AHB fence is stuck in ISR\n");
+		return ret;
+	}
+
 	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
 
 	/*
@@ -1498,9 +1515,9 @@
 
 	/* Send OOB request to turn on the GX */
 	if (gpudev->oob_set) {
-		status = gpudev->oob_set(adreno_dev, OOB_GPUSTART_SET_MASK,
-				OOB_GPUSTART_CHECK_MASK,
-				OOB_GPUSTART_CLEAR_MASK);
+		status = gpudev->oob_set(adreno_dev, OOB_GPU_SET_MASK,
+				OOB_GPU_CHECK_MASK,
+				OOB_GPU_CLEAR_MASK);
 		if (status)
 			goto error_mmu_off;
 	}
@@ -1599,17 +1616,28 @@
 				pmqos_active_vote);
 
 	/* Send OOB request to allow IFPC */
-	if (gpudev->oob_clear)
-		gpudev->oob_clear(adreno_dev, OOB_GPUSTART_CLEAR_MASK);
+	if (gpudev->oob_clear) {
+		gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
+
+		/* If we made it this far, the BOOT OOB was sent to the GMU */
+		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+			gpudev->oob_clear(adreno_dev,
+					OOB_BOOT_SLUMBER_CLEAR_MASK);
+	}
 
 	return 0;
 
 error_oob_clear:
 	if (gpudev->oob_clear)
-		gpudev->oob_clear(adreno_dev, OOB_GPUSTART_CLEAR_MASK);
+		gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
 
 error_mmu_off:
 	kgsl_mmu_stop(&device->mmu);
+	if (gpudev->oob_clear &&
+			ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
+		gpudev->oob_clear(adreno_dev,
+				OOB_BOOT_SLUMBER_CLEAR_MASK);
+	}
 
 error_pwr_off:
 	/* set the state back to original state */
@@ -1667,10 +1695,23 @@
 static int adreno_stop(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	int error = 0;
 
 	if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
 		return 0;
 
+	/* Turn the power on one last time before stopping */
+	if (gpudev->oob_set) {
+		error = gpudev->oob_set(adreno_dev, OOB_GPU_SET_MASK,
+				OOB_GPU_CHECK_MASK,
+				OOB_GPU_CLEAR_MASK);
+		if (error) {
+			gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
+			return error;
+		}
+	}
+
 	adreno_set_active_ctxs_null(adreno_dev);
 
 	adreno_dispatcher_stop(adreno_dev);
@@ -1694,6 +1735,19 @@
 	/* Save physical performance counter values before GPU power down*/
 	adreno_perfcounter_save(adreno_dev);
 
+	if (gpudev->oob_clear)
+		gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
+
+	/*
+	 * Saving perfcounters will use an OOB to put the GMU into
+	 * active state. Before continuing, we should wait for the
+	 * GMU to return to the lowest idle level. This is
+	 * because some idle level transitions require VBIF and MMU.
+	 */
+	if (gpudev->wait_for_lowest_idle &&
+			gpudev->wait_for_lowest_idle(adreno_dev))
+		return -EINVAL;
+
 	adreno_vbif_clear_pending_transactions(device);
 
 	kgsl_mmu_stop(&device->mmu);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 8349a9f..3118375 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -636,6 +636,9 @@
 	ADRENO_REG_VBIF_XIN_HALT_CTRL0,
 	ADRENO_REG_VBIF_XIN_HALT_CTRL1,
 	ADRENO_REG_VBIF_VERSION,
+	ADRENO_REG_GBIF_HALT,
+	ADRENO_REG_GBIF_HALT_ACK,
+	ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
 	ADRENO_REG_GMU_AO_INTERRUPT_EN,
 	ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
 	ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
@@ -884,6 +887,7 @@
 	int (*rpmh_gpu_pwrctrl)(struct adreno_device *, unsigned int ops,
 				unsigned int arg1, unsigned int arg2);
 	bool (*hw_isidle)(struct adreno_device *);
+	int (*wait_for_lowest_idle)(struct adreno_device *);
 	int (*wait_for_gmu_idle)(struct adreno_device *);
 	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
 				unsigned int fsynr1);
@@ -891,6 +895,8 @@
 	int (*soft_reset)(struct adreno_device *);
 	bool (*gx_is_on)(struct adreno_device *);
 	bool (*sptprac_is_on)(struct adreno_device *);
+	unsigned int (*ccu_invalidate)(struct adreno_device *adreno_dev,
+				unsigned int *cmds);
 };
 
 /**
@@ -1172,6 +1178,12 @@
 		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0);
 }
 
+static inline int adreno_is_a630v2(struct adreno_device *adreno_dev)
+{
+	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A630) &&
+		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1);
+}
+
 /*
  * adreno_checkreg_off() - Checks the validity of a register enum
  * @adreno_dev:		Pointer to adreno device
@@ -1795,6 +1807,47 @@
 	kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
 }
 
+static inline bool adreno_has_gbif(struct adreno_device *adreno_dev)
+{
+	if (adreno_is_a615(adreno_dev))
+		return true;
+	else
+		return false;
+}
+
+/**
+ * adreno_wait_for_vbif_halt_ack() - wait for VBIF acknowledgment
+ * for given HALT request.
+ * @ack_reg: register offset to wait for acknowledge
+ */
+static inline int adreno_wait_for_vbif_halt_ack(struct kgsl_device *device,
+	int ack_reg)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	unsigned long wait_for_vbif;
+	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
+	unsigned int val;
+	int ret = 0;
+
+	/* wait for the transactions to clear */
+	wait_for_vbif = jiffies + msecs_to_jiffies(100);
+	while (1) {
+		adreno_readreg(adreno_dev, ack_reg,
+			&val);
+		if ((val & mask) == mask)
+			break;
+		if (time_after(jiffies, wait_for_vbif)) {
+			KGSL_DRV_ERR(device,
+				"Wait limit reached for VBIF XIN Halt\n");
+			ret = -ETIMEDOUT;
+			break;
+		}
+	}
+
+	return ret;
+}
+
 /**
  * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
  * @device: Pointer to the device whose VBIF pipe is to be cleared
@@ -1805,26 +1858,20 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
-	unsigned int val;
-	unsigned long wait_for_vbif;
 	int ret = 0;
 
-	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, mask);
-	/* wait for the transactions to clear */
-	wait_for_vbif = jiffies + msecs_to_jiffies(100);
-	while (1) {
-		adreno_readreg(adreno_dev,
-			ADRENO_REG_VBIF_XIN_HALT_CTRL1, &val);
-		if ((val & mask) == mask)
-			break;
-		if (time_after(jiffies, wait_for_vbif)) {
-			KGSL_DRV_ERR(device,
-				"Wait limit reached for VBIF XIN Halt\n");
-			ret = -ETIMEDOUT;
-			break;
-		}
+	if (adreno_has_gbif(adreno_dev)) {
+		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, mask);
+		ret = adreno_wait_for_vbif_halt_ack(device,
+				ADRENO_REG_GBIF_HALT_ACK);
+		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, 0);
+	} else {
+		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0,
+			mask);
+		ret = adreno_wait_for_vbif_halt_ack(device,
+				ADRENO_REG_VBIF_XIN_HALT_CTRL1);
+		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
 	}
-	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
 	return ret;
 }
 
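
Both the GBIF and VBIF branches of adreno_vbif_clear_pending_transactions now follow the same request/poll/clear protocol, with the ack polled through the shared helper above. A standalone sketch of that protocol, using a stub register pair in place of adreno_readreg()/adreno_writereg() and a bounded loop in place of the jiffies timeout:

#include <stdint.h>
#include <stdio.h>

/* stub register pair; the real code goes through adreno_readreg()/writereg() */
static uint32_t halt_req, halt_ack;

static void write_halt_req(uint32_t v)
{
	halt_req = v;
	halt_ack = v;	/* stub hardware acknowledges immediately */
}

/* request the halt, poll the ack for the same mask, then clear the request */
static int halt_transactions(uint32_t mask, int max_polls)
{
	int i, ret = -1;	/* -1 stands in for -ETIMEDOUT */

	write_halt_req(mask);
	for (i = 0; i < max_polls; i++) {
		if ((halt_ack & mask) == mask) {
			ret = 0;
			break;
		}
	}
	write_halt_req(0);
	return ret;
}

int main(void)
{
	printf("halt %s\n", halt_transactions(0x1, 100) ? "timed out" : "acked");
	return 0;
}
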
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 2d078ba..434fef8 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -61,8 +61,8 @@
 };
 
 static void a5xx_irq_storm_worker(struct work_struct *work);
-static int _read_fw2_block_header(uint32_t *header, uint32_t id,
-	uint32_t major, uint32_t minor);
+static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
+	uint32_t id, uint32_t major, uint32_t minor);
 static void a5xx_gpmu_reset(struct work_struct *work);
 static int a5xx_gpmu_init(struct adreno_device *adreno_dev);
 
@@ -678,6 +678,7 @@
 	if (data[1] != GPMU_FIRMWARE_ID)
 		goto err;
 	ret = _read_fw2_block_header(&data[2],
+		data[0] - 2,
 		GPMU_FIRMWARE_ID,
 		adreno_dev->gpucore->gpmu_major,
 		adreno_dev->gpucore->gpmu_minor);
@@ -1200,8 +1201,8 @@
 	kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180);
 }
 
-static int _read_fw2_block_header(uint32_t *header, uint32_t id,
-				uint32_t major, uint32_t minor)
+static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
+			uint32_t id, uint32_t major, uint32_t minor)
 {
 	uint32_t header_size;
 	int i = 1;
@@ -1211,7 +1212,8 @@
 
 	header_size = header[0];
 	/* Headers have limited size and always occur as pairs of words */
-	if (header_size > MAX_HEADER_SIZE || header_size % 2)
+	if (header_size > MAX_HEADER_SIZE || header_size >= remain ||
+				header_size % 2 || header_size == 0)
 		return -EINVAL;
 	/* Sequences must have an identifying id first thing in their header */
 	if (id == GPMU_SEQUENCE_ID) {
@@ -1306,6 +1308,7 @@
 		/* For now ignore blocks other than the LM sequence */
 		if (block[4] == LM_SEQUENCE_ID) {
 			ret = _read_fw2_block_header(&block[2],
+				block_size - 2,
 				GPMU_SEQUENCE_ID,
 				adreno_dev->gpucore->lm_major,
 				adreno_dev->gpucore->lm_minor);
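
The remain argument added to _read_fw2_block_header turns the header parse into a bounds-checked walk over length-prefixed words, so a corrupt size field can no longer send the loop past the end of the firmware block. A standalone sketch of the same idea over an invented (size, id/value pairs) record:

#include <stdint.h>
#include <stdio.h>

#define MAX_HEADER_SIZE 8	/* illustrative limit */

/* walk length-prefixed records without trusting the embedded size */
static int parse_block(const uint32_t *block, uint32_t remain)
{
	uint32_t size = block[0];

	if (size == 0 || size % 2 || size > MAX_HEADER_SIZE || size >= remain)
		return -1;	/* reject before touching block[1..size] */

	for (uint32_t i = 1; i <= size; i += 2)
		printf("id %u -> value %u\n",
			(unsigned)block[i], (unsigned)block[i + 1]);
	return 0;
}

int main(void)
{
	const uint32_t block[] = { 4, 1, 100, 2, 200 };

	return parse_block(block, sizeof(block) / sizeof(block[0]));
}
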
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index da168dc..7c56ba4 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -50,10 +50,15 @@
 	{0, 0},
 };
 
-static const struct adreno_vbif_platform a6xx_vbif_platforms[] = {
-	{ adreno_is_a630, a630_vbif },
+static const struct adreno_vbif_data a615_gbif[] = {
+	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
+	{0, 0},
 };
 
+static const struct adreno_vbif_platform a6xx_vbif_platforms[] = {
+	{ adreno_is_a630, a630_vbif },
+	{ adreno_is_a615, a615_gbif },
+};
 
 struct kgsl_hwcg_reg {
 	unsigned int off;
@@ -244,18 +249,6 @@
 	{ A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x0 },
 };
 
-static void a6xx_platform_setup(struct adreno_device *adreno_dev)
-{
-	uint64_t addr;
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-
-	/* Calculate SP local and private mem addresses */
-	addr = ALIGN(ADRENO_UCHE_GMEM_BASE + adreno_dev->gmem_size, SZ_64K);
-	adreno_dev->sp_local_gpuaddr = addr;
-	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
-	gpudev->vbif_xin_halt_ctrl0_mask = A6XX_VBIF_XIN_HALT_CTRL0_MASK;
-}
-
 static void _update_always_on_regs(struct adreno_device *adreno_dev)
 {
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
@@ -369,14 +362,33 @@
 	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
 }
 
+#define RBBM_CLOCK_CNTL_ON 0x8AA8AA02
 
 static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	const struct kgsl_hwcg_reg *regs;
+	unsigned int value;
 	int i, j;
 
 	if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
+		on = false;
+
+	if (kgsl_gmu_isenabled(device)) {
+		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+			on ? 0x00020222 : 0);
+		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+			on ? 0x00010111 : 0);
+		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+			on ? 0x00050555 : 0);
+	}
+
+	kgsl_regread(device, A6XX_RBBM_CLOCK_CNTL, &value);
+
+	if (value == RBBM_CLOCK_CNTL_ON && on)
+		return;
+
+	if (value == 0 && !on)
 		return;
 
 	for (i = 0; i < ARRAY_SIZE(a6xx_hwcg_registers); i++) {
@@ -395,19 +407,12 @@
 	for (j = 0; j < a6xx_hwcg_registers[i].count; j++)
 		kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);
 
-	if (kgsl_gmu_isenabled(device)) {
-		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
-			0x00020222);
-		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
-			0x00010111);
-		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
-			0x00050555);
-	}
 	/* Enable SP clock */
 	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
 
 	/* enable top level HWCG */
-	kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL, on ? 0x8AA8AA02 : 0);
+	kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL,
+		on ? RBBM_CLOCK_CNTL_ON : 0);
 }
 
 #define LM_DEFAULT_LIMIT	6000
@@ -694,7 +699,7 @@
 		*cmds++ = lower_32_bits(gpuaddr);
 		*cmds++ = upper_32_bits(gpuaddr);
 		/* Size is in dwords */
-		*cmds++ = sizeof(a6xx_pwrup_reglist) >> 2;
+		*cmds++ = 0;
 	}
 
 	/* Pad rest of the cmds with 0's */
@@ -888,8 +893,12 @@
  */
 static void _load_gmu_rpmh_ucode(struct kgsl_device *device)
 {
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct gmu_device *gmu = &device->gmu;
 
+	/* Disable SDE clock gating */
+	kgsl_gmu_regwrite(device, A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+
 	/* Setup RSC PDC handshake for sleep and wakeup */
 	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
 	kgsl_gmu_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
@@ -909,8 +918,9 @@
 	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
 	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
 
-	/* Enable timestamp event */
-	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
+	/* Enable timestamp event for v1 only */
+	if (adreno_is_a630v1(adreno_dev))
+		kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
 
 	/* Load RSC sequencer uCode for sleep and wakeup */
 	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0, 0xA7A506A0);
@@ -920,11 +930,11 @@
 	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020E8A8);
 
 	/* Load PDC sequencer uCode for power up and power down sequence */
-	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0, 0xFFBFA1E1);
-	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 1, 0xE0A4A3A2);
-	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 2, 0xE2848382);
-	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 3, 0xFDBDE4E3);
-	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 4, 0x00002081);
+	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0, 0xFEBEA1E1);
+	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 1, 0xA5A4A3A2);
+	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 2, 0x8382A6E0);
+	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 3, 0xBCE3E284);
+	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 4, 0x002081FC);
 
 	/* Set TCS commands used by PDC sequence for low power modes */
 	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD_ENABLE_BANK, 7);
@@ -1016,6 +1026,15 @@
 }
 
 /*
+ * The lowest 16 bits of this value are the number of XO clock cycles
+ * for main hysteresis. This is the first hysteresis. Here we set it
+ * to 0x5DC cycles, or 78.1 us. The highest 16 bits of this value are
+ * the number of XO clock cycles for short hysteresis. This happens
+ * after main hysteresis. Here we set it to 0xA cycles, or 0.5 us.
+ */
+#define GMU_PWR_COL_HYST 0x000A05DC
+
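As a sanity check on the microsecond figures quoted above, a small decode sketch (not part of the patch; the GMU_HYST_* helper names are made up, and it assumes the 19.2 MHz XO rate these cycle counts imply):

/* Sketch only: decode the packed hysteresis fields, assuming a 19.2 MHz XO */
#define GMU_HYST_MAIN_CYCLES(v)		((v) & 0xFFFF)		/* 0x5DC = 1500 */
#define GMU_HYST_SHORT_CYCLES(v)	(((v) >> 16) & 0xFFFF)	/* 0x00A = 10   */
/* 1500 cycles / 19.2 MHz ~= 78.1 us; 10 cycles / 19.2 MHz ~= 0.5 us */
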
+/*
  * a6xx_gmu_power_config() - Configure and enable GMU's low power mode
  * setting based on ADRENO feature flags.
  * @device: Pointer to KGSL device
@@ -1046,13 +1065,13 @@
 		/* fall through */
 	case GPU_HW_IFPC:
 		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
-				0x000A0080);
+				GMU_PWR_COL_HYST);
 		kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
 				IFPC_ENABLE_MASK);
 		/* fall through */
 	case GPU_HW_SPTP_PC:
 		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
-				0x000A0080);
+				GMU_PWR_COL_HYST);
 		kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
 				SPTP_ENABLE_MASK);
 		/* fall through */
@@ -1271,21 +1290,12 @@
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	unsigned int val;
-	bool state;
 
 	if (!kgsl_gmu_isenabled(device))
 		return true;
 
 	kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
-	state = !(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF));
-
-	/* If GMU is holding on to the fence then we cannot dump any GX stuff */
-	kgsl_gmu_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);
-	if (val)
-		return false;
-
-	return state;
-
+	return !(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF));
 }
 
 /*
@@ -1356,12 +1366,13 @@
 	/* Disable the power counter so that the GMU is not busy */
 	kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
 
-	/* Turn off SPTPRAC before GMU turns off GX */
-	a6xx_sptprac_disable(adreno_dev);
+	/* Turn off SPTPRAC if we own it */
+	if (gmu->idle_level < GPU_HW_SPTP_PC)
+		a6xx_sptprac_disable(adreno_dev);
 
 	if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
 		ret = hfi_notify_slumber(gmu, perf_idx, bus_level);
-		return ret;
+		goto out;
 	}
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
@@ -1387,6 +1398,9 @@
 		}
 	}
 
+out:
+	/* Make sure the fence is in ALLOW mode */
+	kgsl_gmu_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
 	return ret;
 }
 
@@ -1435,36 +1449,50 @@
 static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = &device->gmu;
-	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	int val;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	int ret;
 
-	/* RSC sleep sequence */
-	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
+	/* RSC sleep sequence is different on v1 */
+	if (adreno_is_a630v1(adreno_dev))
+		kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
+
 	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 1);
 	wmb();
 
-	if (timed_poll_check(device,
-			A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0,
-			BIT(0),
-			GPU_START_TIMEOUT,
-			BIT(0))) {
+	if (adreno_is_a630v1(adreno_dev))
+		ret = timed_poll_check(device,
+				A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0,
+				BIT(0),
+				GPU_START_TIMEOUT,
+				BIT(0));
+	else
+		ret = timed_poll_check(device,
+				A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+				BIT(16),
+				GPU_START_TIMEOUT,
+				BIT(16));
+
+	if (ret) {
 		dev_err(&gmu->pdev->dev, "GPU RSC power off fail\n");
-		return -EINVAL;
+		return -ETIMEDOUT;
 	}
 
-	/* Read to clear the timestamp */
-	kgsl_gmu_regread(device, A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0,
-			&val);
-	kgsl_gmu_regread(device, A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0,
-			&val);
+	/* Read to clear the timestamp valid signal. Don't care what we read. */
+	if (adreno_is_a630v1(adreno_dev)) {
+		kgsl_gmu_regread(device,
+				A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0,
+				&ret);
+		kgsl_gmu_regread(device,
+				A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0,
+				&ret);
+	}
+
 	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
 
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
-		test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
+			test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
 		kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);
 
-	/* FIXME: v2 has different procedure to trigger sequence */
-
 	return 0;
 }
 
@@ -1540,6 +1568,8 @@
 	kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_DONE, 1);
 
 }
+
+#define GPU_LIMIT_THRESHOLD_ENABLE	BIT(31)
 /*
  * a6xx_gmu_fw_start() - set up GMU and start FW
  * @device: Pointer to KGSL device
@@ -1623,7 +1653,7 @@
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
 		test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
 		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD,
-			lm_limit(adreno_dev));
+			GPU_LIMIT_THRESHOLD_ENABLE | lm_limit(adreno_dev));
 		isense_cold_trimm(device);
 	}
 
@@ -1720,6 +1750,53 @@
 	return true;
 }
 
+static int a6xx_wait_for_lowest_idle(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	unsigned int reg;
+	unsigned long t;
+
+	if (!kgsl_gmu_isenabled(device))
+		return 0;
+
+	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
+	while (!time_after(jiffies, t)) {
+		adreno_read_gmureg(ADRENO_DEVICE(device),
+				ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
+
+		/* SPTPRAC PC has the same idle level as IFPC */
+		if ((reg == gmu->idle_level) ||
+				(gmu->idle_level == GPU_HW_SPTP_PC &&
+				reg == GPU_HW_IFPC)) {
+			/* IFPC is not complete until GX is off */
+			if (gmu->idle_level != GPU_HW_IFPC ||
+					!gpudev->gx_is_on(adreno_dev))
+				return 0;
+		}
+
+		/* Wait 100us to reduce unnecessary AHB bus traffic */
+		udelay(100);
+		cond_resched();
+	}
+
+	/* Check one last time */
+	adreno_read_gmureg(ADRENO_DEVICE(device),
+			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
+	if ((reg == gmu->idle_level) ||
+			(gmu->idle_level == GPU_HW_SPTP_PC &&
+			reg == GPU_HW_IFPC)) {
+		if (gmu->idle_level != GPU_HW_IFPC ||
+				!gpudev->gx_is_on(adreno_dev))
+			return 0;
+	}
+
+	dev_err(&gmu->pdev->dev,
+			"Timeout waiting for lowest idle level: %d\n", reg);
+	return -ETIMEDOUT;
+}
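A condensed view of the exit condition checked in the loop above (sketch only, mirroring the names used in the function):

/*
 * idle_level requested   reg reported by GMU       extra requirement
 * GPU_HW_SPTP_PC         GPU_HW_SPTP_PC or IFPC    none
 * GPU_HW_IFPC            GPU_HW_IFPC               gx_is_on() must be false
 * any other level        reg == idle_level         none
 */
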
+
 static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -2690,6 +2767,27 @@
 		A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
 };
 
+
+static struct adreno_perfcount_register a6xx_perfcounters_gbif[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW0,
+		A6XX_GBIF_PERF_CNT_HIGH0, -1, A6XX_GBIF_PERF_CNT_SEL },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW1,
+		A6XX_GBIF_PERF_CNT_HIGH1, -1, A6XX_GBIF_PERF_CNT_SEL },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW2,
+		A6XX_GBIF_PERF_CNT_HIGH2, -1, A6XX_GBIF_PERF_CNT_SEL },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW3,
+		A6XX_GBIF_PERF_CNT_HIGH3, -1, A6XX_GBIF_PERF_CNT_SEL },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_gbif_pwr[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW0,
+		A6XX_GBIF_PWR_CNT_HIGH0, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW1,
+		A6XX_GBIF_PWR_CNT_HIGH1, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW2,
+		A6XX_GBIF_PWR_CNT_HIGH2, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
+};
+
 static struct adreno_perfcount_register a6xx_perfcounters_pwr[] = {
 	{ KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
@@ -2800,6 +2898,49 @@
 	return 0;
 }
 
+static void a6xx_platform_setup(struct adreno_device *adreno_dev)
+{
+	uint64_t addr;
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+
+	/* Calculate SP local and private mem addresses */
+	addr = ALIGN(ADRENO_UCHE_GMEM_BASE + adreno_dev->gmem_size, SZ_64K);
+	adreno_dev->sp_local_gpuaddr = addr;
+	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
+
+	if (adreno_has_gbif(adreno_dev)) {
+		a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].regs =
+				a6xx_perfcounters_gbif;
+		a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].reg_count
+				= ARRAY_SIZE(a6xx_perfcounters_gbif);
+
+		a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs =
+				a6xx_perfcounters_gbif_pwr;
+		a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR]
+			.reg_count = ARRAY_SIZE(a6xx_perfcounters_gbif_pwr);
+
+		gpudev->vbif_xin_halt_ctrl0_mask =
+				A6XX_GBIF_HALT_MASK;
+	} else
+		gpudev->vbif_xin_halt_ctrl0_mask =
+				A6XX_VBIF_XIN_HALT_CTRL0_MASK;
+}
+
+
+static unsigned int a6xx_ccu_invalidate(struct adreno_device *adreno_dev,
+	unsigned int *cmds)
+{
+	/* CCU_INVALIDATE_DEPTH */
+	*cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
+	*cmds++ = 24;
+
+	/* CCU_INVALIDATE_COLOR */
+	*cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
+	*cmds++ = 25;
+
+	return 4;
+}
+
 /* Register offset defines for A6XX, in order of enum adreno_regs */
 static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 
@@ -2859,10 +3000,14 @@
 				A6XX_VBIF_XIN_HALT_CTRL0),
 	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
 				A6XX_VBIF_XIN_HALT_CTRL1),
+	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT, A6XX_GBIF_HALT),
+	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT_ACK, A6XX_GBIF_HALT_ACK),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
 				A6XX_GMU_ALWAYS_ON_COUNTER_L),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
 				A6XX_GMU_ALWAYS_ON_COUNTER_H),
+	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
+				A6XX_GMU_AO_AHB_FENCE_CTRL),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_INTERRUPT_EN,
 				A6XX_GMU_AO_INTERRUPT_EN),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
@@ -2943,6 +3088,7 @@
 	.gpu_keepalive = a6xx_gpu_keepalive,
 	.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
 	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
+	.wait_for_lowest_idle = a6xx_wait_for_lowest_idle,
 	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
 	.iommu_fault_block = a6xx_iommu_fault_block,
 	.reset = a6xx_reset,
@@ -2956,4 +3102,5 @@
 	.preemption_context_destroy = a6xx_preemption_context_destroy,
 	.gx_is_on = a6xx_gx_is_on,
 	.sptprac_is_on = a6xx_sptprac_is_on,
+	.ccu_invalidate = a6xx_ccu_invalidate,
 };
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 880ee13..e865f20 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -557,6 +557,7 @@
 	const unsigned int *regs;
 	unsigned int count;
 	const struct sel_reg *sel;
+	uint64_t offset;
 } a6xx_reg_list[] = {
 	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) / 2, NULL },
 	{ a6xx_rb_rac_registers, ARRAY_SIZE(a6xx_rb_rac_registers) / 2,
@@ -589,7 +590,7 @@
 	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
 	struct reg_list *regs = (struct reg_list *)priv;
 	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
-	unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
+	unsigned int *src;
 	unsigned int j, k;
 	unsigned int count = 0;
 
@@ -602,6 +603,7 @@
 		return 0;
 	}
 
+	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
 	remain -= sizeof(*header);
 
 	for (j = 0; j < regs->count; j++) {
@@ -1357,6 +1359,7 @@
 		struct kgsl_snapshot *snapshot)
 {
 	int i;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
 	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
 		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
@@ -1447,9 +1450,12 @@
 			(void *) &a6xx_dbgc_debugbus_blocks[i]);
 	}
 
-	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUGBUS,
-			snapshot, a6xx_snapshot_vbif_debugbus_block,
-			(void *) &a6xx_vbif_debugbus_blocks);
+	/* Skip if GPU has GBIF */
+	if (!adreno_has_gbif(adreno_dev))
+		kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+				snapshot, a6xx_snapshot_vbif_debugbus_block,
+				(void *) &a6xx_vbif_debugbus_blocks);
 
 	if (a6xx_cx_dbgc) {
 		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
@@ -1573,6 +1579,12 @@
 	bool sptprac_on;
 	unsigned int i;
 
+	/* Make sure the fence is in ALLOW mode so registers can be read */
+	kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+	/* GMU TCM data dumped through AHB */
+	a6xx_snapshot_gmu(adreno_dev, snapshot);
+
 	sptprac_on = gpudev->sptprac_is_on(adreno_dev);
 
 	/* Return if the GX is off */
@@ -1584,9 +1596,10 @@
 		snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);
 
 	/* Dump vbif registers as well which get affected by crash dumper */
-	adreno_snapshot_vbif_registers(device, snapshot,
-		a6xx_vbif_snapshot_registers,
-		ARRAY_SIZE(a6xx_vbif_snapshot_registers));
+	if (!adreno_has_gbif(adreno_dev))
+		adreno_snapshot_vbif_registers(device, snapshot,
+			a6xx_vbif_snapshot_registers,
+			ARRAY_SIZE(a6xx_vbif_snapshot_registers));
 
 	/* Try to run the crash dumper */
 	if (sptprac_on)
@@ -1919,6 +1932,8 @@
 	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
 		struct reg_list *regs = &a6xx_reg_list[i];
 
+		regs->offset = offset;
+
 		/* Program the SEL_CNTL_CD register appropriately */
 		if (regs->sel) {
 			*ptr++ = regs->sel->val;
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 0da4da9..9ea8069 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -28,6 +28,20 @@
 /* offset of enable register from select register */
 #define VBIF2_PERF_EN_REG_SEL_OFF 16
 
+/* offset of clear register from select register for GBIF */
+#define GBIF_PERF_CLR_REG_SEL_OFF 1
+
+/* offset of enable register from select register for GBIF*/
+#define GBIF_PERF_EN_REG_SEL_OFF  2
+
+/* offset of clear register from the power enable register for GBIF*/
+#define GBIF_PWR_CLR_REG_EN_OFF    1
+
+/* mask of the per-counter countable select field for GBIF (one byte each) */
+#define GBIF_PERF_RMW_MASK   0xFF
+/* mask of the per-counter enable/clear bit for GBIF power counters */
+#define GBIF_PWR_RMW_MASK    0x10000
+
 /* offset of clear register from the enable register */
 #define VBIF2_PERF_PWR_CLR_REG_EN_OFF 8
 
@@ -160,10 +174,15 @@
 	struct adreno_perfcounters *counters = ADRENO_PERFCOUNTERS(adreno_dev);
 	struct adreno_perfcount_group *group;
 	unsigned int counter, groupid;
+	int ret;
 
 	if (counters == NULL)
 		return;
 
+	ret = adreno_perfcntr_active_oob_get(adreno_dev);
+	if (ret)
+		return;
+
 	for (groupid = 0; groupid < counters->group_count; groupid++) {
 		group = &(counters->groups[groupid]);
 
@@ -183,6 +202,8 @@
 								counter);
 		}
 	}
+
+	adreno_perfcntr_active_oob_put(adreno_dev);
 }
 
 static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
@@ -612,14 +633,41 @@
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_perfcount_register *reg;
+	unsigned int shift = counter << 3;
 
 	reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF].regs[counter];
-	/* Write 1, followed by 0 to CLR register for clearing the counter */
-	kgsl_regwrite(device, reg->select - VBIF2_PERF_CLR_REG_SEL_OFF, 1);
-	kgsl_regwrite(device, reg->select - VBIF2_PERF_CLR_REG_SEL_OFF, 0);
-	kgsl_regwrite(device, reg->select, countable & VBIF2_PERF_CNT_SEL_MASK);
-	/* enable reg is 8 DWORDS before select reg */
-	kgsl_regwrite(device, reg->select - VBIF2_PERF_EN_REG_SEL_OFF, 1);
+
+	if (adreno_has_gbif(adreno_dev)) {
+		/*
+		 * Write 1, followed by 0 to CLR register for
+		 * clearing the counter
+		 */
+		kgsl_regrmw(device, reg->select - GBIF_PERF_CLR_REG_SEL_OFF,
+			1 << counter, 1);
+		kgsl_regrmw(device, reg->select - GBIF_PERF_CLR_REG_SEL_OFF,
+			1 << counter, 0);
+		/* select the desired countable */
+		kgsl_regrmw(device, reg->select,
+			GBIF_PERF_RMW_MASK << shift, countable << shift);
+		/* enable counter */
+		kgsl_regrmw(device, reg->select - GBIF_PERF_EN_REG_SEL_OFF,
+			1 << counter, 1);
+
+	} else {
+		/*
+		 * Write 1, followed by 0 to CLR register for
+		 * clearing the counter
+		 */
+		kgsl_regwrite(device,
+			reg->select - VBIF2_PERF_CLR_REG_SEL_OFF, 1);
+		kgsl_regwrite(device,
+			reg->select - VBIF2_PERF_CLR_REG_SEL_OFF, 0);
+		kgsl_regwrite(device,
+			reg->select, countable & VBIF2_PERF_CNT_SEL_MASK);
+		/* enable reg is 8 DWORDS before select reg */
+		kgsl_regwrite(device,
+			reg->select - VBIF2_PERF_EN_REG_SEL_OFF, 1);
+	}
 	reg->value = 0;
 }
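A worked example of the byte-lane addressing used above (illustrative only; the counter index and countable value are made up):

/*
 * counter = 2, countable = 0x15:
 *   shift = 2 << 3 = 16
 *   select RMW mask  = GBIF_PERF_RMW_MASK << 16 = 0x00FF0000
 *   select RMW value = 0x15 << 16               = 0x00150000
 * so only byte 2 of the shared GBIF select register changes, and the
 * clear/enable RMWs touch only bit (1 << 2) of their registers.
 */
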
 
@@ -630,10 +678,30 @@
 	struct adreno_perfcount_register *reg;
 
 	reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs[counter];
-	/* Write 1, followed by 0 to CLR register for clearing the counter */
-	kgsl_regwrite(device, reg->select + VBIF2_PERF_PWR_CLR_REG_EN_OFF, 1);
-	kgsl_regwrite(device, reg->select + VBIF2_PERF_PWR_CLR_REG_EN_OFF, 0);
-	kgsl_regwrite(device, reg->select, 1);
+
+	if (adreno_has_gbif(adreno_dev)) {
+		/*
+		 * Write 1, followed by 0 to CLR register for
+		 * clearing the counter
+		 */
+		kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
+			GBIF_PWR_RMW_MASK << counter, 1);
+		kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
+			GBIF_PWR_RMW_MASK << counter, 0);
+		/* Enable the counter */
+		kgsl_regrmw(device, reg->select,
+			GBIF_PWR_RMW_MASK << counter, 1);
+	} else {
+		/*
+		 * Write 1, followed by 0 to CLR register for
+		 * clearing the counter
+		 */
+		kgsl_regwrite(device, reg->select +
+			VBIF2_PERF_PWR_CLR_REG_EN_OFF, 1);
+		kgsl_regwrite(device, reg->select +
+			VBIF2_PERF_PWR_CLR_REG_EN_OFF, 0);
+		kgsl_regwrite(device, reg->select, 1);
+	}
 	reg->value = 0;
 }
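And similarly for the power counters (illustrative counter index):

/*
 * counter = 1: GBIF_PWR_RMW_MASK << 1 = 0x20000, i.e. bit 17 of the
 * enable/clear registers, leaving the other power counters' bits
 * untouched by the RMW.
 */
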
 
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 09c1ae6..d248479 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -83,10 +83,12 @@
 /*
  * Wait time before trying to write the register again.
  * Hopefully the GMU has finished waking up during this delay.
+ * This delay must be less than the IFPC main hysteresis or
+ * the GMU will start shutting down before we try again.
  */
-#define GMU_WAKEUP_DELAY 50
+#define GMU_WAKEUP_DELAY 20
 /* Max amount of tries to wake up the GMU. */
-#define GMU_WAKEUP_RETRY_MAX 20
+#define GMU_WAKEUP_RETRY_MAX 60
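For reference, a rough budget check on the new constants (illustrative only; units are whatever the wakeup retry loop sleeps in):

/*
 * before: GMU_WAKEUP_DELAY * GMU_WAKEUP_RETRY_MAX = 50 * 20 = 1000
 * after:  GMU_WAKEUP_DELAY * GMU_WAKEUP_RETRY_MAX = 20 * 60 = 1200
 * The per-try delay shrinks to stay under the IFPC main hysteresis,
 * while the overall wait budget remains comparable.
 */
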
 
 /*
  * Check the WRITEDROPPED0 bit in the
@@ -877,6 +879,9 @@
 	if (gpudev->set_marker)
 		dwords += 4;
 
+	if (gpudev->ccu_invalidate)
+		dwords += 4;
+
 	link = kcalloc(dwords, sizeof(unsigned int), GFP_KERNEL);
 	if (!link) {
 		ret = -ENOMEM;
@@ -930,6 +935,9 @@
 		}
 	}
 
+	if (gpudev->ccu_invalidate)
+		cmds += gpudev->ccu_invalidate(adreno_dev, cmds);
+
 	if (gpudev->set_marker)
 		cmds += gpudev->set_marker(cmds, 0);
 
diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c
index b06aa98..2d001af6 100644
--- a/drivers/gpu/msm/adreno_sysfs.c
+++ b/drivers/gpu/msm/adreno_sysfs.c
@@ -165,7 +165,6 @@
 
 	if (test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)) {
 		kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
-		adreno_irqctrl(adreno_dev, 1);
 	} else if (device->state == KGSL_STATE_INIT) {
 		ret = -EACCES;
 		change_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 2df71e4..78ef8e5 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -505,9 +505,10 @@
 	unsigned int len;
 
 	len = cmd_db_get_aux_data_len(res_id);
+	if (len == 0)
+		return -EINVAL;
 
 	if (len > (MAX_GX_LEVELS << 1)) {
-		/* CmdDB VLVL table size in bytes is too large */
 		dev_err(&gmu->pdev->dev,
 			"gfx cmddb size %d larger than alloc buf %d of %s\n",
 			len, (MAX_GX_LEVELS << 1), res_id);
@@ -515,8 +516,16 @@
 	}
 
 	cmd_db_get_aux_data(res_id, (uint8_t *)arc->val, len);
-	for (arc->num = 1; arc->num <= MAX_GX_LEVELS; arc->num++) {
-		if (arc->num == MAX_GX_LEVELS ||
+
+	/*
+	 * cmd_db_get_aux_data() gives us a zero-padded table of
+	 * size len that contains the arc values. To determine the
+	 * number of arc values, we loop through the table and count
+	 * them until we get to the end of the buffer or hit the
+	 * zero padding.
+	 */
+	for (arc->num = 1; arc->num <= len; arc->num++) {
+		if (arc->num == len ||
 				arc->val[arc->num - 1] >= arc->val[arc->num])
 			break;
 	}
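For example (hypothetical table contents, just to illustrate the counting loop above):

/*
 * arc->val[] = { 16, 48, 64, 0, 0, ... }, len = 16:
 * the loop stops at arc->num = 3 because val[2] >= val[3], i.e. the
 * zero padding ends the strictly-increasing run of real arc values.
 */
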
@@ -1149,7 +1158,6 @@
 		goto error;
 
 	gmu->num_gpupwrlevels = pwr->num_pwrlevels;
-	gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
 
 	for (i = 0; i < gmu->num_gpupwrlevels; i++) {
 		int j = gmu->num_gpupwrlevels - 1 - i;
@@ -1411,11 +1419,8 @@
 		if (ret)
 			goto error_gmu;
 
-		/* Send default DCVS level */
-		ret = gmu_dcvs_set(gmu, pwr->default_pwrlevel,
-				pwr->pwrlevels[pwr->default_pwrlevel].bus_freq);
-		if (ret)
-			goto error_gmu;
+		/* Request default DCVS level */
+		kgsl_pwrctrl_pwrlevel_change(device, pwr->default_pwrlevel);
 
 		msm_bus_scale_client_update_request(gmu->pcl, 0);
 		break;
@@ -1435,12 +1440,7 @@
 		if (ret)
 			goto error_gmu;
 
-		ret = gmu_dcvs_set(gmu, gmu->wakeup_pwrlevel,
-				pwr->pwrlevels[gmu->wakeup_pwrlevel].bus_freq);
-		if (ret)
-			goto error_gmu;
-
-		gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
+		kgsl_pwrctrl_pwrlevel_change(device, pwr->default_pwrlevel);
 		break;
 
 	case KGSL_STATE_RESET:
@@ -1462,11 +1462,8 @@
 				goto error_gmu;
 
 			/* Send DCVS level prior to reset*/
-			ret = gmu_dcvs_set(gmu, pwr->active_pwrlevel,
-					pwr->pwrlevels[pwr->active_pwrlevel]
-					.bus_freq);
-			if (ret)
-				goto error_gmu;
+			kgsl_pwrctrl_pwrlevel_change(device,
+				pwr->default_pwrlevel);
 
 			ret = gpudev->oob_set(adreno_dev,
 				OOB_CPINIT_SET_MASK,
@@ -1480,10 +1477,6 @@
 		break;
 	}
 
-	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
-		gpudev->oob_clear(adreno_dev,
-				OOB_BOOT_SLUMBER_CLEAR_MASK);
-
 	return ret;
 
 error_gmu:
@@ -1491,44 +1484,23 @@
 	return ret;
 }
 
-#define GMU_IDLE_TIMEOUT	10 /* ms */
-
 /* Caller shall ensure GPU is ready for SLUMBER */
 void gmu_stop(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = &device->gmu;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-	unsigned long t;
-	bool idle = false;
-	unsigned int reg;
+	bool idle = true;
 
 	if (!test_bit(GMU_CLK_ON, &gmu->flags))
 		return;
 
-	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
-	while (!time_after(jiffies, t)) {
-		adreno_read_gmureg(ADRENO_DEVICE(device),
-			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
-		if (reg == device->gmu.idle_level) {
-			idle = true;
-			break;
-		}
-		/* Wait 100us to reduce unnecessary AHB bus traffic */
-		udelay(100);
-		cond_resched();
-	}
-
-	/* Double check one last time */
-	if (idle == false) {
-		adreno_read_gmureg(ADRENO_DEVICE(device),
-			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
-		if (reg == device->gmu.idle_level)
-			idle = true;
-	}
+	/* Wait for the lowest idle level we requested */
+	if (gpudev->wait_for_lowest_idle &&
+			gpudev->wait_for_lowest_idle(adreno_dev))
+		idle = false;
 
 	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
-
 	if (!idle || (gpudev->wait_for_gmu_idle &&
 			gpudev->wait_for_gmu_idle(adreno_dev))) {
 		dev_err(&gmu->pdev->dev, "Stopping GMU before it is idle\n");
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index ff65f66..fc6bafa 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -56,6 +56,9 @@
 #define GPUBUSYIGNAHB		BIT(23)
 #define CXGXCPUBUSYIGNAHB	BIT(30)
 
+/* GMU timeouts */
+#define GMU_IDLE_TIMEOUT        10 /* ms */
+
 /* Constants for GMU OOBs */
 #define OOB_BOOT_OPTION         0
 #define OOB_SLUMBER_OPTION      1
@@ -73,16 +76,17 @@
 #define OOB_PERFCNTR_SET_MASK		BIT(17)
 #define OOB_PERFCNTR_CHECK_MASK		BIT(25)
 #define OOB_PERFCNTR_CLEAR_MASK		BIT(25)
-#define OOB_GPUSTART_SET_MASK		BIT(18)
-#define OOB_GPUSTART_CHECK_MASK		BIT(26)
-#define OOB_GPUSTART_CLEAR_MASK		BIT(26)
+#define OOB_GPU_SET_MASK		BIT(18)
+#define OOB_GPU_CHECK_MASK		BIT(26)
+#define OOB_GPU_CLEAR_MASK		BIT(26)
 
 /* Bits for the flags field in the gmu structure */
 enum gmu_flags {
 	GMU_BOOT_INIT_DONE = 0,
 	GMU_CLK_ON = 1,
 	GMU_HFI_ON = 2,
-	GMU_FAULT = 3
+	GMU_FAULT = 3,
+	GMU_DCVS_REPLAY = 4,
 };
 
 /**
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 68e0f3a..2cc60b5 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -278,8 +278,7 @@
 	int rc = 0;
 	struct pending_msg msg;
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *)&init_msg,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &init_msg.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -309,8 +308,7 @@
 	int rc = 0;
 	struct pending_msg msg;
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *)&fw_ver,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &fw_ver.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -346,8 +344,7 @@
 		lmconfig.lm_enable_bitmask =
 			(1 << (gmu->lm_dcvs_level + 1)) - 1;
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *) &lmconfig,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &lmconfig.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -388,8 +385,7 @@
 
 	}
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *)&dcvstbl,
-			msg_size, &msg);
+	rc = hfi_send_msg(gmu, &dcvstbl.hdr, msg_size, &msg);
 	if (rc)
 		return rc;
 
@@ -441,8 +437,7 @@
 					gmu->rpmh_votes.cnoc_votes.
 					cmd_data[i][j];
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *) &bwtbl,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &bwtbl.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -454,6 +449,22 @@
 	return rc;
 }
 
+static int hfi_send_test(struct gmu_device *gmu)
+{
+	struct hfi_test_cmd test_msg = {
+		.hdr = {
+			.id = H2F_MSG_TEST,
+			.size = sizeof(test_msg) >> 2,
+			.type = HFI_MSG_CMD,
+		},
+	};
+	uint32_t msg_size_dwords = (sizeof(test_msg)) >> 2;
+	struct pending_msg msg;
+
+	return hfi_send_msg(gmu, &test_msg.hdr, msg_size_dwords, &msg);
+}
+
 int hfi_send_dcvs_vote(struct gmu_device *gmu, uint32_t perf_idx,
 		uint32_t bw_idx, enum rpm_ack_type ack_type)
 {
@@ -478,8 +489,7 @@
 	int rc = 0;
 	struct pending_msg msg;
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *)&dcvs_cmd,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &dcvs_cmd.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -511,8 +521,7 @@
 	if (init_perf_idx >= MAX_GX_LEVELS || init_bw_idx >= MAX_GX_LEVELS)
 		return -EINVAL;
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *) &slumber_cmd,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &slumber_cmd.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -614,12 +623,19 @@
 
 		result = hfi_send_lmconfig(gmu);
 		if (result) {
-			dev_err(dev, "Failire enabling limits management (%d)\n",
-				result);
+			dev_err(dev, "Failure enabling LM (%d)\n",
+					result);
 			return result;
 		}
 	}
 
+	/* Tell the GMU we are sending no more HFIs until the next boot */
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
+		result = hfi_send_test(gmu);
+		if (result)
+			return result;
+	}
+
 	set_bit(GMU_HFI_ON, &gmu->flags);
 	return 0;
 }
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 47d07d9..191987e 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -228,6 +228,10 @@
 	uint32_t ddr_cmd_data[MAX_GX_LEVELS][MAX_BW_CMDS];
 };
 
+struct hfi_test_cmd {
+	struct hfi_msg_hdr hdr;
+};
+
 struct arc_vote_desc {
 	/* In case of GPU freq vote, primary is GX, secondary is MX
 	 * in case of GMU freq vote, primary is CX, secondary is MX
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 007121c..97e5c22 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -244,12 +244,9 @@
 	/* GMU scales GPU freq */
 	if (kgsl_gmu_isenabled(device)) {
 		/* If GMU has not been started, save it */
-		if (!(gmu->flags & GMU_HFI_ON)) {
-			/* In slumber the clock is off so we are done */
-			if (pwrlevel == (gmu->num_gpupwrlevels - 1))
-				return 0;
-
-			gmu->wakeup_pwrlevel = pwrlevel;
+		if (!test_bit(GMU_HFI_ON, &gmu->flags)) {
+			/* store clock change request */
+			set_bit(GMU_DCVS_REPLAY, &gmu->flags);
 			return 0;
 		}
 
@@ -259,6 +256,8 @@
 			return -EINVAL;
 		}
 		ret = gmu_dcvs_set(gmu, pwrlevel, INVALID_DCVS_IDX);
+		/* indicate actual clock change */
+		clear_bit(GMU_DCVS_REPLAY, &gmu->flags);
 	} else
 		/* Linux clock driver scales GPU freq */
 		ret = clk_set_rate(pwr->grp_clks[0], pl->gpu_freq);
@@ -412,7 +411,8 @@
 	 */
 	kgsl_pwrctrl_set_thermal_cycle(pwr, new_level);
 
-	if (new_level == old_level)
+	if (new_level == old_level &&
+		!test_bit(GMU_DCVS_REPLAY, &device->gmu.flags))
 		return;
 
 	kgsl_pwrscale_update_stats(device);
@@ -2485,6 +2485,9 @@
 static void kgsl_pwrctrl_disable(struct kgsl_device *device)
 {
 	if (kgsl_gmu_isenabled(device)) {
+		struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+		pwr->active_pwrlevel = pwr->num_pwrlevels - 1;
 		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
 		return gmu_stop(device);
 	}
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 7264381..7e9999b 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -448,8 +448,8 @@
 		int stretch = (i < (num - 1));
 		dma_cookie_t tx_cookie, rx_cookie;
 		struct msm_gpi_tre *go_t = &gi2c->go_t;
-		struct device *rx_dev = gi2c->dev;
-		struct device *tx_dev = gi2c->dev;
+		struct device *rx_dev = gi2c->wrapper_dev;
+		struct device *tx_dev = gi2c->wrapper_dev;
 
 		gi2c->cur = &msgs[i];
 		if (!gi2c->cfg_sent) {
@@ -480,9 +480,8 @@
 
 		if (msgs[i].flags & I2C_M_RD) {
 			sg_init_table(&gi2c->rx_sg, 1);
-			gi2c->rx_ph = dma_map_single(rx_dev, msgs[i].buf,
-						     msgs[i].len,
-						     DMA_FROM_DEVICE);
+			geni_se_iommu_map_buf(rx_dev, &gi2c->rx_ph, msgs[i].buf,
+						msgs[i].len, DMA_FROM_DEVICE);
 			gi2c->rx_t.dword[0] =
 				MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(gi2c->rx_ph);
 			gi2c->rx_t.dword[1] =
@@ -512,9 +511,8 @@
 			rx_cookie = dmaengine_submit(gi2c->rx_desc);
 			dma_async_issue_pending(gi2c->rx_c);
 		} else {
-			gi2c->tx_ph = dma_map_single(tx_dev, msgs[i].buf,
-						     msgs[i].len,
-						     DMA_TO_DEVICE);
+			geni_se_iommu_map_buf(tx_dev, &gi2c->tx_ph, msgs[i].buf,
+						msgs[i].len, DMA_TO_DEVICE);
 			gi2c->tx_t.dword[0] =
 				MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(gi2c->tx_ph);
 			gi2c->tx_t.dword[1] =
@@ -547,11 +545,11 @@
 
 		timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
 		if (msgs[i].flags & I2C_M_RD)
-			dma_unmap_single(rx_dev, gi2c->rx_ph, msgs[i].len,
-					 DMA_FROM_DEVICE);
+			geni_se_iommu_unmap_buf(rx_dev, &gi2c->rx_ph,
+				msgs[i].len, DMA_FROM_DEVICE);
 		else
-			dma_unmap_single(tx_dev, gi2c->tx_ph, msgs[i].len,
-					 DMA_TO_DEVICE);
+			geni_se_iommu_unmap_buf(tx_dev, &gi2c->tx_ph,
+				msgs[i].len, DMA_TO_DEVICE);
 
 		if (!timeout) {
 			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 357bfb2..02dfbf8 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -22,6 +22,7 @@
 #include <linux/regmap.h>
 #include <linux/delay.h>
 #include <linux/qpnp/qpnp-revid.h>
+#include <linux/power_supply.h>
 
 #define FG_ADC_RR_EN_CTL			0x46
 #define FG_ADC_RR_SKIN_TEMP_LSB			0x50
@@ -192,8 +193,7 @@
 #define FG_RR_ADC_STS_CHANNEL_READING_MASK	0x3
 #define FG_RR_ADC_STS_CHANNEL_STS		0x2
 
-#define FG_RR_CONV_CONTINUOUS_TIME_MIN_US	50000
-#define FG_RR_CONV_CONTINUOUS_TIME_MAX_US	51000
+#define FG_RR_CONV_CONTINUOUS_TIME_MIN_MS	50
 #define FG_RR_CONV_MAX_RETRY_CNT		50
 #define FG_RR_TP_REV_VERSION1		21
 #define FG_RR_TP_REV_VERSION2		29
@@ -235,6 +235,7 @@
 	struct device_node		*revid_dev_node;
 	struct pmic_revid_data		*pmic_fab_id;
 	int volt;
+	struct power_supply		*usb_trig;
 };
 
 struct rradc_channels {
@@ -726,6 +727,24 @@
 	return rc;
 }
 
+static bool rradc_is_usb_present(struct rradc_chip *chip)
+{
+	union power_supply_propval pval;
+	int rc;
+	bool usb_present = false;
+
+	if (!chip->usb_trig) {
+		pr_debug("USB property not present\n");
+		return usb_present;
+	}
+
+	rc = power_supply_get_property(chip->usb_trig,
+			POWER_SUPPLY_PROP_PRESENT, &pval);
+	usb_present = (rc < 0) ? 0 : pval.intval;
+
+	return usb_present;
+}
+
 static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
 		struct rradc_chan_prop *prop, u8 *buf, u16 status)
 {
@@ -745,8 +764,18 @@
 			(retry_cnt < FG_RR_CONV_MAX_RETRY_CNT)) {
 		pr_debug("%s is not ready; nothing to read:0x%x\n",
 			rradc_chans[prop->channel].datasheet_name, buf[0]);
-		usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN_US,
-				FG_RR_CONV_CONTINUOUS_TIME_MAX_US);
+
+		if (((prop->channel == RR_ADC_CHG_TEMP) ||
+			(prop->channel == RR_ADC_SKIN_TEMP) ||
+			(prop->channel == RR_ADC_USBIN_I) ||
+			(prop->channel == RR_ADC_DIE_TEMP)) &&
+					((!rradc_is_usb_present(chip)))) {
+			pr_debug("USB not present for %d\n", prop->channel);
+			rc = -ENODATA;
+			break;
+		}
+
+		msleep(FG_RR_CONV_CONTINUOUS_TIME_MIN_MS);
 		retry_cnt++;
 		rc = rradc_read(chip, status, buf, 1);
 		if (rc < 0) {
@@ -764,7 +793,7 @@
 static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
 			struct rradc_chan_prop *prop, u8 *buf)
 {
-	int rc = 0;
+	int rc = 0, ret = 0;
 	u16 status = 0;
 
 	rc = rradc_enable_continuous_mode(chip);
@@ -777,23 +806,25 @@
 	rc = rradc_read(chip, status, buf, 1);
 	if (rc < 0) {
 		pr_err("status read failed:%d\n", rc);
-		return rc;
+		ret = rc;
+		goto disable;
 	}
 
 	rc = rradc_check_status_ready_with_retry(chip, prop,
 						buf, status);
 	if (rc < 0) {
 		pr_err("Status read failed:%d\n", rc);
-		return rc;
+		ret = rc;
 	}
 
+disable:
 	rc = rradc_disable_continuous_mode(chip);
 	if (rc < 0) {
 		pr_err("Failed to switch to non continuous mode\n");
-		return rc;
+		ret = rc;
 	}
 
-	return rc;
+	return ret;
 }
 
 static int rradc_enable_batt_id_channel(struct rradc_chip *chip, bool enable)
@@ -1152,6 +1183,10 @@
 	indio_dev->channels = chip->iio_chans;
 	indio_dev->num_channels = chip->nchannels;
 
+	chip->usb_trig = power_supply_get_by_name("usb");
+	if (!chip->usb_trig)
+		pr_debug("Error obtaining usb power supply\n");
+
 	return devm_iio_device_register(dev, indio_dev);
 }
 
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 49c6ea6..a0e1f59 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1713,6 +1713,9 @@
 		quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
 	if (is_iommu_pt_coherent(smmu_domain))
 		quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
+	if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
+		(smmu->model == QCOM_SMMUV500))
+		quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
 
 	tlb = &arm_smmu_gather_ops;
 	if (smmu->options & ARM_SMMU_OPT_QCOM_MMU500_ERRATA1)
@@ -2393,6 +2396,8 @@
 	if (ret)
 		return ret;
 
+	arm_smmu_secure_domain_lock(smmu_domain);
+
 	__saved_iova_start = iova;
 	idx_start = idx_end = 0;
 	sg_start = sg_end = sg;
@@ -2430,6 +2435,7 @@
 		arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
 		iova = __saved_iova_start;
 	}
+	arm_smmu_secure_domain_unlock(smmu_domain);
 	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
 	return iova - __saved_iova_start;
 }
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index dde2876..a3594d2 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -1011,6 +1011,11 @@
 		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
 			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
 			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+	else if ((cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
+		(cfg->quirks & IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE))
+		reg = (ARM_LPAE_TCR_SH_NS << ARM_LPAE_TCR_SH0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
 	else if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT)
 		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
 			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index a686ad0..b35016e 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -81,6 +81,12 @@
 	 *
 	 * IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT: Set the page table as
 	 *	coherent.
+	 *
+	 * IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE:
+	 *	Page tables that are non-coherent but cached in a system
+	 *	cache require SH=Non-Shareable. This applies to the
+	 *	qsmmuv500 model. For data buffers, SH=Non-Shareable is not
+	 *	required.
 	 */
 	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
 	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
@@ -88,6 +94,7 @@
 	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
 	#define IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT	BIT(4)
 	#define IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT BIT(5)
+	#define IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE BIT(6)
 	unsigned long			quirks;
 	unsigned long			pgsize_bitmap;
 	unsigned int			ias;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 779001e..01f9435 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -132,6 +132,45 @@
 }
 #endif
 
+/*
+ * gic_show_pending_irqs - Shows the pending interrupts
+ * Note: Interrupts should be disabled on the cpu from which
+ * this is called to get an accurate list of pending interrupts.
+ */
+void gic_show_pending_irqs(void)
+{
+	void __iomem *base;
+	u32 pending[32], enabled;
+	unsigned int j;
+
+	base = gic_data.dist_base;
+	for (j = 0; j * 32 < gic_data.irq_nr; j++) {
+		enabled = readl_relaxed(base +
+					GICD_ISENABLER + j * 4);
+		pending[j] = readl_relaxed(base +
+					GICD_ISPENDR + j * 4);
+		pending[j] &= enabled;
+		pr_err("Pending irqs[%d] %x\n", j, pending[j]);
+	}
+}
+
+/*
+ * get_gic_highpri_irq - Returns next high priority interrupt on current CPU
+ */
+unsigned int get_gic_highpri_irq(void)
+{
+	unsigned long flags;
+	unsigned int val = 0;
+
+	local_irq_save(flags);
+	val = read_gicreg(ICC_HPPIR1_EL1);
+	local_irq_restore(flags);
+
+	if (val >= 1020)
+		return 0;
+	return val;
+}
+
 static void gic_enable_redist(bool enable)
 {
 	void __iomem *rbase;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index d9133b9..e3d46df 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -402,9 +402,10 @@
 			CAM_CPAS_POLL_RETRY_CNT,
 			CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
 		if (rc) {
-			CAM_ERR(CAM_CPAS,
+			CAM_DBG(CAM_CPAS,
 				"camnoc flush slave pending trans failed");
 			/* Do not return error, passthrough */
+			rc = 0;
 		}
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
index f4d0e36..918258d 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
@@ -515,7 +515,7 @@
 
 static struct cam_cpas_hw_errata_wa_list cam170_cpas110_errata_wa_list = {
 	.camnoc_flush_slave_pending_trans = {
-		.enable = true,
+		.enable = false,
 		.data.reg_info = {
 			.access_type = CAM_REG_TYPE_READ,
 			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index 5ed2222e..37e6954 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -1376,7 +1376,7 @@
 
 	/* We do not expect any patching, but just do it anyway */
 	rc = cam_packet_util_process_patches(prepare->packet,
-		hw_mgr->device_iommu.non_secure);
+		hw_mgr->device_iommu.non_secure, -1);
 	if (rc) {
 		CAM_ERR(CAM_FD, "Patch FD packet failed, rc=%d", rc);
 		return rc;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 7c5b405..3354e2c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -880,6 +880,12 @@
 	buf_data.request_id = hfi_frame_process->request_id[idx];
 	ctx_data->ctxt_event_cb(ctx_data->context_priv, flag, &buf_data);
 	hfi_frame_process->request_id[idx] = 0;
+	if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+		CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+			ctx_data->hfi_frame_process.in_resource[idx]);
+		cam_sync_destroy(ctx_data->hfi_frame_process.in_resource[idx]);
+		ctx_data->hfi_frame_process.in_resource[idx] = 0;
+	}
 	clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
 	hfi_frame_process->fw_process_flag[idx] = false;
 	mutex_unlock(&ctx_data->ctx_mutex);
@@ -1550,6 +1556,7 @@
 	cam_icp_mgr_device_deinit(hw_mgr);
 	cam_icp_free_hfi_mem();
 	hw_mgr->fw_download = false;
+	hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	return rc;
@@ -1973,13 +1980,16 @@
 	return rc;
 }
 
-static void cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
+static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
 	struct cam_icp_hw_ctx_data *ctx_data,
 	struct cam_packet *packet,
-	struct cam_hw_prepare_update_args *prepare_args)
+	struct cam_hw_prepare_update_args *prepare_args,
+	int32_t index)
 {
-	int i, j, k;
+	int i, j, k, rc = 0;
 	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+	int32_t sync_in_obj[CAM_MAX_OUT_RES];
+	int32_t merged_sync_in_obj;
 
 	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
 				packet->io_configs_offset/4);
@@ -1988,8 +1998,7 @@
 
 	for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
 		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
-			prepare_args->in_map_entries[j++].sync_id =
-				io_cfg_ptr[i].fence;
+			sync_in_obj[j++] = io_cfg_ptr[i].fence;
 			prepare_args->num_in_map_entries++;
 		} else {
 			prepare_args->out_map_entries[k++].sync_id =
@@ -1999,6 +2008,33 @@
 		CAM_DBG(CAM_ICP, "dir[%d]: %u, fence: %u",
 			i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
 	}
+
+	if (prepare_args->num_in_map_entries > 1) {
+		rc = cam_sync_merge(&sync_in_obj[0],
+			prepare_args->num_in_map_entries, &merged_sync_in_obj);
+		if (rc) {
+			prepare_args->num_out_map_entries = 0;
+			prepare_args->num_in_map_entries = 0;
+			return rc;
+		}
+
+		ctx_data->hfi_frame_process.in_resource[index] =
+			merged_sync_in_obj;
+		prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj;
+		prepare_args->num_in_map_entries = 1;
+		CAM_DBG(CAM_ICP, "Merged Sync obj = %d", merged_sync_in_obj);
+	} else if (prepare_args->num_in_map_entries == 1) {
+		prepare_args->in_map_entries[0].sync_id = sync_in_obj[0];
+		prepare_args->num_in_map_entries = 1;
+		ctx_data->hfi_frame_process.in_resource[index] = 0;
+	} else {
+		CAM_ERR(CAM_ICP, "No input fences");
+		prepare_args->num_in_map_entries = 0;
+		ctx_data->hfi_frame_process.in_resource[index] = 0;
+		rc = -EINVAL;
+	}
+
+	return rc;
 }
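The net effect of the three branches above, with made-up fence ids:

/*
 * inputs {4, 9, 13} -> cam_sync_merge() output becomes the single
 *                      in_map_entries[0].sync_id and is saved in
 *                      in_resource[index] for destruction at frame done
 * single input {4}  -> used directly, in_resource[index] = 0
 * no inputs         -> -EINVAL with num_in_map_entries left at 0
 */
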
 
 static int cam_icp_packet_generic_blob_handler(void *user_data,
@@ -2146,21 +2182,28 @@
 	}
 
 	/* Update Buffer Address from handles and patch information */
-	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl,
+		hw_mgr->iommu_sec_hdl);
 	if (rc) {
 		mutex_unlock(&ctx_data->ctx_mutex);
 		return rc;
 	}
 
-	cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
-		packet, prepare_args);
-
 	rc = cam_icp_mgr_update_hfi_frame_process(ctx_data, packet,
 		prepare_args, &idx);
 	if (rc) {
-		if (prepare_args->in_map_entries[0].sync_id > 0)
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	rc = cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
+		packet, prepare_args, idx);
+	if (rc) {
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0)
 			cam_sync_destroy(
-				prepare_args->in_map_entries[0].sync_id);
+				ctx_data->hfi_frame_process.in_resource[idx]);
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+		ctx_data->hfi_frame_process.request_id[idx] = -1;
 		mutex_unlock(&ctx_data->ctx_mutex);
 		return rc;
 	}
@@ -2195,6 +2238,13 @@
 
 		/* now release memory for hfi frame process command */
 		hfi_frame_process->request_id[idx] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+			CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+				ctx_data->hfi_frame_process.in_resource[idx]);
+			cam_sync_destroy(
+				ctx_data->hfi_frame_process.in_resource[idx]);
+			ctx_data->hfi_frame_process.in_resource[idx] = 0;
+		}
 		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
 	}
 	mutex_unlock(&ctx_data->ctx_mutex);
@@ -2239,6 +2289,7 @@
 			NULL, 0);
 		cam_icp_mgr_hw_close(hw_mgr, NULL);
 		cam_icp_hw_mgr_reset_clk_info(hw_mgr);
+		hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 	}
 
 	return rc;
@@ -2384,6 +2435,12 @@
 		sizeof(struct cam_icp_acquire_dev_info)))
 		return -EFAULT;
 
+	if (icp_dev_acquire_info.secure_mode > CAM_SECURE_MODE_SECURE) {
+		CAM_ERR(CAM_ICP, "Invalid mode:%d",
+			icp_dev_acquire_info.secure_mode);
+		return -EINVAL;
+	}
+
 	if (icp_dev_acquire_info.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) {
 		CAM_ERR(CAM_ICP, "num of out resources exceeding : %u",
 			icp_dev_acquire_info.num_out_res);
@@ -2396,28 +2453,46 @@
 		return -EFAULT;
 	}
 
+	if (!hw_mgr->ctxt_cnt) {
+		hw_mgr->secure_mode = icp_dev_acquire_info.secure_mode;
+	} else {
+		if (hw_mgr->secure_mode != icp_dev_acquire_info.secure_mode) {
+			CAM_ERR(CAM_ICP,
+				"secure mode mismatch driver:%d, context:%d",
+				hw_mgr->secure_mode,
+				icp_dev_acquire_info.secure_mode);
+			return -EINVAL;
+		}
+	}
+
 	acquire_size = sizeof(struct cam_icp_acquire_dev_info) +
 		(icp_dev_acquire_info.num_out_res *
 		sizeof(struct cam_icp_res_info));
 	ctx_data->icp_dev_acquire_info = kzalloc(acquire_size, GFP_KERNEL);
-	if (!ctx_data->icp_dev_acquire_info)
+	if (!ctx_data->icp_dev_acquire_info) {
+		if (!hw_mgr->ctxt_cnt)
+			hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 		return -ENOMEM;
+	}
 
 	if (copy_from_user(ctx_data->icp_dev_acquire_info,
 		(void __user *)args->acquire_info, acquire_size)) {
+		if (!hw_mgr->ctxt_cnt)
+			hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 		kfree(ctx_data->icp_dev_acquire_info);
 		ctx_data->icp_dev_acquire_info = NULL;
 		return -EFAULT;
 	}
 
-	CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x",
+	CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x %u",
 		ctx_data->icp_dev_acquire_info->dev_type,
 		ctx_data->icp_dev_acquire_info->in_res.format,
 		ctx_data->icp_dev_acquire_info->in_res.width,
 		ctx_data->icp_dev_acquire_info->in_res.height,
 		ctx_data->icp_dev_acquire_info->in_res.fps,
 		ctx_data->icp_dev_acquire_info->num_out_res,
-		ctx_data->icp_dev_acquire_info->scratch_mem_size);
+		ctx_data->icp_dev_acquire_info->scratch_mem_size,
+		hw_mgr->secure_mode);
 
 	p_icp_out = ctx_data->icp_dev_acquire_info->out_res;
 	for (i = 0; i < icp_dev_acquire_info.num_out_res; i++)
@@ -2470,18 +2545,10 @@
 		goto acquire_info_failed;
 	icp_dev_acquire_info = ctx_data->icp_dev_acquire_info;
 
-	/* Get IOCONFIG command info */
-	if (icp_dev_acquire_info->secure_mode)
-		rc = cam_mem_get_io_buf(
-			icp_dev_acquire_info->io_config_cmd_handle,
-			hw_mgr->iommu_sec_hdl,
-			&io_buf_addr, &io_buf_size);
-	else
-		rc = cam_mem_get_io_buf(
-			icp_dev_acquire_info->io_config_cmd_handle,
-			hw_mgr->iommu_hdl,
-			&io_buf_addr, &io_buf_size);
-
+	rc = cam_mem_get_io_buf(
+		icp_dev_acquire_info->io_config_cmd_handle,
+		hw_mgr->iommu_hdl,
+		&io_buf_addr, &io_buf_size);
 	if (rc) {
 		CAM_ERR(CAM_ICP, "unable to get src buf info from io desc");
 		goto get_io_buf_failed;
@@ -2808,6 +2875,7 @@
 	hw_mgr_intf->download_fw = cam_icp_mgr_download_fw;
 	hw_mgr_intf->hw_close = cam_icp_mgr_hw_close;
 
+	icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
 	mutex_init(&icp_hw_mgr.hw_mgr_mutex);
 	spin_lock_init(&icp_hw_mgr.hw_mgr_lock);
 
@@ -2820,7 +2888,7 @@
 
 	rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
 	if (rc) {
-		CAM_ERR(CAM_ICP, "icp get iommu handle failed: %d", rc);
+		CAM_ERR(CAM_ICP, "get mmu handle failed: %d", rc);
 		goto icp_get_hdl_failed;
 	}
 
@@ -2830,6 +2898,12 @@
 		goto icp_attach_failed;
 	}
 
+	rc = cam_smmu_get_handle("cam-secure", &icp_hw_mgr.iommu_sec_hdl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "get secure mmu handle failed: %d", rc);
+		goto secure_hdl_failed;
+	}
+
 	rc = cam_icp_mgr_create_wq();
 	if (rc)
 		goto icp_wq_create_failed;
@@ -2839,6 +2913,9 @@
 	return rc;
 
 icp_wq_create_failed:
+	cam_smmu_destroy_handle(icp_hw_mgr.iommu_sec_hdl);
+	icp_hw_mgr.iommu_sec_hdl = -1;
+secure_hdl_failed:
 	cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
 icp_attach_failed:
 	cam_smmu_destroy_handle(icp_hw_mgr.iommu_hdl);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index d4f5482..d1793e6 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -114,6 +114,7 @@
 	uint64_t request_id[CAM_FRAME_CMD_MAX];
 	uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
 	uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
+	uint32_t in_resource[CAM_FRAME_CMD_MAX];
 	uint32_t fw_process_flag[CAM_FRAME_CMD_MAX];
 	struct cam_icp_clk_bw_request clk_info[CAM_FRAME_CMD_MAX];
 };
@@ -226,6 +227,7 @@
  * @icp_debug_clk: Set clock based on debug value
  * @icp_default_clk: Set this clok if user doesn't supply
  * @clk_info: Clock info of hardware
+ * @secure_mode: Flag to enable/disable secure camera
  */
 struct cam_icp_hw_mgr {
 	struct mutex hw_mgr_mutex;
@@ -255,6 +257,7 @@
 	uint64_t icp_debug_clk;
 	uint64_t icp_default_clk;
 	struct cam_icp_clk_info clk_info[ICP_CLK_HW_MAX];
+	bool secure_mode;
 };
 
 static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index d3aaf2b..4ecb36e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -24,6 +24,46 @@
 #include "cam_trace.h"
 #include "cam_debug_util.h"
 
+static int __cam_isp_ctx_enqueue_request_in_order(
+	struct cam_context *ctx, struct cam_ctx_request *req)
+{
+	struct cam_ctx_request           *req_current;
+	struct cam_ctx_request           *req_prev;
+	struct list_head                  temp_list;
+
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock_bh(&ctx->lock);
+	if (list_empty(&ctx->pending_req_list)) {
+		list_add_tail(&req->list, &ctx->pending_req_list);
+	} else {
+		list_for_each_entry_safe_reverse(
+			req_current, req_prev, &ctx->pending_req_list, list) {
+			if (req->request_id < req_current->request_id) {
+				list_del_init(&req_current->list);
+				list_add(&req_current->list, &temp_list);
+				continue;
+			} else if (req->request_id == req_current->request_id) {
+				CAM_WARN(CAM_ISP,
+					"Received duplicated request %lld",
+					req->request_id);
+			}
+			break;
+		}
+		list_add_tail(&req->list, &ctx->pending_req_list);
+
+		if (!list_empty(&temp_list)) {
+			list_for_each_entry_safe(
+				req_current, req_prev, &temp_list, list) {
+				list_del_init(&req_current->list);
+				list_add_tail(&req_current->list,
+					&ctx->pending_req_list);
+			}
+		}
+	}
+	spin_unlock_bh(&ctx->lock);
+	return 0;
+}
+
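A short walk-through of the ordered insert above (request ids are made up):

/*
 * pending_req_list = {3, 5, 9}, new request id 7:
 *   the reverse walk moves 9 onto temp_list and stops at 5,
 *   7 is appended after 5, then 9 is re-appended from temp_list,
 *   leaving {3, 5, 7, 9}.
 * A duplicate id only logs a warning and is still queued.
 */
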
 static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
 {
 	uint64_t ts = 0;
@@ -256,7 +296,7 @@
 		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
 			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
 	} else {
-		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
 		rc = -EFAULT;
 	}
 
@@ -574,7 +614,8 @@
 	 */
 
 	if (list_empty(&ctx->active_req_list)) {
-		CAM_ERR(CAM_ISP, "handling error with no active request");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"handling error with no active request");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -588,10 +629,10 @@
 		notify.req_id = req->request_id;
 
 		ctx->ctx_crm_intf->notify_err(&notify);
-		CAM_ERR(CAM_ISP, "Notify CRM about ERROR frame %lld",
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Notify CRM about ERROR frame %lld",
 			ctx_isp->frame_id);
 	} else {
-		CAM_ERR(CAM_ISP, "Can not notify ERRROR to CRM");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify ERRROR to CRM");
 		rc = -EFAULT;
 	}
 
@@ -724,7 +765,7 @@
 
 	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Can not apply the configuration");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not apply the configuration");
 	} else {
 		spin_lock_bh(&ctx->lock);
 		ctx_isp->substate_activated = next_state;
@@ -964,7 +1005,7 @@
 		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
 			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
 	} else {
-		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
 	}
 
 	if (list_empty(&ctx->active_req_list))
@@ -1477,9 +1518,7 @@
 	CAM_DBG(CAM_ISP, "Packet request id 0x%llx",
 		packet->header.request_id);
 
-	spin_lock_bh(&ctx->lock);
-	list_add_tail(&req->list, &ctx->pending_req_list);
-	spin_unlock_bh(&ctx->lock);
+	__cam_isp_ctx_enqueue_request_in_order(ctx, req);
 
 	CAM_DBG(CAM_ISP, "Preprocessing Config %lld successful",
 		req->request_id);
@@ -1883,14 +1922,16 @@
 		rc = ctx_isp->substate_machine[ctx_isp->substate_activated].
 			crm_ops.apply_req(ctx, apply);
 	} else {
-		CAM_ERR(CAM_ISP, "No handle function in activated substate %d",
-			 ctx_isp->substate_activated);
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No handle function in activated substate %d",
+			ctx_isp->substate_activated);
 		rc = -EFAULT;
 	}
 
 	if (rc)
-		CAM_ERR(CAM_ISP, "Apply failed in active substate %d",
-			 ctx_isp->substate_activated);
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Apply failed in active substate %d",
+			ctx_isp->substate_activated);
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index d0b0751..d84be30 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -64,7 +64,7 @@
 		sizeof(struct cam_isp_query_cap_cmd)))
 		rc = -EFAULT;
 
-	CAM_DBG(CAM_ISP, "exit rc :%d !", rc);
+	CAM_DBG(CAM_ISP, "exit rc :%d", rc);
 
 	return rc;
 }
@@ -111,7 +111,7 @@
 
 	return 0;
 err:
-	CAM_ERR(CAM_ISP, "INIT HW res failed! (type:%d, id:%d)",
+	CAM_ERR(CAM_ISP, "INIT HW res failed: (type:%d, id:%d)",
 		isp_hw_res->res_type, isp_hw_res->res_id);
 	return rc;
 }
@@ -132,7 +132,7 @@
 				isp_hw_res->hw_res[i],
 				sizeof(struct cam_isp_resource_node));
 			if (rc) {
-				CAM_ERR(CAM_ISP, "Can not start HW resources!");
+				CAM_ERR(CAM_ISP, "Can not start HW resources");
 				goto err;
 			}
 		} else {
@@ -143,7 +143,7 @@
 
 	return 0;
 err:
-	CAM_ERR(CAM_ISP, "Start hw res failed! (type:%d, id:%d)",
+	CAM_ERR(CAM_ISP, "Start hw res failed (type:%d, id:%d)",
 		isp_hw_res->res_type, isp_hw_res->res_id);
 	return rc;
 }
@@ -210,7 +210,7 @@
 			struct cam_ife_hw_mgr_res, list);
 		list_del_init(&res_ptr->list);
 	} else {
-		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx!");
+		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx");
 		rc = -1;
 	}
 	*res = res_ptr;
@@ -235,7 +235,7 @@
 				sizeof(struct cam_isp_resource_node));
 			if (rc)
 				CAM_ERR(CAM_ISP,
-					"Release hw resrouce id %d failed!",
+					"Release hw resource id %d failed",
 					isp_hw_res->res_id);
 			isp_hw_res->hw_res[i] = NULL;
 		} else
@@ -362,7 +362,7 @@
 			struct cam_ife_hw_mgr_ctx, list);
 		list_del_init(&ctx_ptr->list);
 	} else {
-		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx!");
+		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx");
 		rc = -1;
 	}
 	*ife_ctx = ctx_ptr;
@@ -417,7 +417,7 @@
 	uint32_t i;
 
 	if (list_empty(&ctx->res_list_ife_src)) {
-		CAM_ERR(CAM_ISP, "Error! Mux List empty");
+		CAM_ERR(CAM_ISP, "Mux List empty");
 		return -ENODEV;
 	}
 
@@ -474,6 +474,8 @@
 		CAM_ERR(CAM_ISP, "invalid resource type");
 		goto err;
 	}
+	CAM_DBG(CAM_ISP, "vfe_in_res_id = %d, vfe_out_res_id = %d",
+		vfe_in_res_id, vfe_out_res_id);
 
 	vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
 	vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
@@ -482,6 +484,9 @@
 	for (i = 0; i < in_port->num_out_res; i++) {
 		out_port = &in_port->data[i];
 
+		CAM_DBG(CAM_ISP, "i = %d, vfe_out_res_id = %d, out_port: %d",
+			i, vfe_out_res_id, out_port->res_type);
+
 		if (vfe_out_res_id != out_port->res_type)
 			continue;
 
@@ -503,7 +508,9 @@
 	}
 
 	if (i == in_port->num_out_res) {
-		CAM_ERR(CAM_ISP, "Can not acquire out resource");
+		CAM_ERR(CAM_ISP,
+			"Cannot acquire out resource, i=%d, num_out_res=%d",
+			i, in_port->num_out_res);
 		goto err;
 	}
 
@@ -511,6 +518,7 @@
 	ife_out_res->is_dual_vfe = 0;
 	ife_out_res->res_id = vfe_out_res_id;
 	ife_out_res->res_type = CAM_ISP_RESOURCE_VFE_OUT;
+	ife_src_res->child[ife_src_res->num_children++] = ife_out_res;
 
 	return 0;
 err:
@@ -633,7 +641,8 @@
 				ife_src_res, in_port);
 			break;
 		default:
-			CAM_ERR(CAM_ISP, "Fatal: Unknown IFE SRC resource!");
+			CAM_ERR(CAM_ISP, "Unknown IFE SRC resource: %d",
+				ife_src_res->res_id);
 			break;
 		}
 		if (rc)
@@ -667,7 +676,7 @@
 		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
 			&ife_src_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
+			CAM_ERR(CAM_ISP, "No more free hw mgr resource");
 			goto err;
 		}
 		cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src,
@@ -706,7 +715,7 @@
 			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
 			break;
 		default:
-			CAM_ERR(CAM_ISP, "Wrong IFE CSID Resource Node!");
+			CAM_ERR(CAM_ISP, "Wrong IFE CSID Resource Node");
 			goto err;
 		}
 		ife_src_res->res_type = vfe_acquire.rsrc_type;
@@ -748,9 +757,11 @@
 		 * csid resource and ife source resource
 		 */
 		csid_res->child[0] = ife_src_res;
-		csid_res->num_children = 1;
 		ife_src_res->parent = csid_res;
 		csid_res->child[csid_res->num_children++] = ife_src_res;
+		CAM_DBG(CAM_ISP, "csid_res=%d  num_children=%d ife_src_res=%d",
+			csid_res->res_id, csid_res->num_children,
+			ife_src_res->res_id);
 	}
 
 	return 0;
@@ -776,7 +787,7 @@
 
 	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
+		CAM_ERR(CAM_ISP, "No more free hw mgr resource");
 		goto err;
 	}
 	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
@@ -808,11 +819,11 @@
 	}
 
 	if (i == CAM_IFE_CSID_HW_NUM_MAX) {
-		CAM_ERR(CAM_ISP, "Can not acquire ife csid ipp resrouce!");
+		CAM_ERR(CAM_ISP, "Can not acquire ife csid ipp resource");
 		goto err;
 	}
 
-	CAM_DBG(CAM_ISP, "acquired csid(%d) left ipp resrouce successfully!",
+	CAM_DBG(CAM_ISP, "acquired csid(%d) left ipp resource successfully",
 		 i);
 
 	csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH;
@@ -839,18 +850,17 @@
 
 		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
 			CAM_ERR(CAM_ISP,
-				"Can not acquire ife csid rdi resrouce!");
+				"Can not acquire ife csid rdi resource");
 			goto err;
 		}
 		csid_res->hw_res[1] = csid_acquire.node_res;
 
 		CAM_DBG(CAM_ISP,
-			"acquired csid(%d)right ipp resrouce successfully!", j);
+			"acquired csid(%d) right ipp resource successfully", j);
 
 	}
 	csid_res->parent = &ife_ctx->res_list_ife_in;
-	ife_ctx->res_list_ife_in.child[
-		ife_ctx->res_list_ife_in.num_children++] = csid_res;
+	CAM_DBG(CAM_ISP, "acquire res %d", csid_acquire.res_id);
 
 	return 0;
 err:
@@ -862,7 +872,6 @@
 	uint32_t                 out_port_type)
 {
 	enum cam_ife_pix_path_res_id path_id;
-
 	switch (out_port_type) {
 	case CAM_ISP_IFE_OUT_RES_RDI_0:
 		path_id = CAM_IFE_PIX_PATH_RES_RDI_0;
@@ -882,6 +891,8 @@
 		break;
 	}
 
+	CAM_DBG(CAM_ISP, "out_port %d path_id %d", out_port_type, path_id);
+
 	return path_id;
 }
 
@@ -909,7 +920,7 @@
 		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
 			&csid_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "No more free hw mgr resource!",
+			CAM_ERR(CAM_ISP, "%s: No more free hw mgr resource",
 				__func__);
 			goto err;
 		}
@@ -945,7 +956,7 @@
 
 		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
 			CAM_ERR(CAM_ISP,
-				"Can not acquire ife csid rdi resrouce!");
+				"Can not acquire ife csid rdi resource");
 			goto err;
 		}
 
@@ -954,10 +965,8 @@
 		csid_res->is_dual_vfe = 0;
 		csid_res->hw_res[0] = csid_acquire.node_res;
 		csid_res->hw_res[1] = NULL;
-
+		CAM_DBG(CAM_ISP, "acquire res %d", csid_acquire.res_id);
 		csid_res->parent = &ife_ctx->res_list_ife_in;
-		ife_ctx->res_list_ife_in.child[
-			ife_ctx->res_list_ife_in.num_children++] = csid_res;
 	}
 
 	return 0;
@@ -978,7 +987,7 @@
 		ife_ctx->res_list_ife_in.res_id = in_port->res_type;
 		ife_ctx->res_list_ife_in.is_dual_vfe = in_port->usage_type;
 	} else if (ife_ctx->res_list_ife_in.res_id != in_port->res_type) {
-		CAM_ERR(CAM_ISP, "No Free resource for this context!");
+		CAM_ERR(CAM_ISP, "No Free resource for this context");
 		goto err;
 	} else {
 		/* else do nothing */
@@ -1032,7 +1041,7 @@
 	/* no dual vfe for TPG */
 	if ((in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) &&
 		(in_port->usage_type != 0)) {
-		CAM_ERR(CAM_ISP, "No Dual VFE on TPG input!");
+		CAM_ERR(CAM_ISP, "No Dual VFE on TPG input");
 		goto err;
 	}
 
@@ -1040,7 +1049,7 @@
 
 	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
+		CAM_ERR(CAM_ISP, "No more free hw mgr resource");
 		goto err;
 	}
 	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, &cid_res);
@@ -1062,7 +1071,7 @@
 	}
 
 	if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
-		CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resrouce!");
+		CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resource");
 		goto err;
 	}
 
@@ -1093,7 +1102,7 @@
 
 		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
 			CAM_ERR(CAM_ISP,
-				"Can not acquire ife csid rdi resrouce!");
+				"Can not acquire ife csid rdi resource");
 			goto err;
 		}
 		cid_res->hw_res[1] = csid_acquire.node_res;
@@ -1123,14 +1132,14 @@
 	/* get root node resource */
 	rc = cam_ife_hw_mgr_acquire_res_root(ife_ctx, in_port);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Can not acquire csid rx resource!");
+		CAM_ERR(CAM_ISP, "Can not acquire csid rx resource");
 		goto err;
 	}
 
 	/* get cid resource */
 	rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed!");
+		CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
 		goto err;
 	}
 
@@ -1138,7 +1147,7 @@
 		&pixel_count, &rdi_count);
 
 	if (!pixel_count && !rdi_count) {
-		CAM_ERR(CAM_ISP, "Error! no PIX or RDI resource");
+		CAM_ERR(CAM_ISP, "No PIX or RDI resource");
 		return -EINVAL;
 	}
 
@@ -1148,7 +1157,7 @@
 				cid_res_id);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
-				"Acquire IFE CSID IPP resource Failed!");
+				"Acquire IFE CSID IPP resource Failed");
 			goto err;
 		}
 	}
@@ -1159,7 +1168,7 @@
 			cid_res_id);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
-				"Acquire IFE CSID RDI resource Failed!");
+				"Acquire IFE CSID RDI resource Failed");
 			goto err;
 		}
 	}
@@ -1167,13 +1176,13 @@
 	/* get ife src resource */
 	rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed!");
+		CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed");
 		goto err;
 	}
 
 	rc = cam_ife_hw_mgr_acquire_res_ife_out(ife_ctx, in_port);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed!");
+		CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed");
 		goto err;
 	}
 
@@ -1206,8 +1215,10 @@
 	struct cam_isp_in_port_info       *in_port = NULL;
 	struct cam_isp_resource           *isp_resource = NULL;
 	struct cam_cdm_acquire_data        cdm_acquire;
-	uint32_t                           num_pix_port = 0;
-	uint32_t                           num_rdi_port = 0;
+	uint32_t                           num_pix_port_per_in = 0;
+	uint32_t                           num_rdi_port_per_in = 0;
+	uint32_t                           total_pix_port = 0;
+	uint32_t                           total_rdi_port = 0;
 
 	CAM_DBG(CAM_ISP, "Enter...");
 
@@ -1219,7 +1230,7 @@
 	/* get the ife ctx */
 	rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
 	if (rc || !ife_ctx) {
-		CAM_ERR(CAM_ISP, "Get ife hw context failed!");
+		CAM_ERR(CAM_ISP, "Get ife hw context failed");
 		goto err;
 	}
 
@@ -1271,7 +1282,10 @@
 			isp_resource[i].length);
 		if (in_port > 0) {
 			rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
-				&num_pix_port, &num_rdi_port);
+				&num_pix_port_per_in, &num_rdi_port_per_in);
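+			/* Accumulate across all in ports; an RDI-only context
+			 * is detected from the totals below.
+			 */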
+			total_pix_port += num_pix_port_per_in;
+			total_rdi_port += num_rdi_port_per_in;
+
 			kfree(in_port);
 			if (rc) {
 				CAM_ERR(CAM_ISP, "can not acquire resource");
@@ -1279,7 +1293,7 @@
 			}
 		} else {
 			CAM_ERR(CAM_ISP,
-				"copy from user failed with in_port = %pK",
+				"Copy from user failed with in_port = %pK",
 				in_port);
 			rc = -EFAULT;
 			goto free_res;
@@ -1287,7 +1301,7 @@
 	}
 
 	/* Check whether context has only RDI resource */
-	if (!num_pix_port) {
+	if (!total_pix_port) {
 		ife_ctx->is_rdi_only_context = 1;
 		CAM_DBG(CAM_ISP, "RDI only context");
 	}
@@ -1295,7 +1309,7 @@
 	/* Process base info */
 	rc = cam_ife_mgr_process_base_info(ife_ctx);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Error process) base info!");
+		CAM_ERR(CAM_ISP, "Process base info failed");
 		return -EINVAL;
 	}
 
@@ -1304,14 +1318,14 @@
 
 	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
 
-	CAM_DBG(CAM_ISP, "Exit...(success)!");
+	CAM_DBG(CAM_ISP, "Exit...(success)");
 
 	return 0;
 free_res:
 	cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
 	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
 err:
-	CAM_DBG(CAM_ISP, "Exit...(rc=%d)!", rc);
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
 	return rc;
 }
 
@@ -1334,12 +1348,12 @@
 	cfg = config_hw_args;
 	ctx = (struct cam_ife_hw_mgr_ctx *)cfg->ctxt_to_hw_map;
 	if (!ctx) {
-		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
 	if (!ctx->ctx_in_use || !ctx->cdm_cmd) {
-		CAM_ERR(CAM_ISP, "Invalid context parameters !");
+		CAM_ERR(CAM_ISP, "Invalid context parameters");
 		return -EPERM;
 	}
 
@@ -1388,7 +1402,7 @@
 	}
 	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
@@ -1397,7 +1411,7 @@
 
 	/* stop resource will remove the irq mask from the hardware */
 	if (!ctx->num_base) {
-		CAM_ERR(CAM_ISP, "error number of bases are zero");
+		CAM_ERR(CAM_ISP, "Number of bases is zero");
 		return -EINVAL;
 	}
 
@@ -1474,7 +1488,7 @@
 	}
 	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
@@ -1484,7 +1498,7 @@
 	/* Note:stop resource will remove the irq mask from the hardware */
 
 	if (!ctx->num_base) {
-		CAM_ERR(CAM_ISP, "error number of bases are zero");
+		CAM_ERR(CAM_ISP, "number of bases is zero");
 		return -EINVAL;
 	}
 
@@ -1628,7 +1642,7 @@
 
 	ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
@@ -1639,7 +1653,7 @@
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
 		rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)!", i);
+			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)", i);
 			goto err;
 		}
 	}
@@ -1649,7 +1663,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1660,7 +1674,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1671,7 +1685,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1702,7 +1716,7 @@
 
 	ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
@@ -1721,7 +1735,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
 		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not INIT IFE CID.(id :%d)!",
+			CAM_ERR(CAM_ISP, "Can not INIT IFE CID (id:%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1735,7 +1749,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
 		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not INIT IFE CSID.(id :%d)!",
+			CAM_ERR(CAM_ISP, "Can not INIT IFE CSID (id:%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1747,7 +1761,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
 		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)!",
+			CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1760,7 +1774,7 @@
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
 		rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)!",
+			CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)",
 				 ctx->res_list_ife_out[i].res_id);
 			goto err;
 		}
@@ -1769,7 +1783,7 @@
 	CAM_DBG(CAM_ISP, "start cdm interface");
 	rc = cam_cdm_stream_on(ctx->cdm_handle);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Can not start cdm (%d)!",
+		CAM_ERR(CAM_ISP, "Can not start cdm (%d)",
 			 ctx->cdm_handle);
 		goto err;
 	}
@@ -1788,7 +1802,7 @@
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
 		rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)",
 				 i);
 			goto err;
 		}
@@ -1800,7 +1814,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1812,7 +1826,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1824,7 +1838,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1864,7 +1878,7 @@
 
 	ctx = (struct cam_ife_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
@@ -1923,7 +1937,8 @@
 		return rc;
 
 	rc = cam_packet_util_process_patches(prepare->packet,
-		hw_mgr->mgr_common.cmd_iommu_hdl);
+		hw_mgr->mgr_common.cmd_iommu_hdl,
+		hw_mgr->mgr_common.cmd_iommu_hdl_secure);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Patch ISP packet failed.");
 		return rc;
@@ -1982,6 +1997,7 @@
 
 		/* get IO buffers */
 		rc = cam_isp_add_io_buffers(hw_mgr->mgr_common.img_iommu_hdl,
+			hw_mgr->mgr_common.img_iommu_hdl_secure,
 			prepare, ctx->base[i].idx,
 			&kmd_buf, ctx->res_list_ife_out,
 			CAM_IFE_HW_OUT_RES_MAX, fill_fence);
@@ -2049,7 +2065,7 @@
 
 	ctx = (struct cam_ife_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used");
 		return -EPERM;
 	}
 
@@ -2117,7 +2133,7 @@
 	}
 end:
 	if (rc)
-		CAM_ERR(CAM_ISP, "error in getting sof time stamp");
+		CAM_ERR(CAM_ISP, "Getting sof time stamp failed");
 
 	return rc;
 }
@@ -2135,7 +2151,7 @@
 	struct cam_ife_hw_mgr_ctx        *ctx = NULL;
 
 	/* Here recovery is performed */
-	CAM_DBG(CAM_ISP, "Enter: ErrorType = %d", error_type);
+	CAM_DBG(CAM_ISP, "ErrorType = %d", error_type);
 
 	switch (error_type) {
 	case CAM_ISP_HW_ERROR_OVERFLOW:
@@ -2757,7 +2773,7 @@
 		break;
 	}
 
-	CAM_DBG(CAM_ISP, "Exit (sof_status = %d)!", sof_status);
+	CAM_DBG(CAM_ISP, "Exit (sof_status = %d)", sof_status);
 
 	return 0;
 }
@@ -2949,7 +2965,7 @@
 		}
 	}
 
-	CAM_DBG(CAM_ISP, "Exit (eof_status = %d)!", eof_status);
+	CAM_DBG(CAM_ISP, "Exit (eof_status = %d)", eof_status);
 
 	return 0;
 }
@@ -3080,7 +3096,7 @@
 	 * for the first phase, we are going to reset entire HW.
 	 */
 
-	CAM_DBG(CAM_ISP, "Exit (buf_done_status (Error) = %d)!",
+	CAM_DBG(CAM_ISP, "Exit buf_done_status Error = %d",
 		buf_done_status);
 	return rc;
 }
@@ -3212,8 +3228,8 @@
 	mutex_init(&g_ife_hw_mgr.ctx_mutex);
 
 	if (CAM_IFE_HW_NUM_MAX != CAM_IFE_CSID_HW_NUM_MAX) {
-		CAM_ERR(CAM_ISP, "Fatal, CSID num is different then IFE num!");
-		goto end;
+		CAM_ERR(CAM_ISP, "CSID num is different than IFE num");
+		return -EINVAL;
 	}
 
 	/* fill ife hw intf information */
@@ -3237,8 +3253,8 @@
 		}
 	}
 	if (j == 0) {
-		CAM_ERR(CAM_ISP, "no valid IFE HW!");
-		goto end;
+		CAM_ERR(CAM_ISP, "no valid IFE HW");
+		return -EINVAL;
 	}
 
 	/* fill csid hw intf information */
@@ -3248,8 +3264,8 @@
 			j++;
 	}
 	if (!j) {
-		CAM_ERR(CAM_ISP, "no valid IFE CSID HW!");
-		goto end;
+		CAM_ERR(CAM_ISP, "no valid IFE CSID HW");
+		return -EINVAL;
 	}
 
 	cam_ife_hw_mgr_sort_dev_with_caps(&g_ife_hw_mgr);
@@ -3267,19 +3283,25 @@
 	 */
 	if (cam_smmu_get_handle("ife",
 		&g_ife_hw_mgr.mgr_common.img_iommu_hdl)) {
-		CAM_ERR(CAM_ISP, "Can not get iommu handle.");
-		goto end;
+		CAM_ERR(CAM_ISP, "Can not get iommu handle");
+		return -EINVAL;
 	}
 
 	if (cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
 		CAM_SMMU_ATTACH)) {
 		CAM_ERR(CAM_ISP, "Attach iommu handle failed.");
-		goto end;
+		goto attach_fail;
 	}
 
-	CAM_DBG(CAM_ISP, "got iommu_handle=%d",
-		g_ife_hw_mgr.mgr_common.img_iommu_hdl);
-	g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
+	if (cam_smmu_get_handle("cam-secure",
+		&g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure)) {
+		CAM_ERR(CAM_ISP, "Failed to get secure iommu handle");
+		goto secure_fail;
+	}
+
+	CAM_DBG(CAM_ISP, "iommu_handles: non-secure[0x%x], secure[0x%x]",
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl,
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure);
 
 	if (!cam_cdm_get_iommu_handle("ife", &cdm_handles)) {
 		CAM_DBG(CAM_ISP, "Successfully acquired the CDM iommu handles");
@@ -3341,7 +3363,6 @@
 	/* Create Worker for ife_hw_mgr with 10 tasks */
 	rc = cam_req_mgr_workq_create("cam_ife_worker", 10,
 			&g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ);
-
 	if (rc < 0) {
 		CAM_ERR(CAM_ISP, "Unable to create worker");
 		goto end;
@@ -3359,14 +3380,28 @@
 	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
 	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
 	hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
-
 	CAM_DBG(CAM_ISP, "Exit");
+
 	return 0;
 end:
 	if (rc) {
-		for (i = 0; i < CAM_CTX_MAX; i++)
+		for (i = 0; i < CAM_CTX_MAX; i++) {
+			cam_tasklet_deinit(
+				&g_ife_hw_mgr.mgr_common.tasklet_pool[i]);
 			kfree(g_ife_hw_mgr.ctx_pool[i].cdm_cmd);
+			g_ife_hw_mgr.ctx_pool[i].cdm_cmd = NULL;
+			g_ife_hw_mgr.ctx_pool[i].common.tasklet_info = NULL;
+		}
 	}
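+	/* Tear down SMMU handles in reverse order of acquisition */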
+	cam_smmu_destroy_handle(
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure);
+	g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
+secure_fail:
+	cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
+		CAM_SMMU_DETACH);
+attach_fail:
+	cam_smmu_destroy_handle(g_ife_hw_mgr.mgr_common.img_iommu_hdl);
+	g_ife_hw_mgr.mgr_common.img_iommu_hdl = -1;
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index 698a4c8..c58578e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -408,6 +408,7 @@
 
 int cam_isp_add_io_buffers(
 	int                                   iommu_hdl,
+	int                                   sec_iommu_hdl,
 	struct cam_hw_prepare_update_args    *prepare,
 	uint32_t                              base_idx,
 	struct cam_kmd_buf_info              *kmd_buf_info,
@@ -425,7 +426,10 @@
 	uint32_t                            i, j, num_out_buf, num_in_buf;
 	uint32_t                            res_id_out, res_id_in, plane_id;
 	uint32_t                            io_cfg_used_bytes, num_ent;
-	size_t size;
+	size_t                              size;
+	int32_t                             hdl;
+	int                                 mmu_hdl;
+	bool                                mode, is_buf_secure;
 
 	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
 			&prepare->packet->payload +
@@ -536,9 +540,31 @@
 				if (!io_cfg[i].mem_handle[plane_id])
 					break;
 
+				hdl = io_cfg[i].mem_handle[plane_id];
+				if (res->process_cmd(res,
+						CAM_VFE_HW_CMD_GET_SECURE_MODE,
+						&mode,
+						sizeof(bool)))
+					return -EINVAL;
+
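+				/*
+				 * Secure buffers on a secure port map through
+				 * the secure IOMMU handle, non-secure buffers
+				 * through the regular one; any mismatch fails
+				 * the request.
+				 */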
+				is_buf_secure = cam_mem_is_secure_buf(hdl);
+				if ((mode == CAM_SECURE_MODE_SECURE) &&
+					is_buf_secure) {
+					mmu_hdl = sec_iommu_hdl;
+				} else if (
+					(mode == CAM_SECURE_MODE_NON_SECURE) &&
+					(!is_buf_secure)) {
+					mmu_hdl = iommu_hdl;
+				} else {
+					CAM_ERR_RATE_LIMIT(CAM_ISP,
+						"Invalid hdl: port mode[%u], buf mode[%u]",
+						mode, is_buf_secure);
+					return -EINVAL;
+				}
+
 				rc = cam_mem_get_io_buf(
 					io_cfg[i].mem_handle[plane_id],
-					iommu_hdl, &io_addr[plane_id], &size);
+					mmu_hdl, &io_addr[plane_id], &size);
 				if (rc) {
 					CAM_ERR(CAM_ISP,
 						"no io addr for plane%d",
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
index 187e5bc..24b532e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
@@ -69,6 +69,8 @@
  *                         index and update the HW entries list
  *
  * @iommu_hdl:             Iommu handle to get the IO buf from memory manager
+ * @sec_iommu_hdl:         Secure iommu handle to get the IO buf from
+ *                         memory manager
  * @prepare:               Contain the  packet and HW update variables
  * @base_idx:              Base or dev index of the IFE/VFE HW instance
  * @kmd_buf_info:          Kmd buffer to store the change base command
@@ -79,7 +81,9 @@
  * @return:                0 for success
  *                         -EINVAL for Fail
  */
-int cam_isp_add_io_buffers(int	 iommu_hdl,
+int cam_isp_add_io_buffers(
+	int                                   iommu_hdl,
+	int                                   sec_iommu_hdl,
 	struct cam_hw_prepare_update_args    *prepare,
 	uint32_t                              base_idx,
 	struct cam_kmd_buf_info              *kmd_buf_info,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
index 2341b38..dcbea8d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -329,6 +329,111 @@
 	return rc;
 }
 
+int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle)
+{
+	struct cam_irq_controller  *controller  = irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	struct cam_irq_evt_handler *evt_handler_temp;
+	unsigned long               flags;
+	unsigned int                i;
+	uint32_t                    irq_mask;
+	uint32_t                    found = 0;
+	int                         rc = -EINVAL;
+
+	if (!controller)
+		return rc;
+
+	list_for_each_entry_safe(evt_handler, evt_handler_temp,
+		&controller->evt_handler_list_head, list_node) {
+		if (evt_handler->index == handle) {
+			CAM_DBG(CAM_ISP, "enable item %d", handle);
+			found = 1;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (!found)
+		return rc;
+
+	write_lock_irqsave(&controller->rw_lock, flags);
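+	/*
+	 * Set the handler's bits in the cached top-half enable mask and in
+	 * the hardware mask register for every IRQ register in the set.
+	 */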
+	for (i = 0; i < controller->num_registers; i++) {
+		controller->irq_register_arr[i].
+		top_half_enable_mask[evt_handler->priority] |=
+			evt_handler->evt_bit_mask_arr[i];
+
+		irq_mask = cam_io_r_mb(controller->mem_base +
+			controller->irq_register_arr[i].
+			mask_reg_offset);
+		irq_mask |= evt_handler->evt_bit_mask_arr[i];
+
+		cam_io_w_mb(irq_mask, controller->mem_base +
+		controller->irq_register_arr[i].mask_reg_offset);
+	}
+	write_unlock_irqrestore(&controller->rw_lock, flags);
+
+	return rc;
+}
+
+int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle)
+{
+	struct cam_irq_controller  *controller  = irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	struct cam_irq_evt_handler *evt_handler_temp;
+	unsigned long               flags;
+	unsigned int                i;
+	uint32_t                    irq_mask;
+	uint32_t                    found = 0;
+	int                         rc = -EINVAL;
+
+	if (!controller)
+		return rc;
+
+	list_for_each_entry_safe(evt_handler, evt_handler_temp,
+		&controller->evt_handler_list_head, list_node) {
+		if (evt_handler->index == handle) {
+			CAM_DBG(CAM_ISP, "disable item %d", handle);
+			found = 1;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (!found)
+		return rc;
+
+	write_lock_irqsave(&controller->rw_lock, flags);
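+	/*
+	 * Clear the handler's bits from the cached enable mask and the HW
+	 * mask register, then clear any of its pending status bits.
+	 */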
+	for (i = 0; i < controller->num_registers; i++) {
+		controller->irq_register_arr[i].
+		top_half_enable_mask[evt_handler->priority] &=
+			~(evt_handler->evt_bit_mask_arr[i]);
+
+		irq_mask = cam_io_r_mb(controller->mem_base +
+			controller->irq_register_arr[i].
+			mask_reg_offset);
+		irq_mask &= ~(evt_handler->evt_bit_mask_arr[i]);
+
+		cam_io_w_mb(irq_mask, controller->mem_base +
+			controller->irq_register_arr[i].
+			mask_reg_offset);
+
+		/* Clear the IRQ bits of this handler */
+		cam_io_w_mb(evt_handler->evt_bit_mask_arr[i],
+			controller->mem_base +
+			controller->irq_register_arr[i].
+			clear_reg_offset);
+
+		if (controller->global_clear_offset)
+			cam_io_w_mb(
+				controller->global_clear_bitmask,
+				controller->mem_base +
+				controller->global_clear_offset);
+	}
+	write_unlock_irqrestore(&controller->rw_lock, flags);
+
+	return rc;
+}
+
 int cam_irq_controller_unsubscribe_irq(void *irq_controller,
 	uint32_t handle)
 {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
index 1990c51..7e307b5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
@@ -214,4 +214,40 @@
  */
 irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv);
 
+/*
+ * cam_irq_controller_disable_irq()
+ *
+ * @brief:              Disable the interrupts on given controller.
+ *                      Unsubscribe will disable the IRQ by default, so this is
+ *                      only needed if between subscribe/unsubscribe there is
+ *                      need to disable IRQ again
+ *
+ * @irq_controller:     Pointer to IRQ Controller that controls the registered
+ *                      events to it.
+ * @handle:             Handle returned on successful subscribe, used to
+ *                      identify the handler object
+ *
+ * @return:             0: events found and disabled
+ *                      Negative: events not registered on this controller
+ */
+int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle);
+
+/*
+ * cam_irq_controller_enable_irq()
+ *
+ * @brief:              Enable the interrupts on given controller.
+ *                      Subscribe will enable the IRQ by default, so this is
+ *                      only needed if between subscribe/unsubscribe there is
+ *                      need to enable IRQ again
+ *
+ * @irq_controller:     Pointer to IRQ Controller that controls the registered
+ *                      events to it.
+ * @handle:             Handle returned on successful subscribe, used to
+ *                      identify the handler object
+ *
+ * @return:             0: events found and enabled
+ *                      Negative: events not registered on this controller
+ */
+int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle);
+
 #endif /* _CAM_IRQ_CONTROLLER_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index a08248d..a64379c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -49,6 +49,7 @@
 	CAM_VFE_HW_CMD_GET_BUF_UPDATE,
 	CAM_VFE_HW_CMD_GET_REG_UPDATE,
 	CAM_VFE_HW_CMD_GET_HFR_UPDATE,
+	CAM_VFE_HW_CMD_GET_SECURE_MODE,
 	CAM_VFE_HW_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 1115112..1472e09 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -94,6 +94,9 @@
 	struct cam_vfe_bus_irq_evt_payload          evt_payload[
 		CAM_VFE_BUS_VER2_PAYLOAD_MAX];
 	struct list_head                            free_payload_list;
+	struct mutex                                bus_mutex;
+	uint32_t                                    secure_mode;
+	uint32_t                                    num_sec_out;
 };
 
 struct cam_vfe_bus_ver2_wm_resource_data {
@@ -168,6 +171,7 @@
 	uint32_t                         max_width;
 	uint32_t                         max_height;
 	struct cam_cdm_utils_ops        *cdm_util_ops;
+	uint32_t                         secure_mode;
 };
 
 struct cam_vfe_bus_ver2_priv {
@@ -184,6 +188,10 @@
 	uint32_t                            irq_handle;
 };
 
+static int cam_vfe_bus_process_cmd(
+	struct cam_isp_resource_node *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+
 static int cam_vfe_bus_get_evt_payload(
 	struct cam_vfe_bus_ver2_common_data  *common_data,
 	struct cam_vfe_bus_irq_evt_payload  **evt_payload)
@@ -328,6 +336,34 @@
 	return rc;
 }
 
+static bool cam_vfe_bus_can_be_secure(uint32_t out_type)
+{
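+	/* Image and RDI outputs may be configured secure; PDAF and stats
+	 * outputs are always non-secure.
+	 */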
+	switch (out_type) {
+	case CAM_VFE_BUS_VER2_VFE_OUT_FULL:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS4:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS16:
+	case CAM_VFE_BUS_VER2_VFE_OUT_FD:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI0:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI1:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI2:
+		return true;
+
+	case CAM_VFE_BUS_VER2_VFE_OUT_PDAF:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST:
+	default:
+		return false;
+	}
+}
+
 static enum cam_vfe_bus_ver2_vfe_out_type
 	cam_vfe_bus_get_out_res_id(uint32_t res_type)
 {
@@ -846,7 +882,7 @@
 			case PLANE_Y:
 				break;
 			default:
-				CAM_ERR(CAM_ISP, "Invalid plane %d\n", plane);
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
 				return -EINVAL;
 			}
 			break;
@@ -862,7 +898,7 @@
 			case PLANE_Y:
 				break;
 			default:
-				CAM_ERR(CAM_ISP, "Invalid plane %d\n", plane);
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
 				return -EINVAL;
 			}
 			break;
@@ -877,7 +913,7 @@
 			case PLANE_Y:
 				break;
 			default:
-				CAM_ERR(CAM_ISP, "Invalid plane %d\n", plane);
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
 				return -EINVAL;
 			}
 			break;
@@ -891,12 +927,12 @@
 			case PLANE_Y:
 				break;
 			default:
-				CAM_ERR(CAM_ISP, "Invalid plane %d\n", plane);
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
 				return -EINVAL;
 			}
 			break;
 		default:
-			CAM_ERR(CAM_ISP, "Invalid format %d\n",
+			CAM_ERR(CAM_ISP, "Invalid format %d",
 				rsrc_data->format);
 			return -EINVAL;
 		}
@@ -1068,7 +1104,7 @@
 
 	wm_res = th_payload->handler_priv;
 	if (!wm_res) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! No resource\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error: No resource");
 		return -ENODEV;
 	}
 
@@ -1080,7 +1116,7 @@
 	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
 	if (rc) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue\n");
+			"No tasklet_cmd is free in queue");
 		return rc;
 	}
 
@@ -1177,10 +1213,8 @@
 
 	rsrc_data = wm_res->res_priv;
 	wm_res->res_priv = NULL;
-	if (!rsrc_data) {
-		CAM_ERR(CAM_ISP, "Error! WM res priv is NULL");
+	if (!rsrc_data)
 		return -ENOMEM;
-	}
 	kfree(rsrc_data);
 
 	return 0;
@@ -1472,7 +1506,7 @@
 
 	comp_grp = th_payload->handler_priv;
 	if (!comp_grp) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! No resource\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
 		return -ENODEV;
 	}
 
@@ -1484,7 +1518,7 @@
 	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
 	if (rc) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue\n");
+			"No tasklet_cmd is free in queue");
 		return rc;
 	}
 
@@ -1611,7 +1645,7 @@
 		break;
 	default:
 		rc = CAM_VFE_IRQ_STATUS_ERR;
-		CAM_ERR(CAM_ISP, "Error! Invalid comp_grp_type %u",
+		CAM_ERR(CAM_ISP, "Invalid comp_grp_type %u",
 			rsrc_data->comp_grp_type);
 		break;
 	}
@@ -1632,10 +1666,9 @@
 
 	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_comp_grp_data),
 		GFP_KERNEL);
-	if (!rsrc_data) {
-		CAM_DBG(CAM_ISP, "Failed to alloc for comp_grp_priv");
+	if (!rsrc_data)
 		return -ENOMEM;
-	}
+
 	comp_grp->res_priv = rsrc_data;
 
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
@@ -1682,7 +1715,7 @@
 	comp_grp->res_priv = NULL;
 
 	if (!rsrc_data) {
-		CAM_ERR(CAM_ISP, "Error! comp_grp_priv is NULL");
+		CAM_ERR(CAM_ISP, "comp_grp_priv is NULL");
 		return -ENODEV;
 	}
 	kfree(rsrc_data);
@@ -1690,6 +1723,22 @@
 	return 0;
 }
 
+static int cam_vfe_bus_get_secure_mode(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	bool *mode = cmd_args;
+	struct cam_isp_resource_node *res =
+		(struct cam_isp_resource_node *) priv;
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data =
+		(struct cam_vfe_bus_ver2_vfe_out_data *)res->res_priv;
+
+	*mode =
+		(rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE) ?
+		true : false;
+
+	return 0;
+}
+
 static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args,
 	uint32_t args_size)
 {
@@ -1697,7 +1746,7 @@
 	int                                     i;
 	enum cam_vfe_bus_ver2_vfe_out_type      vfe_out_res_id;
 	uint32_t                                format;
-	uint32_t                                num_wm;
+	int                                     num_wm;
 	uint32_t                                subscribe_irq;
 	uint32_t                                client_done_mask;
 	struct cam_vfe_bus_ver2_priv           *ver2_bus_priv = bus_priv;
@@ -1705,6 +1754,7 @@
 	struct cam_vfe_hw_vfe_out_acquire_args *out_acquire_args;
 	struct cam_isp_resource_node           *rsrc_node = NULL;
 	struct cam_vfe_bus_ver2_vfe_out_data   *rsrc_data = NULL;
+	uint32_t                                secure_caps = 0, mode;
 
 	if (!bus_priv || !acquire_args) {
 		CAM_ERR(CAM_ISP, "Invalid Param");
@@ -1734,6 +1784,33 @@
 	}
 
 	rsrc_data = rsrc_node->res_priv;
+	secure_caps = cam_vfe_bus_can_be_secure(
+		rsrc_data->out_type);
+	mode = out_acquire_args->out_port_info->secure_mode;
+	mutex_lock(&rsrc_data->common_data->bus_mutex);
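+	/*
+	 * The first secure-capable output acquired fixes the bus-wide secure
+	 * mode; subsequent acquires must request the same mode or fail.
+	 */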
+	if (secure_caps) {
+		if (!rsrc_data->common_data->num_sec_out) {
+			rsrc_data->secure_mode = mode;
+			rsrc_data->common_data->secure_mode = mode;
+		} else {
+			if (mode == rsrc_data->common_data->secure_mode) {
+				rsrc_data->secure_mode =
+					rsrc_data->common_data->secure_mode;
+			} else {
+				rc = -EINVAL;
+				CAM_ERR_RATE_LIMIT(CAM_ISP,
+					"Mismatch: Acquire mode[%d], drvr mode[%d]",
+					rsrc_data->common_data->secure_mode,
+					mode);
+				mutex_unlock(
+					&rsrc_data->common_data->bus_mutex);
+				return -EINVAL;
+			}
+		}
+		rsrc_data->common_data->num_sec_out++;
+	}
+	mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
 	rsrc_data->num_wm = num_wm;
 	rsrc_node->res_id = out_acquire_args->out_port_info->res_type;
 	rsrc_node->tasklet_info = acq_args->tasklet;
@@ -1814,6 +1891,7 @@
 	uint32_t i;
 	struct cam_isp_resource_node          *vfe_out = NULL;
 	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = NULL;
+	uint32_t                               secure_caps = 0;
 
 	if (!bus_priv || !release_args) {
 		CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK",
@@ -1825,7 +1903,7 @@
 	rsrc_data = vfe_out->res_priv;
 
 	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
-		CAM_ERR(CAM_ISP, "Error! Invalid resource state:%d",
+		CAM_ERR(CAM_ISP, "Invalid resource state:%d",
 			vfe_out->res_state);
 	}
 
@@ -1841,6 +1919,32 @@
 	vfe_out->cdm_ops = NULL;
 	rsrc_data->cdm_util_ops = NULL;
 
+	secure_caps = cam_vfe_bus_can_be_secure(rsrc_data->out_type);
+	mutex_lock(&rsrc_data->common_data->bus_mutex);
+	if (secure_caps) {
+		if (rsrc_data->secure_mode ==
+			rsrc_data->common_data->secure_mode) {
+			rsrc_data->common_data->num_sec_out--;
+			rsrc_data->secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+		} else {
+			/*
+			 * The validity of the mode is properly
+			 * checked while acquiring the output port.
+			 * Not expected to reach here, unless there is
+			 * some corruption.
+			 */
+			CAM_ERR(CAM_ISP, "driver[%d],resource[%d] mismatch",
+				rsrc_data->common_data->secure_mode,
+				rsrc_data->secure_mode);
+		}
+
+		if (!rsrc_data->common_data->num_sec_out)
+			rsrc_data->common_data->secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+	}
+	mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
 	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED)
 		vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
 
@@ -1865,7 +1969,7 @@
 	CAM_DBG(CAM_ISP, "Start resource index %d", rsrc_data->out_type);
 
 	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
-		CAM_ERR(CAM_ISP, "Error! Invalid resource state:%d",
+		CAM_ERR(CAM_ISP, "Invalid resource state:%d",
 			vfe_out->res_state);
 		return -EACCES;
 	}
@@ -1972,7 +2076,6 @@
 	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_vfe_out_data),
 		GFP_KERNEL);
 	if (!rsrc_data) {
-		CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe out priv");
 		rc = -ENOMEM;
 		return rc;
 	}
@@ -1988,12 +2091,14 @@
 		ver2_hw_info->vfe_out_hw_info[index].max_width;
 	rsrc_data->max_height  =
 		ver2_hw_info->vfe_out_hw_info[index].max_height;
+	rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 
 	vfe_out->start = cam_vfe_bus_start_vfe_out;
 	vfe_out->stop = cam_vfe_bus_stop_vfe_out;
 	vfe_out->top_half_handler = cam_vfe_bus_handle_vfe_out_done_top_half;
 	vfe_out->bottom_half_handler =
 		cam_vfe_bus_handle_vfe_out_done_bottom_half;
+	vfe_out->process_cmd = cam_vfe_bus_process_cmd;
 	vfe_out->hw_intf = ver2_bus_priv->common_data.hw_intf;
 
 	return 0;
@@ -2014,10 +2119,8 @@
 	INIT_LIST_HEAD(&vfe_out->list);
 	vfe_out->res_priv = NULL;
 
-	if (!rsrc_data) {
-		CAM_ERR(CAM_ISP, "Error! vfe out priv is NULL");
+	if (!rsrc_data)
 		return -ENOMEM;
-	}
 	kfree(rsrc_data);
 
 	return 0;
@@ -2394,7 +2497,7 @@
 	uint32_t                         top_irq_reg_mask[2] = {0};
 
 	if (!bus_priv) {
-		CAM_ERR(CAM_ISP, "Error! Invalid args");
+		CAM_ERR(CAM_ISP, "Invalid args");
 		return -EINVAL;
 	}
 
@@ -2425,7 +2528,7 @@
 	int                              rc;
 
 	if (!bus_priv || (bus_priv->irq_handle <= 0)) {
-		CAM_ERR(CAM_ISP, "Error! Invalid args");
+		CAM_ERR(CAM_ISP, "Error: Invalid args");
 		return -EINVAL;
 	}
 
@@ -2438,13 +2541,20 @@
 	return rc;
 }
 
-static int cam_vfe_bus_process_cmd(void *priv,
+static int __cam_vfe_bus_process_cmd(void *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	return cam_vfe_bus_process_cmd(priv, cmd_type, cmd_args, arg_size);
+}
+
+static int cam_vfe_bus_process_cmd(
+	struct cam_isp_resource_node *priv,
 	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
 {
 	int rc = -EINVAL;
 
 	if (!priv || !cmd_args) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! Invalid input arguments\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid input arguments");
 		return -EINVAL;
 	}
 
@@ -2455,8 +2565,11 @@
 	case CAM_VFE_HW_CMD_GET_HFR_UPDATE:
 		rc = cam_vfe_bus_update_hfr(priv, cmd_args, arg_size);
 		break;
+	case CAM_VFE_HW_CMD_GET_SECURE_MODE:
+		rc = cam_vfe_bus_get_secure_mode(priv, cmd_args, arg_size);
+		break;
 	default:
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "Inval camif process command:%d\n",
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid camif process command:%d",
 			cmd_type);
 		break;
 	}
@@ -2503,18 +2616,21 @@
 	}
 	vfe_bus_local->bus_priv = bus_priv;
 
+	bus_priv->common_data.num_sec_out        = 0;
+	bus_priv->common_data.secure_mode        = CAM_SECURE_MODE_NON_SECURE;
 	bus_priv->common_data.core_index         = soc_info->index;
 	bus_priv->common_data.mem_base           =
 		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX);
 	bus_priv->common_data.hw_intf            = hw_intf;
 	bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
 	bus_priv->common_data.common_reg         = &ver2_hw_info->common_reg;
+	mutex_init(&bus_priv->common_data.bus_mutex);
 
 	rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base,
 		&ver2_hw_info->common_reg.irq_reg_info,
 		&bus_priv->common_data.bus_irq_controller);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Error! cam_irq_controller_init failed");
+		CAM_ERR(CAM_ISP, "cam_irq_controller_init failed");
 		goto free_bus_priv;
 	}
 
@@ -2526,7 +2642,7 @@
 		rc = cam_vfe_bus_init_wm_resource(i, bus_priv, bus_hw_info,
 			&bus_priv->bus_client[i]);
 		if (rc < 0) {
-			CAM_ERR(CAM_ISP, "Error! Init WM failed rc=%d", rc);
+			CAM_ERR(CAM_ISP, "Init WM failed rc=%d", rc);
 			goto deinit_wm;
 		}
 	}
@@ -2564,7 +2680,7 @@
 	vfe_bus_local->hw_ops.deinit       = cam_vfe_bus_deinit_hw;
 	vfe_bus_local->top_half_handler    = cam_vfe_bus_ver2_handle_irq;
 	vfe_bus_local->bottom_half_handler = NULL;
-	vfe_bus_local->hw_ops.process_cmd  = cam_vfe_bus_process_cmd;
+	vfe_bus_local->hw_ops.process_cmd  = __cam_vfe_bus_process_cmd;
 
 	*vfe_bus = vfe_bus_local;
 
@@ -2607,14 +2723,14 @@
 	struct cam_vfe_bus              *vfe_bus_local;
 
 	if (!vfe_bus || !*vfe_bus) {
-		CAM_ERR(CAM_ISP, "Error! Invalid input");
+		CAM_ERR(CAM_ISP, "Invalid input");
 		return -EINVAL;
 	}
 	vfe_bus_local = *vfe_bus;
 
 	bus_priv = vfe_bus_local->bus_priv;
 	if (!bus_priv) {
-		CAM_ERR(CAM_ISP, "Error! bus_priv is NULL");
+		CAM_ERR(CAM_ISP, "bus_priv is NULL");
 		rc = -ENODEV;
 		goto free_bus_local;
 	}
@@ -2627,21 +2743,21 @@
 		rc = cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
 		if (rc < 0)
 			CAM_ERR(CAM_ISP,
-				"Error! Deinit WM failed rc=%d", rc);
+				"Deinit WM failed rc=%d", rc);
 	}
 
 	for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) {
 		rc = cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
 		if (rc < 0)
 			CAM_ERR(CAM_ISP,
-				"Error! Deinit Comp Grp failed rc=%d", rc);
+				"Deinit Comp Grp failed rc=%d", rc);
 	}
 
 	for (i = 0; i < CAM_VFE_BUS_VER2_VFE_OUT_MAX; i++) {
 		rc = cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
 		if (rc < 0)
 			CAM_ERR(CAM_ISP,
-				"Error! Deinit VFE Out failed rc=%d", rc);
+				"Deinit VFE Out failed rc=%d", rc);
 	}
 
 	INIT_LIST_HEAD(&bus_priv->free_comp_grp);
@@ -2652,8 +2768,9 @@
 		&bus_priv->common_data.bus_irq_controller);
 	if (rc)
 		CAM_ERR(CAM_ISP,
-			"Error! Deinit IRQ Controller failed rc=%d", rc);
+			"Deinit IRQ Controller failed rc=%d", rc);
 
+	mutex_destroy(&bus_priv->common_data.bus_mutex);
 	kfree(vfe_bus_local->bus_priv);
 
 free_bus_local:
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index bdfa785..2cd6b04 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -579,7 +579,7 @@
 		(void *)packet, (void *)cmd_desc,
 		sizeof(struct cam_cmd_buf_desc));
 
-	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl, -1);
 	if (rc) {
 		CAM_ERR(CAM_JPEG, "Patch processing failed %d", rc);
 		return rc;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index c150244..9d454e9 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -52,6 +52,8 @@
 		rc = DMA_FROM_DEVICE;
 	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
 		rc = DMA_BIDIRECTIONAL;
+	else if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		rc = DMA_BIDIRECTIONAL;
 
 	return rc;
 }
@@ -211,10 +213,16 @@
 		goto handle_mismatch;
 	}
 
-	rc = cam_smmu_get_iova(mmu_handle,
-		tbl.bufq[idx].fd,
-		iova_ptr,
-		len_ptr);
+	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
+		rc = cam_smmu_get_stage2_iova(mmu_handle,
+			tbl.bufq[idx].fd,
+			iova_ptr,
+			len_ptr);
+	else
+		rc = cam_smmu_get_iova(mmu_handle,
+			tbl.bufq[idx].fd,
+			iova_ptr,
+			len_ptr);
 	if (rc < 0)
 		CAM_ERR(CAM_CRM, "fail to get buf hdl :%d", buf_handle);
 
@@ -376,10 +384,12 @@
 	uint32_t ion_flag = 0;
 	int rc;
 
-	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) {
 		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
-	else
+		ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
+	} else {
 		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
+	}
 
 	if (cmd->flags & CAM_MEM_FLAG_CACHE)
 		ion_flag |= ION_FLAG_CACHED;
@@ -466,10 +476,11 @@
 
 	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
 		for (i = 0; i < num_hdls; i++) {
-			rc = cam_smmu_map_sec_iova(mmu_hdls[i],
+			rc = cam_smmu_map_stage2_iova(mmu_hdls[i],
 				fd,
 				dir,
-				(dma_addr_t *)hw_vaddr,
+				tbl.client,
+				(ion_phys_addr_t *)hw_vaddr,
 				len);
 
 			if (rc < 0) {
@@ -498,7 +509,7 @@
 multi_map_fail:
 	if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
 		for (--i; i > 0; i--)
-			cam_smmu_unmap_sec_iova(mmu_hdls[i], fd);
+			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
 	else
 		for (--i; i > 0; i--)
 			cam_smmu_unmap_iova(mmu_hdls[i],
@@ -543,8 +554,9 @@
 		goto slot_fail;
 	}
 
-	if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
-		cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
+		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
 
 		enum cam_smmu_region_id region;
 
@@ -570,6 +582,8 @@
 	tbl.bufq[idx].fd = ion_fd;
 	tbl.bufq[idx].flags = cmd->flags;
 	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, ion_fd);
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
 	tbl.bufq[idx].kmdvaddr = 0;
 
 	if (cmd->num_hdl > 0)
@@ -630,7 +644,8 @@
 		return -EINVAL;
 	}
 
-	if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) {
+	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
 		rc = cam_mem_util_map_hw_va(cmd->flags,
 			cmd->mmu_hdls,
 			cmd->num_hdl,
@@ -652,6 +667,8 @@
 	tbl.bufq[idx].fd = cmd->fd;
 	tbl.bufq[idx].flags = cmd->flags;
 	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
 	tbl.bufq[idx].kmdvaddr = 0;
 
 	if (cmd->num_hdl > 0)
@@ -699,7 +716,7 @@
 
 	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
 		for (i = 0; i < num_hdls; i++) {
-			rc = cam_smmu_unmap_sec_iova(mmu_hdls[i], fd);
+			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
 			if (rc < 0)
 				goto unmap_end;
 		}
@@ -741,8 +758,9 @@
 			region = CAM_SMMU_REGION_IO;
 	}
 
-	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE ||
-		tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
+		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE))
 		rc = cam_mem_util_unmap_hw_va(idx, region);
 
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
index 0858b8a..af7962a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -95,4 +95,9 @@
 int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr,
 	size_t *len);
 
+static inline bool cam_mem_is_secure_buf(int32_t buf_handle)
+{
+	return CAM_MEM_MGR_IS_SECURE_HDL(buf_handle);
+}
+
 #endif /* _CAM_MEM_MGR_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 96d5b6e..681504d 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -414,7 +414,7 @@
 		}
 	}
 	if (rc < 0) {
-		CAM_ERR(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
 			dev->dev_info.p_delay, apply_req.request_id);
 		/* Apply req failed notify already applied devs */
 		for (; i >= 0; i--) {
@@ -525,7 +525,8 @@
 
 	if (trigger == CAM_TRIGGER_POINT_SOF) {
 		if (link->trigger_mask) {
-			CAM_ERR(CAM_CRM, "Applying for last EOF fails");
+			CAM_ERR_RATE_LIMIT(CAM_CRM,
+				"Applying for last EOF fails");
 			return -EINVAL;
 		}
 		rc = __cam_req_mgr_check_link_is_ready(link, slot->idx);
@@ -542,7 +543,7 @@
 				 * ready, don't expect to enter here.
 				 * @TODO: gracefully handle if recovery fails.
 				 */
-				CAM_ERR(CAM_CRM,
+				CAM_ERR_RATE_LIMIT(CAM_CRM,
 					"FATAL recovery cant finish idx %d status %d",
 					in_q->rd_idx,
 					in_q->slot[in_q->rd_idx].status);
@@ -553,7 +554,7 @@
 	}
 	if (trigger == CAM_TRIGGER_POINT_EOF &&
 			(!(link->trigger_mask & CAM_TRIGGER_POINT_SOF))) {
-		CAM_ERR(CAM_CRM, "Applying for last SOF fails");
+		CAM_DBG(CAM_CRM, "Applying for last SOF fails");
 		return -EINVAL;
 	}
 
@@ -902,7 +903,13 @@
 		CAM_ERR(CAM_CRM, "failed to create link, no mem");
 		return NULL;
 	}
-	in_q = &session->in_q;
+	in_q = (struct cam_req_mgr_req_queue *)
+		kzalloc(sizeof(struct cam_req_mgr_req_queue), GFP_KERNEL);
+	if (!in_q) {
+		CAM_ERR(CAM_CRM, "failed to create input queue, no mem");
+		kfree(link);
+		return NULL;
+	}
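+	/* Each link owns its input queue; it is freed when the link is
+	 * unreserved.
+	 */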
 	mutex_init(&link->lock);
 
 	mutex_lock(&link->lock);
@@ -928,7 +935,7 @@
 }
 
 /**
- * __cam_req_mgr_reserve_link()
+ * __cam_req_mgr_unreserve_link()
  *
  * @brief  : Reserves one link data struct within session
  * @session: session identifier
@@ -960,6 +967,8 @@
 		CAM_DBG(CAM_CRM, "Active session links (%d)",
 			session->num_links);
 	}
+	kfree((*link)->req.in_q);
+	(*link)->req.in_q = NULL;
 	kfree(*link);
 	*link = NULL;
 	mutex_unlock(&session->lock);
@@ -1178,7 +1187,7 @@
 		}
 	}
 	if (!tbl) {
-		CAM_ERR(CAM_CRM, "dev_hdl not found %x, %x %x",
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "dev_hdl not found %x, %x %x",
 			add_req->dev_hdl,
 			link->l_dev[0].dev_hdl,
 			link->l_dev[1].dev_hdl);
@@ -1267,8 +1276,9 @@
 	if (err_info->error == CRM_KMD_ERR_BUBBLE) {
 		idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
 		if (idx < 0) {
-			CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
-			err_info->req_id);
+			CAM_ERR_RATE_LIMIT(CAM_CRM,
+				"req_id %lld not found in input queue",
+				err_info->req_id);
 		} else {
 			CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
 				err_info->req_id, idx);
@@ -1430,7 +1440,7 @@
 
 	task = cam_req_mgr_workq_get_task(link->workq);
 	if (!task) {
-		CAM_ERR(CAM_CRM, "no empty task dev %x req %lld",
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "no empty task dev %x req %lld",
 			add_req->dev_hdl, add_req->req_id);
 		rc = -EBUSY;
 		goto end;
@@ -1902,10 +1912,7 @@
 	cam_destroy_device_hdl(link->link_hdl);
 	link_info->link_hdl = 0;
 link_hdl_fail:
-	mutex_lock(&link->lock);
-	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
-	mutex_unlock(&link->lock);
-
+	__cam_req_mgr_unreserve_link(cam_session, &link);
 	mutex_unlock(&g_crm_core_dev->crm_lock);
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 98a2a4f..e45d634 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -315,7 +315,6 @@
 	int32_t                       session_hdl;
 	uint32_t                      num_links;
 	struct cam_req_mgr_core_link *links[MAX_LINKS_PER_SESSION];
-	struct cam_req_mgr_req_queue  in_q;
 	struct list_head              entry;
 	struct mutex                  lock;
 	int32_t                       force_err_recovery;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index a9134fb..1d2169b 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -203,29 +203,29 @@
 
 	spin_lock_bh(&hdl_tbl_lock);
 	if (!hdl_tbl) {
-		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Hdl tbl is NULL");
 		goto device_priv_fail;
 	}
 
 	idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
 	if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
-		CAM_ERR(CAM_CRM, "Invalid idx");
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid idx");
 		goto device_priv_fail;
 	}
 
 	if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
-		CAM_ERR(CAM_CRM, "Invalid state");
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid state");
 		goto device_priv_fail;
 	}
 
 	type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
 	if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
-		CAM_ERR(CAM_CRM, "Invalid type");
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid type");
 		goto device_priv_fail;
 	}
 
 	if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
-		CAM_ERR(CAM_CRM, "Invalid hdl");
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid hdl");
 		goto device_priv_fail;
 	}
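
The conversions above swap CAM_ERR for CAM_ERR_RATE_LIMIT on validation paths that userspace can hit every frame, so a misbehaving client cannot flood the kernel log. A hedged sketch of how such a wrapper is commonly built on the kernel's pr_err_ratelimited(); the driver's actual CAM_ERR_RATE_LIMIT (presumably defined alongside CAM_ERR in cam_debug_util.h) may differ, and the module argument here is a plain string rather than a module ID:

/* Hypothetical wrapper, not the driver's macro. */
#include <linux/printk.h>

#define MY_ERR_RATE_LIMIT(module, fmt, args...)			\
	pr_err_ratelimited("CAM_ERR: %s: %s: %d " fmt "\n",	\
		module, __func__, __LINE__, ##args)

/* Usage: MY_ERR_RATE_LIMIT("CAM-CRM", "Invalid hdl %x", dev_hdl); */
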
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index e58f737..b58f5d8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -286,6 +286,7 @@
 	}
 
 	/* Fill platform device id*/
+	a_ctrl->id = a_ctrl->soc_info.index;
 	pdev->id = a_ctrl->id;
 
 	rc = cam_actuator_init_subdev(a_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index bdd3e72..1a3f947 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -14,10 +14,32 @@
 #include "cam_csiphy_core.h"
 #include "cam_csiphy_dev.h"
 #include "cam_csiphy_soc.h"
+
+#include <soc/qcom/scm.h>
 #include <cam_mem_mgr.h>
 
 static int cam_csiphy_mem_dmp_param;
 module_param(cam_csiphy_mem_dmp_param, int, 0644);
+#define SCM_SVC_CAMERASS 0x18
+#define SECURE_SYSCALL_ID 0x6
+
+static int cam_csiphy_notify_secure_mode(int phy, bool protect)
+{
+	struct scm_desc desc = {0};
+
+	desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+	desc.args[0] = protect;
+	desc.args[1] = phy;
+
+	CAM_DBG(CAM_CSIPHY, "phy : %d, protect : %d", phy, protect);
+	if (scm_call2(SCM_SIP_FNID(SCM_SVC_CAMERASS, SECURE_SYSCALL_ID),
+		&desc)) {
+		CAM_ERR(CAM_CSIPHY, "scm call to hypervisor failed");
+		return -EINVAL;
+	}
+
+	return 0;
+}
 
 void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev,
 	struct cam_csiphy_query_cap *csiphy_cap)
@@ -70,17 +92,10 @@
 		return -EINVAL;
 	}
 
-	csiphy_dev->csiphy_info =
-		kzalloc(sizeof(struct cam_csiphy_info), GFP_KERNEL);
-	if (!csiphy_dev->csiphy_info)
-		return -ENOMEM;
-
 	rc = cam_mem_get_cpu_buf((int32_t) cfg_dev->packet_handle,
 		(uint64_t *)&generic_ptr, &len);
 	if (rc < 0) {
 		CAM_ERR(CAM_CSIPHY, "Failed to get packet Mem address: %d", rc);
-		kfree(csiphy_dev->csiphy_info);
-		csiphy_dev->csiphy_info = NULL;
 		return rc;
 	}
 
@@ -88,8 +103,6 @@
 		CAM_ERR(CAM_CSIPHY,
 			"offset is out of bounds: offset: %lld len: %zu",
 			cfg_dev->offset, len);
-		kfree(csiphy_dev->csiphy_info);
-		csiphy_dev->csiphy_info = NULL;
 		return -EINVAL;
 	}
 
@@ -104,8 +117,6 @@
 	if (rc < 0) {
 		CAM_ERR(CAM_CSIPHY,
 			"Failed to get cmd buf Mem address : %d", rc);
-		kfree(csiphy_dev->csiphy_info);
-		csiphy_dev->csiphy_info = NULL;
 		return rc;
 	}
 
@@ -113,13 +124,26 @@
 	cmd_buf += cmd_desc->offset / 4;
 	cam_cmd_csiphy_info = (struct cam_csiphy_info *)cmd_buf;
 
-	csiphy_dev->csiphy_info->lane_cnt = cam_cmd_csiphy_info->lane_cnt;
-	csiphy_dev->csiphy_info->lane_mask = cam_cmd_csiphy_info->lane_mask;
-	csiphy_dev->csiphy_info->csiphy_3phase =
+	csiphy_dev->config_count++;
+	csiphy_dev->csiphy_info.lane_cnt += cam_cmd_csiphy_info->lane_cnt;
+	csiphy_dev->csiphy_info.lane_mask |= cam_cmd_csiphy_info->lane_mask;
+	csiphy_dev->csiphy_info.csiphy_3phase =
 		cam_cmd_csiphy_info->csiphy_3phase;
-	csiphy_dev->csiphy_info->combo_mode = cam_cmd_csiphy_info->combo_mode;
-	csiphy_dev->csiphy_info->settle_time = cam_cmd_csiphy_info->settle_time;
-	csiphy_dev->csiphy_info->data_rate = cam_cmd_csiphy_info->data_rate;
+	csiphy_dev->csiphy_info.combo_mode |= cam_cmd_csiphy_info->combo_mode;
+	if (cam_cmd_csiphy_info->combo_mode == 1)
+		csiphy_dev->csiphy_info.settle_time_combo_sensor =
+			cam_cmd_csiphy_info->settle_time;
+	else
+		csiphy_dev->csiphy_info.settle_time =
+			cam_cmd_csiphy_info->settle_time;
+	csiphy_dev->csiphy_info.data_rate = cam_cmd_csiphy_info->data_rate;
+	csiphy_dev->csiphy_info.secure_mode = cam_cmd_csiphy_info->secure_mode;
+
+	if (csiphy_dev->csiphy_info.secure_mode &&
+		(csiphy_dev->config_count == 1))
+		rc = cam_csiphy_notify_secure_mode(
+			csiphy_dev->soc_info.index,
+			CAM_SECURE_MODE_SECURE);
 
 	return rc;
 }
@@ -205,14 +229,8 @@
 	void __iomem *csiphybase;
 	struct csiphy_reg_t (*reg_array)[MAX_SETTINGS_PER_LANE];
 
-	if (csiphy_dev->csiphy_info == NULL) {
-		CAM_ERR(CAM_CSIPHY, "csiphy_info is NULL, No/Fail CONFIG_DEV?");
-		return -EINVAL;
-	}
-
-	lane_cnt = csiphy_dev->csiphy_info->lane_cnt;
-	lane_mask = csiphy_dev->csiphy_info->lane_mask & 0x1f;
-	settle_cnt = (csiphy_dev->csiphy_info->settle_time / 200000000);
+	lane_cnt = csiphy_dev->csiphy_info.lane_cnt;
+	lane_mask = csiphy_dev->csiphy_info.lane_mask & 0x1f;
 	csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base;
 
 	if (!csiphybase) {
@@ -231,8 +249,8 @@
 		mask <<= 1;
 	}
 
-	if (!csiphy_dev->csiphy_info->csiphy_3phase) {
-		if (csiphy_dev->csiphy_info->combo_mode == 1)
+	if (!csiphy_dev->csiphy_info.csiphy_3phase) {
+		if (csiphy_dev->csiphy_info.combo_mode == 1)
 			reg_array =
 				csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg;
 		else
@@ -242,7 +260,7 @@
 		cfg_size = csiphy_dev->ctrl_reg->csiphy_reg.
 			csiphy_2ph_config_array_size;
 	} else {
-		if (csiphy_dev->csiphy_info->combo_mode == 1)
+		if (csiphy_dev->csiphy_info.combo_mode == 1)
 			reg_array =
 				csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg;
 		else
@@ -283,6 +301,12 @@
 			continue;
 		}
 
+		settle_cnt = (csiphy_dev->csiphy_info.settle_time / 200000000);
+		if (csiphy_dev->csiphy_info.combo_mode == 1 &&
+			(lane_pos >= 3))
+			settle_cnt =
+				(csiphy_dev->csiphy_info.
+				settle_time_combo_sensor / 200000000);
 		for (i = 0; i < cfg_size; i++) {
 			switch (reg_array[lane_pos][i].csiphy_param_type) {
 			case CSIPHY_LANE_ENABLE:
@@ -353,6 +377,14 @@
 
 		csiphy_acq_params.combo_mode = 0;
 
+		if (copy_from_user(&csiphy_acq_params,
+			(void __user *)csiphy_acq_dev.info_handle,
+			sizeof(csiphy_acq_params))) {
+			CAM_ERR(CAM_CSIPHY,
+				"Failed copying from User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
 		if (csiphy_dev->acquire_count == 2) {
 			CAM_ERR(CAM_CSIPHY,
 					"CSIPHY device do not allow more than 2 acquires");
@@ -360,6 +392,27 @@
 			goto release_mutex;
 		}
 
+		if ((csiphy_acq_params.combo_mode == 1) &&
+			(csiphy_dev->is_acquired_dev_combo_mode == 1)) {
+			CAM_ERR(CAM_CSIPHY,
+				"Multiple Combo Acq are not allowed: cm: %d, acm: %d",
+				csiphy_acq_params.combo_mode,
+				csiphy_dev->is_acquired_dev_combo_mode);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if ((csiphy_acq_params.combo_mode != 1) &&
+			(csiphy_dev->is_acquired_dev_combo_mode != 1) &&
+			(csiphy_dev->acquire_count == 1)) {
+			CAM_ERR(CAM_CSIPHY,
+				"Multiple Acquires are not allowed cm: %d acm: %d",
+				csiphy_acq_params.combo_mode,
+				csiphy_dev->is_acquired_dev_combo_mode);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
 		bridge_params.ops = NULL;
 		bridge_params.session_hdl = csiphy_acq_dev.session_handle;
 		bridge_params.v4l2_sub_dev_flag = 0;
@@ -384,7 +437,9 @@
 		}
 		if (csiphy_acq_params.combo_mode == 1)
 			csiphy_dev->is_acquired_dev_combo_mode = 1;
+
 		csiphy_dev->acquire_count++;
+		csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE;
 	}
 		break;
 	case CAM_QUERY_CAP: {
@@ -400,6 +455,19 @@
 	}
 		break;
 	case CAM_STOP_DEV: {
+		if (csiphy_dev->csiphy_state !=
+			CAM_CSIPHY_START) {
+			CAM_ERR(CAM_CSIPHY, "Not in right state to stop : %d",
+				csiphy_dev->csiphy_state);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if (--csiphy_dev->start_dev_count) {
+			CAM_DBG(CAM_CSIPHY, "Stop Dev ref Cnt: %d",
+				csiphy_dev->start_dev_count);
+			goto release_mutex;
+		}
+
 		rc = cam_csiphy_disable_hw(csiphy_dev);
 		if (rc < 0) {
 			CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
@@ -411,6 +479,8 @@
 			CAM_ERR(CAM_CSIPHY, "de-voting CPAS: %d", rc);
 			goto release_mutex;
 		}
+		csiphy_dev->csiphy_info.combo_mode = 0;
+		csiphy_dev->csiphy_state = CAM_CSIPHY_STOP;
 	}
 		break;
 	case CAM_RELEASE_DEV: {
@@ -441,8 +511,22 @@
 			csiphy_dev->bridge_intf.link_hdl[1] = -1;
 			csiphy_dev->bridge_intf.
 				session_hdl[1] = -1;
+			csiphy_dev->is_acquired_dev_combo_mode = 0;
 		}
+
+		csiphy_dev->config_count--;
 		csiphy_dev->acquire_count--;
+		if (csiphy_dev->acquire_count == 0)
+			csiphy_dev->csiphy_state = CAM_CSIPHY_RELEASE;
+
+		if (csiphy_dev->csiphy_info.secure_mode &&
+			(!csiphy_dev->config_count)) {
+			csiphy_dev->csiphy_info.secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+			rc = cam_csiphy_notify_secure_mode(
+				csiphy_dev->soc_info.index,
+				CAM_SECURE_MODE_NON_SECURE);
+		}
 	}
 		break;
 	case CAM_CONFIG_DEV: {
@@ -464,6 +548,11 @@
 		struct cam_ahb_vote ahb_vote;
 		struct cam_axi_vote axi_vote;
 
+		csiphy_dev->start_dev_count++;
+
+		if (csiphy_dev->csiphy_state == CAM_CSIPHY_START)
+			goto release_mutex;
+
 		ahb_vote.type = CAM_VOTE_ABSOLUTE;
 		ahb_vote.vote.level = CAM_SVS_VOTE;
 		axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
@@ -491,6 +580,7 @@
 			cam_cpas_stop(csiphy_dev->cpas_handle);
 			goto release_mutex;
 		}
+		csiphy_dev->csiphy_state = CAM_CSIPHY_START;
 	}
 		break;
 	case CAM_SD_SHUTDOWN:
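
The CSIPHY hunks above add start_dev_count so that in combo mode the second sensor's CAM_START_DEV only bumps a counter while the hardware is programmed once, and CAM_STOP_DEV only powers down when the last user stops. A simplified userspace sketch of that reference-count pattern (helper names and prints are illustrative only):

#include <stdio.h>

static int start_dev_count;

static void start_dev(void)
{
	if (start_dev_count++ > 0)
		return;			/* already running, just count the user */
	printf("vote CPAS + program lanes\n");
}

static void stop_dev(void)
{
	if (start_dev_count == 0)
		return;			/* nothing to stop */
	if (--start_dev_count > 0)
		return;			/* other users still active */
	printf("disable hw + de-vote CPAS\n");
}

int main(void)
{
	start_dev();
	start_dev();			/* combo mode: two sensors share the PHY */
	stop_dev();
	stop_dev();			/* hardware released on the second stop */
	return 0;
}
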
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
index 6e74344..a136fa7 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
@@ -157,6 +157,7 @@
 		NULL;
 
 	new_csiphy_dev->acquire_count = 0;
+	new_csiphy_dev->start_dev_count = 0;
 	new_csiphy_dev->is_acquired_dev_combo_mode = 0;
 
 	cpas_parms.cam_cpas_client_cb = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
index 8ed5ba4..25891c5 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -67,6 +67,13 @@
 #define CDBG(fmt, args...) pr_debug(fmt, ##args)
 #endif
 
+enum cam_csiphy_state {
+	CAM_CSIPHY_ACQUIRE,
+	CAM_CSIPHY_RELEASE,
+	CAM_CSIPHY_START,
+	CAM_CSIPHY_STOP,
+};
+
 /**
  * struct csiphy_reg_parms_t
  * @mipi_csiphy_glbl_irq_cmd_addr: CSIPhy irq addr
@@ -150,6 +157,32 @@
 };
 
 /**
+ * struct cam_csiphy_param: Provides cmd buffer structure
+ * @lane_mask     :  Lane mask details
+ * @lane_assign   :  Lane assignment for the sensor
+ * @csiphy_3phase :  Indicates 3-phase (CPHY) or 2-phase (DPHY) operation
+ * @combo_mode    :  Indicates whether combo mode is enabled
+ * @lane_cnt      :  Total number of lanes
+ * @secure_mode   :  Indicates whether a secure session was requested
+ * @settle_time   :  Settling time in ms
+ * @settle_time_combo_sensor :  Settling time in ms for the combo sensor
+ * @data_rate     :  Data rate in Mbps
+ *
+ */
+struct cam_csiphy_param {
+	uint16_t    lane_mask;
+	uint16_t    lane_assign;
+	uint8_t     csiphy_3phase;
+	uint8_t     combo_mode;
+	uint8_t     lane_cnt;
+	uint8_t     secure_mode;
+	uint64_t    settle_time;
+	uint64_t    settle_time_combo_sensor;
+	uint64_t    data_rate;
+};
+
+/**
  * struct csiphy_device
  * @pdev: Platform device
  * @irq: Interrupt structure
@@ -171,6 +204,7 @@
  * @ref_count: Reference count
  * @clk_lane: Clock lane
  * @acquire_count: Acquire device count
+ * @start_dev_count: Start count
  * @is_acquired_dev_combo_mode:
  *    Flag that mentions whether already acquired
  *   device is for combo mode
@@ -178,7 +212,7 @@
 struct csiphy_device {
 	struct mutex mutex;
 	uint32_t hw_version;
-	uint32_t csiphy_state;
+	enum cam_csiphy_state csiphy_state;
 	struct csiphy_ctrl_t *ctrl_reg;
 	uint32_t csiphy_max_clk;
 	struct msm_cam_clk_info csiphy_3p_clk_info[2];
@@ -190,14 +224,16 @@
 	uint8_t is_csiphy_3phase_hw;
 	uint8_t num_irq_registers;
 	struct cam_subdev v4l2_dev_str;
-	struct cam_csiphy_info *csiphy_info;
+	struct cam_csiphy_param csiphy_info;
 	struct intf_params bridge_intf;
 	uint32_t clk_lane;
 	uint32_t acquire_count;
+	uint32_t start_dev_count;
 	char device_name[20];
 	uint32_t is_acquired_dev_combo_mode;
 	struct cam_hw_soc_info   soc_info;
 	uint32_t cpas_handle;
+	uint32_t config_count;
 };
 
 #endif /* _CAM_CSIPHY_DEV_H_ */
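
With csiphy_state now an explicit enum, the ioctl handler can reject out-of-order transitions such as stopping a device that was never started. A small standalone sketch of that kind of state check (the helper is hypothetical, not the driver's code):

#include <stdio.h>
#include <stdbool.h>

enum cam_csiphy_state {
	CAM_CSIPHY_ACQUIRE,
	CAM_CSIPHY_RELEASE,
	CAM_CSIPHY_START,
	CAM_CSIPHY_STOP,
};

static bool can_stop(enum cam_csiphy_state state)
{
	return state == CAM_CSIPHY_START;
}

int main(void)
{
	enum cam_csiphy_state state = CAM_CSIPHY_ACQUIRE;

	printf("stop from ACQUIRE allowed? %d\n", can_stop(state));	/* 0 */
	state = CAM_CSIPHY_START;
	printf("stop from START allowed? %d\n", can_stop(state));	/* 1 */
	return 0;
}
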
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index 02b2c51..6eab59a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -243,7 +243,8 @@
 	struct camera_io_master *client = &e_ctrl->io_master_info;
 	uint8_t                  id[2];
 
-	rc = cam_spi_query_id(client, 0, &id[0], 2);
+	rc = cam_spi_query_id(client, 0, CAMERA_SENSOR_I2C_TYPE_WORD,
+		&id[0], 2);
 	if (rc)
 		return rc;
 	CAM_DBG(CAM_EEPROM, "read 0x%x 0x%x, check 0x%x 0x%x",
@@ -310,6 +311,8 @@
 data_mem_free:
 	kfree(e_ctrl->cal_data.mapdata);
 	kfree(e_ctrl->cal_data.map);
+	e_ctrl->cal_data.num_data = 0;
+	e_ctrl->cal_data.num_map = 0;
 	return rc;
 }
 
@@ -483,7 +486,7 @@
 	uint16_t                        cmd_length_in_bytes = 0;
 	struct cam_cmd_i2c_info        *i2c_info = NULL;
 	int                             num_map = -1;
-	struct cam_eeprom_memory_map_t *map;
+	struct cam_eeprom_memory_map_t *map = NULL;
 	struct cam_eeprom_soc_private  *soc_private =
 		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
 	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
@@ -664,53 +667,60 @@
 	switch (csl_packet->header.op_code & 0xFFFFFF) {
 	case CAM_EEPROM_PACKET_OPCODE_INIT:
 		if (e_ctrl->userspace_probe == false) {
-			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
-			CAM_ERR(CAM_EEPROM,
-				"Eeprom already probed at kernel boot");
-			rc = -EINVAL;
-		break;
-		}
-		if (e_ctrl->cal_data.num_data == 0) {
-			rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
-			if (rc) {
-				CAM_ERR(CAM_EEPROM,
-					"Failed in parsing the pkt");
+			rc = cam_eeprom_parse_read_memory_map(
+					e_ctrl->pdev->dev.of_node, e_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_EEPROM, "Failed: rc : %d", rc);
 				return rc;
 			}
-
-			e_ctrl->cal_data.mapdata =
-				kzalloc(e_ctrl->cal_data.num_data, GFP_KERNEL);
-			if (!e_ctrl->cal_data.mapdata) {
-				rc = -ENOMEM;
-				CAM_ERR(CAM_EEPROM, "failed");
-				goto error;
-			}
-
-			rc = cam_eeprom_power_up(e_ctrl,
-				&soc_private->power_info);
-			if (rc) {
-				CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
-				goto memdata_free;
-			}
-
-			rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
-			if (rc) {
-				CAM_ERR(CAM_EEPROM,
-					"read_eeprom_memory failed");
-				goto power_down;
-			}
-
 			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
-			rc = cam_eeprom_power_down(e_ctrl);
-		} else {
-			CAM_DBG(CAM_EEPROM, "Already read eeprom");
+			kfree(e_ctrl->cal_data.mapdata);
+			kfree(e_ctrl->cal_data.map);
+			e_ctrl->cal_data.num_data = 0;
+			e_ctrl->cal_data.num_map = 0;
+			CAM_DBG(CAM_EEPROM,
+				"Returning the data using kernel probe");
+		break;
 		}
+		rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed in parsing the pkt");
+			return rc;
+		}
+
+		e_ctrl->cal_data.mapdata =
+			kzalloc(e_ctrl->cal_data.num_data, GFP_KERNEL);
+		if (!e_ctrl->cal_data.mapdata) {
+			rc = -ENOMEM;
+			CAM_ERR(CAM_EEPROM, "failed");
+			goto error;
+		}
+
+		rc = cam_eeprom_power_up(e_ctrl,
+			&soc_private->power_info);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+			goto memdata_free;
+		}
+
+		rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM,
+				"read_eeprom_memory failed");
+			goto power_down;
+		}
+
+		rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+		rc = cam_eeprom_power_down(e_ctrl);
+		kfree(e_ctrl->cal_data.mapdata);
+		kfree(e_ctrl->cal_data.map);
+		e_ctrl->cal_data.num_data = 0;
+		e_ctrl->cal_data.num_map = 0;
 		break;
 	default:
 		break;
 	}
-	kfree(e_ctrl->cal_data.mapdata);
-	kfree(e_ctrl->cal_data.map);
 	return rc;
 power_down:
 	rc = cam_eeprom_power_down(e_ctrl);
@@ -718,6 +728,8 @@
 	kfree(e_ctrl->cal_data.mapdata);
 error:
 	kfree(e_ctrl->cal_data.map);
+	e_ctrl->cal_data.num_data = 0;
+	e_ctrl->cal_data.num_map = 0;
 	return rc;
 }
 
@@ -764,6 +776,23 @@
 			goto release_mutex;
 		}
 		break;
+	case CAM_RELEASE_DEV:
+		if (e_ctrl->bridge_intf.device_hdl == -1) {
+			CAM_ERR(CAM_EEPROM,
+				"Invalid Handles: link hdl: %d device hdl: %d",
+				e_ctrl->bridge_intf.device_hdl,
+				e_ctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = cam_destroy_device_hdl(e_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_EEPROM,
+				"failed in destroying the device hdl");
+		e_ctrl->bridge_intf.device_hdl = -1;
+		e_ctrl->bridge_intf.link_hdl = -1;
+		e_ctrl->bridge_intf.session_hdl = -1;
+		break;
 	case CAM_CONFIG_DEV:
 		rc = cam_eeprom_pkt_parse(e_ctrl, arg);
 		if (rc) {
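
The EEPROM hunks above free cal_data.mapdata and cal_data.map and zero num_data and num_map in the same place every time, so a later init packet never pairs a stale size with an already freed buffer. A compact userspace sketch of that invariant, using a simplified stand-in for the calibration block:

#include <stdlib.h>
#include <stdint.h>

struct cal_block {
	uint8_t  *mapdata;
	void     *map;
	uint32_t  num_data;
	uint32_t  num_map;
};

static void cal_block_reset(struct cal_block *blk)
{
	free(blk->mapdata);
	free(blk->map);
	blk->mapdata = NULL;
	blk->map = NULL;
	blk->num_data = 0;	/* keep sizes consistent with freed buffers */
	blk->num_map = 0;
}

int main(void)
{
	struct cal_block blk = {
		.mapdata = malloc(16),
		.map = malloc(32),
		.num_data = 16,
		.num_map = 1,
	};

	cal_block_reset(&blk);
	return blk.num_data;	/* 0: nothing left to read by mistake */
}
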
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
index 9bbc6df..88d7665 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -152,6 +152,12 @@
 		goto probe_failure;
 	}
 
+	soc_private = kzalloc(sizeof(*soc_private), GFP_KERNEL);
+	if (!soc_private)
+		goto ectrl_free;
+
+	e_ctrl->soc_info.soc_private = soc_private;
+
 	i2c_set_clientdata(client, e_ctrl);
 
 	mutex_init(&(e_ctrl->eeprom_mutex));
@@ -175,15 +181,6 @@
 		goto free_soc;
 	}
 
-	if (e_ctrl->userspace_probe == false) {
-		rc = cam_eeprom_parse_read_memory_map(soc_info->dev->of_node,
-			e_ctrl);
-		if (rc) {
-			CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
-			goto free_soc;
-		}
-	}
-
 	soc_private = (struct cam_eeprom_soc_private *)(id->driver_data);
 	if (!soc_private) {
 		CAM_ERR(CAM_EEPROM, "board info NULL");
@@ -242,8 +239,6 @@
 		return -EINVAL;
 	}
 
-	kfree(e_ctrl->cal_data.mapdata);
-	kfree(e_ctrl->cal_data.map);
 	if (soc_private) {
 		kfree(soc_private->power_info.gpio_num_info);
 		kfree(soc_private);
@@ -307,13 +302,10 @@
 		goto board_free;
 	}
 
-	if (e_ctrl->userspace_probe == false) {
-		rc = cam_eeprom_parse_read_memory_map(soc_info->dev->of_node,
-			e_ctrl);
-		if (rc) {
-			CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
-			goto board_free;
-		}
+	rc = cam_eeprom_spi_parse_of(spi_client);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Device tree parsing error");
+		goto board_free;
 	}
 
 	rc = cam_eeprom_init_subdev(e_ctrl);
@@ -369,8 +361,6 @@
 	}
 
 	kfree(e_ctrl->io_master_info.spi_client);
-	kfree(e_ctrl->cal_data.mapdata);
-	kfree(e_ctrl->cal_data.map);
 	soc_private =
 		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
 	if (soc_private) {
@@ -428,15 +418,6 @@
 		goto free_soc;
 	}
 
-	if (e_ctrl->userspace_probe == false) {
-		rc = cam_eeprom_parse_read_memory_map(pdev->dev.of_node,
-			e_ctrl);
-		if (rc) {
-			CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
-			goto free_soc;
-		}
-	}
-
 	rc = cam_eeprom_init_subdev(e_ctrl);
 	if (rc)
 		goto free_soc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
index 5e12f4a..70c40fd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
@@ -20,6 +20,96 @@
 #include "cam_eeprom_soc.h"
 #include "cam_debug_util.h"
 
+#define cam_eeprom_spi_parse_cmd(spi_dev, name, out)          \
+	{                                                     \
+		spi_dev->cmd_tbl.name.opcode = out[0];        \
+		spi_dev->cmd_tbl.name.addr_len = out[1];      \
+		spi_dev->cmd_tbl.name.dummy_len = out[2];     \
+		spi_dev->cmd_tbl.name.delay_intv = out[3];    \
+		spi_dev->cmd_tbl.name.delay_count = out[4];   \
+	}
+
+int cam_eeprom_spi_parse_of(struct cam_sensor_spi_client *spi_dev)
+{
+	int rc = -EFAULT;
+	uint32_t tmp[5];
+
+	rc  = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-read", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, read, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get read data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-readseq", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, read_seq, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get readseq data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-queryid", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, query_id, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get queryid data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-pprog", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, page_program, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get page program data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-wenable", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, write_enable, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get write enable data");
+		return rc;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-readst", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, read_status, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get readst data");
+		return rc;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-erase", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, erase, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get erase data");
+		return rc;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"eeprom-id", tmp, 2);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Failed to get eeprom id");
+		return rc;
+	}
+
+	spi_dev->mfr_id0 = tmp[0];
+	spi_dev->device_id0 = tmp[1];
+
+	return 0;
+}
+
 /*
  * cam_eeprom_parse_memory_map() - parse memory map in device node
  * @of:         device node
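
Each spiop-* property parsed above is a five-element array that fills one entry of the SPI command table, in the order opcode, address length, dummy length, delay interval and delay count (see the cam_eeprom_spi_parse_cmd macro). A standalone sketch of the same decode, with a plain array standing in for of_property_read_u32_array(); the values are illustrative, 0x03 being the conventional SPI read opcode:

#include <stdio.h>
#include <stdint.h>

struct spi_inst {
	uint8_t opcode;
	uint8_t addr_len;
	uint8_t dummy_len;
	uint8_t delay_intv;
	uint8_t delay_count;
};

int main(void)
{
	uint32_t tmp[5] = { 0x03, 3, 0, 0, 0 };
	struct spi_inst read_op = {
		.opcode      = tmp[0],
		.addr_len    = tmp[1],
		.dummy_len   = tmp[2],
		.delay_intv  = tmp[3],
		.delay_count = tmp[4],
	};

	printf("opcode 0x%02x, %u address byte(s)\n",
		(unsigned int)read_op.opcode,
		(unsigned int)read_op.addr_len);
	return 0;
}
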
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
index 08c436c..d311549 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
@@ -14,6 +14,8 @@
 
 #include "cam_eeprom_dev.h"
 
+int cam_eeprom_spi_parse_of(struct cam_sensor_spi_client *client);
+
 int cam_eeprom_parse_dt_memory_map(struct device_node *of,
 	struct cam_eeprom_memory_block_t *data);
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index b84ce70..f455715 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -334,7 +334,8 @@
 		flash_data = &fctrl->per_frame[frame_offset];
 
 		if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_FIREHIGH) &&
-			(flash_data->cmn_attr.is_settings_valid)) {
+			(flash_data->cmn_attr.is_settings_valid) &&
+			(flash_data->cmn_attr.request_id == req_id)) {
 			/* Turn On Flash */
 			if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
 				rc = cam_flash_high(fctrl, flash_data);
@@ -348,7 +349,8 @@
 			}
 		} else if ((flash_data->opcode ==
 			CAMERA_SENSOR_FLASH_OP_FIRELOW) &&
-			(flash_data->cmn_attr.is_settings_valid)) {
+			(flash_data->cmn_attr.is_settings_valid) &&
+			(flash_data->cmn_attr.request_id == req_id)) {
 			/* Turn On Torch */
 			if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
 				rc = cam_flash_low(fctrl, flash_data);
@@ -361,7 +363,8 @@
 				fctrl->flash_state = CAM_FLASH_STATE_LOW;
 			}
 		} else if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) &&
-			(flash_data->cmn_attr.is_settings_valid)) {
+			(flash_data->cmn_attr.is_settings_valid) &&
+			(flash_data->cmn_attr.request_id == req_id)) {
 			if ((fctrl->flash_state != CAM_FLASH_STATE_RELEASE) ||
 				(fctrl->flash_state != CAM_FLASH_STATE_INIT)) {
 				rc = cam_flash_off(fctrl);
@@ -374,8 +377,7 @@
 				}
 			}
 		} else {
-			CAM_DBG(CAM_FLASH, "NOP opcode");
-			return rc;
+			CAM_DBG(CAM_FLASH, "NOP opcode: req_id: %u", req_id);
 		}
 	}
 
@@ -468,6 +470,19 @@
 			csl_packet->cmd_buf_offset);
 		frame_offset = csl_packet->header.request_id %
 			MAX_PER_FRAME_ARRAY;
+		if (fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid
+			== true) {
+			fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
+			fctrl->per_frame[frame_offset].
+				cmn_attr.is_settings_valid = false;
+			for (i = 0;
+			i < fctrl->per_frame[frame_offset].cmn_attr.count;
+			i++) {
+				fctrl->per_frame[frame_offset].
+					led_current_ma[i] = 0;
+			}
+		}
+
 		fctrl->per_frame[frame_offset].cmn_attr.request_id =
 			csl_packet->header.request_id;
 		fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
@@ -477,6 +492,10 @@
 			(uint64_t *)&generic_ptr, &len_of_buffer);
 		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
 			cmd_desc->offset);
+
+		if (!cmd_buf)
+			return -EINVAL;
+
 		cmn_hdr = (struct common_header *)cmd_buf;
 
 		switch (cmn_hdr->cmd_type) {
@@ -485,6 +504,11 @@
 				"CAMERA_FLASH_CMD_TYPE_OPS case called");
 			flash_operation_info =
 				(struct cam_flash_set_on_off *) cmd_buf;
+			if (!flash_operation_info) {
+				CAM_ERR(CAM_FLASH, "flash_operation_info Null");
+				return -EINVAL;
+			}
+
 			fctrl->per_frame[frame_offset].opcode =
 				flash_operation_info->opcode;
 			fctrl->per_frame[frame_offset].cmn_attr.count =
@@ -500,7 +524,6 @@
 				cmn_hdr->cmd_type);
 			return -EINVAL;
 		}
-
 		break;
 	}
 	case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
@@ -594,6 +617,8 @@
 		break;
 	}
 	case CAM_PKT_NOP_OPCODE: {
+		CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
+			csl_packet->header.request_id);
 		goto update_req_mgr;
 	}
 	default:
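
The flash hunks above clear stale per-frame entries and compare cmn_attr.request_id against the request being applied because the per-frame table is a ring indexed by request_id modulo its size, so two different requests can land in the same slot. A tiny userspace illustration (the array size here is a placeholder, not necessarily the driver's MAX_PER_FRAME_ARRAY):

#include <stdio.h>
#include <stdint.h>

#define MAX_PER_FRAME_ARRAY 8	/* placeholder size */

int main(void)
{
	uint64_t old_req = 3;
	uint64_t new_req = 3 + MAX_PER_FRAME_ARRAY;

	printf("req %llu -> slot %llu\n",
		(unsigned long long)old_req,
		(unsigned long long)(old_req % MAX_PER_FRAME_ARRAY));
	printf("req %llu -> slot %llu (same slot, so request_id must match)\n",
		(unsigned long long)new_req,
		(unsigned long long)(new_req % MAX_PER_FRAME_ARRAY));
	return 0;
}
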
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index cd9f83f..f0c1bca 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -813,29 +813,27 @@
 					return rc;
 				}
 			}
-			del_req_id = (req_id +
-				MAX_PER_FRAME_ARRAY -
-				MAX_SYSTEM_PIPELINE_DELAY) %
-				MAX_PER_FRAME_ARRAY;
-			CAM_DBG(CAM_SENSOR, "Deleting the Request: %d",
-				del_req_id);
-			if (req_id >
-				s_ctrl->i2c_data.per_frame[del_req_id].
-				request_id) {
-				s_ctrl->i2c_data.per_frame[del_req_id].
-					request_id = 0;
-				rc = delete_request(
-					&(s_ctrl->i2c_data.
-					per_frame[del_req_id]));
-				if (rc < 0)
-					CAM_ERR(CAM_SENSOR,
-						"Delete request Fail:%d rc:%d",
-						del_req_id, rc);
-			}
 		} else {
 			CAM_DBG(CAM_SENSOR,
 				"Invalid/NOP request to apply: %lld", req_id);
 		}
+
+		del_req_id = (req_id + MAX_PER_FRAME_ARRAY -
+			MAX_SYSTEM_PIPELINE_DELAY) % MAX_PER_FRAME_ARRAY;
+		CAM_DBG(CAM_SENSOR, "Deleting the Request: %d", del_req_id);
+
+		if ((req_id >
+			 s_ctrl->i2c_data.per_frame[del_req_id].request_id) &&
+			(s_ctrl->i2c_data.per_frame[del_req_id].
+				is_settings_valid == 1)) {
+			s_ctrl->i2c_data.per_frame[del_req_id].request_id = 0;
+			rc = delete_request(
+				&(s_ctrl->i2c_data.per_frame[del_req_id]));
+			if (rc < 0)
+				CAM_ERR(CAM_SENSOR,
+					"Delete request Fail:%d rc:%d",
+					del_req_id, rc);
+		}
 	}
 	return rc;
 }
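
The sensor hunk above always computes del_req_id and only deletes that slot when its settings are valid and older than the request currently being applied. A worked example of the slot arithmetic with placeholder constants (the real MAX_PER_FRAME_ARRAY and MAX_SYSTEM_PIPELINE_DELAY values may differ):

#include <stdio.h>

#define MAX_PER_FRAME_ARRAY       8	/* placeholder */
#define MAX_SYSTEM_PIPELINE_DELAY 2	/* placeholder */

int main(void)
{
	long req_id = 10;
	long del_req_id = (req_id + MAX_PER_FRAME_ARRAY -
		MAX_SYSTEM_PIPELINE_DELAY) % MAX_PER_FRAME_ARRAY;

	/* (10 + 8 - 2) % 8 = 0, the slot request 8 used two frames earlier */
	printf("applying %ld, deleting slot %ld\n", req_id, del_req_id);
	return 0;
}
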
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
index 3257703..2c1f520 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
@@ -38,7 +38,7 @@
 	rc = v4l2_subdev_call(cci_client->cci_subdev,
 		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
 	if (rc < 0) {
-		CAM_ERR(CAM_SENSOR, "line %d rc = %d", rc);
+		CAM_ERR(CAM_SENSOR, "rc = %d", rc);
 		return rc;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index c9cf088..6e169cf 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -58,7 +58,7 @@
 			addr, data, addr_type, data_type);
 	} else if (io_master_info->master_type == SPI_MASTER) {
 		return cam_spi_read(io_master_info,
-			addr, data, addr_type);
+			addr, data, addr_type, data_type);
 	} else {
 		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
 			io_master_info->master_type);
@@ -78,8 +78,8 @@
 		return cam_qup_i2c_read_seq(io_master_info->client,
 			addr, data, addr_type, num_bytes);
 	} else if (io_master_info->master_type == SPI_MASTER) {
-		return cam_spi_read(io_master_info,
-			addr, (uint32_t *)data, addr_type);
+		return cam_spi_read_seq(io_master_info,
+			addr, data, addr_type, num_bytes);
 	} else {
 		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
 			io_master_info->master_type);
@@ -153,6 +153,9 @@
 			cam_cci_get_subdev();
 		return cam_sensor_cci_i2c_util(io_master_info->cci_client,
 			MSM_CCI_INIT);
+	} else if ((io_master_info->master_type == I2C_MASTER) ||
+			(io_master_info->master_type == SPI_MASTER)) {
+		return 0;
 	} else {
 		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
 			io_master_info->master_type);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
index 4011aa0..131b0ae 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
@@ -59,25 +59,27 @@
  * instruction and the data length.
  */
 static void cam_set_addr(uint32_t addr, uint8_t addr_len,
-	enum camera_sensor_i2c_type type,
+	enum camera_sensor_i2c_type addr_type,
 	char *str)
 {
-	int i, len;
-
 	if (!addr_len)
 		return;
 
-	if (addr_len < type)
-		CAM_DBG(CAM_EEPROM, "omitting higher bits in address");
-
-	/* only support transfer MSB first for now */
-	len = addr_len - type;
-	for (i = len; i < addr_len; i++) {
-		if (i >= 0)
-			str[i] = (addr >> (BITS_PER_BYTE * (addr_len - i - 1)))
-				& 0xFF;
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		str[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		str[0] = addr >> 8;
+		str[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		str[0] = addr >> 16;
+		str[1] = addr >> 8;
+		str[2] = addr;
+	} else {
+		str[0] = addr >> 24;
+		str[1] = addr >> 16;
+		str[2] = addr >> 8;
+		str[3] = addr;
 	}
-
 }
 
 /**
@@ -105,6 +107,7 @@
  */
 static int32_t cam_spi_tx_helper(struct camera_io_master *client,
 	struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	uint32_t num_byte, char *tx, char *rx)
 {
 	int32_t rc = -EINVAL;
@@ -112,10 +115,6 @@
 	char *ctx = NULL, *crx = NULL;
 	uint32_t len, hlen;
 	uint8_t retries = client->spi_client->retries;
-	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
-
-	if (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
-		return rc;
 
 	hlen = cam_camera_spi_get_hlen(inst);
 	len = hlen + num_byte;
@@ -166,6 +165,7 @@
 
 static int32_t cam_spi_tx_read(struct camera_io_master *client,
 	struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	uint32_t num_byte, char *tx, char *rx)
 {
 	int32_t rc = -EINVAL;
@@ -173,12 +173,6 @@
 	char *ctx = NULL, *crx = NULL;
 	uint32_t hlen;
 	uint8_t retries = client->spi_client->retries;
-	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
-
-	if ((addr_type != CAMERA_SENSOR_I2C_TYPE_WORD)
-		&& (addr_type != CAMERA_SENSOR_I2C_TYPE_BYTE)
-		&& (addr_type != CAMERA_SENSOR_I2C_TYPE_3B))
-		return rc;
 
 	hlen = cam_camera_spi_get_hlen(inst);
 	if (tx) {
@@ -204,14 +198,8 @@
 	}
 
 	ctx[0] = inst->opcode;
-	if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
-		cam_set_addr(addr, inst->addr_len, addr_type,
-			ctx + 1);
-	} else {
-		ctx[1] = (addr >> BITS_PER_BYTE) & 0xFF;
-		ctx[2] = (addr & 0xFF);
-		ctx[3] = 0;
-	}
+	cam_set_addr(addr, inst->addr_len, addr_type, ctx + 1);
+
 	CAM_DBG(CAM_EEPROM, "tx(%u): %02x %02x %02x %02x", hlen, ctx[0],
 		ctx[1], ctx[2],	ctx[3]);
 	while ((rc = cam_spi_txfr_read(spi, ctx, crx, hlen, num_byte))
@@ -235,18 +223,23 @@
 
 int cam_spi_read(struct camera_io_master *client,
 	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type)
 {
 	int rc = -EINVAL;
 	uint8_t temp[CAMERA_SENSOR_I2C_TYPE_MAX];
 
-	if ((data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
-		|| (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr/data_type verification");
 		return rc;
+	}
 
 	rc = cam_spi_tx_read(client,
 		&client->spi_client->cmd_tbl.read, addr, &temp[0],
-		data_type, NULL, NULL);
+		addr_type, data_type, NULL, NULL);
 	if (rc < 0) {
 		CAM_ERR(CAM_SENSOR, "failed %d", rc);
 		return rc;
@@ -254,23 +247,50 @@
 
 	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
 		*data = temp[0];
-	else
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
 		*data = (temp[0] << BITS_PER_BYTE) | temp[1];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+		*data = (temp[0] << 16 | temp[1] << 8 | temp[2]);
+	else
+		*data = (temp[0] << 24 | temp[1] << 16 | temp[2] << 8 |
+			temp[3]);
 
 	CAM_DBG(CAM_SENSOR, "addr 0x%x, data %u", addr, *data);
 	return rc;
 }
 
+int32_t cam_spi_read_seq(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type, int32_t num_bytes)
+{
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr_type verification");
+		return -EINVAL;
+	}
+
+	if (num_bytes == 0) {
+		CAM_ERR(CAM_SENSOR, "num_byte: 0x%x", num_bytes);
+		return -EINVAL;
+	}
+
+	return cam_spi_tx_helper(client,
+		&client->spi_client->cmd_tbl.read_seq, addr, data,
+		addr_type, num_bytes, NULL, NULL);
+}
+
 int cam_spi_query_id(struct camera_io_master *client,
-	uint32_t addr, uint8_t *data, uint32_t num_byte)
+	uint32_t addr, enum camera_sensor_i2c_type addr_type,
+	uint8_t *data, uint32_t num_byte)
 {
 	return cam_spi_tx_helper(client,
-		&client->spi_client->cmd_tbl.query_id, addr, data, num_byte,
-		NULL, NULL);
+		&client->spi_client->cmd_tbl.query_id,
+		addr, data, addr_type, num_byte, NULL, NULL);
 }
 
 static int32_t cam_spi_read_status_reg(
-	struct camera_io_master *client, uint8_t *status)
+	struct camera_io_master *client, uint8_t *status,
+	enum camera_sensor_i2c_type addr_type)
 {
 	struct cam_camera_spi_inst *rs =
 		&client->spi_client->cmd_tbl.read_status;
@@ -279,16 +299,17 @@
 		CAM_ERR(CAM_SENSOR, "not implemented yet");
 		return -ENXIO;
 	}
-	return cam_spi_tx_helper(client, rs, 0, status, 1, NULL, NULL);
+	return cam_spi_tx_helper(client, rs, 0, status,
+		addr_type, 1, NULL, NULL);
 }
 
 static int32_t cam_spi_device_busy(struct camera_io_master *client,
-	uint8_t *busy)
+	uint8_t *busy, enum camera_sensor_i2c_type addr_type)
 {
 	int rc;
 	uint8_t st = 0;
 
-	rc = cam_spi_read_status_reg(client,  &st);
+	rc = cam_spi_read_status_reg(client, &st, addr_type);
 	if (rc < 0) {
 		CAM_ERR(CAM_SENSOR, "failed to read status reg");
 		return rc;
@@ -298,14 +319,15 @@
 }
 
 static int32_t cam_spi_wait(struct camera_io_master *client,
-	struct cam_camera_spi_inst *inst)
+	struct cam_camera_spi_inst *inst,
+	enum camera_sensor_i2c_type addr_type)
 {
 	uint8_t busy;
 	int i, rc;
 
 	CAM_DBG(CAM_SENSOR, "op 0x%x wait start", inst->opcode);
 	for (i = 0; i < inst->delay_count; i++) {
-		rc = cam_spi_device_busy(client, &busy);
+		rc = cam_spi_device_busy(client, &busy, addr_type);
 		if (rc < 0)
 			return rc;
 		if (!busy)
@@ -321,8 +343,8 @@
 	return 0;
 }
 
-static int32_t cam_spi_write_enable(
-	struct camera_io_master *client)
+static int32_t cam_spi_write_enable(struct camera_io_master *client,
+	enum camera_sensor_i2c_type addr_type)
 {
 	struct cam_camera_spi_inst *we =
 		&client->spi_client->cmd_tbl.write_enable;
@@ -334,7 +356,8 @@
 		CAM_ERR(CAM_SENSOR, "not implemented yet");
 		return -EINVAL;
 	}
-	rc = cam_spi_tx_helper(client, we, 0, NULL, 0, NULL, NULL);
+	rc = cam_spi_tx_helper(client, we, 0, NULL, addr_type,
+		0, NULL, NULL);
 	if (rc < 0)
 		CAM_ERR(CAM_SENSOR, "write enable failed");
 	return rc;
@@ -354,7 +377,9 @@
  * used outside cam_spi_write_seq().
  */
 static int32_t cam_spi_page_program(struct camera_io_master *client,
-	uint32_t addr, uint8_t *data, uint16_t len, uint8_t *tx)
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint16_t len, uint8_t *tx)
 {
 	int rc;
 	struct cam_camera_spi_inst *pg =
@@ -362,10 +387,9 @@
 	struct spi_device *spi = client->spi_client->spi_master;
 	uint8_t retries = client->spi_client->retries;
 	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
-	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
 
 	CAM_DBG(CAM_SENSOR, "addr 0x%x, size 0x%x", addr, len);
-	rc = cam_spi_write_enable(client);
+	rc = cam_spi_write_enable(client, addr_type);
 	if (rc < 0)
 		return rc;
 	memset(tx, 0, header_len);
@@ -375,7 +399,7 @@
 	CAM_DBG(CAM_SENSOR, "tx(%u): %02x %02x %02x %02x",
 		len, tx[0], tx[1], tx[2], tx[3]);
 	while ((rc = spi_write(spi, tx, len + header_len)) && retries) {
-		rc = cam_spi_wait(client, pg);
+		rc = cam_spi_wait(client, pg, addr_type);
 		msleep(client->spi_client->retry_delay);
 		retries--;
 	}
@@ -383,41 +407,54 @@
 		CAM_ERR(CAM_SENSOR, "failed %d", rc);
 		return rc;
 	}
-	rc = cam_spi_wait(client, pg);
+	rc = cam_spi_wait(client, pg, addr_type);
 		return rc;
 }
 
 int cam_spi_write(struct camera_io_master *client,
-	uint32_t addr, uint16_t data,
+	uint32_t addr, uint32_t data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type)
 {
 	struct cam_camera_spi_inst *pg =
 		&client->spi_client->cmd_tbl.page_program;
 	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
 	uint16_t len = 0;
-	char buf[2];
+	char buf[CAMERA_SENSOR_I2C_TYPE_MAX];
 	char *tx;
 	int rc = -EINVAL;
-	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
 
-	if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
-		|| (data_type != CAMERA_SENSOR_I2C_TYPE_BYTE
-		&& data_type != CAMERA_SENSOR_I2C_TYPE_WORD))
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
 		return rc;
+
 	CAM_DBG(CAM_EEPROM, "Data: 0x%x", data);
 	len = header_len + (uint8_t)data_type;
 	tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
 	if (!tx)
 		goto NOMEM;
+
 	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
 		buf[0] = data;
 		CAM_DBG(CAM_EEPROM, "Byte %d: 0x%x", len, buf[0]);
 	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
 		buf[0] = (data >> BITS_PER_BYTE) & 0x00FF;
 		buf[1] = (data & 0x00FF);
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = (data >> 16) & 0x00FF;
+		buf[1] = (data >> 8) & 0x00FF;
+		buf[2] = (data & 0x00FF);
+	} else {
+		buf[0] = (data >> 24) & 0x00FF;
+		buf[1] = (data >> 16) & 0x00FF;
+		buf[2] = (data >> 8) & 0x00FF;
+		buf[3] = (data & 0x00FF);
 	}
+
 	rc = cam_spi_page_program(client, addr, buf,
-		(uint16_t)data_type, tx);
+		addr_type, data_type, tx);
 	if (rc < 0)
 		goto ERROR;
 	goto OUT;
@@ -442,18 +479,22 @@
 
 	if (!client || !write_setting)
 		return rc;
-	if (write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
-		|| (write_setting->data_type != CAMERA_SENSOR_I2C_TYPE_BYTE
-		&& write_setting->data_type != CAMERA_SENSOR_I2C_TYPE_WORD))
+
+	if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
 		return rc;
+
 	reg_setting = write_setting->reg_setting;
-	client_addr_type = addr_type;
+	client_addr_type = write_setting->addr_type;
 	addr_type = write_setting->addr_type;
 	for (i = 0; i < write_setting->size; i++) {
 		CAM_DBG(CAM_SENSOR, "addr %x data %x",
 			reg_setting->reg_addr, reg_setting->reg_data);
-		rc = cam_spi_write(client, reg_setting->reg_addr,
-			reg_setting->reg_data, write_setting->data_type);
+		rc = cam_spi_write(client,
+			reg_setting->reg_addr, reg_setting->reg_data,
+			write_setting->addr_type, write_setting->data_type);
 		if (rc < 0)
 			break;
 		reg_setting++;
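
cam_set_addr() above now packs the transfer address MSB-first according to the caller's addr_type instead of inferring the width. A standalone version of the 3-byte case for quick verification:

#include <stdio.h>
#include <stdint.h>

static void set_addr_3b(uint32_t addr, char *str)
{
	str[0] = addr >> 16;
	str[1] = addr >> 8;
	str[2] = addr;
}

int main(void)
{
	char buf[3];

	set_addr_3b(0x012345, buf);
	printf("%02x %02x %02x\n",
		buf[0] & 0xff, buf[1] & 0xff, buf[2] & 0xff);	/* 01 23 45 */
	return 0;
}
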
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
index a497491..ec1bede 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
@@ -78,13 +78,22 @@
 
 int cam_spi_read(struct camera_io_master *client,
 	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type);
 
+int cam_spi_read_seq(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	int32_t num_bytes);
+
 int cam_spi_query_id(struct camera_io_master *client,
-	uint32_t addr, uint8_t *data, uint32_t num_byte);
+	uint32_t addr,
+	enum camera_sensor_i2c_type addr_type,
+	uint8_t *data, uint32_t num_byte);
 
 int cam_spi_write(struct camera_io_master *client,
-	uint32_t addr, uint16_t data,
+	uint32_t addr, uint32_t data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type);
 
 int cam_spi_write_table(struct camera_io_master *client,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 33cbcb6..bcdaf6d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -217,9 +217,19 @@
 		sizeof(cam_cmd_i2c_continuous_wr->reg_addr) +
 		sizeof(struct cam_cmd_read) *
 		(cam_cmd_i2c_continuous_wr->header.count));
-	i2c_list->op_code = cam_cmd_i2c_continuous_wr->header.op_code;
+	if (cam_cmd_i2c_continuous_wr->header.op_code ==
+		CAMERA_SENSOR_I2C_OP_CONT_WR_BRST)
+		i2c_list->op_code = CAM_SENSOR_I2C_WRITE_BURST;
+	else if (cam_cmd_i2c_continuous_wr->header.op_code ==
+		CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN)
+		i2c_list->op_code = CAM_SENSOR_I2C_WRITE_SEQ;
+	else
+		return -EINVAL;
+
 	i2c_list->i2c_settings.addr_type =
 		cam_cmd_i2c_continuous_wr->header.addr_type;
+	i2c_list->i2c_settings.data_type =
+		cam_cmd_i2c_continuous_wr->header.data_type;
 
 	for (cnt = 0; cnt < (cam_cmd_i2c_continuous_wr->header.count);
 		cnt++) {
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index ff7a0e5..cbf54f7 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -21,7 +21,8 @@
 #include <linux/msm_dma_iommu_mapping.h>
 #include <linux/workqueue.h>
 #include <linux/genalloc.h>
-
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
 #include "cam_smmu_api.h"
 #include "cam_debug_util.h"
 
@@ -118,6 +119,7 @@
 		int, void*);
 	void *token[CAM_SMMU_CB_MAX];
 	int cb_count;
+	int secure_count;
 };
 
 struct cam_iommu_cb_set {
@@ -151,6 +153,17 @@
 	size_t phys_len;
 };
 
+struct cam_sec_buff_info {
+	struct ion_handle *i_hdl;
+	struct ion_client *i_client;
+	enum dma_data_direction dir;
+	int ref_count;
+	dma_addr_t paddr;
+	struct list_head list;
+	int ion_fd;
+	size_t len;
+};
+
 static struct cam_iommu_cb_set iommu_cb_set;
 
 static enum dma_data_direction cam_smmu_translate_dir(
@@ -166,6 +179,9 @@
 static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
 	int ion_fd);
 
+static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
+	int ion_fd);
+
 static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
 	dma_addr_t base, size_t size,
 	int order);
@@ -544,7 +560,15 @@
 					"Error: %s already got handle 0x%x",
 					name,
 					iommu_cb_set.cb_info[i].handle);
+
+				if (iommu_cb_set.cb_info[i].is_secure)
+					iommu_cb_set.cb_info[i].secure_count++;
+
 				mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+				if (iommu_cb_set.cb_info[i].is_secure) {
+					*hdl = iommu_cb_set.cb_info[i].handle;
+					return 0;
+				}
 				return -EINVAL;
 			}
 
@@ -556,6 +580,8 @@
 			/* put handle in the table */
 			iommu_cb_set.cb_info[i].handle = handle;
 			iommu_cb_set.cb_info[i].cb_count = 0;
+			if (iommu_cb_set.cb_info[i].is_secure)
+				iommu_cb_set.cb_info[i].secure_count++;
 			*hdl = handle;
 			CAM_DBG(CAM_SMMU, "%s creates handle 0x%x",
 				name, handle);
@@ -693,6 +719,24 @@
 
 	CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
 		ion_fd, idx);
+
+	return NULL;
+}
+
+static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
+	int ion_fd)
+{
+	struct cam_sec_buff_info *mapping;
+
+	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+		list) {
+		if (mapping->ion_fd == ion_fd) {
+			CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
+			return mapping;
+		}
+	}
+	CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
+		ion_fd, idx);
 	return NULL;
 }
 
@@ -1339,6 +1383,25 @@
 	return CAM_SMMU_BUFF_NOT_EXIST;
 }
 
+static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
+					int ion_fd, dma_addr_t *paddr_ptr,
+					size_t *len_ptr)
+{
+	struct cam_sec_buff_info *mapping;
+
+	list_for_each_entry(mapping,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list,
+			list) {
+		if (mapping->ion_fd == ion_fd) {
+			*paddr_ptr = mapping->paddr;
+			*len_ptr = mapping->len;
+			return CAM_SMMU_BUFF_EXIST;
+		}
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
 int cam_smmu_get_handle(char *identifier, int *handle_ptr)
 {
 	int ret = 0;
@@ -1722,14 +1785,200 @@
 	return rc;
 }
 
-int cam_smmu_map_sec_iova(int handle, int ion_fd,
-	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
-	size_t *len_ptr)
+static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
+		 enum dma_data_direction dma_dir, struct ion_client *client,
+		 dma_addr_t *paddr_ptr,
+		 size_t *len_ptr)
 {
-	/* not implemented yet */
-	return -EPERM;
+	int rc = 0;
+	struct ion_handle *i_handle = NULL;
+	struct cam_sec_buff_info *mapping_info;
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	i_handle = ion_import_dma_buf_fd(client, ion_fd);
+	if (IS_ERR_OR_NULL((void *)(i_handle))) {
+		CAM_ERR(CAM_SMMU, "ion import dma buffer failed");
+		return -EINVAL;
+	}
+
+	/* return addr and len to client */
+	rc = ion_phys(client, i_handle, paddr_ptr, len_ptr);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "ION Get Physical failed, rc: %d",
+			rc);
+		return rc;
+	}
+
+	/* fill up mapping_info */
+	mapping_info = kzalloc(sizeof(struct cam_sec_buff_info), GFP_KERNEL);
+	if (!mapping_info)
+		return -ENOMEM;
+
+	mapping_info->ion_fd = ion_fd;
+	mapping_info->paddr = *paddr_ptr;
+	mapping_info->len = *len_ptr;
+	mapping_info->dir = dma_dir;
+	mapping_info->ref_count = 1;
+	mapping_info->i_hdl = i_handle;
+	mapping_info->i_client = client;
+
+	CAM_DBG(CAM_SMMU, "ion_fd = %d, dev = %pK, paddr= %pK, len = %u",
+			ion_fd,
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)*paddr_ptr, (unsigned int)*len_ptr);
+
+	/* add to the list */
+	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+	return rc;
 }
-EXPORT_SYMBOL(cam_smmu_map_sec_iova);
+
+int cam_smmu_map_stage2_iova(int handle,
+		int ion_fd, enum cam_smmu_map_dir dir,
+		struct ion_client *client, ion_phys_addr_t *paddr_ptr,
+		size_t *len_ptr)
+{
+	int idx, rc;
+	enum dma_data_direction dma_dir;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid inputs, paddr_ptr:%pK, len_ptr: %pK",
+			paddr_ptr, len_ptr);
+		return -EINVAL;
+	}
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	dma_dir = cam_smmu_translate_dir(dir);
+	if (dma_dir == DMA_NONE) {
+		CAM_ERR(CAM_SMMU,
+			"Error: translate direction failed. dir = %d", dir);
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if ((handle == HANDLE_INIT) ||
+		(idx < 0) ||
+		(idx >= iommu_cb_set.cb_num)) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't map secure mem to non secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_secure_fd_in_list(idx, ion_fd, paddr_ptr,
+			len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_EXIST) {
+		CAM_DBG(CAM_SMMU, "fd:%d already in list, give same addr back",
+			ion_fd);
+		rc = 0;
+		goto get_addr_end;
+	}
+	rc = cam_smmu_map_stage2_buffer_and_add_to_list(idx, ion_fd, dma_dir,
+			client, paddr_ptr, len_ptr);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: mapping or add list fail");
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_stage2_iova);
+
+static int cam_smmu_secure_unmap_buf_and_remove_from_list(
+		struct cam_sec_buff_info *mapping_info,
+		int idx)
+{
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU, "Error: List doesn't exist");
+		return -EINVAL;
+	}
+	ion_free(mapping_info->i_client, mapping_info->i_hdl);
+	list_del_init(&mapping_info->list);
+
+	CAM_DBG(CAM_SMMU, "unmap fd: %d, idx : %d", mapping_info->ion_fd, idx);
+
+	/* free one buffer */
+	kfree(mapping_info);
+	return 0;
+}
+
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd)
+{
+	int idx, rc;
+	struct cam_sec_buff_info *mapping_info;
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if ((handle == HANDLE_INIT) ||
+		(idx < 0) ||
+		(idx >= iommu_cb_set.cb_num)) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't unmap secure mem from non secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	/* based on ion fd and index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_sec_buf_idx(idx, ion_fd);
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params! idx = %d, fd = %d",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	/* unmapping one buffer from device */
+	rc = cam_smmu_secure_unmap_buf_and_remove_from_list(mapping_info, idx);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+		goto put_addr_end;
+	}
+
+put_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_stage2_iova);
 
 int cam_smmu_map_iova(int handle, int ion_fd,
 	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
@@ -1767,6 +2016,12 @@
 		return -EINVAL;
 	}
 
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't map non-secure mem to secure cb");
+		return -EINVAL;
+	}
+
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
 		CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
@@ -1786,9 +2041,7 @@
 	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
 		len_ptr);
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
-		CAM_ERR(CAM_SMMU,
-			"ion_fd:%d already in the list, give same addr back",
-			 ion_fd);
+		CAM_ERR(CAM_SMMU, "ion_fd:%d already in the list", ion_fd);
 		rc = -EALREADY;
 		goto get_addr_end;
 	}
@@ -1832,6 +2085,12 @@
 		return -EINVAL;
 	}
 
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't get non-secure mem from secure cb");
+		return -EINVAL;
+	}
+
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
 		CAM_ERR(CAM_SMMU,
@@ -1854,12 +2113,65 @@
 }
 EXPORT_SYMBOL(cam_smmu_get_iova);
 
-int cam_smmu_unmap_sec_iova(int handle, int ion_fd)
+int cam_smmu_get_stage2_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr)
 {
-	/* not implemented yet */
-	return -EPERM;
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't get secure mem from non secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_secure_fd_in_list(idx,
+		ion_fd,
+		paddr_ptr,
+		len_ptr);
+
+	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
+		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
 }
-EXPORT_SYMBOL(cam_smmu_unmap_sec_iova);
+EXPORT_SYMBOL(cam_smmu_get_stage2_iova);
 
 int cam_smmu_unmap_iova(int handle,
 	int ion_fd,
@@ -1882,6 +2194,12 @@
 		return -EINVAL;
 	}
 
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't unmap non-secure mem from secure cb");
+		return -EINVAL;
+	}
+
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
 		CAM_ERR(CAM_SMMU,
@@ -1989,6 +2307,22 @@
 		cam_smmu_clean_buffer_list(idx);
 	}
 
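+	/*
+	 * Secure context banks are reference counted: drop one secure user
+	 * here and reset the handle only after the last user detaches.
+	 */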
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		if (iommu_cb_set.cb_info[idx].secure_count == 0) {
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -EPERM;
+		}
+
+		iommu_cb_set.cb_info[idx].secure_count--;
+		if (iommu_cb_set.cb_info[idx].secure_count == 0) {
+			iommu_cb_set.cb_info[idx].cb_count = 0;
+			iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
+		}
+
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return 0;
+	}
+
 	iommu_cb_set.cb_info[idx].cb_count = 0;
 	iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -2168,7 +2502,15 @@
 	}
 
 	mem_map_node = of_get_child_by_name(of_node, "iova-mem-map");
+	cb->is_secure = of_property_read_bool(of_node, "qcom,secure-cb");
+
+	/*
+	 * We always expect a memory map node, except when it is a secure
+	 * context bank.
+	 */
 	if (!mem_map_node) {
+		if (cb->is_secure)
+			return 0;
 		CAM_ERR(CAM_SMMU, "iova-mem-map not present");
 		return -EINVAL;
 	}
@@ -2292,6 +2634,12 @@
 		return rc;
 	}
 
+	if (cb->is_secure) {
+		/* Secure context banks need no IOMMU mapping here; just advance the init count */
+		iommu_cb_set.cb_init_count++;
+		return 0;
+	}
+
 	/* set up the iommu mapping for the  context bank */
 	if (type == CAM_QSMMU) {
 		CAM_ERR(CAM_SMMU, "Error: QSMMU ctx not supported for : %s",
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
index 20445f3..4cb6efb 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -204,6 +204,19 @@
  */
 int cam_smmu_get_iova(int handle, int ion_fd,
 	dma_addr_t *paddr_ptr, size_t *len_ptr);
+
+/**
+ * @brief Gets the IOVA of a buffer already mapped to the secure context bank
+ *
+ * @param handle: SMMU handle identifying the secure context bank
+ * @param ion_fd: ION fd of the mapped memory
+ * @param paddr_ptr: Pointer to the IOVA address that will be returned
+ * @param len_ptr: Length of the mapped memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_stage2_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr);
 /**
  * @brief Unmaps memory from context bank
  *
@@ -217,27 +230,28 @@
 /**
  * @brief Maps secure memory for SMMU handle
  *
- * @param handle: SMMU handle identifying context bank
+ * @param handle: SMMU handle identifying secure context bank
  * @param ion_fd: ION fd to map securely
  * @param dir: DMA Direction for the mapping
+ * @param client: Ion client passed by caller
  * @param dma_addr: Returned IOVA address after mapping
  * @param len_ptr: Length of memory mapped
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_smmu_map_sec_iova(int handle,
-	int ion_fd, enum cam_smmu_map_dir dir,
-	dma_addr_t *dma_addr, size_t *len_ptr);
+int cam_smmu_map_stage2_iova(int handle,
+	int ion_fd, enum cam_smmu_map_dir dir, struct ion_client *client,
+	ion_phys_addr_t *dma_addr, size_t *len_ptr);
 
 /**
  * @brief Unmaps secure memory for SMMU handle
  *
- * @param handle: SMMU handle identifying context bank
+ * @param handle: SMMU handle identifying secure context bank
  * @param ion_fd: ION fd to unmap
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_smmu_unmap_sec_iova(int handle, int ion_fd);
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd);
 
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index 4323358..44d5d48 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -124,7 +124,7 @@
 }
 
 int cam_packet_util_process_patches(struct cam_packet *packet,
-	int32_t iommu_hdl)
+	int32_t iommu_hdl, int32_t sec_mmu_hdl)
 {
 	struct cam_patch_desc *patch_desc = NULL;
 	uint64_t   iova_addr;
@@ -136,6 +136,7 @@
 	size_t     src_buf_size;
 	int        i;
 	int        rc = 0;
+	int32_t    hdl;
 
 	/* process patch descriptor */
 	patch_desc = (struct cam_patch_desc *)
@@ -146,8 +147,11 @@
 			sizeof(struct cam_patch_desc));
 
 	for (i = 0; i < packet->num_patches; i++) {
+
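+		/*
+		 * Patch addresses for secure buffers are resolved through the
+		 * secure MMU handle; all other buffers use the regular
+		 * IOMMU handle.
+		 */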
+		hdl = cam_mem_is_secure_buf(patch_desc[i].src_buf_hdl) ?
+			sec_mmu_hdl : iommu_hdl;
 		rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
-			iommu_hdl, &iova_addr, &src_buf_size);
+			hdl, &iova_addr, &src_buf_size);
 		if (rc < 0) {
 			CAM_ERR(CAM_UTIL, "unable to get src buf address");
 			return rc;
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
index 2fa7585..323a75a 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
@@ -83,12 +83,14 @@
  *
  * @packet:             Input packet containing Command Buffers and Patches
  * @iommu_hdl:          IOMMU handle of the HW Device that received the packet
+ * @sec_iommu_hdl:      Secure IOMMU handle of the HW Device that
+ *                      received the packet
  *
  * @return:             0: Success
  *                      Negative: Failure
  */
 int cam_packet_util_process_patches(struct cam_packet *packet,
-	int32_t iommu_hdl);
+	int32_t iommu_hdl, int32_t sec_mmu_hdl);
 
 /**
  * cam_packet_util_process_generic_cmd_buffer()
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index d749384..a54f028 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -2394,6 +2394,10 @@
 		config->input.format, config->output.width,
 		config->output.height, config->output.format,
 		config->frame_rate, perf->clk_rate, perf->bw);
+	SDEROT_EVTLOG(config->session_id, config->input.width,
+			config->input.height, config->input.format,
+			config->output.width, config->output.height,
+			config->output.format, config->frame_rate);
 done:
 	return ret;
 }
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 76c9367..f81cd2f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -2184,6 +2184,7 @@
 	if (ret < 0)
 		SDEDEV_ERR(ctx->rot_dev->dev, "fail qbuf s:%d t:%d r:%d\n",
 				ctx->session_id, buf->type, ret);
+	SDEROT_EVTLOG(buf->type, buf->bytesused, buf->length, buf->m.fd, ret);
 
 	return ret;
 }
@@ -2366,6 +2367,7 @@
 	a->pixelaspect.numerator = 1;
 	a->pixelaspect.denominator = 1;
 
+	SDEROT_EVTLOG(format->fmt.pix.width, format->fmt.pix.height, a->type);
 	return 0;
 }
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
index 9ef4282..ac4ab54 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
@@ -35,6 +35,7 @@
 
 #include "sde_rotator_util.h"
 #include "sde_rotator_smmu.h"
+#include "sde_rotator_debug.h"
 
 #define Y_TILEWIDTH     48
 #define Y_TILEHEIGHT    4
@@ -1038,6 +1039,8 @@
 			break;
 		}
 	}
+	SDEROT_EVTLOG(data->num_planes, dir, data->p[0].addr, data->p[0].len,
+			data->p[0].mapped);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 7688f89..5198bc3 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -80,6 +80,8 @@
 			dprintk(VIDC_ERR,
 				"Size mismatch! Dmabuf size: %zu Expected Size: %lu",
 				buf->size, *buffer_size);
+			msm_vidc_res_handle_fatal_hw_error(smem_client->res,
+					true);
 			goto mem_buf_size_mismatch;
 		}
 		/* Prepare a dma buf for dma on the given device */
@@ -327,6 +329,7 @@
 	if (ion_flags & ION_FLAG_SECURE)
 		smem->flags |= SMEM_SECURE;
 
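+	/*
+	 * Seed buffer_size with the allocated size so the device-address
+	 * lookup can validate it against the dmabuf size.
+	 */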
+	buffer_size = smem->size;
 	rc = msm_ion_get_device_address(inst->mem_client, ion_handle,
 			align, &iova, &buffer_size, smem->flags,
 			smem->buffer_type, &smem->mapping_info);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index a82b598..56524ccd4 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -289,6 +289,7 @@
 		.compatible = "qcom,sdm670-vidc",
 		.data = &sdm670_data,
 	},
+	{},
 };
 
 MODULE_DEVICE_TABLE(of, msm_vidc_dt_match);
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index c8663ae..e260886 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1757,9 +1757,8 @@
 	if (rc || __iface_cmdq_write(dev, &version_pkt))
 		dprintk(VIDC_WARN, "Failed to send image version pkt to f/w\n");
 
-	rc = __enable_subcaches(device);
-	if (!rc)
-		__set_subcaches(device);
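+	/*
+	 * Subcache setup is best effort: failures inside these helpers are
+	 * logged (and reported as a fatal HW error where applicable) without
+	 * failing this path.
+	 */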
+	__enable_subcaches(device);
+	__set_subcaches(device);
 
 	if (dev->res->pm_qos_latency_us) {
 #ifdef CONFIG_SMP
@@ -3797,8 +3796,9 @@
 	venus_hfi_for_each_subcache(device, sinfo) {
 		rc = llcc_slice_activate(sinfo->subcache);
 		if (rc) {
-			dprintk(VIDC_ERR, "Failed to activate %s: %d\n",
+			dprintk(VIDC_WARN, "Failed to activate %s: %d\n",
 				sinfo->name, rc);
+			msm_vidc_res_handle_fatal_hw_error(device->res, true);
 			goto err_activate_fail;
 		}
 		sinfo->isactive = true;
@@ -3813,7 +3813,7 @@
 err_activate_fail:
 	__release_subcaches(device);
 	__disable_subcaches(device);
-	return -EINVAL;
+	return 0;
 }
 
 static int __set_subcaches(struct venus_hfi_device *device)
@@ -3855,25 +3855,25 @@
 
 		rc = __core_set_resource(device, &rhdr, (void *)sc_res_info);
 		if (rc) {
-			dprintk(VIDC_ERR, "Failed to set subcaches %d\n", rc);
+			dprintk(VIDC_WARN, "Failed to set subcaches %d\n", rc);
 			goto err_fail_set_subacaches;
 		}
-	}
 
-	venus_hfi_for_each_subcache(device, sinfo) {
-		if (sinfo->isactive == true)
-			sinfo->isset = true;
-	}
+		venus_hfi_for_each_subcache(device, sinfo) {
+			if (sinfo->isactive == true)
+				sinfo->isset = true;
+		}
 
-	dprintk(VIDC_DBG, "Set Subcaches done to Venus\n");
-	device->res->sys_cache_res_set = true;
+		dprintk(VIDC_DBG, "Set Subcaches done to Venus\n");
+		device->res->sys_cache_res_set = true;
+	}
 
 	return 0;
 
 err_fail_set_subacaches:
 	__disable_subcaches(device);
 
-	return rc;
+	return 0;
 }
 
 static int __release_subcaches(struct venus_hfi_device *device)
@@ -3912,13 +3912,13 @@
 
 		rc = __core_release_resource(device, &rhdr);
 		if (rc)
-			dprintk(VIDC_ERR,
+			dprintk(VIDC_WARN,
 				"Failed to release %d subcaches\n", c);
 	}
 
 	device->res->sys_cache_res_set = false;
 
-	return rc;
+	return 0;
 }
 
 static int __disable_subcaches(struct venus_hfi_device *device)
@@ -3936,7 +3936,7 @@
 				sinfo->name);
 			rc = llcc_slice_deactivate(sinfo->subcache);
 			if (rc) {
-				dprintk(VIDC_ERR,
+				dprintk(VIDC_WARN,
 					"Failed to de-activate %s: %d\n",
 					sinfo->name, rc);
 			}
@@ -3944,7 +3944,7 @@
 		}
 	}
 
-	return rc;
+	return 0;
 }
 
 static int __venus_power_on(struct venus_hfi_device *device)
@@ -4118,9 +4118,8 @@
 
 	__sys_set_debug(device, msm_vidc_fw_debug);
 
-	rc = __enable_subcaches(device);
-	if (!rc)
-		__set_subcaches(device);
+	__enable_subcaches(device);
+	__set_subcaches(device);
 
 	dprintk(VIDC_PROF, "Resumed from power collapse\n");
 exit:
diff --git a/drivers/mfd/qcom-i2c-pmic.c b/drivers/mfd/qcom-i2c-pmic.c
index 590e4c1..28c3343 100644
--- a/drivers/mfd/qcom-i2c-pmic.c
+++ b/drivers/mfd/qcom-i2c-pmic.c
@@ -62,8 +62,12 @@
 	struct irq_domain	*domain;
 	struct i2c_pmic_periph	*periph;
 	struct pinctrl		*pinctrl;
+	struct mutex		irq_complete;
 	const char		*pinctrl_name;
 	int			num_periphs;
+	int			summary_irq;
+	bool			resume_completed;
+	bool			irq_waiting;
 };
 
 static void i2c_pmic_irq_bus_lock(struct irq_data *d)
@@ -400,6 +404,16 @@
 	unsigned int summary_status;
 	int rc, i;
 
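+	/*
+	 * If the interrupt arrives before resume has completed, record it as
+	 * pending, disable the line and let the resume path re-run the
+	 * handler once the device is awake.
+	 */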
+	mutex_lock(&chip->irq_complete);
+	chip->irq_waiting = true;
+	if (!chip->resume_completed) {
+		pr_debug("IRQ triggered before device-resume\n");
+		disable_irq_nosync(irq);
+		mutex_unlock(&chip->irq_complete);
+		return IRQ_HANDLED;
+	}
+	chip->irq_waiting = false;
+
 	for (i = 0; i < DIV_ROUND_UP(chip->num_periphs, BITS_PER_BYTE); i++) {
 		rc = regmap_read(chip->regmap, I2C_INTR_STATUS_BASE + i,
 				&summary_status);
@@ -416,6 +430,8 @@
 		i2c_pmic_summary_status_handler(chip, periph, summary_status);
 	}
 
+	mutex_unlock(&chip->irq_complete);
+
 	return IRQ_HANDLED;
 }
 
@@ -559,6 +575,9 @@
 		}
 	}
 
+	chip->resume_completed = true;
+	mutex_init(&chip->irq_complete);
+
 	rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
 				       i2c_pmic_irq_handler,
 				       IRQF_ONESHOT | IRQF_SHARED,
@@ -568,6 +587,7 @@
 		goto cleanup;
 	}
 
+	chip->summary_irq = client->irq;
 	enable_irq_wake(client->irq);
 
 probe_children:
@@ -594,6 +614,17 @@
 }
 
 #ifdef CONFIG_PM_SLEEP
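+/*
+ * Abort a late suspend if the summary interrupt fired while suspending so it
+ * can be serviced first.
+ */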
+static int i2c_pmic_suspend_noirq(struct device *dev)
+{
+	struct i2c_pmic *chip = dev_get_drvdata(dev);
+
+	if (chip->irq_waiting) {
+		pr_err_ratelimited("Aborting suspend, an interrupt was detected while suspending\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
 static int i2c_pmic_suspend(struct device *dev)
 {
 	struct i2c_pmic *chip = dev_get_drvdata(dev);
@@ -618,6 +649,11 @@
 			pr_err_ratelimited("Couldn't enable 0x%04x wake irqs 0x%02x rc=%d\n",
 			       periph->addr, periph->wake, rc);
 	}
+	if (!rc) {
+		mutex_lock(&chip->irq_complete);
+		chip->resume_completed = false;
+		mutex_unlock(&chip->irq_complete);
+	}
 
 	return rc;
 }
@@ -647,10 +683,38 @@
 			       periph->addr, periph->synced[IRQ_EN_SET], rc);
 	}
 
+	mutex_lock(&chip->irq_complete);
+	chip->resume_completed = true;
+	if (chip->irq_waiting) {
+		mutex_unlock(&chip->irq_complete);
+		/* irq was pending, call the handler */
+		i2c_pmic_irq_handler(chip->summary_irq, chip);
+		enable_irq(chip->summary_irq);
+	} else {
+		mutex_unlock(&chip->irq_complete);
+	}
+
 	return rc;
 }
+#else
+static int i2c_pmic_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int i2c_pmic_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int i2c_pmic_suspend_noirq(struct device *dev)
+{
+	return 0;
+}
 #endif
-static SIMPLE_DEV_PM_OPS(i2c_pmic_pm_ops, i2c_pmic_suspend, i2c_pmic_resume);
+static const struct dev_pm_ops i2c_pmic_pm_ops = {
+	.suspend	= i2c_pmic_suspend,
+	.suspend_noirq	= i2c_pmic_suspend_noirq,
+	.resume		= i2c_pmic_resume,
+};
 
 static const struct of_device_id i2c_pmic_match_table[] = {
 	{ .compatible = "qcom,i2c-pmic", },
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 120fd54..af409aa 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -3518,15 +3518,23 @@
 	/* RED error - Fatal: requires reset */
 	if (mrq->cmdq_req->resp_err) {
 		err = mrq->cmdq_req->resp_err;
+		goto reset;
+	}
+
+	/*
+	 * TIMEOUT errors can happen because of an execution error in the
+	 * last command, so send CMD13 to get the device status.
+	 */
+	if ((mrq->cmd && (mrq->cmd->error == -ETIMEDOUT)) ||
+			(mrq->data && (mrq->data->error == -ETIMEDOUT))) {
 		if (mmc_host_halt(host) || mmc_host_cq_disable(host)) {
 			ret = get_card_status(host->card, &status, 0);
 			if (ret)
 				pr_err("%s: CMD13 failed with err %d\n",
 						mmc_hostname(host), ret);
 		}
-		pr_err("%s: Response error detected with device status 0x%08x\n",
+		pr_err("%s: Timeout error detected with device status 0x%08x\n",
 			mmc_hostname(host), status);
-		goto reset;
 	}
 
 	/*
@@ -3572,7 +3580,7 @@
 	else if (mrq->data && mrq->data->error)
 		err = mrq->data->error;
 
-	if (err || cmdq_req->resp_err) {
+	if ((err || cmdq_req->resp_err) && !cmdq_req->skip_err_handling) {
 		pr_err("%s: %s: txfr error(%d)/resp_err(%d)\n",
 				mmc_hostname(mrq->host), __func__, err,
 				cmdq_req->resp_err);
@@ -3609,6 +3617,17 @@
 		blk_end_request_all(rq, err);
 		goto out;
 	}
+	/*
+	 * On error, cmdq_req->data.bytes_xfered is set to 0. Calling
+	 * blk_end_request() with nr_bytes of 0 never completes the request,
+	 * so complete it with blk_end_request_all() instead.
+	 */
+	if (err && cmdq_req->skip_err_handling) {
+		cmdq_req->skip_err_handling = false;
+		blk_end_request_all(rq, err);
+		goto out;
+	}
 
 	blk_end_request(rq, err, cmdq_req->data.bytes_xfered);
 
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 9552d2a..e2a459e 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1802,9 +1802,13 @@
 
 	wil_dbg_pm(wil, "suspending\n");
 
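+	/*
+	 * Hold the driver mutexes so stopping P2P discovery and aborting the
+	 * scan do not race with concurrent operations.
+	 */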
+	mutex_lock(&wil->mutex);
 	wil_p2p_stop_discovery(wil);
 
+	mutex_lock(&wil->p2p_wdev_mutex);
 	wil_abort_scan(wil, true);
+	mutex_unlock(&wil->p2p_wdev_mutex);
+	mutex_unlock(&wil->mutex);
 
 out:
 	return rc;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 45a8081..831780a 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -242,12 +242,19 @@
 static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
 {
 	struct wil6210_priv *wil = s->private;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
 
 	wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
 		       offsetof(struct wil6210_mbox_ctl, tx));
 	wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
 		       offsetof(struct wil6210_mbox_ctl, rx));
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
@@ -265,15 +272,38 @@
 
 static int wil_debugfs_iomem_x32_set(void *data, u64 val)
 {
-	writel(val, (void __iomem *)data);
+	struct wil_debugfs_iomem_data *d = (struct
+					    wil_debugfs_iomem_data *)data;
+	struct wil6210_priv *wil = d->wil;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	writel_relaxed(val, (void __iomem *)d->offset);
+
 	wmb(); /* make sure write propagated to HW */
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
 static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
 {
-	*val = readl((void __iomem *)data);
+	struct wil_debugfs_iomem_data *d = (struct
+					    wil_debugfs_iomem_data *)data;
+	struct wil6210_priv *wil = d->wil;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	*val = readl_relaxed((void __iomem *)d->offset);
+
+	wil_pm_runtime_put(wil);
 
 	return 0;
 }
@@ -284,10 +314,21 @@
 static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
 						   umode_t mode,
 						   struct dentry *parent,
-						   void *value)
+						   void *value,
+						   struct wil6210_priv *wil)
 {
-	return debugfs_create_file(name, mode, parent, value,
-				   &fops_iomem_x32);
+	struct dentry *file;
+	struct wil_debugfs_iomem_data *data = &wil->dbg_data.data_arr[
+					      wil->dbg_data.iomem_data_count];
+
+	data->wil = wil;
+	data->offset = value;
+
+	file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32);
+	if (!IS_ERR_OR_NULL(file))
+		wil->dbg_data.iomem_data_count++;
+
+	return file;
 }
 
 static int wil_debugfs_ulong_set(void *data, u64 val)
@@ -346,7 +387,8 @@
 		case doff_io32:
 			f = wil_debugfs_create_iomem_x32(tbl[i].name,
 							 tbl[i].mode, dbg,
-							 base + tbl[i].off);
+							 base + tbl[i].off,
+							 wil);
 			break;
 		case doff_u8:
 			f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
@@ -475,13 +517,22 @@
 static int wil_memread_debugfs_show(struct seq_file *s, void *data)
 {
 	struct wil6210_priv *wil = s->private;
-	void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+	void __iomem *a;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	a = wmi_buffer(wil, cpu_to_le32(mem_addr));
 
 	if (a)
 		seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
 	else
 		seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
@@ -502,10 +553,12 @@
 {
 	enum { max_count = 4096 };
 	struct wil_blob_wrapper *wil_blob = file->private_data;
+	struct wil6210_priv *wil = wil_blob->wil;
 	loff_t pos = *ppos;
 	size_t available = wil_blob->blob.size;
 	void *buf;
 	size_t ret;
+	int rc;
 
 	if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
 	    test_bit(wil_status_suspended, wil_blob->wil->status))
@@ -526,10 +579,19 @@
 	if (!buf)
 		return -ENOMEM;
 
+	rc = wil_pm_runtime_get(wil);
+	if (rc < 0) {
+		kfree(buf);
+		return rc;
+	}
+
 	wil_memcpy_fromio_32(buf, (const void __iomem *)
 			     wil_blob->blob.data + pos, count);
 
 	ret = copy_to_user(user_buf, buf, count);
+
+	wil_pm_runtime_put(wil);
+
 	kfree(buf);
 	if (ret == count)
 		return -EFAULT;
@@ -1786,6 +1848,13 @@
 	{},
 };
 
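+/*
+ * Number of iomem debugfs entries; sizes the array that pairs each register
+ * offset with the wil context so accesses can take a runtime-PM reference.
+ */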
+static const int dbg_off_count = 4 * (ARRAY_SIZE(isr_off) - 1) +
+				ARRAY_SIZE(dbg_wil_regs) - 1 +
+				ARRAY_SIZE(pseudo_isr_off) - 1 +
+				ARRAY_SIZE(lgc_itr_cnt_off) - 1 +
+				ARRAY_SIZE(tx_itr_cnt_off) - 1 +
+				ARRAY_SIZE(rx_itr_cnt_off) - 1;
+
 int wil6210_debugfs_init(struct wil6210_priv *wil)
 {
 	struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
@@ -1794,6 +1863,17 @@
 	if (IS_ERR_OR_NULL(dbg))
 		return -ENODEV;
 
+	wil->dbg_data.data_arr = kcalloc(dbg_off_count,
+					 sizeof(struct wil_debugfs_iomem_data),
+					 GFP_KERNEL);
+	if (!wil->dbg_data.data_arr) {
+		debugfs_remove_recursive(dbg);
+		wil->debug = NULL;
+		return -ENOMEM;
+	}
+
+	wil->dbg_data.iomem_data_count = 0;
+
 	wil_pmc_init(wil);
 
 	wil6210_debugfs_init_files(wil, dbg);
@@ -1818,6 +1898,8 @@
 	debugfs_remove_recursive(wil->debug);
 	wil->debug = NULL;
 
+	kfree(wil->dbg_data.data_arr);
+
 	/* free pmc memory without sending command to fw, as it will
 	 * be reset on the way down anyway
 	 */
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index adcfef4..66200f6 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -47,9 +47,14 @@
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
 	u32 tx_itr_en, tx_itr_val = 0;
 	u32 rx_itr_en, rx_itr_val = 0;
+	int ret;
 
 	wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
 
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
 	tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
 	if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
 		tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
@@ -58,6 +63,8 @@
 	if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
 		rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
 
+	wil_pm_runtime_put(wil);
+
 	cp->tx_coalesce_usecs = tx_itr_val;
 	cp->rx_coalesce_usecs = rx_itr_val;
 	return 0;
@@ -67,6 +74,7 @@
 				       struct ethtool_coalesce *cp)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int ret;
 
 	wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
 		     cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
@@ -86,8 +94,15 @@
 
 	wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
 	wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
 	wil_configure_interrupt_moderation(wil);
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 
 out_bad:
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index f8d2c20..25bc439 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -221,6 +221,10 @@
 {
 	int ret;
 
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
 	switch (cmd) {
 	case WIL_IOCTL_MEMIO:
 		ret = wil_ioc_memio_dword(wil, data);
@@ -233,9 +237,12 @@
 		break;
 	default:
 		wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
+		wil_pm_runtime_put(wil);
 		return -ENOIOCTLCMD;
 	}
 
+	wil_pm_runtime_put(wil);
+
 	wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
 	return ret;
 }
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index d80e7f4..40cd32a 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -26,6 +26,7 @@
 static int wil_open(struct net_device *ndev)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int rc;
 
 	wil_dbg_misc(wil, "open\n");
 
@@ -35,16 +36,29 @@
 		return -EINVAL;
 	}
 
-	return wil_up(wil);
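+	/* Hold a runtime-PM reference for as long as the interface is up */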
+	rc = wil_pm_runtime_get(wil);
+	if (rc < 0)
+		return rc;
+
+	rc = wil_up(wil);
+	if (rc)
+		wil_pm_runtime_put(wil);
+
+	return rc;
 }
 
 static int wil_stop(struct net_device *ndev)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int rc;
 
 	wil_dbg_misc(wil, "stop\n");
 
-	return wil_down(wil);
+	rc = wil_down(wil);
+	if (!rc)
+		wil_pm_runtime_put(wil);
+
+	return rc;
 }
 
 static int wil_change_mtu(struct net_device *ndev, int new_mtu)
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 5432b31..663e163 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -21,6 +21,7 @@
 #include <linux/suspend.h>
 #include "wil6210.h"
 #include <linux/rtnetlink.h>
+#include <linux/pm_runtime.h>
 
 static bool use_msi = true;
 module_param(use_msi, bool, 0444);
@@ -316,6 +317,8 @@
 	wil6210_debugfs_init(wil);
 	wil6210_sysfs_init(wil);
 
+	wil_pm_runtime_allow(wil);
+
 	return 0;
 
 bus_disable:
@@ -348,6 +351,8 @@
 #endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
+	wil_pm_runtime_forbid(wil);
+
 	wil6210_sysfs_remove(wil);
 	wil6210_debugfs_remove(wil);
 	rtnl_lock();
@@ -477,10 +482,40 @@
 }
 #endif /* CONFIG_PM_SLEEP */
 
+static int wil6210_pm_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	wil_dbg_pm(wil, "Runtime idle\n");
+
+	return wil_can_suspend(wil, true);
+}
+
+static int wil6210_pm_runtime_resume(struct device *dev)
+{
+	return wil6210_resume(dev, true);
+}
+
+static int wil6210_pm_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	if (test_bit(wil_status_suspended, wil->status)) {
+		wil_dbg_pm(wil, "trying to suspend while suspended\n");
+		return 1;
+	}
+
+	return wil6210_suspend(dev, true);
+}
 #endif /* CONFIG_PM */
 
 static const struct dev_pm_ops wil6210_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+	SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
+			   wil6210_pm_runtime_resume,
+			   wil6210_pm_runtime_idle)
 };
 
 static struct pci_driver wil6210_driver = {
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 8f5d1b44..2ef2f34 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -16,15 +16,26 @@
 
 #include "wil6210.h"
 #include <linux/jiffies.h>
+#include <linux/pm_runtime.h>
+
+#define WIL6210_AUTOSUSPEND_DELAY_MS (1000)
 
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
 {
 	int rc = 0;
 	struct wireless_dev *wdev = wil->wdev;
 	struct net_device *ndev = wil_to_ndev(wil);
+	bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+				 wil->fw_capabilities);
 
 	wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
 
+	if (wmi_only || debug_fw) {
+		wil_dbg_pm(wil, "Deny any suspend - %s mode\n",
+			   wmi_only ? "wmi_only" : "debug_fw");
+		rc = -EPERM;
+		goto out;
+	}
 	if (!(ndev->flags & IFF_UP)) {
 		/* can always sleep when down */
 		wil_dbg_pm(wil, "Interface is down\n");
@@ -44,6 +55,10 @@
 	/* interface is running */
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_MONITOR:
+		wil_dbg_pm(wil, "Sniffer\n");
+		rc = -EBUSY;
+		goto out;
+	/* for STA-like interface, don't runtime suspend */
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
 		if (test_bit(wil_status_fwconnecting, wil->status)) {
@@ -51,6 +66,12 @@
 			rc = -EBUSY;
 			goto out;
 		}
+		/* Runtime PM is not supported while the interface is up */
+		if (is_runtime) {
+			wil_dbg_pm(wil, "STA-like interface\n");
+			rc = -EBUSY;
+			goto out;
+		}
 		break;
 	/* AP-like interface - can't suspend */
 	default:
@@ -348,3 +369,44 @@
 		   is_runtime ? "runtime" : "system", rc, suspend_time_usec);
 	return rc;
 }
+
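+/*
+ * Runtime-PM helpers: allow/forbid toggle autosuspend (1 s delay) on the
+ * device, while get/put bracket code paths that access the hardware.
+ */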
+void wil_pm_runtime_allow(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	pm_runtime_put_noidle(dev);
+	pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_allow(dev);
+}
+
+void wil_pm_runtime_forbid(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	pm_runtime_forbid(dev);
+	pm_runtime_get_noresume(dev);
+}
+
+int wil_pm_runtime_get(struct wil6210_priv *wil)
+{
+	int rc;
+	struct device *dev = wil_to_dev(wil);
+
+	rc = pm_runtime_get_sync(dev);
+	if (rc < 0) {
+		wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc);
+		pm_runtime_put_noidle(dev);
+		return rc;
+	}
+
+	return 0;
+}
+
+void wil_pm_runtime_put(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 1c13b0b..8616f86 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -621,6 +621,16 @@
 	u32 off_ms;
 };
 
+struct wil_debugfs_iomem_data {
+	void *offset;
+	struct wil6210_priv *wil;
+};
+
+struct wil_debugfs_data {
+	struct wil_debugfs_iomem_data *data_arr;
+	int iomem_data_count;
+};
+
 extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST];
 extern u8 led_id;
 extern u8 led_polarity;
@@ -713,6 +723,7 @@
 	u8 abft_len;
 	u8 wakeup_trigger;
 	struct wil_suspend_stats suspend_stats;
+	struct wil_debugfs_data dbg_data;
 
 	void *platform_handle;
 	struct wil_platform_ops platform_ops;
@@ -1015,6 +1026,11 @@
 			 bool load);
 bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
 
+void wil_pm_runtime_allow(struct wil6210_priv *wil);
+void wil_pm_runtime_forbid(struct wil6210_priv *wil);
+int wil_pm_runtime_get(struct wil6210_priv *wil);
+void wil_pm_runtime_put(struct wil6210_priv *wil);
+
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
 int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
 int wil_resume(struct wil6210_priv *wil, bool is_runtime);
diff --git a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
index 4a21eb6..6ceb39a 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
@@ -85,6 +85,32 @@
 		.intr_enable_bit = -1,			\
 		.intr_status_bit = -1,			\
 		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
+#define UFS_RESET(pg_name, offset)				\
+	{					        \
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = offset,			\
+		.io_reg = offset + 0x4,			\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = 3,				\
+		.drv_bit = 0,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = 0,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
 		.intr_polarity_bit = -1,		\
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
@@ -190,12 +216,13 @@
 	PINCTRL_PIN(97, "GPIO_97"),
 	PINCTRL_PIN(98, "GPIO_98"),
 	PINCTRL_PIN(99, "GPIO_99"),
-	PINCTRL_PIN(100, "SDC1_CLK"),
-	PINCTRL_PIN(101, "SDC1_CMD"),
-	PINCTRL_PIN(102, "SDC1_DATA"),
-	PINCTRL_PIN(103, "SDC2_CLK"),
-	PINCTRL_PIN(104, "SDC2_CMD"),
-	PINCTRL_PIN(105, "SDC2_DATA"),
+	PINCTRL_PIN(100, "SDC1_RCLK"),
+	PINCTRL_PIN(101, "SDC1_CLK"),
+	PINCTRL_PIN(102, "SDC1_CMD"),
+	PINCTRL_PIN(103, "SDC1_DATA"),
+	PINCTRL_PIN(104, "SDC2_CLK"),
+	PINCTRL_PIN(105, "SDC2_CMD"),
+	PINCTRL_PIN(106, "SDC2_DATA"),
 };
 
 #define DECLARE_MSM_GPIO_PINS(pin) \
@@ -301,21 +328,22 @@
 DECLARE_MSM_GPIO_PINS(98);
 DECLARE_MSM_GPIO_PINS(99);
 
-static const unsigned int sdc1_clk_pins[] = { 100 };
-static const unsigned int sdc1_cmd_pins[] = { 101 };
-static const unsigned int sdc1_data_pins[] = { 102 };
-static const unsigned int sdc2_clk_pins[] = { 103 };
-static const unsigned int sdc2_cmd_pins[] = { 104 };
-static const unsigned int sdc2_data_pins[] = { 105 };
+static const unsigned int sdc1_rclk_pins[] = { 100 };
+static const unsigned int sdc1_clk_pins[] = { 101 };
+static const unsigned int sdc1_cmd_pins[] = { 102 };
+static const unsigned int sdc1_data_pins[] = { 103 };
+static const unsigned int sdc2_clk_pins[] = { 104 };
+static const unsigned int sdc2_cmd_pins[] = { 105 };
+static const unsigned int sdc2_data_pins[] = { 106 };
 
 enum sdxpoorwills_functions {
-	msm_mux_qdss_stm31,
-	msm_mux_blsp_uart1,
-	msm_mux_gpio,
 	msm_mux_uim2_data,
+	msm_mux_gpio,
+	msm_mux_qdss_stm31,
 	msm_mux_ebi0_wrcdc,
 	msm_mux_uim2_present,
 	msm_mux_qdss_stm30,
+	msm_mux_blsp_uart1,
 	msm_mux_uim2_reset,
 	msm_mux_blsp_i2c1,
 	msm_mux_qdss_stm29,
@@ -340,14 +368,22 @@
 	msm_mux_blsp_i2c3,
 	msm_mux_gcc_gp3,
 	msm_mux_qdss_stm19,
-	msm_mux_qdss12,
+	msm_mux_qdss4,
 	msm_mux_qdss_stm18,
-	msm_mux_qdss13,
+	msm_mux_qdss5,
 	msm_mux_qdss_stm17,
-	msm_mux_qdss14,
+	msm_mux_qdss6,
 	msm_mux_bimc_dte0,
 	msm_mux_native_tsens,
-	msm_mux_vsense_trigger,
+	msm_mux_qdss_stm16,
+	msm_mux_qdss7,
+	msm_mux_bimc_dte1,
+	msm_mux_sec_mi2s,
+	msm_mux_blsp_spi4,
+	msm_mux_blsp_uart4,
+	msm_mux_qdss_cti,
+	msm_mux_qdss_stm27,
+	msm_mux_qdss8,
 	msm_mux_qdss_stm26,
 	msm_mux_qdss9,
 	msm_mux_blsp_i2c4,
@@ -358,26 +394,19 @@
 	msm_mux_gcc_gp2,
 	msm_mux_qdss_stm24,
 	msm_mux_qdss11,
-	msm_mux_qdss_stm16,
-	msm_mux_qdss15,
-	msm_mux_bimc_dte1,
-	msm_mux_sec_mi2s,
-	msm_mux_blsp_spi4,
-	msm_mux_blsp_uart4,
-	msm_mux_qdss_cti,
-	msm_mux_qdss_stm27,
-	msm_mux_qdss8,
 	msm_mux_ebi2_a,
-	msm_mux_qdss_stm3,
 	msm_mux_ebi2_lcd,
-	msm_mux_qdss_stm2,
 	msm_mux_pll_bist,
-	msm_mux_qdss_stm1,
-	msm_mux_qdss_stm0,
 	msm_mux_adsp_ext,
-	msm_mux_epm1,
+	msm_mux_qdss_stm11,
 	msm_mux_m_voc,
+	msm_mux_qdss_stm10,
 	msm_mux_native_char,
+	msm_mux_native_char3,
+	msm_mux_nav_pps,
+	msm_mux_nav_dr,
+	msm_mux_native_char2,
+	msm_mux_native_tsense,
 	msm_mux_native_char1,
 	msm_mux_pa_indicator,
 	msm_mux_qdss_traceclk,
@@ -386,92 +415,69 @@
 	msm_mux_qlink_req,
 	msm_mux_pll_test,
 	msm_mux_cri_trng,
-	msm_mux_wmss_reset,
-	msm_mux_native_char3,
-	msm_mux_nav_pps,
-	msm_mux_nav_dr,
-	msm_mux_native_char2,
-	msm_mux_native_tsense,
 	msm_mux_prng_rosc,
 	msm_mux_cri_trng0,
 	msm_mux_cri_trng1,
 	msm_mux_pll_ref,
 	msm_mux_coex_uart,
-	msm_mux_qdss_stm11,
-	msm_mux_qdss_stm10,
-	msm_mux_ddr_pxi0,
-	msm_mux_ap2mdm_status,
+	msm_mux_qdss_tracectl,
 	msm_mux_ddr_bist,
-	msm_mux_mdm2ap_status,
-	msm_mux_ap2mdm_err,
-	msm_mux_mdm2ap_err,
-	msm_mux_ap2mdm_vdd,
-	msm_mux_mdm2ap_vdd,
-	msm_mux_ap2mdm_wake,
-	msm_mux_pciehost_rst,
 	msm_mux_blsp_spi1,
-	msm_mux_qdss_stm14,
-	msm_mux_pcie_wake,
-	msm_mux_mdm2ap_wake,
 	msm_mux_pci_e,
+	msm_mux_tgu_ch0,
+	msm_mux_pcie_clkreq,
+	msm_mux_qdss_stm15,
+	msm_mux_qdss_stm14,
 	msm_mux_qdss_stm13,
+	msm_mux_mgpi_clk,
+	msm_mux_qdss_stm12,
+	msm_mux_qdss_stm9,
 	msm_mux_i2s_mclk,
 	msm_mux_audio_ref,
 	msm_mux_ldo_update,
 	msm_mux_qdss_stm8,
 	msm_mux_qdss_stm7,
-	msm_mux_qdss4,
-	msm_mux_tgu_ch0,
-	msm_mux_pcie_clkreq,
-	msm_mux_qdss_stm9,
-	msm_mux_qdss_stm15,
-	msm_mux_mgpi_clk,
-	msm_mux_qdss_stm12,
-	msm_mux_qdss_tracectl,
-	msm_mux_atest_char,
+	msm_mux_qdss12,
 	msm_mux_qdss_stm6,
-	msm_mux_qdss5,
-	msm_mux_atest_char3,
+	msm_mux_qdss13,
 	msm_mux_qdss_stm5,
-	msm_mux_qdss6,
-	msm_mux_atest_char2,
+	msm_mux_qdss14,
 	msm_mux_qdss_stm4,
-	msm_mux_qdss7,
-	msm_mux_atest_char1,
+	msm_mux_qdss15,
 	msm_mux_uim1_data,
-	msm_mux_atest_char0,
+	msm_mux_qdss_stm3,
 	msm_mux_uim1_present,
+	msm_mux_qdss_stm2,
 	msm_mux_uim1_reset,
+	msm_mux_qdss_stm1,
 	msm_mux_uim1_clk,
+	msm_mux_qdss_stm0,
 	msm_mux_dbg_out,
 	msm_mux_gcc_plltest,
-	msm_mux_usb2phy_ac,
 	msm_mux_NA,
 };
 
-static const char * const qdss_stm31_groups[] = {
+static const char * const uim2_data_groups[] = {
 	"gpio0",
 };
-static const char * const blsp_uart1_groups[] = {
-	"gpio0", "gpio1", "gpio2", "gpio3", "gpio20", "gpio21", "gpio22",
-	"gpio23",
-};
 static const char * const gpio_groups[] = {
 	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
 	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
 	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
-	"gpio22", "gpio23", "gpio24", "gpio26", "gpio27", "gpio28", "gpio29",
-	"gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
-	"gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
-	"gpio44", "gpio45", "gpio54", "gpio55", "gpio56", "gpio58", "gpio59",
-	"gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", "gpio66",
-	"gpio67", "gpio68", "gpio69", "gpio70", "gpio71", "gpio72", "gpio73",
-	"gpio74", "gpio75", "gpio76", "gpio77", "gpio78", "gpio79", "gpio80",
-	"gpio81", "gpio82", "gpio83", "gpio84", "gpio85", "gpio86", "gpio87",
-	"gpio88", "gpio89", "gpio90", "gpio91", "gpio92", "gpio93", "gpio94",
-	"gpio95", "gpio96", "gpio97", "gpio98", "gpio99",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio52", "gpio53", "gpio53", "gpio54",
+	"gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio61",
+	"gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68",
+	"gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
+	"gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82",
+	"gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
+	"gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
+	"gpio97", "gpio98", "gpio99",
 };
-static const char * const uim2_data_groups[] = {
+static const char * const qdss_stm31_groups[] = {
 	"gpio0",
 };
 static const char * const ebi0_wrcdc_groups[] = {
@@ -483,6 +489,10 @@
 static const char * const qdss_stm30_groups[] = {
 	"gpio1",
 };
+static const char * const blsp_uart1_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio20", "gpio21", "gpio22",
+	"gpio23",
+};
 static const char * const uim2_reset_groups[] = {
 	"gpio2",
 };
@@ -557,19 +567,19 @@
 static const char * const qdss_stm19_groups[] = {
 	"gpio12",
 };
-static const char * const qdss12_groups[] = {
+static const char * const qdss4_groups[] = {
 	"gpio12",
 };
 static const char * const qdss_stm18_groups[] = {
 	"gpio13",
 };
-static const char * const qdss13_groups[] = {
+static const char * const qdss5_groups[] = {
 	"gpio13",
 };
 static const char * const qdss_stm17_groups[] = {
 	"gpio14",
 };
-static const char * const qdss14_groups[] = {
+static const char * const qdss6_groups[] = {
 	"gpio14",
 };
 static const char * const bimc_dte0_groups[] = {
@@ -578,8 +588,36 @@
 static const char * const native_tsens_groups[] = {
 	"gpio14",
 };
-static const char * const vsense_trigger_groups[] = {
-	"gpio14",
+static const char * const qdss_stm16_groups[] = {
+	"gpio15",
+};
+static const char * const qdss7_groups[] = {
+	"gpio15",
+};
+static const char * const bimc_dte1_groups[] = {
+	"gpio15", "gpio60",
+};
+static const char * const sec_mi2s_groups[] = {
+	"gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+	"gpio23",
+};
+static const char * const blsp_spi4_groups[] = {
+	"gpio16", "gpio17", "gpio18", "gpio19", "gpio52", "gpio62", "gpio71",
+};
+static const char * const blsp_uart4_groups[] = {
+	"gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+	"gpio23",
+};
+static const char * const qdss_cti_groups[] = {
+	"gpio16", "gpio16", "gpio17", "gpio17", "gpio22", "gpio22", "gpio23",
+	"gpio23", "gpio54", "gpio54", "gpio55", "gpio55", "gpio59", "gpio61",
+	"gpio88", "gpio88", "gpio89", "gpio89",
+};
+static const char * const qdss_stm27_groups[] = {
+	"gpio16",
+};
+static const char * const qdss8_groups[] = {
+	"gpio16",
 };
 static const char * const qdss_stm26_groups[] = {
 	"gpio17",
@@ -611,70 +649,45 @@
 static const char * const qdss11_groups[] = {
 	"gpio19",
 };
-static const char * const qdss_stm16_groups[] = {
-	"gpio15",
-};
-static const char * const qdss15_groups[] = {
-	"gpio15",
-};
-static const char * const bimc_dte1_groups[] = {
-	"gpio15", "gpio60",
-};
-static const char * const sec_mi2s_groups[] = {
-	"gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
-	"gpio23",
-};
-static const char * const blsp_spi4_groups[] = {
-	"gpio16", "gpio17", "gpio18", "gpio19", "gpio52", "gpio62", "gpio71",
-};
-static const char * const blsp_uart4_groups[] = {
-	"gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
-	"gpio23",
-};
-static const char * const qdss_cti_groups[] = {
-	"gpio16", "gpio16", "gpio17", "gpio17", "gpio22", "gpio22", "gpio23",
-	"gpio23", "gpio54", "gpio54", "gpio55", "gpio55", "gpio59", "gpio61",
-	"gpio88", "gpio88", "gpio89", "gpio89",
-};
-static const char * const qdss_stm27_groups[] = {
-	"gpio16",
-};
-static const char * const qdss8_groups[] = {
-	"gpio16",
-};
 static const char * const ebi2_a_groups[] = {
 	"gpio20",
 };
-static const char * const qdss_stm3_groups[] = {
-	"gpio20",
-};
 static const char * const ebi2_lcd_groups[] = {
 	"gpio21", "gpio22", "gpio23",
 };
-static const char * const qdss_stm2_groups[] = {
-	"gpio21",
-};
 static const char * const pll_bist_groups[] = {
 	"gpio22",
 };
-static const char * const qdss_stm1_groups[] = {
-	"gpio22",
-};
-static const char * const qdss_stm0_groups[] = {
-	"gpio23",
-};
 static const char * const adsp_ext_groups[] = {
 	"gpio24", "gpio25",
 };
-static const char * const epm1_groups[] = {
-	"gpio25",
+static const char * const qdss_stm11_groups[] = {
+	"gpio24",
 };
 static const char * const m_voc_groups[] = {
 	"gpio25", "gpio46", "gpio59", "gpio61",
 };
+static const char * const qdss_stm10_groups[] = {
+	"gpio25",
+};
 static const char * const native_char_groups[] = {
 	"gpio26",
 };
+static const char * const native_char3_groups[] = {
+	"gpio28",
+};
+static const char * const nav_pps_groups[] = {
+	"gpio29", "gpio42", "gpio62",
+};
+static const char * const nav_dr_groups[] = {
+	"gpio29", "gpio42", "gpio62",
+};
+static const char * const native_char2_groups[] = {
+	"gpio29",
+};
+static const char * const native_tsense_groups[] = {
+	"gpio29",
+};
 static const char * const native_char1_groups[] = {
 	"gpio32",
 };
@@ -699,24 +712,6 @@
 static const char * const cri_trng_groups[] = {
 	"gpio36",
 };
-static const char * const wmss_reset_groups[] = {
-	"gpio28",
-};
-static const char * const native_char3_groups[] = {
-	"gpio28",
-};
-static const char * const nav_pps_groups[] = {
-	"gpio29", "gpio42", "gpio62",
-};
-static const char * const nav_dr_groups[] = {
-	"gpio29", "gpio42", "gpio62",
-};
-static const char * const native_char2_groups[] = {
-	"gpio29",
-};
-static const char * const native_tsense_groups[] = {
-	"gpio29",
-};
 static const char * const prng_rosc_groups[] = {
 	"gpio38",
 };
@@ -732,59 +727,41 @@
 static const char * const coex_uart_groups[] = {
 	"gpio44", "gpio45",
 };
-static const char * const qdss_stm11_groups[] = {
+static const char * const qdss_tracectl_groups[] = {
 	"gpio44",
 };
-static const char * const qdss_stm10_groups[] = {
-	"gpio45",
-};
-static const char * const ddr_pxi0_groups[] = {
-	"gpio45", "gpio46",
-};
-static const char * const ap2mdm_status_groups[] = {
-	"gpio46",
-};
 static const char * const ddr_bist_groups[] = {
 	"gpio46", "gpio47", "gpio48", "gpio49",
 };
-static const char * const mdm2ap_status_groups[] = {
-	"gpio47",
-};
-static const char * const ap2mdm_err_groups[] = {
-	"gpio48",
-};
-static const char * const mdm2ap_err_groups[] = {
-	"gpio49",
-};
-static const char * const ap2mdm_vdd_groups[] = {
-	"gpio50",
-};
-static const char * const mdm2ap_vdd_groups[] = {
-	"gpio51",
-};
-static const char * const ap2mdm_wake_groups[] = {
-	"gpio52",
-};
-static const char * const pciehost_rst_groups[] = {
-	"gpio52",
-};
 static const char * const blsp_spi1_groups[] = {
 	"gpio52", "gpio62", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
 };
-static const char * const qdss_stm14_groups[] = {
-	"gpio52",
-};
-static const char * const pcie_wake_groups[] = {
-	"gpio53",
-};
-static const char * const mdm2ap_wake_groups[] = {
-	"gpio53",
-};
 static const char * const pci_e_groups[] = {
-	"gpio53", "gpio57",
+	"gpio53",
+};
+static const char * const tgu_ch0_groups[] = {
+	"gpio55",
+};
+static const char * const pcie_clkreq_groups[] = {
+	"gpio56",
+};
+static const char * const qdss_stm15_groups[] = {
+	"gpio57",
+};
+static const char * const qdss_stm14_groups[] = {
+	"gpio58",
 };
 static const char * const qdss_stm13_groups[] = {
-	"gpio53",
+	"gpio59",
+};
+static const char * const mgpi_clk_groups[] = {
+	"gpio60", "gpio71",
+};
+static const char * const qdss_stm12_groups[] = {
+	"gpio60",
+};
+static const char * const qdss_stm9_groups[] = {
+	"gpio61",
 };
 static const char * const i2s_mclk_groups[] = {
 	"gpio62",
@@ -801,93 +778,66 @@
 static const char * const qdss_stm7_groups[] = {
 	"gpio63",
 };
-static const char * const qdss4_groups[] = {
-	"gpio63",
-};
-static const char * const tgu_ch0_groups[] = {
-	"gpio55",
-};
-static const char * const pcie_clkreq_groups[] = {
-	"gpio56",
-};
-static const char * const qdss_stm9_groups[] = {
-	"gpio56",
-};
-static const char * const qdss_stm15_groups[] = {
-	"gpio57",
-};
-static const char * const mgpi_clk_groups[] = {
-	"gpio60", "gpio71",
-};
-static const char * const qdss_stm12_groups[] = {
-	"gpio60",
-};
-static const char * const qdss_tracectl_groups[] = {
-	"gpio60",
-};
-static const char * const atest_char_groups[] = {
+static const char * const qdss12_groups[] = {
 	"gpio63",
 };
 static const char * const qdss_stm6_groups[] = {
 	"gpio64",
 };
-static const char * const qdss5_groups[] = {
-	"gpio64",
-};
-static const char * const atest_char3_groups[] = {
+static const char * const qdss13_groups[] = {
 	"gpio64",
 };
 static const char * const qdss_stm5_groups[] = {
 	"gpio65",
 };
-static const char * const qdss6_groups[] = {
-	"gpio65",
-};
-static const char * const atest_char2_groups[] = {
+static const char * const qdss14_groups[] = {
 	"gpio65",
 };
 static const char * const qdss_stm4_groups[] = {
 	"gpio66",
 };
-static const char * const qdss7_groups[] = {
-	"gpio66",
-};
-static const char * const atest_char1_groups[] = {
+static const char * const qdss15_groups[] = {
 	"gpio66",
 };
 static const char * const uim1_data_groups[] = {
 	"gpio67",
 };
-static const char * const atest_char0_groups[] = {
+static const char * const qdss_stm3_groups[] = {
 	"gpio67",
 };
 static const char * const uim1_present_groups[] = {
 	"gpio68",
 };
+static const char * const qdss_stm2_groups[] = {
+	"gpio68",
+};
 static const char * const uim1_reset_groups[] = {
 	"gpio69",
 };
+static const char * const qdss_stm1_groups[] = {
+	"gpio69",
+};
 static const char * const uim1_clk_groups[] = {
 	"gpio70",
 };
+static const char * const qdss_stm0_groups[] = {
+	"gpio70",
+};
 static const char * const dbg_out_groups[] = {
 	"gpio71",
 };
 static const char * const gcc_plltest_groups[] = {
 	"gpio73", "gpio74",
 };
-static const char * const usb2phy_ac_groups[] = {
-	"gpio87",
-};
 
 static const struct msm_function sdxpoorwills_functions[] = {
-	FUNCTION(qdss_stm31),
-	FUNCTION(blsp_uart1),
-	FUNCTION(gpio),
 	FUNCTION(uim2_data),
+	FUNCTION(gpio),
+	FUNCTION(qdss_stm31),
 	FUNCTION(ebi0_wrcdc),
 	FUNCTION(uim2_present),
 	FUNCTION(qdss_stm30),
+	FUNCTION(blsp_uart1),
 	FUNCTION(uim2_reset),
 	FUNCTION(blsp_i2c1),
 	FUNCTION(qdss_stm29),
@@ -912,14 +862,22 @@
 	FUNCTION(blsp_i2c3),
 	FUNCTION(gcc_gp3),
 	FUNCTION(qdss_stm19),
-	FUNCTION(qdss12),
+	FUNCTION(qdss4),
 	FUNCTION(qdss_stm18),
-	FUNCTION(qdss13),
+	FUNCTION(qdss5),
 	FUNCTION(qdss_stm17),
-	FUNCTION(qdss14),
+	FUNCTION(qdss6),
 	FUNCTION(bimc_dte0),
 	FUNCTION(native_tsens),
-	FUNCTION(vsense_trigger),
+	FUNCTION(qdss_stm16),
+	FUNCTION(qdss7),
+	FUNCTION(bimc_dte1),
+	FUNCTION(sec_mi2s),
+	FUNCTION(blsp_spi4),
+	FUNCTION(blsp_uart4),
+	FUNCTION(qdss_cti),
+	FUNCTION(qdss_stm27),
+	FUNCTION(qdss8),
 	FUNCTION(qdss_stm26),
 	FUNCTION(qdss9),
 	FUNCTION(blsp_i2c4),
@@ -930,26 +888,19 @@
 	FUNCTION(gcc_gp2),
 	FUNCTION(qdss_stm24),
 	FUNCTION(qdss11),
-	FUNCTION(qdss_stm16),
-	FUNCTION(qdss15),
-	FUNCTION(bimc_dte1),
-	FUNCTION(sec_mi2s),
-	FUNCTION(blsp_spi4),
-	FUNCTION(blsp_uart4),
-	FUNCTION(qdss_cti),
-	FUNCTION(qdss_stm27),
-	FUNCTION(qdss8),
 	FUNCTION(ebi2_a),
-	FUNCTION(qdss_stm3),
 	FUNCTION(ebi2_lcd),
-	FUNCTION(qdss_stm2),
 	FUNCTION(pll_bist),
-	FUNCTION(qdss_stm1),
-	FUNCTION(qdss_stm0),
 	FUNCTION(adsp_ext),
-	FUNCTION(epm1),
+	FUNCTION(qdss_stm11),
 	FUNCTION(m_voc),
+	FUNCTION(qdss_stm10),
 	FUNCTION(native_char),
+	FUNCTION(native_char3),
+	FUNCTION(nav_pps),
+	FUNCTION(nav_dr),
+	FUNCTION(native_char2),
+	FUNCTION(native_tsense),
 	FUNCTION(native_char1),
 	FUNCTION(pa_indicator),
 	FUNCTION(qdss_traceclk),
@@ -958,204 +909,200 @@
 	FUNCTION(qlink_req),
 	FUNCTION(pll_test),
 	FUNCTION(cri_trng),
-	FUNCTION(wmss_reset),
-	FUNCTION(native_char3),
-	FUNCTION(nav_pps),
-	FUNCTION(nav_dr),
-	FUNCTION(native_char2),
-	FUNCTION(native_tsense),
 	FUNCTION(prng_rosc),
 	FUNCTION(cri_trng0),
 	FUNCTION(cri_trng1),
 	FUNCTION(pll_ref),
 	FUNCTION(coex_uart),
-	FUNCTION(qdss_stm11),
-	FUNCTION(qdss_stm10),
-	FUNCTION(ddr_pxi0),
-	FUNCTION(ap2mdm_status),
+	FUNCTION(qdss_tracectl),
 	FUNCTION(ddr_bist),
-	FUNCTION(mdm2ap_status),
-	FUNCTION(ap2mdm_err),
-	FUNCTION(mdm2ap_err),
-	FUNCTION(ap2mdm_vdd),
-	FUNCTION(mdm2ap_vdd),
-	FUNCTION(ap2mdm_wake),
-	FUNCTION(pciehost_rst),
 	FUNCTION(blsp_spi1),
-	FUNCTION(qdss_stm14),
-	FUNCTION(pcie_wake),
-	FUNCTION(mdm2ap_wake),
 	FUNCTION(pci_e),
+	FUNCTION(tgu_ch0),
+	FUNCTION(pcie_clkreq),
+	FUNCTION(qdss_stm15),
+	FUNCTION(qdss_stm14),
 	FUNCTION(qdss_stm13),
+	FUNCTION(mgpi_clk),
+	FUNCTION(qdss_stm12),
+	FUNCTION(qdss_stm9),
 	FUNCTION(i2s_mclk),
 	FUNCTION(audio_ref),
 	FUNCTION(ldo_update),
 	FUNCTION(qdss_stm8),
 	FUNCTION(qdss_stm7),
-	FUNCTION(qdss4),
-	FUNCTION(tgu_ch0),
-	FUNCTION(pcie_clkreq),
-	FUNCTION(qdss_stm9),
-	FUNCTION(qdss_stm15),
-	FUNCTION(mgpi_clk),
-	FUNCTION(qdss_stm12),
-	FUNCTION(qdss_tracectl),
-	FUNCTION(atest_char),
+	FUNCTION(qdss12),
 	FUNCTION(qdss_stm6),
-	FUNCTION(qdss5),
-	FUNCTION(atest_char3),
+	FUNCTION(qdss13),
 	FUNCTION(qdss_stm5),
-	FUNCTION(qdss6),
-	FUNCTION(atest_char2),
+	FUNCTION(qdss14),
 	FUNCTION(qdss_stm4),
-	FUNCTION(qdss7),
-	FUNCTION(atest_char1),
+	FUNCTION(qdss15),
 	FUNCTION(uim1_data),
-	FUNCTION(atest_char0),
+	FUNCTION(qdss_stm3),
 	FUNCTION(uim1_present),
+	FUNCTION(qdss_stm2),
 	FUNCTION(uim1_reset),
+	FUNCTION(qdss_stm1),
 	FUNCTION(uim1_clk),
+	FUNCTION(qdss_stm0),
 	FUNCTION(dbg_out),
 	FUNCTION(gcc_plltest),
-	FUNCTION(usb2phy_ac),
 };
 
+/* Every pin is maintained as a single group, and any missing or non-existent
+ * pin is represented by a dummy group to keep the pin group index in sync
+ * with the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
 static const struct msm_pingroup sdxpoorwills_groups[] = {
-	PINGROUP(0, uim2_data, blsp_uart1, qdss_stm31, ebi0_wrcdc, NA, NA, NA,
-		 NA, NA),
-	PINGROUP(1, uim2_present, blsp_uart1, qdss_stm30, NA, NA, NA, NA, NA,
-		 NA),
-	PINGROUP(2, uim2_reset, blsp_uart1, blsp_i2c1, qdss_stm29, ebi0_wrcdc,
-		 NA, NA, NA, NA),
-	PINGROUP(3, uim2_clk, blsp_uart1, blsp_i2c1, qdss_stm28, NA, NA, NA,
-		 NA, NA),
-	PINGROUP(4, blsp_spi2, blsp_uart2, NA, qdss_stm23, qdss3, NA, NA, NA,
-		 NA),
-	PINGROUP(5, blsp_spi2, blsp_uart2, NA, qdss_stm22, qdss2, NA, NA, NA,
-		 NA),
-	PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, qdss_stm21, qdss1,
-		 NA, NA, NA),
-	PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, qdss_stm20, qdss0,
-		 NA, NA, NA),
-	PINGROUP(8, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, ldo_en, NA, NA,
-		 NA, NA),
-	PINGROUP(9, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, NA, NA, NA, NA,
-		 NA),
-	PINGROUP(10, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg, NA,
-		 NA, NA, NA),
-	PINGROUP(11, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg,
-		 gcc_gp3, NA, NA, NA),
-	PINGROUP(12, pri_mi2s, NA, qdss_stm19, qdss12, NA, NA, NA, NA, NA),
-	PINGROUP(13, pri_mi2s, NA, qdss_stm18, qdss13, NA, NA, NA, NA, NA),
-	PINGROUP(14, pri_mi2s, NA, NA, qdss_stm17, qdss14, bimc_dte0,
-		 native_tsens, vsense_trigger, NA),
-	PINGROUP(15, pri_mi2s, NA, NA, qdss_stm16, qdss15, NA, NA, bimc_dte1,
-		 NA),
-	PINGROUP(16, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, NA,
-		 NA, qdss_stm27, qdss8),
-	PINGROUP(17, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, NA,
-		 qdss_stm26, qdss9, NA),
-	PINGROUP(18, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, gcc_gp1, NA,
-		 qdss_stm25, qdss10, NA),
-	PINGROUP(19, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, jitter_bist,
-		 gcc_gp2, NA, qdss_stm24, qdss11),
-	PINGROUP(20, sec_mi2s, ebi2_a, blsp_uart1, blsp_uart4, NA, qdss_stm3,
-		 NA, NA, NA),
-	PINGROUP(21, sec_mi2s, ebi2_lcd, blsp_uart1, blsp_uart4, NA, NA,
-		 qdss_stm2, NA, NA),
-	PINGROUP(22, sec_mi2s, ebi2_lcd, blsp_uart1, qdss_cti, qdss_cti,
-		 blsp_uart4, pll_bist, NA, qdss_stm1),
-	PINGROUP(23, sec_mi2s, ebi2_lcd, qdss_cti, qdss_cti, blsp_uart1,
-		 blsp_uart4, NA, qdss_stm0, NA),
-	PINGROUP(24, adsp_ext, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(25, m_voc, adsp_ext, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(26, NA, NA, NA, native_char, NA, NA, NA, NA, NA),
-	PINGROUP(27, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(28, wmss_reset, native_char3, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(29, NA, NA, nav_pps, nav_dr, NA, native_char2, native_tsense,
-		 NA, NA),
-	PINGROUP(30, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(32, NA, native_char1, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(33, NA, pa_indicator, qdss_traceclk, native_char0, NA, NA, NA,
-		 NA, NA),
-	PINGROUP(34, qlink_en, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(35, qlink_req, pll_test, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(36, NA, NA, cri_trng, NA, NA, NA, NA, NA, NA),
-	PINGROUP(37, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(38, NA, NA, prng_rosc, NA, NA, NA, NA, NA, NA),
-	PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(40, NA, NA, cri_trng0, NA, NA, NA, NA, NA, NA),
-	PINGROUP(41, NA, NA, cri_trng1, NA, NA, NA, NA, NA, NA),
-	PINGROUP(42, nav_pps, NA, nav_dr, pll_ref, NA, NA, NA, NA, NA),
-	PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(44, coex_uart, NA, qdss_stm11, NA, NA, NA, NA, NA, NA),
-	PINGROUP(45, coex_uart, NA, qdss_stm10, ddr_pxi0, NA, NA, NA, NA, NA),
-	PINGROUP(46, m_voc, ddr_bist, ddr_pxi0, NA, NA, NA, NA, NA, NA),
-	PINGROUP(47, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(48, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(49, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(52, blsp_spi2, blsp_spi1, blsp_spi3, blsp_spi4, NA, NA,
-		 qdss_stm14, NA, NA),
-	PINGROUP(53, pci_e, NA, NA, qdss_stm13, NA, NA, NA, NA, NA),
-	PINGROUP(54, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(55, qdss_cti, qdss_cti, tgu_ch0, NA, NA, NA, NA, NA, NA),
-	PINGROUP(56, pcie_clkreq, NA, qdss_stm9, NA, NA, NA, NA, NA, NA),
-	PINGROUP(57, NA, qdss_stm15, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(59, qdss_cti, m_voc, bimc_dte0, NA, NA, NA, NA, NA, NA),
-	PINGROUP(60, mgpi_clk, NA, qdss_stm12, qdss_tracectl, bimc_dte1, NA,
-		 NA, NA, NA),
-	PINGROUP(61, qdss_cti, NA, m_voc, NA, NA, NA, NA, NA, NA),
-	PINGROUP(62, i2s_mclk, nav_pps, nav_dr, audio_ref, blsp_spi1,
-		 blsp_spi2, blsp_spi3, blsp_spi4, ldo_update),
-	PINGROUP(63, blsp_uart2, NA, qdss_stm7, qdss4, atest_char, NA, NA, NA,
-		 NA),
-	PINGROUP(64, blsp_uart2, NA, qdss_stm6, qdss5, atest_char3, NA, NA, NA,
-		 NA),
-	PINGROUP(65, blsp_uart2, blsp_i2c2, NA, qdss_stm5, qdss6, atest_char2,
-		 NA, NA, NA),
-	PINGROUP(66, blsp_uart2, blsp_i2c2, NA, qdss_stm4, qdss7, atest_char1,
-		 NA, NA, NA),
-	PINGROUP(67, uim1_data, atest_char0, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(68, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(69, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(70, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(71, mgpi_clk, blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4,
-		 dbg_out, NA, NA, NA),
-	PINGROUP(72, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(73, NA, blsp_spi1, NA, gcc_plltest, NA, NA, NA, NA, NA),
-	PINGROUP(74, NA, blsp_spi1, NA, blsp_i2c1, gcc_plltest, NA, NA, NA, NA),
-	PINGROUP(75, NA, blsp_spi1, NA, blsp_i2c1, NA, NA, NA, NA, NA),
-	PINGROUP(76, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(77, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(87, NA, NA, usb2phy_ac, NA, NA, NA, NA, NA, NA),
-	PINGROUP(88, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(89, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
-	SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
-	SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+	[0] = PINGROUP(0, uim2_data, blsp_uart1, qdss_stm31, ebi0_wrcdc, NA,
+		       NA, NA, NA, NA),
+	[1] = PINGROUP(1, uim2_present, blsp_uart1, qdss_stm30, NA, NA, NA, NA,
+		       NA, NA),
+	[2] = PINGROUP(2, uim2_reset, blsp_uart1, blsp_i2c1, qdss_stm29,
+		       ebi0_wrcdc, NA, NA, NA, NA),
+	[3] = PINGROUP(3, uim2_clk, blsp_uart1, blsp_i2c1, qdss_stm28, NA, NA,
+		       NA, NA, NA),
+	[4] = PINGROUP(4, blsp_spi2, blsp_uart2, NA, qdss_stm23, qdss3, NA, NA,
+		       NA, NA),
+	[5] = PINGROUP(5, blsp_spi2, blsp_uart2, NA, qdss_stm22, qdss2, NA, NA,
+		       NA, NA),
+	[6] = PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, qdss_stm21,
+		       qdss1, NA, NA, NA),
+	[7] = PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, qdss_stm20,
+		       qdss0, NA, NA, NA),
+	[8] = PINGROUP(8, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, ldo_en, NA,
+		       NA, NA, NA),
+	[9] = PINGROUP(9, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, NA, NA, NA,
+		       NA, NA),
+	[10] = PINGROUP(10, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3,
+			ext_dbg, NA, NA, NA, NA),
+	[11] = PINGROUP(11, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3,
+			ext_dbg, gcc_gp3, NA, NA, NA),
+	[12] = PINGROUP(12, pri_mi2s, NA, qdss_stm19, qdss4, NA, NA, NA, NA,
+			NA),
+	[13] = PINGROUP(13, pri_mi2s, NA, qdss_stm18, qdss5, NA, NA, NA, NA,
+			NA),
+	[14] = PINGROUP(14, pri_mi2s, NA, NA, qdss_stm17, qdss6, bimc_dte0,
+			native_tsens, NA, NA),
+	[15] = PINGROUP(15, pri_mi2s, NA, NA, qdss_stm16, qdss7, NA, NA,
+			bimc_dte1, NA),
+	[16] = PINGROUP(16, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti,
+			qdss_cti, NA, qdss_stm27, qdss8, NA),
+	[17] = PINGROUP(17, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti,
+			qdss_cti, qdss_stm26, qdss9, NA, NA),
+	[18] = PINGROUP(18, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4,
+			gcc_gp1, qdss_stm25, qdss10, NA, NA),
+	[19] = PINGROUP(19, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4,
+			jitter_bist, gcc_gp2, qdss_stm24, qdss11, NA),
+	[20] = PINGROUP(20, sec_mi2s, ebi2_a, blsp_uart1, blsp_uart4, NA, NA,
+			NA, NA, NA),
+	[21] = PINGROUP(21, sec_mi2s, ebi2_lcd, blsp_uart1, blsp_uart4, NA, NA,
+			NA, NA, NA),
+	[22] = PINGROUP(22, sec_mi2s, ebi2_lcd, blsp_uart1, qdss_cti, qdss_cti,
+			blsp_uart4, pll_bist, NA, NA),
+	[23] = PINGROUP(23, sec_mi2s, ebi2_lcd, qdss_cti, qdss_cti, blsp_uart1,
+			blsp_uart4, NA, NA, NA),
+	[24] = PINGROUP(24, adsp_ext, NA, qdss_stm11, NA, NA, NA, NA, NA, NA),
+	[25] = PINGROUP(25, m_voc, adsp_ext, NA, qdss_stm10, NA, NA, NA, NA,
+			NA),
+	[26] = PINGROUP(26, NA, NA, NA, native_char, NA, NA, NA, NA, NA),
+	[27] = PINGROUP(27, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[28] = PINGROUP(28, NA, native_char3, NA, NA, NA, NA, NA, NA, NA),
+	[29] = PINGROUP(29, NA, NA, nav_pps, nav_dr, NA, native_char2,
+			native_tsense, NA, NA),
+	[30] = PINGROUP(30, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[31] = PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[32] = PINGROUP(32, NA, native_char1, NA, NA, NA, NA, NA, NA, NA),
+	[33] = PINGROUP(33, NA, pa_indicator, qdss_traceclk, native_char0, NA,
+			NA, NA, NA, NA),
+	[34] = PINGROUP(34, qlink_en, NA, NA, NA, NA, NA, NA, NA, NA),
+	[35] = PINGROUP(35, qlink_req, pll_test, NA, NA, NA, NA, NA, NA, NA),
+	[36] = PINGROUP(36, NA, NA, cri_trng, NA, NA, NA, NA, NA, NA),
+	[37] = PINGROUP(37, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[38] = PINGROUP(38, NA, NA, prng_rosc, NA, NA, NA, NA, NA, NA),
+	[39] = PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[40] = PINGROUP(40, NA, NA, cri_trng0, NA, NA, NA, NA, NA, NA),
+	[41] = PINGROUP(41, NA, NA, cri_trng1, NA, NA, NA, NA, NA, NA),
+	[42] = PINGROUP(42, nav_pps, nav_dr, pll_ref, NA, NA, NA, NA, NA, NA),
+	[43] = PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[44] = PINGROUP(44, coex_uart, qdss_tracectl, NA, NA, NA, NA, NA, NA,
+			NA),
+	[45] = PINGROUP(45, coex_uart, NA, NA, NA, NA, NA, NA, NA, NA),
+	[46] = PINGROUP(46, m_voc, ddr_bist, NA, NA, NA, NA, NA, NA, NA),
+	[47] = PINGROUP(47, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
+	[48] = PINGROUP(48, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
+	[49] = PINGROUP(49, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
+	[50] = PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[51] = PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[52] = PINGROUP(52, blsp_spi2, blsp_spi1, blsp_spi3, blsp_spi4, NA, NA,
+			NA, NA, NA),
+	[53] = PINGROUP(53, pci_e, NA, NA, NA, NA, NA, NA, NA, NA),
+	[54] = PINGROUP(54, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+	[55] = PINGROUP(55, qdss_cti, qdss_cti, tgu_ch0, NA, NA, NA, NA, NA,
+			NA),
+	[56] = PINGROUP(56, pcie_clkreq, NA, NA, NA, NA, NA, NA, NA, NA),
+	[57] = PINGROUP(57, NA, qdss_stm15, NA, NA, NA, NA, NA, NA, NA),
+	[58] = PINGROUP(58, NA, qdss_stm14, NA, NA, NA, NA, NA, NA, NA),
+	[59] = PINGROUP(59, qdss_cti, m_voc, NA, qdss_stm13, bimc_dte0, NA, NA,
+			NA, NA),
+	[60] = PINGROUP(60, mgpi_clk, NA, qdss_stm12, bimc_dte1, NA, NA, NA,
+			NA, NA),
+	[61] = PINGROUP(61, qdss_cti, NA, m_voc, NA, qdss_stm9, NA, NA, NA, NA),
+	[62] = PINGROUP(62, i2s_mclk, nav_pps, nav_dr, audio_ref, blsp_spi1,
+			blsp_spi2, blsp_spi3, blsp_spi4, ldo_update),
+	[63] = PINGROUP(63, blsp_uart2, NA, qdss_stm7, qdss12, NA, NA, NA, NA,
+			NA),
+	[64] = PINGROUP(64, blsp_uart2, qdss_stm6, qdss13, NA, NA, NA, NA, NA,
+			NA),
+	[65] = PINGROUP(65, blsp_uart2, blsp_i2c2, NA, qdss_stm5, qdss14, NA,
+			NA, NA, NA),
+	[66] = PINGROUP(66, blsp_uart2, blsp_i2c2, NA, qdss_stm4, qdss15, NA,
+			NA, NA, NA),
+	[67] = PINGROUP(67, uim1_data, NA, qdss_stm3, NA, NA, NA, NA, NA, NA),
+	[68] = PINGROUP(68, uim1_present, qdss_stm2, NA, NA, NA, NA, NA, NA,
+			NA),
+	[69] = PINGROUP(69, uim1_reset, qdss_stm1, NA, NA, NA, NA, NA, NA, NA),
+	[70] = PINGROUP(70, uim1_clk, NA, qdss_stm0, NA, NA, NA, NA, NA, NA),
+	[71] = PINGROUP(71, mgpi_clk, blsp_spi1, blsp_spi2, blsp_spi3,
+			blsp_spi4, dbg_out, NA, NA, NA),
+	[72] = PINGROUP(72, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA),
+	[73] = PINGROUP(73, NA, blsp_spi1, NA, gcc_plltest, NA, NA, NA, NA, NA),
+	[74] = PINGROUP(74, NA, blsp_spi1, NA, blsp_i2c1, gcc_plltest, NA, NA,
+			NA, NA),
+	[75] = PINGROUP(75, NA, blsp_spi1, NA, blsp_i2c1, NA, NA, NA, NA, NA),
+	[76] = PINGROUP(76, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA, NA),
+	[77] = PINGROUP(77, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA, NA),
+	[78] = PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[79] = PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[80] = PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[81] = PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[82] = PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[83] = PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[84] = PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[85] = PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[86] = PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[87] = PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[88] = PINGROUP(88, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+	[89] = PINGROUP(89, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+	[90] = PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[91] = PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[92] = PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[93] = PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[94] = PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[95] = PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[96] = PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[97] = PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[98] = PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[99] = PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[100] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
+	[101] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+	[102] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+	[103] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+	[104] = SDC_QDSD_PINGROUP(sdc2_clk, 0x0, 14, 6),
+	[105] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x0, 11, 3),
+	[106] = SDC_QDSD_PINGROUP(sdc2_data, 0x0, 9, 0),
 };
 
 static const struct msm_pinctrl_soc_data sdxpoorwills_pinctrl = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 70d74f0..6230356 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -536,6 +536,7 @@
 	int retval;
 	struct ipa_wan_msg *wan_msg;
 	struct ipa_msg_meta msg_meta;
+	struct ipa_wan_msg cache_wan_msg;
 
 	wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
 	if (!wan_msg) {
@@ -549,6 +550,8 @@
 		return -EFAULT;
 	}
 
+	memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
+
 	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
 	msg_meta.msg_type = msg_type;
 	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
@@ -565,8 +568,8 @@
 		/* cache the cne event */
 		memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
 			ipa_ctx->num_ipa_cne_evt_req].wan_msg,
-			wan_msg,
-			sizeof(struct ipa_wan_msg));
+			&cache_wan_msg,
+			sizeof(cache_wan_msg));
 
 		memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
 			ipa_ctx->num_ipa_cne_evt_req].msg_meta,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index a52b4e0..e9df986 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -545,6 +545,7 @@
 	int retval;
 	struct ipa_wan_msg *wan_msg;
 	struct ipa_msg_meta msg_meta;
+	struct ipa_wan_msg cache_wan_msg;
 
 	wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
 	if (!wan_msg) {
@@ -558,6 +559,8 @@
 		return -EFAULT;
 	}
 
+	memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
+
 	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
 	msg_meta.msg_type = msg_type;
 	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
@@ -574,8 +577,8 @@
 		/* cache the cne event */
 		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
 			ipa3_ctx->num_ipa_cne_evt_req].wan_msg,
-			wan_msg,
-			sizeof(struct ipa_wan_msg));
+			&cache_wan_msg,
+			sizeof(cache_wan_msg));
 
 		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
 			ipa3_ctx->num_ipa_cne_evt_req].msg_meta,
@@ -4427,6 +4430,8 @@
 	if (IS_ERR_OR_NULL(subsystem_get_retval)) {
 		IPAERR("Unable to trigger PIL process for FW loading\n");
 		return -EINVAL;
+	} else {
+		subsystem_put(subsystem_get_retval);
 	}
 
 	IPADBG("PIL FW loading process is complete\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index 66c712c..3bf0327 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -425,7 +425,13 @@
 	activate_client(client->hdl);
 
 	mutex_lock(&ipa_pm_ctx->client_mutex);
-	client->callback(client->callback_params, IPA_PM_CLIENT_ACTIVATED);
+	if (client->callback) {
+		client->callback(client->callback_params,
+			IPA_PM_CLIENT_ACTIVATED);
+	} else {
+		IPA_PM_ERR("client has no callback");
+		WARN_ON(1);
+	}
 	mutex_unlock(&ipa_pm_ctx->client_mutex);
 
 	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
@@ -677,8 +683,7 @@
 {
 	struct ipa_pm_client *client;
 
-	if (params == NULL || hdl == NULL || params->name == NULL
-		|| params->callback == NULL) {
+	if (params == NULL || hdl == NULL || params->name == NULL) {
 		IPA_PM_ERR("Invalid Params\n");
 		return -EINVAL;
 	}
@@ -1115,9 +1120,14 @@
 		if (pipe_bitmask & (1 << i)) {
 			client = ipa_pm_ctx->clients_by_pipe[i];
 			if (client && client_notified[client->hdl] == false) {
-				client->callback(client->callback_params,
-					IPA_PM_REQUEST_WAKEUP);
-				client_notified[client->hdl] = true;
+				if (client->callback) {
+					client->callback(client->callback_params
+						, IPA_PM_REQUEST_WAKEUP);
+					client_notified[client->hdl] = true;
+				} else {
+					IPA_PM_ERR("client has no callback");
+					WARN_ON(1);
+				}
 			}
 		}
 	}
@@ -1164,8 +1174,8 @@
 	mutex_unlock(&ipa_pm_ctx->client_mutex);
 
 	spin_lock_irqsave(&client->state_lock, flags);
-	if (IPA_PM_STATE_ACTIVE(client->state || (client->group !=
-			IPA_PM_GROUP_DEFAULT))) {
+	if (IPA_PM_STATE_ACTIVE(client->state) || (client->group !=
+			IPA_PM_GROUP_DEFAULT)) {
 		spin_unlock_irqrestore(&client->state_lock, flags);
 		do_clk_scaling();
 		return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 56fed2a..a8d5342 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -595,6 +595,15 @@
 	return pyld;
 }
 
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dummy(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	IPAHAL_ERR("no construct function for IMM_CMD=%s, IPA ver %d\n",
+		ipahal_imm_cmd_name_str(cmd), ipahal_ctx->hw_type);
+	WARN_ON(1);
+	return NULL;
+}
+
 /*
  * struct ipahal_imm_cmd_obj - immediate command H/W information for
  *  specific IPA version
@@ -668,8 +677,8 @@
 		12},
 	/* NAT_DMA was renamed to TABLE_DMA for IPAv4 */
 	[IPA_HW_v4_0][IPA_IMM_CMD_NAT_DMA] = {
-		NULL,
-		-1 },
+		ipa_imm_cmd_construct_dummy,
+		-1},
 	[IPA_HW_v4_0][IPA_IMM_CMD_TABLE_DMA] = {
 		ipa_imm_cmd_construct_table_dma_ipav4,
 		14},
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
index af46bf2..82bee5d 100644
--- a/drivers/platform/msm/ipa/test/Makefile
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
-ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o
+ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o ipa_pm_ut.o
diff --git a/drivers/platform/msm/ipa/test/ipa_pm_ut.c b/drivers/platform/msm/ipa/test/ipa_pm_ut.c
new file mode 100644
index 0000000..e07040a
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_pm_ut.c
@@ -0,0 +1,1758 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa.h>
+#include "../ipa_v3/ipa_pm.h"
+#include "../ipa_v3/ipa_i.h"
+#include "ipa_ut_framework.h"
+#include <linux/delay.h>
+
+struct callback_param {
+	struct completion complete;
+	enum ipa_pm_cb_event evt;
+};
+
+static int ipa_pm_ut_setup(void **ppriv)
+{
+
+	IPA_UT_DBG("Start Setup\n");
+
+	/* decrement UT vote */
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT");
+
+	return 0;
+}
+
+static int ipa_pm_ut_teardown(void *priv)
+{
+	IPA_UT_DBG("Start Teardown\n");
+
+	/* undo UT vote */
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT");
+	return 0;
+}
+
+/* pass completion struct as the user data/callback params */
+static void ipa_pm_call_back(void *user_data, enum ipa_pm_cb_event evt)
+{
+	struct callback_param *param;
+
+	param = (struct callback_param *) user_data;
+	param->evt = evt;
+
+	if (evt == IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_DBG("Activate callback called\n");
+		complete_all(&param->complete);
+	} else if (evt == IPA_PM_REQUEST_WAKEUP) {
+		IPA_UT_DBG("Request Wakeup callback called\n");
+		complete_all(&param->complete);
+	} else
+		IPA_UT_ERR("invalid callback - callback #%d\n", evt);
+}
+
+static int clean_up(int n, ...)
+{
+	va_list args;
+	int i, hdl, rc = 0;
+
+	va_start(args, n);
+
+	IPA_UT_DBG("n = %d\n", n);
+
+	IPA_UT_DBG("Clean up Started");
+
+	for (i = 0; i < n; i++) {
+		hdl = va_arg(args, int);
+
+		rc = ipa_pm_deactivate_sync(hdl);
+		if (rc) {
+			IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("deactivate failed");
+			va_end(args);
+			return -EFAULT;
+		}
+		rc = ipa_pm_deregister(hdl);
+		if (rc) {
+			IPA_UT_ERR("fail to deregister client - rc = %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("deregister failed");
+			va_end(args);
+			return -EFAULT;
+		}
+	}
+	va_end(args);
+	rc = ipa_pm_destroy();
+	if (rc) {
+		IPA_UT_ERR("fail to destroy pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+
+/* test 1.1 */
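+/*
+ * Single client: register a "USB" client, activate it and wait for the
+ * ACTIVATED callback, check the clock vote, verify deregister fails while
+ * the client is active, then deferred-deactivate, deregister and destroy
+ * the PM framework.
+ */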
+static int ipa_pm_ut_single_registration(void *priv)
+{
+	int rc = 0;
+	int hdl, vote;
+	struct callback_param user_data;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params register_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+	user_data.evt = IPA_PM_CB_EVENT_MAX;
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&register_params, &hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to register client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deregister(hdl);
+	if (rc == 0) {
+		IPA_UT_ERR("deregister was not unsuccesful - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deregister was not unsuccesful");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deferred deactivate client - rc = %d\n"
+			, rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to deferred deactivate client");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 0) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deregister(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deregister client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to deregister client");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc == 0) {
+		IPA_UT_ERR("activate was not unsuccesful- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate was not unsuccesful");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_destroy();
+	if (rc) {
+		IPA_UT_ERR("terminate failed - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("terminate_failed");
+	}
+
+	return 0;
+}
+
+/* test 1.1 */
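+/*
+ * Double register/activate: registering a second client with the same name
+ * must return -EEXIST, repeated activate calls during and after activation
+ * are harmless, and deactivate_sync drops the clock vote back to zero.
+ */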
+static int ipa_pm_ut_double_register_activate(void *priv)
+{
+	int rc = 0;
+	int hdl, hdl_test, vote;
+	struct callback_param user_data;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params register_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+	user_data.evt = IPA_PM_CB_EVENT_MAX;
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&register_params, &hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to register client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&register_params, &hdl_test);
+	if (rc != -EEXIST) {
+		IPA_UT_ERR("registered client with same name rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("did not to fail register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to do nothing - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("do nothing failed");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to do nothing on 2nd activate = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to not reactivate");
+		return -EFAULT;
+	}
+
+	msleep(200);
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deactivate_sync(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to deactivate client");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 0) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = clean_up(1, hdl);
+	return rc;
+}
+
+/* test 2 */
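+/*
+ * Deferred deactivate: re-activating before the deferred work runs must
+ * keep the clock vote at one; a later deactivate_sync drops it to zero.
+ */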
+static int ipa_pm_ut_deferred_deactivate(void *priv)
+{
+	int rc = 0;
+	int hdl, vote;
+	struct callback_param user_data;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params register_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+	user_data.evt = IPA_PM_CB_EVENT_MAX;
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&register_params, &hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to register client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client - rc = %d\n",
+		rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to reactivate client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("reactivate client failed");
+		return -EFAULT;
+	}
+
+	msleep(200);
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deactivate_sync(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate_sync client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deactivate sync failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = clean_up(1, hdl);
+	return rc;
+}
+
+
+/*test 3*/
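+/*
+ * Two clients mapped to consumer pipes: both activate callbacks must fire,
+ * and ipa_pm_handle_suspend() on the mapped pipes must trigger
+ * IPA_PM_REQUEST_WAKEUP callbacks for the affected clients.
+ */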
+static int ipa_pm_ut_two_clients_activate(void *priv)
+{
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, vote;
+	u32 pipes;
+	struct callback_param user_data_USB;
+	struct callback_param user_data_WLAN;
+
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data_USB
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data_WLAN
+	};
+	user_data_USB.evt = IPA_PM_CB_EVENT_MAX;
+	user_data_WLAN.evt = IPA_PM_CB_EVENT_MAX;
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data_USB.complete);
+	init_completion(&user_data_WLAN.complete);
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_associate_ipa_cons_to_client(hdl_USB, IPA_CLIENT_USB_CONS);
+	if (rc) {
+		IPA_UT_ERR("fail to map client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to map client");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_associate_ipa_cons_to_client(hdl_WLAN,
+		IPA_CLIENT_WLAN1_CONS);
+	if (rc) {
+		IPA_UT_ERR("fail to map client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to map client");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_associate_ipa_cons_to_client(hdl_WLAN,
+		IPA_CLIENT_WLAN2_CONS);
+	if (rc) {
+		IPA_UT_ERR("fail to map client 2 to multiplt pipes rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to map client");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_USB);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work for client 1 - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work for client 2 - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data_USB.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback 1\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data_USB.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data_USB.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data_WLAN.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback 2\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data_WLAN.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data_WLAN.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	reinit_completion(&user_data_USB.complete);
+	reinit_completion(&user_data_WLAN.complete);
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client 1 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	msleep(200);
+
+	rc = ipa_pm_activate(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("no-block activate failed - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("no-block activate fail");
+		return -EFAULT;
+	}
+
+	pipes = 1 << ipa_get_ep_mapping(IPA_CLIENT_USB_CONS);
+	pipes |= 1 << ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+	pipes |= 1 << ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+
+	IPA_UT_DBG("pipes = %d\n", pipes);
+
+	rc = ipa_pm_handle_suspend(pipes);
+
+	if (!wait_for_completion_timeout(&user_data_USB.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for wakeup_callback 1\n");
+		IPA_UT_TEST_FAIL_REPORT("wakeup callback not called");
+		return -ETIME;
+	}
+
+	if (user_data_USB.evt != IPA_PM_REQUEST_WAKEUP) {
+		IPA_UT_ERR("Callback = %d\n", user_data_USB.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data_WLAN.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for wakeup_callback 2\n");
+		IPA_UT_TEST_FAIL_REPORT("wakeup callback not called");
+		return -ETIME;
+	}
+
+	if (user_data_WLAN.evt != IPA_PM_REQUEST_WAKEUP) {
+		IPA_UT_ERR("Callback = %d\n", user_data_WLAN.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	reinit_completion(&user_data_USB.complete);
+
+	rc = ipa_pm_deactivate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate_sync client 1 - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to deactivate_sync");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("no-block activate failed - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("no-block activate fail");
+		return -EFAULT;
+	}
+
+	pipes = 1 << ipa_get_ep_mapping(IPA_CLIENT_USB_CONS);
+
+	rc = ipa_pm_handle_suspend(pipes);
+
+	if (!wait_for_completion_timeout(&user_data_USB.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for wakeup_callback 1\n");
+		IPA_UT_TEST_FAIL_REPORT("wakeup callback not called");
+		return -ETIME;
+	}
+
+	if (user_data_USB.evt != IPA_PM_REQUEST_WAKEUP) {
+		IPA_UT_ERR("Callback = %d\n", user_data_USB.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	rc = clean_up(2, hdl_USB, hdl_WLAN);
+	return rc;
+}
+
+/* test 4 */
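+/*
+ * deactivate_all_deferred: with three active clients, deferring two of them
+ * and calling ipa_pm_deactivate_all_deferred() must leave only the vote of
+ * the client that stayed active, both immediately and after the delay.
+ */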
+static int ipa_pm_ut_deactivate_all_deferred(void *priv)
+{
+
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, hdl_MODEM, vote;
+	struct callback_param user_data;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params MODEM_params = {
+		.name = "MODEM",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+	user_data.evt = IPA_PM_CB_EVENT_MAX;
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rce %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_USB);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work for client 1 - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 2- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback 1\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_register(&MODEM_params, &hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 3 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to no-block activate - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("no-block-activate failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 3) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client 1 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_deactivate_all_deferred();
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("deactivate_all_deferred failed");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("clock vote went below 1");
+		return -EINVAL;
+	}
+
+	rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM);
+	return rc;
+}
+
+/* test 5 */
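+/*
+ * Deactivate right after activate: deferred and sync deactivates issued
+ * while the activate work is still queued must leave the clock vote at zero.
+ */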
+static int ipa_pm_ut_deactivate_after_activate(void *priv)
+{
+
+	int rc = 0;
+	int hdl, vote;
+	struct callback_param user_data;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rce %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&USB_params, &hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work for client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client - rc = %d\n",
+		rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	msleep(200);
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work for client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_deactivate_sync(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate sync client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deactivate sync fail");
+		return -EFAULT;
+	}
+
+	msleep(200);
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = clean_up(1, hdl);
+	return rc;
+}
+
+/* test 6 */
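+/*
+ * Atomic activate: calling ipa_pm_activate() while holding a spinlock must
+ * return -EINPROGRESS and complete later via the ACTIVATED callback.
+ */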
+static int ipa_pm_ut_atomic_activate(void *priv)
+{
+	int rc = 0;
+	int hdl, vote;
+	struct callback_param user_data;
+	spinlock_t lock;
+	unsigned long flags;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params register_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+	user_data.evt = IPA_PM_CB_EVENT_MAX;
+
+
+	spin_lock_init(&lock);
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&register_params, &hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to register client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&lock, flags);
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		spin_unlock_irqrestore(&lock, flags);
+		return -EFAULT;
+	}
+	spin_unlock_irqrestore(&lock, flags);
+
+	if (!wait_for_completion_timeout(&user_data.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = clean_up(1, hdl);
+	return rc;
+}
+
+/* test 7 */
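+/*
+ * Deactivate loop: repeatedly re-activating and deferring a client must not
+ * let the clock vote drop while the deferred work is pending; the vote only
+ * falls back to the remaining active client after the loop ends.
+ */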
+static int ipa_pm_ut_deactivate_loop(void *priv)
+{
+	int rc = 0;
+	int i, hdl_USB, hdl_WLAN, vote;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_USB, 1200);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 800);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	msleep(200);
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	for (i = 0; i < 50; i++) {
+		IPA_UT_DBG("Loop iteration #%d\n", i);
+
+		vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+		if (vote != 2) {
+			IPA_UT_ERR("clock vote is at %d\n", vote);
+			IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+			return -EINVAL;
+		}
+
+		rc = ipa_pm_activate(hdl_WLAN);
+		if (rc) {
+			IPA_UT_ERR("fail to undo deactivate for client 2");
+			IPA_UT_ERR(" - rc = %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("undo deactivate failed");
+			return -EFAULT;
+		}
+
+		rc = ipa_pm_deferred_deactivate(hdl_WLAN);
+		if (rc) {
+			IPA_UT_ERR("fail to deffered deactivate client");
+			IPA_UT_ERR(" - rc = %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+			return -EFAULT;
+		}
+	}
+
+	msleep(200);
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+	rc = clean_up(2, hdl_USB, hdl_WLAN);
+	return rc;
+
+}
+
+
+/*test 8*/
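+/*
+ * set_perf_profile: verify the bus vote index follows the clients'
+ * throughput profiles as they are activated and updated.
+ */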
+static int ipa_pm_ut_set_perf_profile(void *priv)
+{
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, vote, idx;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_USB, 1200);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 800);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 1) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 1200);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 3) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = clean_up(2, hdl_USB, hdl_WLAN);
+	return rc;
+}
+
+/*test 9*/
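+/*
+ * Group throughput: USB and WLAN share IPA_PM_GROUP_APPS while MODEM is in
+ * the default group; verify the bus vote index with grouped clients and
+ * after one group member deactivates.
+ */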
+static int ipa_pm_ut_group_tput(void *priv)
+{
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, hdl_MODEM, vote, idx;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_APPS,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_APPS,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params MODEM_params = {
+		.name = "MODEM",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_USB, 500);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 800);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 1) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 1) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_register(&MODEM_params, &hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 3 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_MODEM, 1000);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 3 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 3) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deactivate_sync(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deactivate failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM);
+	return rc;
+
+}
+
+/*test 10*/
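+/*
+ * skip_clk_vote: WLAN and MODEM register with skip_clk_vote set, so they
+ * must not raise the active-clients count, but their throughput still moves
+ * the bus vote index.
+ */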
+static int ipa_pm_ut_skip_clk_vote_tput(void *priv)
+{
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, hdl_MODEM, vote, idx;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_MODEM,
+		.skip_clk_vote = 1,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params MODEM_params = {
+		.name = "MODEM",
+		.group = IPA_PM_GROUP_MODEM,
+		.skip_clk_vote = 1,
+		.callback = ipa_pm_call_back,
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_USB, 1200);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 800);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 1) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_register(&MODEM_params, &hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 3 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_MODEM, 2000);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 3 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 3) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+
+	rc = ipa_pm_deactivate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deactivate failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 0) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM);
+	return rc;
+}
+
+/* Test 11 */
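+/*
+ * Exception list: initialize the PM framework with a throughput exception
+ * entry for the "USB" use case and verify the resulting bus vote indices as
+ * clients activate and deactivate.
+ */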
+static int ipa_pm_ut_simple_exception(void *priv)
+{
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, hdl_MODEM, vote, idx;
+
+	struct ipa_pm_exception exceptions = {
+		.usecase = "USB",
+		.threshold = {1000, 1800},
+	};
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000},
+		.exception_size = 1,
+		.exceptions[0] = exceptions,
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params MODEM_params = {
+		.name = "MODEM",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_USB, 1200);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 2000);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 1 - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 1) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_register(&MODEM_params, &hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 3 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_MODEM, 800);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 3 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 3 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 3) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 3) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
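+	/* drop USB's activation; WLAN and MODEM keep the clock voted */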
+	rc = ipa_pm_deactivate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deactivate failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM);
+	return rc;
+}
+
+/* Suite definition block */
+IPA_UT_DEFINE_SUITE_START(pm, "PM for IPA",
+	ipa_pm_ut_setup, ipa_pm_ut_teardown)
+{
+	IPA_UT_ADD_TEST(single_registration,
+		"Single Registration/Basic Functions",
+		ipa_pm_ut_single_registration,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(double_register_activate,
+		"double register/activate",
+		ipa_pm_ut_double_register_activate,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(deferred_deactivate,
+		"Deferred deactivate",
+		ipa_pm_ut_deferred_deactivate,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(two_clients_activate,
+		"Activate two clients",
+		ipa_pm_ut_two_clients_activate,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(deactivate_all_deferred,
+		"Deactivate all deferred",
+		ipa_pm_ut_deactivate_all_deferred,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(deactivate_after_activate,
+		"Deactivate after activate",
+		ipa_pm_ut_deactivate_after_activate,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(atomic_activate,
+		"Atomic activate",
+		ipa_pm_ut_atomic_activate,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(deactivate_loop,
+		"Deactivate Loop",
+		ipa_pm_ut_deactivate_loop,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(set_perf_profile,
+		"Set perf profile",
+		ipa_pm_ut_set_perf_profile,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(group_tput,
+		"Group throughputs",
+		ipa_pm_ut_group_tput,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(skip_clk_vote_tput,
+		"Skip clock vote and tput",
+		ipa_pm_ut_skip_clk_vote_tput,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(simple_exception,
+		"Throughput while passing simple exception",
+		ipa_pm_ut_simple_exception,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+} IPA_UT_DEFINE_SUITE_END(pm);
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
index 823edcf..35f2878 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
+++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
@@ -22,6 +22,7 @@
  */
 IPA_UT_DECLARE_SUITE(mhi);
 IPA_UT_DECLARE_SUITE(dma);
+IPA_UT_DECLARE_SUITE(pm);
 IPA_UT_DECLARE_SUITE(example);
 IPA_UT_DECLARE_SUITE(hw_stats);
 
@@ -34,6 +35,7 @@
 {
 	IPA_UT_REGISTER_SUITE(mhi),
 	IPA_UT_REGISTER_SUITE(dma),
+	IPA_UT_REGISTER_SUITE(pm),
 	IPA_UT_REGISTER_SUITE(example),
 	IPA_UT_REGISTER_SUITE(hw_stats),
 } IPA_UT_DEFINE_ALL_SUITES_END;
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 0cba866..7f9a797 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -43,7 +43,7 @@
 #define ICL_CHANGE_VOTER		"ICL_CHANGE_VOTER"
 #define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
 #define USBIN_I_VOTER			"USBIN_I_VOTER"
-#define FCC_CHANGE_VOTER		"FCC_CHANGE_VOTER"
+#define PL_FCC_LOW_VOTER		"PL_FCC_LOW_VOTER"
 
 struct pl_data {
 	int			pl_mode;
@@ -357,12 +357,26 @@
 	union power_supply_propval pval = {0, };
 	int rc;
 	int eff_fcc_ua;
+	int total_fcc_ua, master_fcc_ua, slave_fcc_ua = 0;
 
 	chip->taper_work_running = true;
 	while (true) {
-		/* exit immediately if parallel is disabled */
 		if (get_effective_result(chip->pl_disable_votable)) {
-			pl_dbg(chip, PR_PARALLEL, "terminating parallel not in progress\n");
+			/*
+			 * if parallel's FCC share is low, simply disable
+			 * parallel with TAPER_END_VOTER
+			 */
+			total_fcc_ua = get_effective_result_locked(
+					chip->fcc_votable);
+			get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
+					&slave_fcc_ua);
+			if (slave_fcc_ua <= MINIMUM_PARALLEL_FCC_UA) {
+				pl_dbg(chip, PR_PARALLEL, "terminating: parallel's share is low\n");
+				vote(chip->pl_disable_votable, TAPER_END_VOTER,
+						true, 0);
+			} else {
+				pl_dbg(chip, PR_PARALLEL, "terminating: parallel disabled\n");
+			}
 			goto done;
 		}
 
@@ -420,11 +434,11 @@
 
 		if (slave_fcc_ua > MINIMUM_PARALLEL_FCC_UA) {
 			chip->slave_fcc_ua = slave_fcc_ua;
-			vote(chip->pl_disable_votable, FCC_CHANGE_VOTER,
+			vote(chip->pl_disable_votable, PL_FCC_LOW_VOTER,
 							false, 0);
 		} else {
 			chip->slave_fcc_ua = 0;
-			vote(chip->pl_disable_votable, FCC_CHANGE_VOTER,
+			vote(chip->pl_disable_votable, PL_FCC_LOW_VOTER,
 							true, 0);
 		}
 	}
@@ -1012,7 +1026,7 @@
 		goto release_wakeup_source;
 	}
 
-	chip->fv_votable = create_votable("FV", VOTE_MAX,
+	chip->fv_votable = create_votable("FV", VOTE_MIN,
 					pl_fv_vote_callback,
 					chip);
 	if (IS_ERR(chip->fv_votable)) {
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 271c523..4303960 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -287,6 +287,7 @@
 	int	esr_pulse_thresh_ma;
 	int	esr_meas_curr_ma;
 	int	bmd_en_delay_ms;
+	int	ki_coeff_full_soc_dischg;
 	int	jeita_thresholds[NUM_JEITA_LEVELS];
 	int	ki_coeff_soc[KI_COEFF_SOC_LEVELS];
 	int	ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
@@ -416,6 +417,7 @@
 	struct mutex		bus_lock;
 	struct mutex		sram_rw_lock;
 	struct mutex		charge_full_lock;
+	struct mutex		qnovo_esr_ctrl_lock;
 	u32			batt_soc_base;
 	u32			batt_info_base;
 	u32			mem_if_base;
@@ -424,7 +426,6 @@
 	int			batt_id_ohms;
 	int			ki_coeff_full_soc;
 	int			charge_status;
-	int			prev_charge_status;
 	int			charge_done;
 	int			charge_type;
 	int			online_status;
@@ -450,6 +451,7 @@
 	bool			slope_limit_en;
 	bool			use_ima_single_mode;
 	bool			use_dma;
+	bool			qnovo_enable;
 	struct completion	soc_update;
 	struct completion	soc_ready;
 	struct completion	mem_grant;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 7f220c3..b02c860 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -1631,6 +1631,8 @@
 
 	if (batt_temp < 0)
 		ki_coeff_full_soc = 0;
+	else if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING)
+		ki_coeff_full_soc = chip->dt.ki_coeff_full_soc_dischg;
 	else
 		ki_coeff_full_soc = KI_COEFF_FULL_SOC_DEFAULT;
 
@@ -1748,12 +1750,12 @@
 
 	/* We need 2 most significant bytes here */
 	bsoc = (u32)bsoc >> 16;
-	rc = fg_get_msoc(chip, &msoc);
+	rc = fg_get_msoc_raw(chip, &msoc_raw);
 	if (rc < 0) {
-		pr_err("Error in getting msoc, rc=%d\n", rc);
+		pr_err("Error in getting msoc_raw, rc=%d\n", rc);
 		goto out;
 	}
-	msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY);
+	msoc = DIV_ROUND_CLOSEST(msoc_raw * FULL_CAPACITY, FULL_SOC_RAW);
 
 	fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
 		msoc, bsoc, chip->health, chip->charge_status,
@@ -2507,7 +2509,6 @@
 		goto out;
 	}
 
-	chip->prev_charge_status = chip->charge_status;
 	chip->charge_status = prop.intval;
 	rc = power_supply_get_property(chip->batt_psy,
 			POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
@@ -3300,20 +3301,21 @@
 	int rc;
 	int esr_uohms;
 
+	mutex_lock(&chip->qnovo_esr_ctrl_lock);
 	/* force esr extraction enable */
 	rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
 			ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), BIT(0),
 			FG_IMA_DEFAULT);
 	if (rc < 0) {
 		pr_err("failed to enable esr extn rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
 	rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
 			LD_REG_CTRL_BIT, 0);
 	if (rc < 0) {
 		pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
 	rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
@@ -3321,24 +3323,36 @@
 			ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT);
 	if (rc < 0) {
 		pr_err("Error in configuring force ESR rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
+	/*
+	 * Release the lock and reacquire it after 1.5 seconds so that the
+	 * prepare callback can succeed if a request comes in meanwhile.
+	 */
+	mutex_unlock(&chip->qnovo_esr_ctrl_lock);
+
 	/* wait 1.5 seconds for hw to measure ESR */
 	msleep(1500);
+
+	mutex_lock(&chip->qnovo_esr_ctrl_lock);
 	rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
 			ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
 			0);
 	if (rc < 0) {
 		pr_err("Error in restoring force ESR rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
+	/* If qnovo is disabled, then leave ESR extraction enabled */
+	if (!chip->qnovo_enable)
+		goto done;
+
 	rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
 			LD_REG_CTRL_BIT, LD_REG_CTRL_BIT);
 	if (rc < 0) {
 		pr_err("Error in restoring qnovo_cfg rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
 	/* force esr extraction disable */
@@ -3347,36 +3361,46 @@
 			FG_IMA_DEFAULT);
 	if (rc < 0) {
 		pr_err("failed to disable esr extn rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
+done:
 	fg_get_battery_resistance(chip, &esr_uohms);
 	fg_dbg(chip, FG_STATUS, "ESR uohms = %d\n", esr_uohms);
-
+out:
+	mutex_unlock(&chip->qnovo_esr_ctrl_lock);
 	return rc;
 }
 
 static int fg_prepare_for_qnovo(struct fg_chip *chip, int qnovo_enable)
 {
-	int rc;
+	int rc = 0;
 
+	mutex_lock(&chip->qnovo_esr_ctrl_lock);
 	/* force esr extraction disable when qnovo enables */
 	rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
 			ESR_EXTRACTION_ENABLE_OFFSET,
 			BIT(0), qnovo_enable ? 0 : BIT(0),
 			FG_IMA_DEFAULT);
-	if (rc < 0)
+	if (rc < 0) {
 		pr_err("Error in configuring esr extraction rc=%d\n", rc);
+		goto out;
+	}
 
 	rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
 			LD_REG_CTRL_BIT,
 			qnovo_enable ? LD_REG_CTRL_BIT : 0);
 	if (rc < 0) {
 		pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
-	fg_dbg(chip, FG_STATUS, "Prepared for Qnovo\n");
-	return 0;
+
+	fg_dbg(chip, FG_STATUS, "%s for Qnovo\n",
+		qnovo_enable ? "Prepared" : "Unprepared");
+	chip->qnovo_enable = qnovo_enable;
+out:
+	mutex_unlock(&chip->qnovo_esr_ctrl_lock);
+	return rc;
 }
 
 static void ttf_work(struct work_struct *work)
@@ -4503,7 +4527,11 @@
 static int fg_parse_ki_coefficients(struct fg_chip *chip)
 {
 	struct device_node *node = chip->dev->of_node;
-	int rc, i;
+	int rc, i, temp;
+
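+	/* optional Ki coefficient used at full SOC while discharging */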
+	rc = of_property_read_u32(node, "qcom,ki-coeff-full-dischg", &temp);
+	if (!rc)
+		chip->dt.ki_coeff_full_soc_dischg = temp;
 
 	rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-soc-dischg",
 		chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
@@ -4972,7 +5000,6 @@
 	chip->debug_mask = &fg_gen3_debug_mask;
 	chip->irqs = fg_irqs;
 	chip->charge_status = -EINVAL;
-	chip->prev_charge_status = -EINVAL;
 	chip->ki_coeff_full_soc = -EINVAL;
 	chip->online_status = -EINVAL;
 	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
@@ -5045,6 +5072,7 @@
 	mutex_init(&chip->cl.lock);
 	mutex_init(&chip->ttf.lock);
 	mutex_init(&chip->charge_full_lock);
+	mutex_init(&chip->qnovo_esr_ctrl_lock);
 	init_completion(&chip->soc_update);
 	init_completion(&chip->soc_ready);
 	init_completion(&chip->mem_grant);
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 6abbaeb..34514c9 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -188,6 +188,11 @@
 module_param_named(
 	weak_chg_icl_ua, __weak_chg_icl_ua, int, 0600);
 
+static int __try_sink_enabled = 1;
+module_param_named(
+	try_sink_enabled, __try_sink_enabled, int, 0600
+);
+
 #define MICRO_1P5A		1500000
 #define MICRO_P1A		100000
 #define OTG_DEFAULT_DEGLITCH_TIME_MS	50
@@ -1037,7 +1042,10 @@
 		val->intval = 0;
 		break;
 	case POWER_SUPPLY_PROP_DIE_HEALTH:
-		rc = smblib_get_prop_die_health(chg, val);
+		if (chg->die_health == -EINVAL)
+			rc = smblib_get_prop_die_health(chg, val);
+		else
+			val->intval = chg->die_health;
 		break;
 	case POWER_SUPPLY_PROP_DP_DM:
 		val->intval = chg->pulse_cnt;
@@ -1092,14 +1100,8 @@
 		rc = smblib_set_prop_charge_qnovo_enable(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
-		if (val->intval == -EINVAL) {
-			vote(chg->fv_votable, BATT_PROFILE_VOTER,
-					true, chg->batt_profile_fv_uv);
-			vote(chg->fv_votable, QNOVO_VOTER, false, 0);
-		} else {
-			vote(chg->fv_votable, QNOVO_VOTER, true, val->intval);
-			vote(chg->fv_votable, BATT_PROFILE_VOTER, false, 0);
-		}
+		vote(chg->fv_votable, QNOVO_VOTER,
+			(val->intval >= 0), val->intval);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
 		vote(chg->pl_disable_votable, PL_QNOVO_VOTER,
@@ -1145,6 +1147,10 @@
 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
 		rc = smblib_set_prop_input_current_limited(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_DIE_HEALTH:
+		chg->die_health = val->intval;
+		power_supply_changed(chg->batt_psy);
+		break;
 	default:
 		rc = -EINVAL;
 	}
@@ -1166,6 +1172,7 @@
 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
 	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
 	case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
+	case POWER_SUPPLY_PROP_DIE_HEALTH:
 		return 1;
 	default:
 		break;
@@ -1647,15 +1654,6 @@
 		return rc;
 	}
 
-	/* disable SW STAT override */
-	rc = smblib_masked_write(chg, STAT_CFG_REG,
-				 STAT_SW_OVERRIDE_CFG_BIT, 0);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't disable SW STAT override rc=%d\n",
-			rc);
-		return rc;
-	}
-
 	/* disable h/w autonomous parallel charging control */
 	rc = smblib_masked_write(chg, MISC_CFG_REG,
 				 STAT_PARALLEL_1400MA_EN_CFG_BIT, 0);
@@ -2244,9 +2242,11 @@
 	chg->dev = &pdev->dev;
 	chg->param = v1_params;
 	chg->debug_mask = &__debug_mask;
+	chg->try_sink_enabled = &__try_sink_enabled;
 	chg->weak_chg_icl_ua = &__weak_chg_icl_ua;
 	chg->mode = PARALLEL_MASTER;
 	chg->irq_info = smb2_irqs;
+	chg->die_health = -EINVAL;
 	chg->name = "PMI";
 
 	chg->regmap = dev_get_regmap(chg->dev->parent, NULL);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 60f4df8..52fcd7f 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -143,6 +143,23 @@
 	return rc;
 }
 
+int smblib_stat_sw_override_cfg(struct smb_charger *chg, bool override)
+{
+	int rc;
+
+	/* override = 1: SW STAT override; override = 0: HW auto mode */
+	rc = smblib_masked_write(chg, STAT_CFG_REG,
+				STAT_SW_OVERRIDE_CFG_BIT,
+				override ? STAT_SW_OVERRIDE_CFG_BIT : 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure SW STAT override rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return rc;
+}
+
 /********************
  * REGISTER GETTERS *
  ********************/
@@ -584,8 +601,10 @@
 			schedule_work(&chg->bms_update_work);
 	}
 
-	if (!chg->pl.psy && !strcmp(psy->desc->name, "parallel"))
+	if (!chg->pl.psy && !strcmp(psy->desc->name, "parallel")) {
 		chg->pl.psy = psy;
+		schedule_work(&chg->pl_update_work);
+	}
 
 	return NOTIFY_OK;
 }
@@ -2792,7 +2811,7 @@
 		hvdcp = stat & QC_CHARGER_BIT;
 		vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
 		vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
-		vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
+		vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
 		vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
 								false, 0);
 
@@ -3773,8 +3792,165 @@
 	return IRQ_HANDLED;
 }
 
+static int typec_try_sink(struct smb_charger *chg)
+{
+	union power_supply_propval val;
+	bool debounce_done, vbus_detected, sink;
+	u8 stat;
+	int exit_mode = ATTACHED_SRC, rc;
+
+	/* ignore typec interrupt while try.snk WIP */
+	chg->try_sink_active = true;
+
+	/* force SNK mode */
+	val.intval = POWER_SUPPLY_TYPEC_PR_SINK;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	/* reduce Tccdebounce time to ~20ms */
+	rc = smblib_masked_write(chg, MISC_CFG_REG,
+			TCC_DEBOUNCE_20MS_BIT, TCC_DEBOUNCE_20MS_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	/*
+	 * give opportunity to the other side to be a SRC,
+	 * for tDRPTRY + Tccdebounce time
+	 */
+	msleep(100);
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+				rc);
+		goto try_sink_exit;
+	}
+
+	debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+	if (!debounce_done)
+		/*
+		 * The other side didn't switch to source: either it
+		 * is an adamant sink or it was removed. Go back to showing Rp.
+		 */
+		goto try_wait_src;
+
+	/*
+	 * We are in force sink mode and the other side has switched to
+	 * showing Rp. Config DRP in case the other side removes Rp so we
+	 * can quickly (20ms) switch to showing our Rp. Note that the spec
+	 * requires us to show Rp for 80 ms while the DRP DFP residency is
+	 * just 54 ms, but 54 ms is plenty of time for us to react and force
+	 * Rp for the remaining 26 ms.
+	 */
+	val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set DFP mode rc=%d\n",
+				rc);
+		goto try_sink_exit;
+	}
+
+	/*
+	 * while other side is Rp, wait for VBUS from it; exit if other side
+	 * While the other side presents Rp, wait for VBUS from it; exit if
+	 * it removes Rp.
+	do {
+		rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+					rc);
+			goto try_sink_exit;
+		}
+
+		debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+		vbus_detected = stat & TYPEC_VBUS_STATUS_BIT;
+
+		/* Successfully transitioned to ATTACHED.SNK */
+		if (vbus_detected && debounce_done) {
+			exit_mode = ATTACHED_SINK;
+			goto try_sink_exit;
+		}
+
+		/*
+		 * Ensure sink since drp may put us in source if other
+		 * side switches back to Rd
+		 */
+		sink = !(stat & UFP_DFP_MODE_STATUS_BIT);
+
+		usleep_range(1000, 2000);
+	} while (debounce_done && sink);
+
+try_wait_src:
+	/*
+	 * Transition to TryWait.SRC state. Check if the other side still wants
+	 * to be SNK or has been removed.
+	 */
+	val.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	/* Need to be in this state for tDRPTRY time, 75ms~150ms */
+	msleep(80);
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+	if (debounce_done)
+		/* the other side wants to be a sink */
+		exit_mode = ATTACHED_SRC;
+	else
+		/* the other side is detached */
+		exit_mode = UNATTACHED_SINK;
+
+try_sink_exit:
+	/* release forcing of SRC/SNK mode */
+	val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set DFP mode rc=%d\n", rc);
+
+	/* revert Tccdebounce time back to ~120ms */
+	rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+
+	chg->try_sink_active = false;
+
+	return exit_mode;
+}
+
 static void typec_sink_insertion(struct smb_charger *chg)
 {
+	int exit_mode;
+
+	/*
+	 * Try.SNK entry status - ATTACHWAIT.SRC state and detected Rd-open
+	 * or Rd-Ra for TccDebounce time.
+	 */
+
+	if (*chg->try_sink_enabled) {
+		exit_mode = typec_try_sink(chg);
+
+		if (exit_mode != ATTACHED_SRC) {
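+			/* exited as sink or unattached; re-run type-c change handling */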
+			smblib_usb_typec_change(chg);
+			return;
+		}
+	}
+
 	/* when a sink is inserted we should not wait on hvdcp timeout to
 	 * enable pd
 	 */
@@ -3847,7 +4023,7 @@
 
 	/* reset parallel voters */
 	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
-	vote(chg->pl_disable_votable, FCC_CHANGE_VOTER, false, 0);
+	vote(chg->pl_disable_votable, PL_FCC_LOW_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
 	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
@@ -4041,7 +4217,7 @@
 				smblib_typec_mode_name[chg->typec_mode]);
 }
 
-static void smblib_usb_typec_change(struct smb_charger *chg)
+void smblib_usb_typec_change(struct smb_charger *chg)
 {
 	int rc;
 
@@ -4077,7 +4253,8 @@
 		return IRQ_HANDLED;
 	}
 
-	if (chg->cc2_detach_wa_active || chg->typec_en_dis_active) {
+	if (chg->cc2_detach_wa_active || chg->typec_en_dis_active ||
+					 chg->try_sink_active) {
 		smblib_dbg(chg, PR_INTERRUPT, "Ignoring since %s active\n",
 			chg->cc2_detach_wa_active ?
 			"cc2_detach_wa" : "typec_en_dis");
@@ -4111,6 +4288,14 @@
 	struct smb_charger *chg = irq_data->parent_data;
 
 	chg->is_hdc = true;
+	/*
+	 * Disable the USB IRQ after the flag is set and re-enable it after
+	 * the flag is cleared in the delayed work, to avoid IRQ storming
+	 * during the delay.
+	 */
+	if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+		disable_irq_nosync(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+
 	schedule_delayed_work(&chg->clear_hdc_work, msecs_to_jiffies(60));
 
 	return IRQ_HANDLED;
@@ -4282,12 +4467,22 @@
 		power_supply_changed(chg->batt_psy);
 }
 
+static void pl_update_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						pl_update_work);
+
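+	/* a parallel charger has registered; switch STAT back to HW auto mode */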
+	smblib_stat_sw_override_cfg(chg, false);
+}
+
 static void clear_hdc_work(struct work_struct *work)
 {
 	struct smb_charger *chg = container_of(work, struct smb_charger,
 						clear_hdc_work.work);
 
 	chg->is_hdc = 0;
+	if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+		enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
 }
 
 static void rdstd_cc2_detach_work(struct work_struct *work)
@@ -4812,6 +5007,7 @@
 	mutex_init(&chg->otg_oc_lock);
 	mutex_init(&chg->vconn_oc_lock);
 	INIT_WORK(&chg->bms_update_work, bms_update_work);
+	INIT_WORK(&chg->pl_update_work, pl_update_work);
 	INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
 	INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
 	INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
@@ -4860,6 +5056,14 @@
 
 		chg->bms_psy = power_supply_get_by_name("bms");
 		chg->pl.psy = power_supply_get_by_name("parallel");
+		if (chg->pl.psy) {
+			rc = smblib_stat_sw_override_cfg(chg, false);
+			if (rc < 0) {
+				smblib_err(chg,
+					"Couldn't config stat sw rc=%d\n", rc);
+				return rc;
+			}
+		}
 		break;
 	case PARALLEL_SLAVE:
 		break;
@@ -4876,6 +5080,7 @@
 	switch (chg->mode) {
 	case PARALLEL_MASTER:
 		cancel_work_sync(&chg->bms_update_work);
+		cancel_work_sync(&chg->pl_update_work);
 		cancel_work_sync(&chg->rdstd_cc2_detach_work);
 		cancel_delayed_work_sync(&chg->hvdcp_detect_work);
 		cancel_delayed_work_sync(&chg->clear_hdc_work);
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 80f5bca..1046b27 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -66,7 +66,7 @@
 #define USBIN_I_VOTER			"USBIN_I_VOTER"
 #define WEAK_CHARGER_VOTER		"WEAK_CHARGER_VOTER"
 #define OTG_VOTER			"OTG_VOTER"
-#define FCC_CHANGE_VOTER		"FCC_CHANGE_VOTER"
+#define PL_FCC_LOW_VOTER		"PL_FCC_LOW_VOTER"
 #define WBC_VOTER			"WBC_VOTER"
 
 #define VCONN_MAX_ATTEMPTS	3
@@ -130,6 +130,12 @@
 	SMB_IRQ_MAX,
 };
 
+enum try_sink_exit_mode {
+	ATTACHED_SRC = 0,
+	ATTACHED_SINK,
+	UNATTACHED_SINK,
+};
+
 struct smb_irq_info {
 	const char			*name;
 	const irq_handler_t		handler;
@@ -232,6 +238,7 @@
 	struct smb_params	param;
 	struct smb_iio		iio;
 	int			*debug_mask;
+	int			*try_sink_enabled;
 	enum smb_mode		mode;
 	struct smb_chg_freq	chg_freq;
 	int			smb_version;
@@ -287,6 +294,7 @@
 
 	/* work */
 	struct work_struct	bms_update_work;
+	struct work_struct	pl_update_work;
 	struct work_struct	rdstd_cc2_detach_work;
 	struct delayed_work	hvdcp_detect_work;
 	struct delayed_work	ps_change_timeout_work;
@@ -342,6 +350,7 @@
 	u32			wa_flags;
 	bool			cc2_detach_wa_active;
 	bool			typec_en_dis_active;
+	bool			try_sink_active;
 	int			boost_current_ua;
 	int			temp_speed_reading_count;
 
@@ -355,6 +364,8 @@
 	/* qnovo */
 	int			usb_icl_delta_ua;
 	int			pulse_cnt;
+
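+	/* die health override; -EINVAL means report the measured value */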
+	int			die_health;
 };
 
 int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -523,6 +534,8 @@
 				union power_supply_propval *val);
 int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
 				const union power_supply_propval *val);
+int smblib_stat_sw_override_cfg(struct smb_charger *chg, bool override);
+void smblib_usb_typec_change(struct smb_charger *chg);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index a464a81..4b2e9c8 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -861,7 +861,7 @@
 	return (reg & CMD_OTG_EN_BIT) ? 1 : 0;
 }
 
-struct regulator_ops smb1351_chg_otg_reg_ops = {
+static struct regulator_ops smb1351_chg_otg_reg_ops = {
 	.enable		= smb1351_chg_otg_regulator_enable,
 	.disable	= smb1351_chg_otg_regulator_disable,
 	.is_enabled	= smb1351_chg_otg_regulator_is_enable,
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index 4e1bb17..4b42420 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -146,6 +146,8 @@
 
 	struct power_supply	*parallel_psy;
 	struct pmic_revid_data	*pmic_rev_id;
+
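+	/* connector health override; -EINVAL falls back to the measured reading */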
+	int			c_health;
 };
 
 static bool is_secure(struct smb1355 *chip, int addr)
@@ -434,7 +436,10 @@
 		val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
 		break;
 	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
-		val->intval = smb1355_get_prop_connector_health(chip);
+		if (chip->c_health == -EINVAL)
+			val->intval = smb1355_get_prop_connector_health(chip);
+		else
+			val->intval = chip->c_health;
 		break;
 	default:
 		pr_err_ratelimited("parallel psy get prop %d not supported\n",
@@ -497,6 +502,10 @@
 		rc = smb1355_set_charge_param(chip, &chip->param.fcc,
 						val->intval);
 		break;
+	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+		chip->c_health = val->intval;
+		power_supply_changed(chip->parallel_psy);
+		break;
 	default:
 		pr_debug("parallel power supply set prop %d not supported\n",
 			prop);
@@ -509,6 +518,13 @@
 static int smb1355_parallel_prop_is_writeable(struct power_supply *psy,
 					      enum power_supply_property prop)
 {
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+		return 1;
+	default:
+		break;
+	}
+
 	return 0;
 }
 
@@ -613,6 +629,7 @@
 	[0] = {
 		.name		= "wdog-bark",
 		.handler	= smb1355_handle_wdog_bark,
+		.wake		= true,
 	},
 	[1] = {
 		.name		= "chg-state-change",
@@ -713,6 +730,7 @@
 
 	chip->dev = &pdev->dev;
 	chip->param = v1_params;
+	chip->c_health = -EINVAL;
 	chip->name = "smb1355";
 	mutex_init(&chip->write_lock);
 
@@ -762,14 +780,26 @@
 	return 0;
 }
 
+static void smb1355_shutdown(struct platform_device *pdev)
+{
+	struct smb1355 *chip = platform_get_drvdata(pdev);
+	int rc;
+
+	/* disable parallel charging path */
+	rc = smb1355_set_parallel_charging(chip, true);
+	if (rc < 0)
+		pr_err("Couldn't disable parallel path rc=%d\n", rc);
+}
+
 static struct platform_driver smb1355_driver = {
 	.driver	= {
 		.name		= "qcom,smb1355-charger",
 		.owner		= THIS_MODULE,
 		.of_match_table	= match_table,
 	},
-	.probe	= smb1355_probe,
-	.remove	= smb1355_remove,
+	.probe		= smb1355_probe,
+	.remove		= smb1355_remove,
+	.shutdown	= smb1355_shutdown,
 };
 module_platform_driver(smb1355_driver);
 
diff --git a/drivers/power/supply/qcom/smb135x-charger.c b/drivers/power/supply/qcom/smb135x-charger.c
index 803dd6e..edc0998 100644
--- a/drivers/power/supply/qcom/smb135x-charger.c
+++ b/drivers/power/supply/qcom/smb135x-charger.c
@@ -2218,7 +2218,7 @@
 	return  (reg & OTG_EN) ? 1 : 0;
 }
 
-struct regulator_ops smb135x_chg_otg_reg_ops = {
+static struct regulator_ops smb135x_chg_otg_reg_ops = {
 	.enable		= smb135x_chg_otg_regulator_enable,
 	.disable	= smb135x_chg_otg_regulator_disable,
 	.is_enabled	= smb135x_chg_otg_regulator_is_enable,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index c5118b4..4bd6fd4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -500,7 +500,7 @@
 		old_hdr->result = EIO;
 		break;
 	case DID_ERROR:
-		old_hdr->result = (srp->sense_b[0] == 0 && 
+		old_hdr->result = (srp->sense_b[0] == 0 &&
 				  hp->masked_status == GOOD) ? 0 : EIO;
 		break;
 	default:
@@ -859,8 +859,10 @@
 			return -ENXIO;
 		if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
 			return -EFAULT;
+		mutex_lock(&sfp->parentdp->open_rel_lock);
 		result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
 				 1, read_only, 1, &srp);
+		mutex_unlock(&sfp->parentdp->open_rel_lock);
 		if (result < 0)
 			return result;
 		result = wait_event_interruptible(sfp->read_wait,
@@ -901,8 +903,10 @@
 			sfp->low_dma = 1;
 			if ((0 == sfp->low_dma) && !sfp->res_in_use) {
 				val = (int) sfp->reserve.bufflen;
+				mutex_lock(&sfp->parentdp->open_rel_lock);
 				sg_remove_scat(sfp, &sfp->reserve);
 				sg_build_reserve(sfp, val);
+				mutex_unlock(&sfp->parentdp->open_rel_lock);
 			}
 		} else {
 			if (atomic_read(&sdp->detaching))
@@ -970,8 +974,8 @@
 		result = get_user(val, ip);
 		if (result)
 			return result;
-                if (val < 0)
-                        return -EINVAL;
+		if (val < 0)
+			return -EINVAL;
 		val = min_t(int, val,
 			    max_sectors_bytes(sdp->device->request_queue));
 		mutex_lock(&sfp->f_mutex);
@@ -981,9 +985,10 @@
 				mutex_unlock(&sfp->f_mutex);
 				return -EBUSY;
 			}
-
+			mutex_lock(&sfp->parentdp->open_rel_lock);
 			sg_remove_scat(sfp, &sfp->reserve);
 			sg_build_reserve(sfp, val);
+			mutex_unlock(&sfp->parentdp->open_rel_lock);
 		}
 		mutex_unlock(&sfp->f_mutex);
 		return 0;
@@ -1039,8 +1044,8 @@
 				if (srp) {
 					rinfo[val].req_state = srp->done + 1;
 					rinfo[val].problem =
-					    srp->header.masked_status & 
-					    srp->header.host_status & 
+					    srp->header.masked_status &
+					    srp->header.host_status &
 					    srp->header.driver_status;
 					if (srp->done)
 						rinfo[val].duration =
@@ -1061,7 +1066,7 @@
 				}
 			}
 			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-			result = __copy_to_user(p, rinfo, 
+			result = __copy_to_user(p, rinfo,
 						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
 			result = result ? -EFAULT : 0;
 			kfree(rinfo);
@@ -1137,14 +1142,14 @@
 		return -ENXIO;
 
 	sdev = sdp->device;
-	if (sdev->host->hostt->compat_ioctl) { 
+	if (sdev->host->hostt->compat_ioctl) {
 		int ret;
 
 		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
 
 		return ret;
 	}
-	
+
 	return -ENOIOCTLCMD;
 }
 #endif
@@ -1634,7 +1639,7 @@
 	else
 		def_reserved_size = sg_big_buff;
 
-	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), 
+	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
 				    SG_MAX_DEVS, "sg");
 	if (rc)
 		return rc;
@@ -2315,7 +2320,7 @@
 };
 
 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
-static ssize_t sg_proc_write_dressz(struct file *filp, 
+static ssize_t sg_proc_write_dressz(struct file *filp,
 		const char __user *buffer, size_t count, loff_t *off);
 static const struct file_operations dressz_fops = {
 	.owner = THIS_MODULE,
@@ -2455,7 +2460,7 @@
 	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
 }
 
-static ssize_t 
+static ssize_t
 sg_proc_write_adio(struct file *filp, const char __user *buffer,
 		   size_t count, loff_t *off)
 {
@@ -2476,7 +2481,7 @@
 	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
 }
 
-static ssize_t 
+static ssize_t
 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
 		     size_t count, loff_t *off)
 {
@@ -2636,7 +2641,7 @@
 			hp = &srp->header;
 			new_interface = (hp->interface_id == '\0') ? 0 : 1;
 			if (srp->res_used) {
-				if (new_interface && 
+				if (new_interface &&
 				    (SG_FLAG_MMAP_IO & hp->flags))
 					cp = "     mmap>> ";
 				else
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index a8fb8b6..ef55e58 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -537,16 +537,6 @@
 	  online at any given point in time. This module can also restrict
 	  max freq or min freq of cpu cluster
 
-config MSM_PERFORMANCE_HOTPLUG_ON
-	bool "Hotplug functionality through msm_performance turned on"
-	depends on MSM_PERFORMANCE
-	default y
-	help
-	  If some other core-control driver is present turn off the core-control
-	  capability of msm_performance driver. Setting this flag to false will
-	  compile out the nodes needed for core-control functionality through
-	  msm_performance.
-
 config MSM_CDSP_LOADER
 	tristate "CDSP loader support"
 	depends on MSM_GLINK
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 6eef58f..37b33e6 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -54,7 +54,7 @@
 obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR)	+=	system_health_monitor_v01.o
 obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR)	+=	system_health_monitor.o
 obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
-obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o icnss_utils.o
+obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o
 
 obj-$(CONFIG_MEM_SHARE_QMI_SERVICE)		+= memshare/
 obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 2a23ba7..e11efb0 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -148,11 +148,6 @@
 	struct class		*sram_class;
 	struct list_head	cfg_head[DCC_MAX_LINK_LIST];
 	uint32_t		nr_config[DCC_MAX_LINK_LIST];
-	void			*reg_buf;
-	struct msm_dump_data	reg_data;
-	bool			save_reg;
-	void			*sram_buf;
-	struct msm_dump_data	sram_data;
 	uint8_t			curr_list;
 	uint8_t			cti_trig;
 };
@@ -490,39 +485,6 @@
 	return ret;
 }
 
-static void __dcc_reg_dump(struct dcc_drvdata *drvdata)
-{
-	uint32_t *reg_buf;
-	uint8_t i = 0;
-	uint8_t j;
-
-	if (!drvdata->reg_buf)
-		return;
-
-	drvdata->reg_data.version = DCC_REG_DUMP_VER;
-
-	reg_buf = drvdata->reg_buf;
-
-	reg_buf[i++] = dcc_readl(drvdata, DCC_HW_VERSION);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_HW_INFO);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_EXEC_CTRL);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_STATUS);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_CFG);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_FDA_CURR);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_LLA_CURR);
-
-	for (j = 0; j < DCC_MAX_LINK_LIST; j++)
-		reg_buf[i++] = dcc_readl(drvdata, DCC_LL_LOCK(j));
-	for (j = 0; j < DCC_MAX_LINK_LIST; j++)
-		reg_buf[i++] = dcc_readl(drvdata, DCC_LL_CFG(j));
-	for (j = 0; j < DCC_MAX_LINK_LIST; j++)
-		reg_buf[i++] = dcc_readl(drvdata, DCC_LL_BASE(j));
-	for (j = 0; j < DCC_MAX_LINK_LIST; j++)
-		reg_buf[i++] = dcc_readl(drvdata, DCC_FD_BASE(j));
-
-	drvdata->reg_data.magic = DCC_REG_DUMP_MAGIC_V2;
-}
-
 static void __dcc_first_crc(struct dcc_drvdata *drvdata)
 {
 	int i;
@@ -626,9 +588,6 @@
 					   DCC_LL_INT_ENABLE(list));
 		}
 	}
-	/* Save DCC registers */
-	if (drvdata->save_reg)
-		__dcc_reg_dump(drvdata);
 
 err:
 	mutex_unlock(&drvdata->mutex);
@@ -653,9 +612,6 @@
 	}
 	drvdata->ram_cfg = 0;
 	drvdata->ram_start = 0;
-	/* Save DCC registers */
-	if (drvdata->save_reg)
-		__dcc_reg_dump(drvdata);
 
 	mutex_unlock(&drvdata->mutex);
 }
@@ -1462,47 +1418,6 @@
 	dcc_sram_dev_deregister(drvdata);
 }
 
-static void dcc_allocate_dump_mem(struct dcc_drvdata *drvdata)
-{
-	int ret;
-	struct device *dev = drvdata->dev;
-	struct msm_dump_entry reg_dump_entry, sram_dump_entry;
-
-	/* Allocate memory for dcc reg dump */
-	drvdata->reg_buf = devm_kzalloc(dev, drvdata->reg_size, GFP_KERNEL);
-	if (drvdata->reg_buf) {
-		drvdata->reg_data.addr = virt_to_phys(drvdata->reg_buf);
-		drvdata->reg_data.len = drvdata->reg_size;
-		reg_dump_entry.id = MSM_DUMP_DATA_DCC_REG;
-		reg_dump_entry.addr = virt_to_phys(&drvdata->reg_data);
-		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
-					     &reg_dump_entry);
-		if (ret) {
-			dev_err(dev, "DCC REG dump setup failed\n");
-			devm_kfree(dev, drvdata->reg_buf);
-		}
-	} else {
-		dev_err(dev, "DCC REG dump allocation failed\n");
-	}
-
-	/* Allocate memory for dcc sram dump */
-	drvdata->sram_buf = devm_kzalloc(dev, drvdata->ram_size, GFP_KERNEL);
-	if (drvdata->sram_buf) {
-		drvdata->sram_data.addr = virt_to_phys(drvdata->sram_buf);
-		drvdata->sram_data.len = drvdata->ram_size;
-		sram_dump_entry.id = MSM_DUMP_DATA_DCC_SRAM;
-		sram_dump_entry.addr = virt_to_phys(&drvdata->sram_data);
-		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
-					     &sram_dump_entry);
-		if (ret) {
-			dev_err(dev, "DCC SRAM dump setup failed\n");
-			devm_kfree(dev, drvdata->sram_buf);
-		}
-	} else {
-		dev_err(dev, "DCC SRAM dump allocation failed\n");
-	}
-}
-
 static int dcc_probe(struct platform_device *pdev)
 {
 	int ret, i;
@@ -1542,9 +1457,6 @@
 	if (ret)
 		return -EINVAL;
 
-	drvdata->save_reg = of_property_read_bool(pdev->dev.of_node,
-						  "qcom,save-reg");
-
 	mutex_init(&drvdata->mutex);
 
 	for (i = 0; i < DCC_MAX_LINK_LIST; i++) {
@@ -1580,7 +1492,6 @@
 	if (ret)
 		goto err;
 
-	dcc_allocate_dump_mem(drvdata);
 	return 0;
 err:
 	return ret;
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index 51c08c6..f7f3317 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -27,6 +27,8 @@
 #include <linux/serial.h>
 #include <linux/workqueue.h>
 #include <linux/power_supply.h>
+#include <linux/clk.h>
+#include <linux/of.h>
 
 #define EUD_ENABLE_CMD 1
 #define EUD_DISABLE_CMD 0
@@ -71,6 +73,7 @@
 	struct uart_port		port;
 	struct work_struct		eud_work;
 	struct power_supply		*batt_psy;
+	struct clk			*cfg_ahb_clk;
 };
 
 static const unsigned int eud_extcon_cable[] = {
@@ -119,7 +122,7 @@
 		/* write into CSR to enable EUD */
 		writel_relaxed(BIT(0), priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
 		/* Enable vbus, chgr & safe mode warning interrupts */
-		writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
+		writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR,
 				priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
 
 		/* Ensure Register Writes Complete */
@@ -448,7 +451,11 @@
 {
 	struct eud_chip *chip = data;
 	u32 reg;
-	u32 int_mask_en1 = readl_relaxed(chip->eud_reg_base +
+	u32 int_mask_en1;
+
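+	/* keep cfg_ahb_clk enabled around the EUD register accesses below */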
+	clk_prepare_enable(chip->cfg_ahb_clk);
+
+	int_mask_en1 = readl_relaxed(chip->eud_reg_base +
 					EUD_REG_INT1_EN_MASK);
 
 	/* read status register and find out which interrupt triggered */
@@ -472,9 +479,11 @@
 		pet_eud(chip);
 	} else {
 		dev_dbg(chip->dev, "Unknown/spurious EUD Interrupt!\n");
+		clk_disable_unprepare(chip->cfg_ahb_clk);
 		return IRQ_NONE;
 	}
 
+	clk_disable_unprepare(chip->cfg_ahb_clk);
 	return IRQ_HANDLED;
 }
 
@@ -492,6 +501,7 @@
 	}
 
 	platform_set_drvdata(pdev, chip);
+	chip->dev = &pdev->dev;
 
 	chip->extcon = devm_extcon_dev_allocate(&pdev->dev, eud_extcon_cable);
 	if (IS_ERR(chip->extcon)) {
@@ -517,10 +527,25 @@
 	if (IS_ERR(chip->eud_reg_base))
 		return PTR_ERR(chip->eud_reg_base);
 
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "cfg_ahb_clk") >= 0) {
+		chip->cfg_ahb_clk = devm_clk_get(&pdev->dev, "cfg_ahb_clk");
+		if (IS_ERR(chip->cfg_ahb_clk)) {
+			ret = PTR_ERR(chip->cfg_ahb_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_err(chip->dev,
+				"clk get failed for cfg_ahb_clk ret %d\n",
+				ret);
+			return ret;
+		}
+	}
+
 	chip->eud_irq = platform_get_irq_byname(pdev, "eud_irq");
 
-	ret = devm_request_irq(&pdev->dev, chip->eud_irq, handle_eud_irq,
-				IRQF_TRIGGER_HIGH, "eud_irq", chip);
+	ret = devm_request_threaded_irq(&pdev->dev, chip->eud_irq,
+					NULL, handle_eud_irq,
+					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					"eud_irq", chip);
 	if (ret) {
 		dev_err(chip->dev, "request failed for eud irq\n");
 		return ret;
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index ecf72ca..6ce481b 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -397,12 +397,6 @@
 	uint32_t rejuvenate_ack_err;
 };
 
-#define MAX_NO_OF_MAC_ADDR 4
-struct icnss_wlan_mac_addr {
-	u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
-	uint32_t no_of_mac_addr_set;
-};
-
 enum icnss_pdr_cause_index {
 	ICNSS_FW_CRASH,
 	ICNSS_ROOT_PD_CRASH,
@@ -479,8 +473,6 @@
 	uint64_t vph_pwr;
 	atomic_t pm_count;
 	struct ramdump_device *msa0_dump_dev;
-	bool is_wlan_mac_set;
-	struct icnss_wlan_mac_addr wlan_mac_addr;
 	bool bypass_s1_smmu;
 	u8 cause_for_rejuvenation;
 	u8 requesting_sub_system;
@@ -3279,78 +3271,6 @@
 }
 EXPORT_SYMBOL(icnss_socinfo_get_serial_number);
 
-int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len)
-{
-	struct icnss_priv *priv = penv;
-	uint32_t no_of_mac_addr;
-	struct icnss_wlan_mac_addr *addr = NULL;
-	int iter;
-	u8 *temp = NULL;
-
-	if (!priv) {
-		icnss_pr_err("Priv data is NULL\n");
-		return -EINVAL;
-	}
-
-	if (priv->is_wlan_mac_set) {
-		icnss_pr_dbg("WLAN MAC address is already set\n");
-		return 0;
-	}
-
-	if (len == 0 || (len % ETH_ALEN) != 0) {
-		icnss_pr_err("Invalid length %d\n", len);
-		return -EINVAL;
-	}
-
-	no_of_mac_addr = len / ETH_ALEN;
-	if (no_of_mac_addr > MAX_NO_OF_MAC_ADDR) {
-		icnss_pr_err("Exceed maxinum supported MAC address %u %u\n",
-			     MAX_NO_OF_MAC_ADDR, no_of_mac_addr);
-		return -EINVAL;
-	}
-
-	priv->is_wlan_mac_set = true;
-	addr = &priv->wlan_mac_addr;
-	addr->no_of_mac_addr_set = no_of_mac_addr;
-	temp = &addr->mac_addr[0][0];
-
-	for (iter = 0; iter < no_of_mac_addr;
-	     ++iter, temp += ETH_ALEN, in += ETH_ALEN) {
-		ether_addr_copy(temp, in);
-		icnss_pr_dbg("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
-			     temp[0], temp[1], temp[2],
-			     temp[3], temp[4], temp[5]);
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(icnss_set_wlan_mac_address);
-
-u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num)
-{
-	struct icnss_priv *priv = dev_get_drvdata(dev);
-	struct icnss_wlan_mac_addr *addr = NULL;
-
-	if (priv->magic != ICNSS_MAGIC) {
-		icnss_pr_err("Invalid drvdata: dev %p, data %p, magic 0x%x\n",
-			     dev, priv, priv->magic);
-		goto out;
-	}
-
-	if (!priv->is_wlan_mac_set) {
-		icnss_pr_dbg("WLAN MAC address is not set\n");
-		goto out;
-	}
-
-	addr = &priv->wlan_mac_addr;
-	*num = addr->no_of_mac_addr_set;
-	return &addr->mac_addr[0][0];
-out:
-	*num = 0;
-	return NULL;
-}
-EXPORT_SYMBOL(icnss_get_wlan_mac_address);
-
 int icnss_trigger_recovery(struct device *dev)
 {
 	int ret = 0;
diff --git a/drivers/soc/qcom/icnss_utils.c b/drivers/soc/qcom/icnss_utils.c
deleted file mode 100644
index 6974146..0000000
--- a/drivers/soc/qcom/icnss_utils.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <soc/qcom/icnss.h>
-
-#define ICNSS_MAX_CH_NUM 45
-
-static DEFINE_MUTEX(unsafe_channel_list_lock);
-static DEFINE_SPINLOCK(dfs_nol_info_lock);
-static int driver_load_cnt;
-
-static struct icnss_unsafe_channel_list {
-	u16 unsafe_ch_count;
-	u16 unsafe_ch_list[ICNSS_MAX_CH_NUM];
-} unsafe_channel_list;
-
-static struct icnss_dfs_nol_info {
-	void *dfs_nol_info;
-	u16 dfs_nol_info_len;
-} dfs_nol_info;
-
-int icnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
-{
-	mutex_lock(&unsafe_channel_list_lock);
-	if ((!unsafe_ch_list) || (ch_count > ICNSS_MAX_CH_NUM)) {
-		mutex_unlock(&unsafe_channel_list_lock);
-		return -EINVAL;
-	}
-
-	unsafe_channel_list.unsafe_ch_count = ch_count;
-
-	if (ch_count != 0) {
-		memcpy(
-		       (char *)unsafe_channel_list.unsafe_ch_list,
-		       (char *)unsafe_ch_list, ch_count * sizeof(u16));
-	}
-	mutex_unlock(&unsafe_channel_list_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(icnss_set_wlan_unsafe_channel);
-
-int icnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list,
-				  u16 *ch_count, u16 buf_len)
-{
-	mutex_lock(&unsafe_channel_list_lock);
-	if (!unsafe_ch_list || !ch_count) {
-		mutex_unlock(&unsafe_channel_list_lock);
-		return -EINVAL;
-	}
-
-	if (buf_len < (unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
-		mutex_unlock(&unsafe_channel_list_lock);
-		return -ENOMEM;
-	}
-
-	*ch_count = unsafe_channel_list.unsafe_ch_count;
-	memcpy(
-		(char *)unsafe_ch_list,
-		(char *)unsafe_channel_list.unsafe_ch_list,
-		unsafe_channel_list.unsafe_ch_count * sizeof(u16));
-	mutex_unlock(&unsafe_channel_list_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(icnss_get_wlan_unsafe_channel);
-
-int icnss_wlan_set_dfs_nol(const void *info, u16 info_len)
-{
-	void *temp;
-	void *old_nol_info;
-	struct icnss_dfs_nol_info *dfs_info;
-
-	if (!info || !info_len)
-		return -EINVAL;
-
-	temp = kmalloc(info_len, GFP_ATOMIC);
-	if (!temp)
-		return -ENOMEM;
-
-	memcpy(temp, info, info_len);
-	spin_lock_bh(&dfs_nol_info_lock);
-	dfs_info = &dfs_nol_info;
-	old_nol_info = dfs_info->dfs_nol_info;
-	dfs_info->dfs_nol_info = temp;
-	dfs_info->dfs_nol_info_len = info_len;
-	spin_unlock_bh(&dfs_nol_info_lock);
-	kfree(old_nol_info);
-
-	return 0;
-}
-EXPORT_SYMBOL(icnss_wlan_set_dfs_nol);
-
-int icnss_wlan_get_dfs_nol(void *info, u16 info_len)
-{
-	int len;
-	struct icnss_dfs_nol_info *dfs_info;
-
-	if (!info || !info_len)
-		return -EINVAL;
-
-	spin_lock_bh(&dfs_nol_info_lock);
-
-	dfs_info = &dfs_nol_info;
-	if (dfs_info->dfs_nol_info == NULL ||
-	    dfs_info->dfs_nol_info_len == 0) {
-		spin_unlock_bh(&dfs_nol_info_lock);
-		return -ENOENT;
-	}
-
-	len = min(info_len, dfs_info->dfs_nol_info_len);
-	memcpy(info, dfs_info->dfs_nol_info, len);
-	spin_unlock_bh(&dfs_nol_info_lock);
-
-	return len;
-}
-EXPORT_SYMBOL(icnss_wlan_get_dfs_nol);
-
-void icnss_increment_driver_load_cnt(void)
-{
-	++driver_load_cnt;
-}
-EXPORT_SYMBOL(icnss_increment_driver_load_cnt);
-
-int icnss_get_driver_load_cnt(void)
-{
-	return driver_load_cnt;
-}
-EXPORT_SYMBOL(icnss_get_driver_load_cnt);
diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c
index 979c628..b5ce753 100644
--- a/drivers/soc/qcom/msm_performance.c
+++ b/drivers/soc/qcom/msm_performance.c
@@ -25,7 +25,6 @@
 #include <linux/module.h>
 #include <linux/input.h>
 #include <linux/kthread.h>
-#include <soc/qcom/msm-core.h>
 
 static struct mutex managed_cpus_lock;
 
@@ -33,10 +32,6 @@
 static unsigned int num_clusters;
 struct cluster {
 	cpumask_var_t cpus;
-	/* Number of CPUs to maintain online */
-	int max_cpu_request;
-	/* To track CPUs that the module decides to offline */
-	cpumask_var_t offlined_cpus;
 	/* stats for load detection */
 	/* IO */
 	u64 last_io_check_ts;
@@ -84,8 +79,6 @@
 static struct cluster **managed_clusters;
 static bool clusters_inited;
 
-/* Work to evaluate the onlining/offlining CPUs */
-static struct delayed_work evaluate_hotplug_work;
 
 /* To handle cpufreq min/max request */
 struct cpu_status {
@@ -94,11 +87,8 @@
 };
 static DEFINE_PER_CPU(struct cpu_status, cpu_stats);
 
-static unsigned int num_online_managed(struct cpumask *mask);
 static int init_cluster_control(void);
-static int rm_high_pwr_cost_cpus(struct cluster *cl);
 static int init_events_group(void);
-static DEFINE_PER_CPU(unsigned int, cpu_power_cost);
 struct events {
 	spinlock_t cpu_hotplug_lock;
 	bool cpu_hotplug;
@@ -214,65 +204,6 @@
 };
 device_param_cb(num_clusters, &param_ops_num_clusters, NULL, 0644);
 
-static int set_max_cpus(const char *buf, const struct kernel_param *kp)
-{
-	unsigned int i, ntokens = 0;
-	const char *cp = buf;
-	int val;
-
-	if (!clusters_inited)
-		return -EINVAL;
-
-	while ((cp = strpbrk(cp + 1, ":")))
-		ntokens++;
-
-	if (ntokens != (num_clusters - 1))
-		return -EINVAL;
-
-	cp = buf;
-	for (i = 0; i < num_clusters; i++) {
-
-		if (sscanf(cp, "%d\n", &val) != 1)
-			return -EINVAL;
-		if (val > (int)cpumask_weight(managed_clusters[i]->cpus))
-			return -EINVAL;
-
-		managed_clusters[i]->max_cpu_request = val;
-
-		cp = strnchr(cp, strlen(cp), ':');
-		cp++;
-		trace_set_max_cpus(cpumask_bits(managed_clusters[i]->cpus)[0],
-								val);
-	}
-
-	schedule_delayed_work(&evaluate_hotplug_work, 0);
-
-	return 0;
-}
-
-static int get_max_cpus(char *buf, const struct kernel_param *kp)
-{
-	int i, cnt = 0;
-
-	if (!clusters_inited)
-		return cnt;
-
-	for (i = 0; i < num_clusters; i++)
-		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
-				"%d:", managed_clusters[i]->max_cpu_request);
-	cnt--;
-	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
-	return cnt;
-}
-
-static const struct kernel_param_ops param_ops_max_cpus = {
-	.set = set_max_cpus,
-	.get = get_max_cpus,
-};
-
-#ifdef CONFIG_MSM_PERFORMANCE_HOTPLUG_ON
-device_param_cb(max_cpus, &param_ops_max_cpus, NULL, 0644);
-#endif
 
 static int set_managed_cpus(const char *buf, const struct kernel_param *kp)
 {
@@ -291,7 +222,6 @@
 		if (cpumask_empty(managed_clusters[i]->cpus)) {
 			mutex_lock(&managed_cpus_lock);
 			cpumask_copy(managed_clusters[i]->cpus, &tmp_mask);
-			cpumask_clear(managed_clusters[i]->offlined_cpus);
 			mutex_unlock(&managed_cpus_lock);
 			break;
 		}
@@ -337,53 +267,6 @@
 };
 device_param_cb(managed_cpus, &param_ops_managed_cpus, NULL, 0644);
 
-/* Read-only node: To display all the online managed CPUs */
-static int get_managed_online_cpus(char *buf, const struct kernel_param *kp)
-{
-	int i, cnt = 0, total_cnt = 0;
-	char tmp[MAX_LENGTH_CPU_STRING] = "";
-	struct cpumask tmp_mask;
-	struct cluster *i_cl;
-
-	if (!clusters_inited)
-		return cnt;
-
-	for (i = 0; i < num_clusters; i++) {
-		i_cl = managed_clusters[i];
-
-		cpumask_clear(&tmp_mask);
-		cpumask_complement(&tmp_mask, i_cl->offlined_cpus);
-		cpumask_and(&tmp_mask, i_cl->cpus, &tmp_mask);
-
-		cnt = cpumap_print_to_pagebuf(true, buf, &tmp_mask);
-		if ((i + 1) < num_clusters &&
-		    (total_cnt + cnt + 1) <= MAX_LENGTH_CPU_STRING) {
-			snprintf(tmp + total_cnt, cnt, "%s", buf);
-			tmp[cnt-1] = ':';
-			tmp[cnt] = '\0';
-			total_cnt += cnt;
-		} else if ((i + 1) == num_clusters &&
-			   (total_cnt + cnt) <= MAX_LENGTH_CPU_STRING) {
-			snprintf(tmp + total_cnt, cnt, "%s", buf);
-			total_cnt += cnt;
-		} else {
-			pr_err("invalid string for managed_cpu:%s%s\n", tmp,
-				buf);
-			break;
-		}
-	}
-	snprintf(buf, PAGE_SIZE, "%s", tmp);
-	return total_cnt;
-}
-
-static const struct kernel_param_ops param_ops_managed_online_cpus = {
-	.get = get_managed_online_cpus,
-};
-
-#ifdef CONFIG_MSM_PERFORMANCE_HOTPLUG_ON
-device_param_cb(managed_online_cpus, &param_ops_managed_online_cpus,
-							NULL, 0444);
-#endif
 /*
  * Userspace sends cpu#:min_freq_value to vote for min_freq_value as the new
  * scaling_min. To withdraw its vote it needs to enter cpu#:0
@@ -2274,15 +2157,6 @@
 };
 /*******************************sysfs ends************************************/
 
-static unsigned int num_online_managed(struct cpumask *mask)
-{
-	struct cpumask tmp_mask;
-
-	cpumask_clear(&tmp_mask);
-	cpumask_and(&tmp_mask, mask, cpu_online_mask);
-
-	return cpumask_weight(&tmp_mask);
-}
 
 static int perf_adjust_notify(struct notifier_block *nb, unsigned long val,
 							void *data)
@@ -2359,185 +2233,6 @@
 
 	return 0;
 }
-
-/*
- * Attempt to offline CPUs based on their power cost.
- * CPUs with higher power costs are offlined first.
- */
-static int __ref rm_high_pwr_cost_cpus(struct cluster *cl)
-{
-	unsigned int cpu, i;
-	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
-	struct cpu_pstate_pwr *costs;
-	unsigned int *pcpu_pwr;
-	unsigned int max_cost_cpu, max_cost;
-	int any_cpu = -1;
-
-	if (!per_cpu_info)
-		return -EAGAIN;
-
-	for_each_cpu(cpu, cl->cpus) {
-		costs = per_cpu_info[cpu].ptable;
-		if (!costs || !costs[0].freq)
-			continue;
-
-		i = 1;
-		while (costs[i].freq)
-			i++;
-
-		pcpu_pwr = &per_cpu(cpu_power_cost, cpu);
-		*pcpu_pwr = costs[i - 1].power;
-		any_cpu = (int)cpu;
-		pr_debug("msm_perf: CPU:%d Power:%u\n", cpu, *pcpu_pwr);
-	}
-
-	if (any_cpu < 0)
-		return -EAGAIN;
-
-	for (i = 0; i < cpumask_weight(cl->cpus); i++) {
-		max_cost = 0;
-		max_cost_cpu = cpumask_first(cl->cpus);
-
-		for_each_cpu(cpu, cl->cpus) {
-			pcpu_pwr = &per_cpu(cpu_power_cost, cpu);
-			if (max_cost < *pcpu_pwr) {
-				max_cost = *pcpu_pwr;
-				max_cost_cpu = cpu;
-			}
-		}
-
-		if (!cpu_online(max_cost_cpu))
-			goto end;
-
-		pr_debug("msm_perf: Offlining CPU%d Power:%d\n", max_cost_cpu,
-								max_cost);
-		cpumask_set_cpu(max_cost_cpu, cl->offlined_cpus);
-		lock_device_hotplug();
-		if (device_offline(get_cpu_device(max_cost_cpu))) {
-			cpumask_clear_cpu(max_cost_cpu, cl->offlined_cpus);
-			pr_debug("msm_perf: Offlining CPU%d failed\n",
-								max_cost_cpu);
-		}
-		unlock_device_hotplug();
-
-end:
-		pcpu_pwr = &per_cpu(cpu_power_cost, max_cost_cpu);
-		*pcpu_pwr = 0;
-		if (num_online_managed(cl->cpus) <= cl->max_cpu_request)
-			break;
-	}
-
-	if (num_online_managed(cl->cpus) > cl->max_cpu_request)
-		return -EAGAIN;
-	else
-		return 0;
-}
-
-/*
- * try_hotplug tries to online/offline cores based on the current requirement.
- * It loops through the currently managed CPUs and tries to online/offline
- * them until the max_cpu_request criteria is met.
- */
-static void __ref try_hotplug(struct cluster *data)
-{
-	unsigned int i;
-
-	if (!clusters_inited)
-		return;
-
-	pr_debug("msm_perf: Trying hotplug...%d:%d\n",
-			num_online_managed(data->cpus),	num_online_cpus());
-
-	mutex_lock(&managed_cpus_lock);
-	if (num_online_managed(data->cpus) > data->max_cpu_request) {
-		if (!rm_high_pwr_cost_cpus(data)) {
-			mutex_unlock(&managed_cpus_lock);
-			return;
-		}
-
-		/*
-		 * If power aware offlining fails due to power cost info
-		 * being unavaiable fall back to original implementation
-		 */
-		for (i = num_present_cpus() - 1; i >= 0 &&
-						i < num_present_cpus(); i--) {
-			if (!cpumask_test_cpu(i, data->cpus) ||	!cpu_online(i))
-				continue;
-
-			pr_debug("msm_perf: Offlining CPU%d\n", i);
-			cpumask_set_cpu(i, data->offlined_cpus);
-			lock_device_hotplug();
-			if (device_offline(get_cpu_device(i))) {
-				cpumask_clear_cpu(i, data->offlined_cpus);
-				pr_debug("msm_perf: Offlining CPU%d failed\n",
-									i);
-				unlock_device_hotplug();
-				continue;
-			}
-			unlock_device_hotplug();
-			if (num_online_managed(data->cpus) <=
-							data->max_cpu_request)
-				break;
-		}
-	} else {
-		for_each_cpu(i, data->cpus) {
-			if (cpu_online(i))
-				continue;
-			pr_debug("msm_perf: Onlining CPU%d\n", i);
-			lock_device_hotplug();
-			if (device_online(get_cpu_device(i))) {
-				pr_debug("msm_perf: Onlining CPU%d failed\n",
-									i);
-				unlock_device_hotplug();
-				continue;
-			}
-			unlock_device_hotplug();
-			cpumask_clear_cpu(i, data->offlined_cpus);
-			if (num_online_managed(data->cpus) >=
-							data->max_cpu_request)
-				break;
-		}
-	}
-	mutex_unlock(&managed_cpus_lock);
-}
-
-static void __ref release_cluster_control(struct cpumask *off_cpus)
-{
-	int cpu;
-
-	for_each_cpu(cpu, off_cpus) {
-		pr_debug("msm_perf: Release CPU %d\n", cpu);
-		lock_device_hotplug();
-		if (!device_online(get_cpu_device(cpu)))
-			cpumask_clear_cpu(cpu, off_cpus);
-		unlock_device_hotplug();
-	}
-}
-
-/* Work to evaluate current online CPU status and hotplug CPUs as per need */
-static void check_cluster_status(struct work_struct *work)
-{
-	int i;
-	struct cluster *i_cl;
-
-	for (i = 0; i < num_clusters; i++) {
-		i_cl = managed_clusters[i];
-
-		if (cpumask_empty(i_cl->cpus))
-			continue;
-
-		if (i_cl->max_cpu_request < 0) {
-			if (!cpumask_empty(i_cl->offlined_cpus))
-				release_cluster_control(i_cl->offlined_cpus);
-			continue;
-		}
-
-		if (num_online_managed(i_cl->cpus) !=
-					i_cl->max_cpu_request)
-			try_hotplug(i_cl);
-	}
-}
-
 static int __ref msm_performance_cpu_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
@@ -2559,43 +2254,6 @@
 		}
 	}
 
-	if (i_cl == NULL)
-		return NOTIFY_OK;
-
-	if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
-		/*
-		 * Prevent onlining of a managed CPU if max_cpu criteria is
-		 * already satisfied
-		 */
-		if (i_cl->offlined_cpus == NULL)
-			return NOTIFY_OK;
-		if (i_cl->max_cpu_request <=
-					num_online_managed(i_cl->cpus)) {
-			pr_debug("msm_perf: Prevent CPU%d onlining\n", cpu);
-			cpumask_set_cpu(cpu, i_cl->offlined_cpus);
-			return NOTIFY_BAD;
-		}
-		cpumask_clear_cpu(cpu, i_cl->offlined_cpus);
-
-	} else if (action == CPU_DEAD) {
-		if (i_cl->offlined_cpus == NULL)
-			return NOTIFY_OK;
-		if (cpumask_test_cpu(cpu, i_cl->offlined_cpus))
-			return NOTIFY_OK;
-		/*
-		 * Schedule a re-evaluation to check if any more CPUs can be
-		 * brought online to meet the max_cpu_request requirement. This
-		 * work is delayed to account for CPU hotplug latencies
-		 */
-		if (schedule_delayed_work(&evaluate_hotplug_work, 0)) {
-			trace_reevaluate_hotplug(cpumask_bits(i_cl->cpus)[0],
-							i_cl->max_cpu_request);
-			pr_debug("msm_perf: Re-evaluation scheduled %d\n", cpu);
-		} else {
-			pr_debug("msm_perf: Work scheduling failed %d\n", cpu);
-		}
-	}
-
 	return NOTIFY_OK;
 }
 
@@ -2626,13 +2284,7 @@
 			ret = -ENOMEM;
 			goto error;
 		}
-		if (!alloc_cpumask_var(&managed_clusters[i]->offlined_cpus,
-		     GFP_KERNEL)) {
-			ret = -ENOMEM;
-			goto error;
-		}
 
-		managed_clusters[i]->max_cpu_request = -1;
 		managed_clusters[i]->single_enter_load = DEF_SINGLE_ENT;
 		managed_clusters[i]->single_exit_load = DEF_SINGLE_EX;
 		managed_clusters[i]->single_enter_cycles
@@ -2669,7 +2321,6 @@
 			perf_cl_peak_mod_exit_timer;
 	}
 
-	INIT_DELAYED_WORK(&evaluate_hotplug_work, check_cluster_status);
 	mutex_init(&managed_cpus_lock);
 
 	ip_evts = kcalloc(1, sizeof(struct input_events), GFP_KERNEL);
@@ -2707,8 +2358,6 @@
 	for (i = 0; i < num_clusters; i++) {
 		if (!managed_clusters[i])
 			break;
-		if (managed_clusters[i]->offlined_cpus)
-			free_cpumask_var(managed_clusters[i]->offlined_cpus);
 		if (managed_clusters[i]->cpus)
 			free_cpumask_var(managed_clusters[i]->cpus);
 		kfree(managed_clusters[i]);
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 926016f..ec3063e 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -615,7 +615,7 @@
 	char *fw_name_p;
 	void *mba_dp_virt;
 	dma_addr_t mba_dp_phys, mba_dp_phys_end;
-	int ret, count;
+	int ret;
 	const u8 *data;
 	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
 
@@ -681,10 +681,9 @@
 					&mba_dp_phys, &mba_dp_phys_end);
 
 	/* Load the MBA image into memory */
-	count = fw->size;
-	if (count <= SZ_1M) {
+	if (fw->size <= SZ_1M) {
 		/* Ensures memcpy is done for max 1MB fw size */
-		memcpy(mba_dp_virt, data, count);
+		memcpy(mba_dp_virt, data, fw->size);
 	} else {
 		dev_err(pil->dev, "%s fw image loading into memory is failed due to fw size overflow\n",
 			__func__);
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index 6a30381..6d6b9f7 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -84,7 +84,7 @@
 /* QDSP6v65 parameters */
 #define QDSP6SS_BOOT_CORE_START		(0x400)
 #define QDSP6SS_BOOT_CMD		(0x404)
-#define QDSP6SS_BOOT_STATUS		(0x408)
+#define MSS_STATUS			(0x40)
 #define QDSP6SS_SLEEP			(0x3C)
 #define SLEEP_CHECK_MAX_LOOPS		(200)
 #define BOOT_FSM_TIMEOUT		(100)
@@ -410,8 +410,8 @@
 	writel_relaxed(1, drv->reg_base + QDSP6SS_BOOT_CMD);
 
 	/* Wait for boot FSM to complete */
-	ret = readl_poll_timeout(drv->reg_base + QDSP6SS_BOOT_STATUS, val,
-			val != 0, 10, BOOT_FSM_TIMEOUT);
+	ret = readl_poll_timeout(drv->rmb_base + MSS_STATUS, val,
+			(val & BIT(1)) != 0, 10, BOOT_FSM_TIMEOUT);
 
 	if (ret) {
 		dev_err(drv->desc.dev, "Boot FSM failed to complete.\n");
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index de5f0ff..68681f9 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -206,10 +206,8 @@
 	 * Only one rx/tx transaction at a time (request + response).
 	 */
 	int ref_count;
-	u32 pid;
 
-	/* link UP/DOWN callback */
-	void (*notify_link_state_cb)(bool up);
+	u32 pid; /* debug only to find user space application */
 
 	/* abort flags */
 	bool rx_abort;
@@ -493,13 +491,10 @@
 
 		ch->glink_state = event;
 
-		/*
-		 * if spcom_notify_state() is called within glink_open()
-		 * then ch->glink_handle is not updated yet.
-		 */
-		if (!ch->glink_handle) {
-			pr_debug("update glink_handle, ch [%s].\n", ch->name);
-			ch->glink_handle = handle;
+		if (!handle) {
+			pr_err("invalid glink_handle, ch [%s].\n", ch->name);
+			mutex_unlock(&ch->lock);
+			return;
 		}
 
 		/* signal before unlock mutex & before calling glink */
@@ -512,8 +507,7 @@
 		 */
 
 		pr_debug("call glink_queue_rx_intent() ch [%s].\n", ch->name);
-		ret = glink_queue_rx_intent(ch->glink_handle,
-					    ch, ch->rx_buf_size);
+		ret = glink_queue_rx_intent(handle, ch, ch->rx_buf_size);
 		if (ret) {
 			pr_err("glink_queue_rx_intent() err [%d]\n", ret);
 		} else {
@@ -736,6 +730,7 @@
 	long timeleft;
 	const char *name;
 	void *handle;
+	u32 pid = current_pid();
 
 	mutex_lock(&ch->lock);
 	name = ch->name;
@@ -749,7 +744,7 @@
 	}
 
 	pr_debug("ch [%s] opened by PID [%d], count [%d]\n",
-		 name, ch->pid, ch->ref_count);
+		 name, pid, ch->ref_count);
 
 	pr_debug("Open channel [%s] timeout_msec [%d].\n", name, timeout_msec);
 
@@ -777,7 +772,7 @@
 	/* init channel context after successful open */
 	ch->glink_handle = handle;
 	ch->ref_count++;
-	ch->pid = current_pid();
+	ch->pid = pid;
 	ch->txn_id = INITIAL_TXN_ID;
 
 	mutex_unlock(&ch->lock);
@@ -1026,10 +1021,12 @@
 			 ch->name, ch->actual_rx_size);
 		goto exit_ready;
 	}
+	mutex_unlock(&ch->lock); /* unlock while waiting */
 
 	pr_debug("Wait for Rx Done, ch [%s].\n", ch->name);
 	wait_for_completion(&ch->rx_done);
 
+	mutex_lock(&ch->lock); /* re-lock after waiting */
 	/* Check Rx Abort on SP reset */
 	if (ch->rx_abort) {
 		pr_err("rx aborted.\n");
@@ -2027,6 +2024,7 @@
 				      void *buf,
 				      uint32_t size)
 {
+	int ret = -1;
 	uint32_t next_req_size = 0;
 
 	if (size < sizeof(next_req_size)) {
@@ -2034,7 +2032,10 @@
 		return -EINVAL;
 	}
 
-	next_req_size = spcom_get_next_request_size(ch);
+	ret = spcom_get_next_request_size(ch);
+	if (ret < 0)
+		return ret;
+	next_req_size = (uint32_t) ret;
 
 	memcpy(buf, &next_req_size, sizeof(next_req_size));
 	pr_debug("next_req_size [%d].\n", next_req_size);
@@ -2139,18 +2140,20 @@
 			      void *buf,
 			      uint32_t size)
 {
+	int ret = -1;
+
 	if (size == SPCOM_GET_NEXT_REQUEST_SIZE) {
 		pr_debug("get next request size, ch [%s].\n", ch->name);
 		ch->is_server = true;
-		size = spcom_handle_get_req_size(ch, buf, size);
+		ret = spcom_handle_get_req_size(ch, buf, size);
 	} else {
 		pr_debug("get request/response, ch [%s].\n", ch->name);
-		size = spcom_handle_read_req_resp(ch, buf, size);
+		ret = spcom_handle_read_req_resp(ch, buf, size);
 	}
 
 	pr_debug("ch [%s] , size = %d.\n", ch->name, size);
 
-	return size;
+	return ret;
 }
 
 /*======================================================================*/
@@ -2302,6 +2305,7 @@
 	char *buf;
 	struct spcom_channel *ch;
 	const char *name = file_to_filename(filp);
+	int buf_size = 0;
 
 	pr_debug("Write file [%s] size [%d] pos [%d].\n",
 		 name, (int) size, (int) *f_pos);
@@ -2328,6 +2332,7 @@
 			   (int) size, (int) SPCOM_MAX_COMMAND_SIZE);
 		return -EINVAL;
 	}
+	buf_size = size; /* explicit casting size_t to int */
 
 	if (*f_pos != 0) {
 		pr_err("offset should be zero, no sparse buffer.\n");
@@ -2345,7 +2350,7 @@
 		return -EFAULT;
 	}
 
-	ret = spcom_handle_write(ch, buf, size);
+	ret = spcom_handle_write(ch, buf, buf_size);
 	if (ret) {
 		pr_err("handle command error [%d].\n", ret);
 		kfree(buf);
@@ -2373,6 +2378,7 @@
 	char *buf;
 	struct spcom_channel *ch;
 	const char *name = file_to_filename(filp);
+	uint32_t buf_size = 0;
 
 	pr_debug("Read file [%s], size = %d bytes.\n", name, (int) size);
 
@@ -2381,6 +2387,7 @@
 		pr_err("invalid parameters.\n");
 		return -EINVAL;
 	}
+	buf_size = size; /* explicit casting size_t to uint32_t */
 
 	ch = filp->private_data;
 
@@ -2398,7 +2405,7 @@
 	if (buf == NULL)
 		return -ENOMEM;
 
-	ret = spcom_handle_read(ch, buf, size);
+	ret = spcom_handle_read(ch, buf, buf_size);
 	if (ret < 0) {
 		pr_err("read error [%d].\n", ret);
 		kfree(buf);
@@ -2481,9 +2488,14 @@
 		done = (spcom_dev->link_state == GLINK_LINK_STATE_UP);
 		break;
 	case SPCOM_POLL_CH_CONNECT:
+		/*
+		 * ch is not expected to be NULL since user must call open()
+		 * to get FD before it can call poll().
+		 * open() will fail if no ch related to the char-device.
+		 */
 		if (ch == NULL) {
 			pr_err("invalid ch pointer, file [%s].\n", name);
-			return -EINVAL;
+			return POLLERR;
 		}
 		pr_debug("ch [%s] SPCOM_POLL_CH_CONNECT.\n", name);
 		if (wait) {
@@ -2784,7 +2796,7 @@
 {
 	int ret;
 
-	pr_info("spcom driver version 1.1 17-July-2017.\n");
+	pr_info("spcom driver version 1.2 23-Aug-2017.\n");
 
 	ret = platform_driver_register(&spcom_driver);
 	if (ret)
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index e46bc98..8dc42ac 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -24,6 +24,7 @@
 #include <linux/qcom-geni-se.h>
 #include <linux/msm_gpi.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/spi-geni-qcom.h>
 
 #define SPI_NUM_CHIPSELECT	(4)
 #define SPI_XFER_TIMEOUT_MS	(250)
@@ -67,6 +68,11 @@
 /* SPI_TX/SPI_RX_TRANS_LEN fields */
 #define TRANS_LEN_MSK		(GENMASK(23, 0))
 
+/* SE_SPI_DELAY_COUNTERS */
+#define SPI_INTER_WORDS_DELAY_MSK	(GENMASK(9, 0))
+#define SPI_CS_CLK_DELAY_MSK		(GENMASK(19, 10))
+#define SPI_CS_CLK_DELAY_SHFT		(10)
+
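+/*
+ * Illustrative packing example (the numeric values are assumptions, not
+ * taken from real hardware): a client that supplies spi_cs_clk_delay = 2
+ * and spi_inter_words_delay = 5 through spi->controller_data, e.g.
+ *
+ *	static struct spi_geni_qcom_ctrl_data demo_delays = {
+ *		.spi_cs_clk_delay = 2,
+ *		.spi_inter_words_delay = 5,
+ *	};
+ *	spi->controller_data = &demo_delays;
+ *
+ * ends up with ((2 << SPI_CS_CLK_DELAY_SHFT) & SPI_CS_CLK_DELAY_MSK) |
+ * (5 & SPI_INTER_WORDS_DELAY_MSK) = 0x800 | 0x5 = 0x805 being written
+ * to SE_SPI_DELAY_COUNTERS in setup_fifo_params().
+ */
+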
 /* M_CMD OP codes for SPI */
 #define SPI_TX_ONLY		(1)
 #define SPI_RX_ONLY		(2)
@@ -229,6 +235,8 @@
 	int ret = 0;
 	int idx;
 	int div;
+	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
+	u32 spi_delay_params = 0;
 
 	loopback_cfg &= ~LOOPBACK_MSK;
 	cpol &= ~CPOL;
@@ -246,6 +254,22 @@
 	if (spi_slv->mode & SPI_CS_HIGH)
 		demux_output_inv |= BIT(spi_slv->chip_select);
 
+	if (spi_slv->controller_data) {
+		u32 cs_clk_delay = 0;
+		u32 inter_words_delay = 0;
+
+		delay_params =
+		(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
+		cs_clk_delay =
+		(delay_params->spi_cs_clk_delay << SPI_CS_CLK_DELAY_SHFT)
+							& SPI_CS_CLK_DELAY_MSK;
+		inter_words_delay =
+			delay_params->spi_inter_words_delay &
+						SPI_INTER_WORDS_DELAY_MSK;
+		spi_delay_params =
+		(inter_words_delay | cs_clk_delay);
+	}
+
 	demux_sel = spi_slv->chip_select;
 	mas->cur_speed_hz = spi_slv->max_speed_hz;
 	mas->cur_word_len = spi_slv->bits_per_word;
@@ -267,12 +291,13 @@
 	geni_write_reg(demux_output_inv, mas->base, SE_SPI_DEMUX_OUTPUT_INV);
 	geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
 	geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
+	geni_write_reg(spi_delay_params, mas->base, SE_SPI_DELAY_COUNTERS);
 	GENI_SE_DBG(mas->ipc, false, mas->dev,
 		"%s:Loopback%d demux_sel0x%x demux_op_inv 0x%x clk_cfg 0x%x\n",
 		__func__, loopback_cfg, demux_sel, demux_output_inv, m_clk_cfg);
 	GENI_SE_DBG(mas->ipc, false, mas->dev,
-		"%s:clk_sel 0x%x cpol %d cpha %d\n", __func__,
-							clk_sel, cpol, cpha);
+		"%s:clk_sel 0x%x cpol %d cpha %d delay 0x%x\n", __func__,
+					clk_sel, cpol, cpha, spi_delay_params);
 	/* Ensure message level attributes are written before returning */
 	mb();
 setup_fifo_params_exit:
@@ -306,7 +331,8 @@
 }
 
 static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer,
-				struct spi_geni_master *mas, u16 mode)
+				struct spi_geni_master *mas, u16 mode,
+				u32 cs_clk_delay, u32 inter_words_delay)
 {
 	struct msm_gpi_tre *c0_tre = &mas->gsi[mas->num_xfers].config0_tre;
 	u8 flags = 0;
@@ -340,12 +366,16 @@
 	}
 	c0_tre->dword[0] = MSM_GPI_SPI_CONFIG0_TRE_DWORD0(pack, flags,
 								word_len);
-	c0_tre->dword[1] = MSM_GPI_SPI_CONFIG0_TRE_DWORD1(0, 0, 0);
+	c0_tre->dword[1] = MSM_GPI_SPI_CONFIG0_TRE_DWORD1(0, cs_clk_delay,
+							inter_words_delay);
 	c0_tre->dword[2] = MSM_GPI_SPI_CONFIG0_TRE_DWORD2(idx, div);
 	c0_tre->dword[3] = MSM_GPI_SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 1);
 	GENI_SE_DBG(mas->ipc, false, mas->dev,
 		"%s: flags 0x%x word %d pack %d idx %d div %d\n",
 		__func__, flags, word_len, pack, idx, div);
+	GENI_SE_DBG(mas->ipc, false, mas->dev,
+		"%s: cs_clk_delay %d inter_words_delay %d\n", __func__,
+				 cs_clk_delay, inter_words_delay);
 	return c0_tre;
 }
 
@@ -503,13 +533,27 @@
 	u32 rx_len = 0;
 	int go_flags = 0;
 	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
+	u32 cs_clk_delay = 0;
+	u32 inter_words_delay = 0;
+
+	if (spi_slv->controller_data) {
+		delay_params =
+		(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
+
+		cs_clk_delay =
+			delay_params->spi_cs_clk_delay;
+		inter_words_delay =
+			delay_params->spi_inter_words_delay;
+	}
 
 	if ((xfer->bits_per_word != mas->cur_word_len) ||
 		(xfer->speed_hz != mas->cur_speed_hz)) {
 		mas->cur_word_len = xfer->bits_per_word;
 		mas->cur_speed_hz = xfer->speed_hz;
 		tx_nent++;
-		c0_tre = setup_config0_tre(xfer, mas, spi_slv->mode);
+		c0_tre = setup_config0_tre(xfer, mas, spi_slv->mode,
+					cs_clk_delay, inter_words_delay);
 		if (IS_ERR_OR_NULL(c0_tre)) {
 			dev_err(mas->dev, "%s:Err setting c0tre:%d\n",
 							__func__, ret);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index bac9975..626cfdc 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1091,6 +1091,18 @@
 	select SERIAL_CORE_CONSOLE
 	select SERIAL_EARLYCON
 
+config SERIAL_MSM_HS
+	tristate "MSM UART High Speed: Serial Driver"
+	depends on ARCH_QCOM
+	select SERIAL_CORE
+	help
+	  If you have a machine based on the MSM family of SoCs, you
+	  can enable its onboard high speed serial port by enabling
+	  this option.
+
+	  Choose M here to compile it as a module. The module will be
+	  called msm_serial_hs.
+
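+# Example (illustrative): this driver can be built as a module by setting
+# CONFIG_SERIAL_MSM_HS=m in the target defconfig.
+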
 config SERIAL_VT8500
 	bool "VIA VT8500 on-chip serial port support"
 	depends on ARCH_VT8500
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index b39165b..1bdc7f8 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -62,6 +62,7 @@
 obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
 obj-$(CONFIG_SERIAL_MSM_GENI) += msm_geni_serial.o
+obj-$(CONFIG_SERIAL_MSM_HS) += msm_serial_hs.o
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
 obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
 obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
new file mode 100644
index 0000000..6a05d5b
--- /dev/null
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -0,0 +1,3808 @@
+/* drivers/tty/serial/msm_serial_hs.c
+ *
+ * MSM 7k High speed uart driver
+ *
+ * Copyright (c) 2008 Google Inc.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ * Modified: Nick Pelly <npelly@google.com>
+ *
+ * All source code in this file is licensed under the following license
+ * except where indicated.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * Has optional support for uart power management independent of linux
+ * suspend/resume:
+ *
+ * RX wakeup.
+ * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
+ * UART RX pin). This should only be used if there is not a wakeup
+ * GPIO on the UART CTS, and the first RX byte is known (for example, with the
+ * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
+ * always be lost. RTS will be asserted even while the UART is off in this mode
+ * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
+ */
+
+#include <linux/module.h>
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/tty_flip.h>
+#include <linux/wait.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/wakelock.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/ipc_logging.h>
+#include <asm/irq.h>
+#include <linux/kthread.h>
+
+#include <linux/msm-sps.h>
+#include <linux/platform_data/msm_serial_hs.h>
+#include <linux/msm-bus.h>
+
+#include "msm_serial_hs_hwreg.h"
+#define UART_SPS_CONS_PERIPHERAL 0
+#define UART_SPS_PROD_PERIPHERAL 1
+
+#define IPC_MSM_HS_LOG_STATE_PAGES 2
+#define IPC_MSM_HS_LOG_USER_PAGES 2
+#define IPC_MSM_HS_LOG_DATA_PAGES 3
+#define UART_DMA_DESC_NR 8
+#define BUF_DUMP_SIZE 32
+
+/* If the debug_mask gets set to FATAL_LEV,
+ * a fatal error has happened and further IPC logging
+ * is disabled so that this problem can be detected
+ */
+enum {
+	FATAL_LEV = 0U,
+	ERR_LEV = 1U,
+	WARN_LEV = 2U,
+	INFO_LEV = 3U,
+	DBG_LEV = 4U,
+};
+
+#define MSM_HS_DBG(x...) do { \
+	if (msm_uport->ipc_debug_mask >= DBG_LEV) { \
+		if (msm_uport->ipc_msm_hs_log_ctxt) \
+			ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+	} \
+} while (0)
+
+#define MSM_HS_INFO(x...) do { \
+	if (msm_uport->ipc_debug_mask >= INFO_LEV) {\
+		if (msm_uport->ipc_msm_hs_log_ctxt) \
+			ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+	} \
+} while (0)
+
+/* warnings and errors show up on console always */
+#define MSM_HS_WARN(x...) do { \
+	pr_warn(x); \
+	if (msm_uport->ipc_msm_hs_log_ctxt && \
+			msm_uport->ipc_debug_mask >= WARN_LEV) \
+		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+} while (0)
+
+/* An ERROR condition in the driver sets ipc_debug_mask
+ * to FATAL_LEV, so that this message can be seen
+ * in IPC logging. Further errors continue to log on the console.
+ */
+#define MSM_HS_ERR(x...) do { \
+	pr_err(x); \
+	if (msm_uport->ipc_msm_hs_log_ctxt && \
+			msm_uport->ipc_debug_mask >= ERR_LEV) { \
+		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+		msm_uport->ipc_debug_mask = FATAL_LEV; \
+	} \
+} while (0)
+
+#define LOG_USR_MSG(ctx, x...) do { \
+	if (ctx) \
+		ipc_log_string(ctx, x); \
+} while (0)
+
+/*
+ * There are 3 different kinds of UART core available on MSM:
+ * High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
+ * and BLSP based HSUART.
+ */
+enum uart_core_type {
+	LEGACY_HSUART,
+	GSBI_HSUART,
+	BLSP_HSUART,
+};
+
+enum flush_reason {
+	FLUSH_NONE,
+	FLUSH_DATA_READY,
+	FLUSH_DATA_INVALID,  /* values after this indicate invalid data */
+	FLUSH_IGNORE,
+	FLUSH_STOP,
+	FLUSH_SHUTDOWN,
+};
+
+/*
+ * SPS data structures to support HSUART with BAM
+ * @sps_pipe - This struct defines BAM pipe descriptor
+ * @sps_connect - This struct defines a connection's end point
+ * @sps_register - This struct defines the event registration parameters
+ */
+struct msm_hs_sps_ep_conn_data {
+	struct sps_pipe *pipe_handle;
+	struct sps_connect config;
+	struct sps_register_event event;
+};
+
+struct msm_hs_tx {
+	bool dma_in_flight;    /* tx dma in progress */
+	enum flush_reason flush;
+	wait_queue_head_t wait;
+	int tx_count;
+	dma_addr_t dma_base;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct msm_hs_sps_ep_conn_data cons;
+	struct timer_list tx_timeout_timer;
+	void *ipc_tx_ctxt;
+};
+
+struct msm_hs_rx {
+	enum flush_reason flush;
+	wait_queue_head_t wait;
+	dma_addr_t rbuffer;
+	unsigned char *buffer;
+	unsigned int buffer_pending;
+	struct delayed_work flip_insert_work;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct msm_hs_sps_ep_conn_data prod;
+	unsigned long queued_flag;
+	unsigned long pending_flag;
+	int rx_inx;
+	struct sps_iovec iovec[UART_DMA_DESC_NR]; /* track descriptors */
+	void *ipc_rx_ctxt;
+};
+enum buffer_states {
+	NONE_PENDING = 0x0,
+	FIFO_OVERRUN = 0x1,
+	PARITY_ERROR = 0x2,
+	CHARS_NORMAL = 0x4,
+};
+
+enum msm_hs_pm_state {
+	MSM_HS_PM_ACTIVE,
+	MSM_HS_PM_SUSPENDED,
+	MSM_HS_PM_SYS_SUSPENDED,
+};
+
+/* optional low power wakeup, typically on a GPIO RX irq */
+struct msm_hs_wakeup {
+	int irq;  /* <= 0 indicates low power wakeup disabled */
+	unsigned char ignore;  /* bool */
+
+	/* bool: inject char into rx tty on wakeup */
+	bool inject_rx;
+	unsigned char rx_to_inject;
+	bool enabled;
+	bool freed;
+};
+
+struct msm_hs_port {
+	struct uart_port uport;
+	unsigned long imr_reg;  /* shadow value of UARTDM_IMR */
+	struct clk *clk;
+	struct clk *pclk;
+	struct msm_hs_tx tx;
+	struct msm_hs_rx rx;
+	atomic_t resource_count;
+	struct msm_hs_wakeup wakeup;
+
+	struct dentry *loopback_dir;
+	struct work_struct clock_off_w; /* work for actual clock off */
+	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
+	struct mutex mtx; /* resource access mutex */
+	enum uart_core_type uart_type;
+	unsigned long bam_handle;
+	resource_size_t bam_mem;
+	int bam_irq;
+	unsigned char __iomem *bam_base;
+	unsigned int bam_tx_ep_pipe_index;
+	unsigned int bam_rx_ep_pipe_index;
+	/* struct sps_event_notify is an argument passed when triggering a
+	 * callback event object registered for an SPS connection end point.
+	 */
+	struct sps_event_notify notify;
+	/* bus client handler */
+	u32 bus_perf_client;
+	/* BLSP UART required BUS Scaling data */
+	struct msm_bus_scale_pdata *bus_scale_table;
+	bool rx_bam_inprogress;
+	wait_queue_head_t bam_disconnect_wait;
+	bool use_pinctrl;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+	bool flow_control;
+	enum msm_hs_pm_state pm_state;
+	atomic_t client_count;
+	bool obs; /* out of band sleep flag */
+	atomic_t client_req_state;
+	void *ipc_msm_hs_log_ctxt;
+	void *ipc_msm_hs_pwr_ctxt;
+	int ipc_debug_mask;
+};
+
+static const struct of_device_id msm_hs_match_table[] = {
+	{ .compatible = "qcom,msm-hsuart-v14"},
+	{}
+};
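+
+/*
+ * Illustrative device tree sketch (node name, unit address and any other
+ * properties are assumptions; only the compatible string comes from the
+ * match table above):
+ *
+ *	uart_hs0: serial@<addr> {
+ *		compatible = "qcom,msm-hsuart-v14";
+ *		...
+ *	};
+ */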
+
+
+#define MSM_UARTDM_BURST_SIZE 16   /* DM burst size (in bytes) */
+#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
+#define UARTDM_RX_BUF_SIZE 512
+#define RETRY_TIMEOUT 5
+#define UARTDM_NR 256
+#define BAM_PIPE_MIN 0
+#define BAM_PIPE_MAX 11
+#define BUS_SCALING 1
+#define BUS_RESET 0
+#define RX_FLUSH_COMPLETE_TIMEOUT 300 /* In jiffies */
+#define BLSP_UART_CLK_FMAX 63160000
+
+static struct dentry *debug_base;
+static struct platform_driver msm_serial_hs_platform_driver;
+static struct uart_driver msm_hs_driver;
+static const struct uart_ops msm_hs_ops;
+static void msm_hs_start_rx_locked(struct uart_port *uport);
+static void msm_serial_hs_rx_work(struct kthread_work *work);
+static void flip_insert_work(struct work_struct *work);
+static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote);
+static struct msm_hs_port *msm_hs_get_hs_port(int port_index);
+static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport);
+static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport);
+static int msm_hs_pm_resume(struct device *dev);
+
+#define UARTDM_TO_MSM(uart_port) \
+	container_of((uart_port), struct msm_hs_port, uport)
+
+static int msm_hs_ioctl(struct uart_port *uport, unsigned int cmd,
+						unsigned long arg)
+{
+	int ret = 0, state = 1;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (!msm_uport)
+		return -ENODEV;
+
+	switch (cmd) {
+	case MSM_ENABLE_UART_CLOCK: {
+		ret = msm_hs_request_clock_on(&msm_uport->uport);
+		break;
+	}
+	case MSM_DISABLE_UART_CLOCK: {
+		ret = msm_hs_request_clock_off(&msm_uport->uport);
+		break;
+	}
+	case MSM_GET_UART_CLOCK_STATUS: {
+		/* Return value 0 - UART CLOCK is OFF
+		 * Return value 1 - UART CLOCK is ON
+		 */
+
+		if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+			state = 0;
+		ret = state;
+		MSM_HS_INFO("%s():GET UART CLOCK STATUS: cmd=%d state=%d\n",
+			__func__, cmd, state);
+		break;
+	}
+	default: {
+		MSM_HS_INFO("%s():Unknown cmd specified: cmd=%d\n", __func__,
+			   cmd);
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	}
+
+	return ret;
+}
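+
+/*
+ * Illustrative userspace sketch (assumptions: the MSM_* ioctl codes come
+ * from the msm_serial_hs platform header, and the tty node name depends
+ * on the board configuration):
+ *
+ *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
+ *	ioctl(fd, MSM_ENABLE_UART_CLOCK);                   // vote clocks on
+ *	int state = ioctl(fd, MSM_GET_UART_CLOCK_STATUS);   // 1 = ON, 0 = OFF
+ *	ioctl(fd, MSM_DISABLE_UART_CLOCK);                  // drop the vote
+ */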
+
+/*
+ * This function is called initially during probe and then
+ * through the runtime PM framework. The function directly calls
+ * resource APIs to enable them.
+ */
+
+static int msm_hs_clk_bus_vote(struct msm_hs_port *msm_uport)
+{
+	int rc = 0;
+
+	msm_hs_bus_voting(msm_uport, BUS_SCALING);
+	/* Turn on core clk and iface clk */
+	if (msm_uport->pclk) {
+		rc = clk_prepare_enable(msm_uport->pclk);
+		if (rc) {
+			dev_err(msm_uport->uport.dev,
+				"%s: Could not turn on pclk [%d]\n",
+				__func__, rc);
+			goto busreset;
+		}
+	}
+	rc = clk_prepare_enable(msm_uport->clk);
+	if (rc) {
+		dev_err(msm_uport->uport.dev,
+			"%s: Could not turn on core clk [%d]\n",
+			__func__, rc);
+		goto core_unprepare;
+	}
+	MSM_HS_DBG("%s: Clock ON successful\n", __func__);
+	return rc;
+core_unprepare:
+	clk_disable_unprepare(msm_uport->pclk);
+busreset:
+	msm_hs_bus_voting(msm_uport, BUS_RESET);
+	return rc;
+}
+
+/*
+ * This function is called initially during probe and then
+ * through the runtime PM framework. The function directly calls
+ * resource apis to disable them.
+ */
+static void msm_hs_clk_bus_unvote(struct msm_hs_port *msm_uport)
+{
+	clk_disable_unprepare(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_disable_unprepare(msm_uport->pclk);
+	msm_hs_bus_voting(msm_uport, BUS_RESET);
+	MSM_HS_DBG("%s: Clock OFF successful\n", __func__);
+}
+
+ /* Remove vote for resources when done */
+static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	int rc = atomic_read(&msm_uport->resource_count);
+
+	MSM_HS_DBG("%s(): power usage count %d", __func__, rc);
+	if (rc <= 0) {
+		MSM_HS_WARN("%s(): rc zero, bailing\n", __func__);
+		WARN_ON(1);
+		return;
+	}
+	atomic_dec(&msm_uport->resource_count);
+	pm_runtime_mark_last_busy(uport->dev);
+	pm_runtime_put_autosuspend(uport->dev);
+}
+
+ /* Vote for resources before accessing them */
+static void msm_hs_resource_vote(struct msm_hs_port *msm_uport)
+{
+	int ret;
+	struct uart_port *uport = &(msm_uport->uport);
+
+	ret = pm_runtime_get_sync(uport->dev);
+	if (ret < 0 || msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s:%s runtime callback not invoked ret:%d st:%d",
+			__func__, dev_name(uport->dev), ret,
+					msm_uport->pm_state);
+		msm_hs_pm_resume(uport->dev);
+	}
+	atomic_inc(&msm_uport->resource_count);
+}
+
+/* Check if the uport line number matches the user id stored in pdata.
+ * User id information is stored during initialization. This function
+ * ensures that the same device is selected.
+ */
+
+static struct msm_hs_port *get_matching_hs_port(struct platform_device *pdev)
+{
+	struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
+	struct msm_hs_port *msm_uport = msm_hs_get_hs_port(pdev->id);
+
+	if ((!msm_uport) || (msm_uport->uport.line != pdev->id
+	   && msm_uport->uport.line != pdata->userid)) {
+		pr_err("uport line number mismatch!");
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return msm_uport;
+}
+
+static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	int state = 1;
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+			state = 0;
+		ret = snprintf(buf, PAGE_SIZE, "%d\n", state);
+	}
+	return ret;
+}
+
+static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	int state;
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		state = buf[0] - '0';
+		switch (state) {
+		case 0:
+			MSM_HS_DBG("%s: Request clock OFF\n", __func__);
+			msm_hs_request_clock_off(&msm_uport->uport);
+			ret = count;
+			break;
+		case 1:
+			MSM_HS_DBG("%s: Request clock ON\n", __func__);
+			msm_hs_request_clock_on(&msm_uport->uport);
+			ret = count;
+			break;
+		default:
+			ret = -EINVAL;
+		}
+	}
+	return ret;
+}
+
+static DEVICE_ATTR(clock, 0644, show_clock, set_clock);
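+
+/*
+ * Illustrative usage of the "clock" attribute (the exact sysfs path is an
+ * assumption and depends on the platform device name):
+ *
+ *	echo 1 > /sys/bus/platform/devices/<uart-device>/clock    # clock ON
+ *	echo 0 > /sys/bus/platform/devices/<uart-device>/clock    # clock OFF
+ *	cat /sys/bus/platform/devices/<uart-device>/clock         # 1 = ON
+ */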
+
+static ssize_t show_debug_mask(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport)
+		ret = snprintf(buf, sizeof(int), "%u\n",
+					msm_uport->ipc_debug_mask);
+	return ret;
+}
+
+static ssize_t set_debug_mask(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		msm_uport->ipc_debug_mask = buf[0] - '0';
+		if (msm_uport->ipc_debug_mask < FATAL_LEV ||
+				msm_uport->ipc_debug_mask > DBG_LEV) {
+			/* set to default level */
+			msm_uport->ipc_debug_mask = INFO_LEV;
+			MSM_HS_ERR("Range is 0 to 4;Set to default level 3\n");
+			return -EINVAL;
+		}
+	}
+	return count;
+}
+
+static DEVICE_ATTR(debug_mask, 0644, show_debug_mask,
+							set_debug_mask);
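+
+/*
+ * Illustrative usage of the "debug_mask" attribute (sysfs path assumed):
+ *
+ *	echo 4 > /sys/bus/platform/devices/<uart-device>/debug_mask
+ *
+ * raises IPC logging to DBG_LEV; values outside 0..4 are rejected with
+ * -EINVAL and the level is reset to the default INFO_LEV.
+ */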
+
+static inline bool is_use_low_power_wakeup(struct msm_hs_port *msm_uport)
+{
+	return msm_uport->wakeup.irq > 0;
+}
+
+static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote)
+{
+	int ret;
+
+	if (msm_uport->bus_perf_client) {
+		MSM_HS_DBG("Bus voting:%d\n", vote);
+		ret = msm_bus_scale_client_update_request(
+				msm_uport->bus_perf_client, vote);
+		if (ret)
+			MSM_HS_ERR("%s(): Failed for Bus voting: %d\n",
+							__func__, vote);
+	}
+}
+
+static inline unsigned int msm_hs_read(struct uart_port *uport,
+				       unsigned int index)
+{
+	return readl_relaxed(uport->membase + index);
+}
+
+static inline void msm_hs_write(struct uart_port *uport, unsigned int index,
+				 unsigned int value)
+{
+	writel_relaxed(value, uport->membase + index);
+}
+
+static int sps_rx_disconnect(struct sps_pipe *sps_pipe_handler)
+{
+	struct sps_connect config;
+	int ret;
+
+	ret = sps_get_config(sps_pipe_handler, &config);
+	if (ret) {
+		pr_err("%s: sps_get_config() failed ret %d\n", __func__, ret);
+		return ret;
+	}
+	config.options |= SPS_O_POLL;
+	ret = sps_set_config(sps_pipe_handler, &config);
+	if (ret) {
+		pr_err("%s: sps_set_config() failed ret %d\n", __func__, ret);
+		return ret;
+	}
+	return sps_disconnect(sps_pipe_handler);
+}
+
+static void hex_dump_ipc(struct msm_hs_port *msm_uport, void *ipc_ctx,
+			char *prefix, char *string, u64 addr, int size)
+
+{
+	char buf[(BUF_DUMP_SIZE * 3) + 2];
+	int len = 0;
+
+	len = min(size, BUF_DUMP_SIZE);
+	/*
+	 * Print up to 32 data bytes, 32 bytes per line, 1 byte at a time and
+	 * don't include the ASCII text at the end of the buffer.
+	 */
+	hex_dump_to_buffer(string, len, 32, 1, buf, sizeof(buf), false);
+	ipc_log_string(ipc_ctx, "%s[0x%.10x:%d] : %s", prefix,
+					(unsigned int)addr, size, buf);
+}
+
+/*
+ * This API read and provides UART Core registers information.
+ */
+static void dump_uart_hs_registers(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_INFO("%s:Failed clocks are off, resource_count %d",
+			__func__, atomic_read(&msm_uport->resource_count));
+		return;
+	}
+
+	MSM_HS_DBG(
+	"MR1:%x MR2:%x TFWR:%x RFWR:%x DMEN:%x IMR:%x MISR:%x NCF_TX:%x\n",
+	msm_hs_read(uport, UART_DM_MR1),
+	msm_hs_read(uport, UART_DM_MR2),
+	msm_hs_read(uport, UART_DM_TFWR),
+	msm_hs_read(uport, UART_DM_RFWR),
+	msm_hs_read(uport, UART_DM_DMEN),
+	msm_hs_read(uport, UART_DM_IMR),
+	msm_hs_read(uport, UART_DM_MISR),
+	msm_hs_read(uport, UART_DM_NCF_TX));
+	MSM_HS_INFO("SR:%x ISR:%x DMRX:%x RX_SNAP:%x TXFS:%x RXFS:%x\n",
+	msm_hs_read(uport, UART_DM_SR),
+	msm_hs_read(uport, UART_DM_ISR),
+	msm_hs_read(uport, UART_DM_DMRX),
+	msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP),
+	msm_hs_read(uport, UART_DM_TXFS),
+	msm_hs_read(uport, UART_DM_RXFS));
+	MSM_HS_DBG("rx.flush:%u\n", msm_uport->rx.flush);
+}
+
+static int msm_serial_loopback_enable_set(void *data, u64 val)
+{
+	struct msm_hs_port *msm_uport = data;
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned long flags;
+	int ret = 0;
+
+	msm_hs_resource_vote(msm_uport);
+
+	if (val) {
+		spin_lock_irqsave(&uport->lock, flags);
+		ret = msm_hs_read(uport, UART_DM_MR2);
+		ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
+			UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
+		msm_hs_write(uport, UART_DM_MR2, ret);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	} else {
+		spin_lock_irqsave(&uport->lock, flags);
+		ret = msm_hs_read(uport, UART_DM_MR2);
+		ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
+			UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
+		msm_hs_write(uport, UART_DM_MR2, ret);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	}
+	/* Calling CLOCK API. Hence mb() is required here. */
+	mb();
+
+	msm_hs_resource_unvote(msm_uport);
+	return 0;
+}
+
+static int msm_serial_loopback_enable_get(void *data, u64 *val)
+{
+	struct msm_hs_port *msm_uport = data;
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned long flags;
+	int ret = 0;
+
+	msm_hs_resource_vote(msm_uport);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	msm_hs_resource_unvote(msm_uport);
+
+	*val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
+			msm_serial_loopback_enable_set, "%llu\n");
+
+/*
+ * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
+ * writing 1 turns on internal loopback mode in HW. Useful for automation
+ * test scripts.
+ * writing 0 disables the internal loopback mode. Default is disabled.
+ */
+static void msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
+					   int id)
+{
+	char node_name[15];
+
+	snprintf(node_name, sizeof(node_name), "loopback.%d", id);
+	msm_uport->loopback_dir = debugfs_create_file(node_name,
+						0644,
+						debug_base,
+						msm_uport,
+						&loopback_enable_fops);
+
+	if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
+		MSM_HS_ERR("%s(): Cannot create loopback.%d debug entry",
+							__func__, id);
+}
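+
+/*
+ * Illustrative usage (debugfs mount point assumed to be the usual
+ * /sys/kernel/debug): enable internal HW loopback on port 0 with
+ *
+ *	echo 1 > /sys/kernel/debug/msm_serial_hs/loopback.0
+ *
+ * and write 0 to return to normal operation.
+ */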
+
+static int msm_hs_remove(struct platform_device *pdev)
+{
+
+	struct msm_hs_port *msm_uport;
+	struct device *dev;
+
+	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
+		pr_err("Invalid platform device ID = %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	msm_uport = get_matching_hs_port(pdev);
+	if (!msm_uport)
+		return -EINVAL;
+
+	dev = msm_uport->uport.dev;
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_debug_mask.attr);
+	debugfs_remove(msm_uport->loopback_dir);
+
+	dma_free_coherent(msm_uport->uport.dev,
+			UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
+			msm_uport->rx.buffer, msm_uport->rx.rbuffer);
+
+	msm_uport->rx.buffer = NULL;
+	msm_uport->rx.rbuffer = 0;
+
+	destroy_workqueue(msm_uport->hsuart_wq);
+	mutex_destroy(&msm_uport->mtx);
+
+	uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
+	clk_put(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_put(msm_uport->pclk);
+
+	iounmap(msm_uport->uport.membase);
+
+	return 0;
+}
+
+
+/* Connect a UART peripheral's SPS endpoint (consumer endpoint)
+ *
+ * Also registers an SPS callback function for the consumer
+ * process with the SPS driver
+ *
+ * @msm_uport - Pointer to the msm_hs_port structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+
+static int msm_hs_spsconnect_tx(struct msm_hs_port *msm_uport)
+{
+	int ret;
+	struct uart_port *uport = &msm_uport->uport;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
+	struct sps_connect *sps_config = &tx->cons.config;
+	struct sps_register_event *sps_event = &tx->cons.event;
+	unsigned long flags;
+	unsigned int data;
+
+	if (tx->flush != FLUSH_SHUTDOWN) {
+		MSM_HS_ERR("%s:Invalid flush state:%d\n", __func__, tx->flush);
+		return 0;
+	}
+
+	/* Establish connection between peripheral and memory endpoint */
+	ret = sps_connect(sps_pipe_handle, sps_config);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		return ret;
+	}
+	/* Register callback event for EOT (End of transfer) event. */
+	ret = sps_register_event(sps_pipe_handle, sps_event);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_register_event() failed for tx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		goto reg_event_err;
+	}
+
+	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+	msm_uport->tx.flush = FLUSH_STOP;
+	spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Enable UARTDM Tx BAM Interface */
+	data |= UARTDM_TX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
+
+	MSM_HS_DBG("%s(): TX Connect", __func__);
+	return 0;
+
+reg_event_err:
+	sps_disconnect(sps_pipe_handle);
+	return ret;
+}
+
+/* Connect a UART peripheral's SPS endpoint (producer endpoint)
+ *
+ * Also registers an SPS callback function for the producer
+ * process with the SPS driver
+ *
+ * @uport - Pointer to uart uport structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+
+static int msm_hs_spsconnect_rx(struct uart_port *uport)
+{
+	int ret;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+	struct sps_connect *sps_config = &rx->prod.config;
+	struct sps_register_event *sps_event = &rx->prod.event;
+	unsigned long flags;
+
+	/* Establish connection between peripheral and memory endpoint */
+	ret = sps_connect(sps_pipe_handle, sps_config);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		return ret;
+	}
+	/* Register callback event for DESC_DONE event. */
+	ret = sps_register_event(sps_pipe_handle, sps_event);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_register_event() failed for rx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		goto reg_event_err;
+	}
+	spin_lock_irqsave(&uport->lock, flags);
+	if (msm_uport->rx.pending_flag)
+		MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
+		__func__, msm_uport->rx.pending_flag);
+	msm_uport->rx.queued_flag = 0;
+	msm_uport->rx.pending_flag = 0;
+	msm_uport->rx.rx_inx = 0;
+	msm_uport->rx.flush = FLUSH_STOP;
+	spin_unlock_irqrestore(&uport->lock, flags);
+	MSM_HS_DBG("%s(): RX Connect\n", __func__);
+	return 0;
+
+reg_event_err:
+	sps_disconnect(sps_pipe_handle);
+	return ret;
+}
+
+/*
+ * Programs the UARTDM_CSR register with the correct bit rate.
+ *
+ * Interrupts should be disabled before we are called, as
+ * we modify the baud rate and the receive stale interrupt
+ * level, which depends on the bit rate.
+ * Goal is to have around 8 ms before indicating stale:
+ * roundup(((Bit Rate * .008) / 10) + 1)
+ */
+static void msm_hs_set_bps_locked(struct uart_port *uport,
+			       unsigned int bps)
+{
+	unsigned long rxstale;
+	unsigned long data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	switch (bps) {
+	case 300:
+		msm_hs_write(uport, UART_DM_CSR, 0x00);
+		rxstale = 1;
+		break;
+	case 600:
+		msm_hs_write(uport, UART_DM_CSR, 0x11);
+		rxstale = 1;
+		break;
+	case 1200:
+		msm_hs_write(uport, UART_DM_CSR, 0x22);
+		rxstale = 1;
+		break;
+	case 2400:
+		msm_hs_write(uport, UART_DM_CSR, 0x33);
+		rxstale = 1;
+		break;
+	case 4800:
+		msm_hs_write(uport, UART_DM_CSR, 0x44);
+		rxstale = 1;
+		break;
+	case 9600:
+		msm_hs_write(uport, UART_DM_CSR, 0x55);
+		rxstale = 2;
+		break;
+	case 14400:
+		msm_hs_write(uport, UART_DM_CSR, 0x66);
+		rxstale = 3;
+		break;
+	case 19200:
+		msm_hs_write(uport, UART_DM_CSR, 0x77);
+		rxstale = 4;
+		break;
+	case 28800:
+		msm_hs_write(uport, UART_DM_CSR, 0x88);
+		rxstale = 6;
+		break;
+	case 38400:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		rxstale = 8;
+		break;
+	case 57600:
+		msm_hs_write(uport, UART_DM_CSR, 0xaa);
+		rxstale = 16;
+		break;
+	case 76800:
+		msm_hs_write(uport, UART_DM_CSR, 0xbb);
+		rxstale = 16;
+		break;
+	case 115200:
+		msm_hs_write(uport, UART_DM_CSR, 0xcc);
+		rxstale = 31;
+		break;
+	case 230400:
+		msm_hs_write(uport, UART_DM_CSR, 0xee);
+		rxstale = 31;
+		break;
+	case 460800:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	case 4000000:
+	case 3686400:
+	case 3200000:
+	case 3500000:
+	case 3000000:
+	case 2500000:
+	case 2000000:
+	case 1500000:
+	case 1152000:
+	case 1000000:
+	case 921600:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	default:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		/* default to 9600 */
+		bps = 9600;
+		rxstale = 2;
+		break;
+	}
+	/*
+	 * The UART baud rate depends on the CSR and MND values;
+	 * we update CSR first and then call clk_set_rate(),
+	 * which updates the MND values. Hence a dsb is
+	 * required here.
+	 */
+	mb();
+	if (bps > 460800) {
+		uport->uartclk = bps * 16;
+		/* BLSP based UART supports a maximum clock frequency
+		 * of 63.16 MHz. With this clock frequency the UART can
+		 * support a baud rate of 3.94 Mbps, which is treated as
+		 * equivalent to 4 Mbps; the UART hardware is robust
+		 * enough to handle this deviation.
+		 */
+		if (bps == 4000000)
+			uport->uartclk = BLSP_UART_CLK_FMAX;
+	} else {
+		uport->uartclk = 7372800;
+	}
+
+	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
+		MSM_HS_WARN("Error setting clock rate on UART\n");
+		WARN_ON(1);
+	}
+
+	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
+	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
+
+	msm_hs_write(uport, UART_DM_IPR, data);
+	/*
+	 * It is suggested to do reset of transmitter and receiver after
+	 * changing any protocol configuration. Here Baud rate and stale
+	 * timeout are getting updated. Hence reset transmitter and receiver.
+	 */
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+}
+
+
+static void msm_hs_set_std_bps_locked(struct uart_port *uport,
+			       unsigned int bps)
+{
+	unsigned long rxstale;
+	unsigned long data;
+
+	switch (bps) {
+	case 9600:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		rxstale = 2;
+		break;
+	case 14400:
+		msm_hs_write(uport, UART_DM_CSR, 0xaa);
+		rxstale = 3;
+		break;
+	case 19200:
+		msm_hs_write(uport, UART_DM_CSR, 0xbb);
+		rxstale = 4;
+		break;
+	case 28800:
+		msm_hs_write(uport, UART_DM_CSR, 0xcc);
+		rxstale = 6;
+		break;
+	case 38400:
+		msm_hs_write(uport, UART_DM_CSR, 0xdd);
+		rxstale = 8;
+		break;
+	case 57600:
+		msm_hs_write(uport, UART_DM_CSR, 0xee);
+		rxstale = 16;
+		break;
+	case 115200:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	default:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		/* default to 9600 */
+		bps = 9600;
+		rxstale = 2;
+		break;
+	}
+
+	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
+	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
+
+	msm_hs_write(uport, UART_DM_IPR, data);
+}
+
+static void msm_hs_enable_flow_control(struct uart_port *uport, bool override)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	unsigned int data;
+
+	if (msm_uport->flow_control || override) {
+		/* Enable RFR line */
+		msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+		/* Enable auto RFR */
+		data = msm_hs_read(uport, UART_DM_MR1);
+		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hs_write(uport, UART_DM_MR1, data);
+		/* Ensure register IO completion */
+		mb();
+	}
+}
+
+static void msm_hs_disable_flow_control(struct uart_port *uport, bool override)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	unsigned int data;
+
+	/*
+	 * Clear the Rx Ready Ctl bit - This ensures that
+	 * flow control lines stop the other side from sending
+	 * data while we change the parameters
+	 */
+
+	if (msm_uport->flow_control || override) {
+		data = msm_hs_read(uport, UART_DM_MR1);
+		/* disable auto ready-for-receiving */
+		data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hs_write(uport, UART_DM_MR1, data);
+		/* Disable RFR line */
+		msm_hs_write(uport, UART_DM_CR, RFR_HIGH);
+		/* Ensure register IO completion */
+		mb();
+	}
+}
+
+/*
+ * termios :  new ktermios
+ * oldtermios:  old ktermios previous setting
+ *
+ * Configure the serial port
+ */
+static void msm_hs_set_termios(struct uart_port *uport,
+				   struct ktermios *termios,
+				   struct ktermios *oldtermios)
+{
+	unsigned int bps;
+	unsigned long data;
+	unsigned int c_cflag = termios->c_cflag;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	/**
+	 * set_termios can be invoked from the framework when
+	 * the clocks are off and the client has not had a chance
+	 * to turn them on. Make sure that they are on
+	 */
+	msm_hs_resource_vote(msm_uport);
+	mutex_lock(&msm_uport->mtx);
+	msm_hs_write(uport, UART_DM_IMR, 0);
+
+	msm_hs_disable_flow_control(uport, true);
+
+	/*
+	 * Disable Rx channel of UARTDM
+	 * DMA Rx Stall happens if enqueue and flush of Rx command happens
+	 * concurrently. Hence before changing the baud rate/protocol
+	 * configuration and sending flush command to ADM, disable the Rx
+	 * channel of UARTDM.
+	 * Note: should not reset the receiver here immediately as it is not
+	 * suggested to do disable/reset or reset/disable at the same time.
+	 */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Disable UARTDM RX BAM Interface */
+	data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+
+	/*
+	 * Reset RX and TX.
+	 * Resetting the RX enables it, therefore we must reset and disable.
+	 */
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+
+	/* 300 is the minimum baud rate supported by the driver */
+	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);
+
+	/* Temporary remapping  200 BAUD to 3.2 mbps */
+	if (bps == 200)
+		bps = 3200000;
+
+	uport->uartclk = clk_get_rate(msm_uport->clk);
+	if (!uport->uartclk)
+		msm_hs_set_std_bps_locked(uport, bps);
+	else
+		msm_hs_set_bps_locked(uport, bps);
+
+	data = msm_hs_read(uport, UART_DM_MR2);
+	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
+	/* set parity */
+	if (c_cflag & PARENB) {
+		if (c_cflag & PARODD)
+			data |= ODD_PARITY;
+		else if (c_cflag & CMSPAR)
+			data |= SPACE_PARITY;
+		else
+			data |= EVEN_PARITY;
+	}
+
+	/* Set bits per char */
+	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;
+
+	switch (c_cflag & CSIZE) {
+	case CS5:
+		data |= FIVE_BPC;
+		break;
+	case CS6:
+		data |= SIX_BPC;
+		break;
+	case CS7:
+		data |= SEVEN_BPC;
+		break;
+	default:
+		data |= EIGHT_BPC;
+		break;
+	}
+	/* stop bits */
+	if (c_cflag & CSTOPB) {
+		data |= STOP_BIT_TWO;
+	} else {
+		/* otherwise 1 stop bit */
+		data |= STOP_BIT_ONE;
+	}
+	data |= UARTDM_MR2_ERROR_MODE_BMSK;
+	/* write parity/bits per char/stop bit configuration */
+	msm_hs_write(uport, UART_DM_MR2, data);
+
+	uport->ignore_status_mask = termios->c_iflag & INPCK;
+	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
+	uport->ignore_status_mask |= termios->c_iflag & IGNBRK;
+
+	uport->read_status_mask = (termios->c_cflag & CREAD);
+
+	/* Set Transmit software time out */
+	uart_update_timeout(uport, c_cflag, bps);
+
+	/* Enable UARTDM Rx BAM Interface */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	data |= UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
+	/* Issue TX,RX BAM Start IFC command */
+	msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
+	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+	/* Ensure Register Writes Complete */
+	mb();
+
+	/* Configure HW flow control.
+	 * The UART core checks the status of the CTS line while sending
+	 * data to the remote UART to confirm whether it can receive.
+	 * The UART core drives RFR to stop the remote side when there is
+	 * no space left in its RX FIFO.
+	 */
+	/* Pulling RFR line high */
+	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+	data = msm_hs_read(uport, UART_DM_MR1);
+	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
+	if (c_cflag & CRTSCTS) {
+		data |= UARTDM_MR1_CTS_CTL_BMSK;
+		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_uport->flow_control = true;
+	}
+	msm_hs_write(uport, UART_DM_MR1, data);
+	MSM_HS_INFO("%s: Cflags 0x%x Baud %u\n", __func__, c_cflag, bps);
+
+	mutex_unlock(&msm_uport->mtx);
+
+	msm_hs_resource_unvote(msm_uport);
+}
+
+/*
+ *  Standard API, Transmitter
+ *  Any character in the transmit shift register is sent
+ */
+unsigned int msm_hs_tx_empty(struct uart_port *uport)
+{
+	unsigned int data;
+	unsigned int isr;
+	unsigned int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	data = msm_hs_read(uport, UART_DM_SR);
+	isr = msm_hs_read(uport, UART_DM_ISR);
+	msm_hs_resource_unvote(msm_uport);
+	MSM_HS_INFO("%s(): SR:0x%x ISR:0x%x ", __func__, data, isr);
+
+	if (data & UARTDM_SR_TXEMT_BMSK) {
+		ret = TIOCSER_TEMT;
+	} else
+		/*
+		 * Add an extra sleep here because sometimes the framework's
+		 * delay (based on baud rate) isn't good enough.
+		 * Note that this won't happen during every port close, only
+		 * on select occasions when userspace does back-to-back
+		 * write() and close().
+		 */
+		usleep_range(5000, 7000);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_tx_empty);
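+
+/*
+ * Illustrative in-kernel client sketch (assumed consumer holding the
+ * struct uart_port *uport for this device, e.g. a Bluetooth driver):
+ *
+ *	msm_hs_request_clock_on(uport);          // vote the UART clocks on
+ *	...                                      // transfer data
+ *	if (msm_hs_tx_empty(uport))              // TX shift register drained?
+ *		msm_hs_request_clock_off(uport); // drop the clock vote
+ */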
+
+/*
+ *  Standard API, Stop transmitter.
+ *  Any character in the transmit shift register is sent as
+ *  well as the current data mover transfer.
+ */
+static void msm_hs_stop_tx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	tx->flush = FLUSH_STOP;
+}
+
+static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+	int ret = 0;
+
+	ret = sps_rx_disconnect(sps_pipe_handle);
+
+	if (msm_uport->rx.pending_flag)
+		MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
+		__func__, msm_uport->rx.pending_flag);
+	MSM_HS_DBG("%s(): clearing desc usage flag", __func__);
+	msm_uport->rx.queued_flag = 0;
+	msm_uport->rx.pending_flag = 0;
+	msm_uport->rx.rx_inx = 0;
+
+	if (ret)
+		MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__);
+	msm_uport->rx.flush = FLUSH_SHUTDOWN;
+	MSM_HS_DBG("%s: Calling Completion\n", __func__);
+	wake_up(&msm_uport->bam_disconnect_wait);
+	MSM_HS_DBG("%s: Done Completion\n", __func__);
+	wake_up(&msm_uport->rx.wait);
+	return ret;
+}
+
+static int sps_tx_disconnect(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &msm_uport->uport;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct sps_pipe *tx_pipe = tx->cons.pipe_handle;
+	unsigned long flags;
+	int ret = 0;
+
+	if (msm_uport->tx.flush == FLUSH_SHUTDOWN) {
+		MSM_HS_DBG("%s(): pipe already disconnected", __func__);
+		return ret;
+	}
+
+	ret = sps_disconnect(tx_pipe);
+
+	if (ret) {
+		MSM_HS_ERR("%s(): sps_disconnect failed %d", __func__, ret);
+		return ret;
+	}
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	MSM_HS_DBG("%s(): TX Disconnect", __func__);
+	return ret;
+}
+
+static void msm_hs_disable_rx(struct uart_port *uport)
+{
+	unsigned int data;
+
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+}
+
+/*
+ *  Standard API, Stop receiver as soon as possible.
+ *
+ *  Function immediately terminates the operation of the
+ *  channel receiver and any incoming characters are lost. None
+ *  of the receiver status bits are affected by this command and
+ *  characters that are already in the receive FIFO remain there.
+ */
+static void msm_hs_stop_rx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+	else
+		msm_hs_disable_rx(uport);
+
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_uport->rx.flush = FLUSH_STOP;
+}
+
+static void msm_hs_disconnect_rx(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_disable_rx(uport);
+	/* Disconnect the BAM RX pipe */
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_uport->rx.flush = FLUSH_STOP;
+	disconnect_rx_endpoint(msm_uport);
+	MSM_HS_DBG("%s(): rx->flush %d", __func__, msm_uport->rx.flush);
+}
+
+/* Tx timeout callback function */
+void tx_timeout_handler(unsigned long arg)
+{
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *) arg;
+	struct uart_port *uport = &msm_uport->uport;
+	int isr;
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): clocks are off", __func__);
+		return;
+	}
+
+	isr = msm_hs_read(uport, UART_DM_ISR);
+	if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
+		MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x", __func__, isr);
+	dump_uart_hs_registers(msm_uport);
+}
+
+/*  Transmit the next chunk of data */
+static void msm_hs_submit_tx_locked(struct uart_port *uport)
+{
+	int left;
+	int tx_count;
+	int aligned_tx_count;
+	dma_addr_t src_addr;
+	dma_addr_t aligned_src_addr;
+	u32 flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
+	struct sps_pipe *sps_pipe_handle;
+	int ret;
+
+	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
+		tx->dma_in_flight = false;
+		msm_hs_stop_tx_locked(uport);
+		return;
+	}
+
+	tx_count = uart_circ_chars_pending(tx_buf);
+
+	if (tx_count > UARTDM_TX_BUF_SIZE)
+		tx_count = UARTDM_TX_BUF_SIZE;
+
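+	/*
+	 * Clamp the transfer to the contiguous region before the circular
+	 * buffer wraps; any remainder goes out on the next submission.
+	 */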
+	left = UART_XMIT_SIZE - tx_buf->tail;
+
+	if (tx_count > left)
+		tx_count = left;
+
+	src_addr = tx->dma_base + tx_buf->tail;
+	/* Align src_addr down to a cache-line boundary and add the
+	 * masked-off bytes to tx_count so the sync covers whole lines
+	 */
+	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
+	aligned_tx_count = tx_count + src_addr - aligned_src_addr;
+
+	dma_sync_single_for_device(uport->dev, aligned_src_addr,
+			aligned_tx_count, DMA_TO_DEVICE);
+
+	tx->tx_count = tx_count;
+
+	hex_dump_ipc(msm_uport, tx->ipc_tx_ctxt, "Tx",
+			&tx_buf->buf[tx_buf->tail], (u64)src_addr, tx_count);
+	sps_pipe_handle = tx->cons.pipe_handle;
+
+	/* Set 1 second timeout */
+	mod_timer(&tx->tx_timeout_timer,
+		jiffies + msecs_to_jiffies(MSEC_PER_SEC));
+	/* Queue transfer request to SPS */
+	ret = sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
+				msm_uport, flags);
+
+	MSM_HS_DBG("%s:Enqueue Tx Cmd, ret %d\n", __func__, ret);
+}
+
+/* This function queues the rx descriptor for BAM transfer */
+static void msm_hs_post_rx_desc(struct msm_hs_port *msm_uport, int inx)
+{
+	u32 flags = SPS_IOVEC_FLAG_INT;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int ret;
+
+	phys_addr_t rbuff_addr = rx->rbuffer + (UARTDM_RX_BUF_SIZE * inx);
+	u8 *virt_addr = rx->buffer + (UARTDM_RX_BUF_SIZE * inx);
+
+	MSM_HS_DBG("%s: %d:Queue desc %d, 0x%llx, base 0x%llx virtaddr %p",
+		__func__, msm_uport->uport.line, inx,
+		(u64)rbuff_addr, (u64)rx->rbuffer, virt_addr);
+
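+	/*
+	 * Clear the stale iovec size before queueing this buffer to BAM;
+	 * the SPS rx callback saves the completed iovec (with the actual
+	 * byte count) back into this slot.
+	 */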
+	rx->iovec[inx].size = 0;
+	ret = sps_transfer_one(rx->prod.pipe_handle, rbuff_addr,
+		UARTDM_RX_BUF_SIZE, msm_uport, flags);
+
+	if (ret)
+		MSM_HS_ERR("Error processing descriptor %d", ret);
+}
+
+/* Update the rx descriptor index to specify the next one to be processed */
+static void msm_hs_mark_next(struct msm_hs_port *msm_uport, int inx)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int prev;
+
+	inx %= UART_DMA_DESC_NR;
+	MSM_HS_DBG("%s(): inx %d, pending 0x%lx", __func__, inx,
+		rx->pending_flag);
+
+	if (!inx)
+		prev = UART_DMA_DESC_NR - 1;
+	else
+		prev = inx - 1;
+
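+	/*
+	 * Advance rx_inx only if the previous descriptor is no longer
+	 * pending, so descriptors are consumed in ring order.
+	 */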
+	if (!test_bit(prev, &rx->pending_flag))
+		msm_uport->rx.rx_inx = inx;
+	MSM_HS_DBG("%s(): prev %d pending flag 0x%lx, next %d", __func__,
+		prev, rx->pending_flag, msm_uport->rx.rx_inx);
+}
+
+/*
+ *	Queue the rx descriptor that has just been processed or
+ *	all of them if queueing for the first time
+ */
+static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int i, flag = 0;
+
+	/* Queue all descriptors on the first call; otherwise queue only
+	 * the one that was just processed
+	 */
+	if (rx->queued_flag || rx->pending_flag) {
+		if (!test_bit(rx->rx_inx, &rx->queued_flag) &&
+		    !test_bit(rx->rx_inx, &rx->pending_flag)) {
+			msm_hs_post_rx_desc(msm_uport, rx->rx_inx);
+			set_bit(rx->rx_inx, &rx->queued_flag);
+			MSM_HS_DBG("%s(): Set Queued Bit %d",
+				__func__, rx->rx_inx);
+		} else
+			MSM_HS_ERR("%s(): rx_inx pending or queued", __func__);
+		return;
+	}
+
+	for (i = 0; i < UART_DMA_DESC_NR; i++) {
+		if (!test_bit(i, &rx->queued_flag) &&
+		!test_bit(i, &rx->pending_flag)) {
+			MSM_HS_DBG("%s(): Calling post rx %d", __func__, i);
+			msm_hs_post_rx_desc(msm_uport, i);
+			set_bit(i, &rx->queued_flag);
+			flag = 1;
+		}
+	}
+
+	if (!flag)
+		MSM_HS_ERR("%s(): error queueing descriptor", __func__);
+}
+
+/* Start to receive the next chunk of data */
+static void msm_hs_start_rx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
+	unsigned int data;
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+	if (rx->pending_flag) {
+		MSM_HS_INFO("%s: Rx Cmd got executed, wait for rx_tlet\n",
+								 __func__);
+		rx->flush = FLUSH_IGNORE;
+		return;
+	}
+	if (buffer_pending)
+		MSM_HS_ERR("Error: rx started in buffer state =%x",
+			buffer_pending);
+
+	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+	msm_hs_write(uport, UART_DM_DMRX, UARTDM_RX_BUF_SIZE);
+	msm_hs_write(uport, UART_DM_CR, STALE_EVENT_ENABLE);
+	/*
+	 * Enable the UARTDM Rx interface; it was previously disabled
+	 * in set_termios before configuring the baud rate.
+	 */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Enable UARTDM Rx BAM Interface */
+	data |= UARTDM_RX_BAM_ENABLE_BMSK;
+
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/* Calling next DMOV API. Hence mb() here. */
+	mb();
+
+	/*
+	 * RX-transfer will be automatically re-activated
+	 * after last data of previous transfer was read.
+	 */
+	data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
+				RX_DMRX_CYCLIC_EN);
+	msm_hs_write(uport, UART_DM_RX_TRANS_CTRL, data);
+	/* Issue RX BAM Start IFC command */
+	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+	/* Ensure register IO completion */
+	mb();
+
+	msm_uport->rx.flush = FLUSH_NONE;
+	msm_uport->rx_bam_inprogress = true;
+	msm_hs_queue_rx_desc(msm_uport);
+	msm_uport->rx_bam_inprogress = false;
+	wake_up(&msm_uport->rx.wait);
+	MSM_HS_DBG("%s:Enqueue Rx Cmd\n", __func__);
+}
+
+static void flip_insert_work(struct work_struct *work)
+{
+	unsigned long flags;
+	int retval;
+	struct msm_hs_port *msm_uport =
+		container_of(work, struct msm_hs_port,
+			     rx.flip_insert_work.work);
+	struct tty_struct *tty = msm_uport->uport.state->port.tty;
+
+	spin_lock_irqsave(&msm_uport->uport.lock, flags);
+	if (!tty || msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+		dev_err(msm_uport->uport.dev,
+			"%s:Invalid driver state flush %d\n",
+				__func__, msm_uport->rx.flush);
+		MSM_HS_ERR("%s:Invalid driver state flush %d\n",
+				__func__, msm_uport->rx.flush);
+		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+		return;
+	}
+
+	if (msm_uport->rx.buffer_pending == NONE_PENDING) {
+		MSM_HS_ERR("Error: No buffer pending in %s", __func__);
+		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+		return;
+	}
+	if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
+		retval = tty_insert_flip_char(tty->port, 0, TTY_OVERRUN);
+		if (retval)
+			msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
+	}
+	if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
+		retval = tty_insert_flip_char(tty->port, 0, TTY_PARITY);
+		if (retval)
+			msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
+	}
+	if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
+		int rx_count, rx_offset;
+
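+		/*
+		 * buffer_pending packs the retry state: the upper 16 bits
+		 * carry the number of characters still to push, the middle
+		 * bits the offset already consumed, and the low bits the
+		 * FIFO_OVERRUN/PARITY_ERROR/CHARS_NORMAL flags.
+		 */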
+		rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
+		rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
+		retval = tty_insert_flip_string(tty->port,
+			msm_uport->rx.buffer +
+			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)
+			+ rx_offset, rx_count);
+		msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
+						 PARITY_ERROR);
+		if (retval != rx_count)
+			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
+				retval << 8 | (rx_count - retval) << 16;
+	}
+	if (msm_uport->rx.buffer_pending) {
+		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
+				      msecs_to_jiffies(RETRY_TIMEOUT));
+	} else if (msm_uport->rx.flush <= FLUSH_IGNORE) {
+		MSM_HS_WARN("Pending buffers cleared, restarting");
+		clear_bit(msm_uport->rx.rx_inx,
+			&msm_uport->rx.pending_flag);
+		msm_hs_start_rx_locked(&msm_uport->uport);
+		msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
+	}
+	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+	tty_flip_buffer_push(tty->port);
+}
+
+static void msm_serial_hs_rx_work(struct kthread_work *work)
+{
+	int retval;
+	int rx_count = 0;
+	unsigned long status;
+	unsigned long flags;
+	unsigned int error_f = 0;
+	struct uart_port *uport;
+	struct msm_hs_port *msm_uport;
+	unsigned int flush = FLUSH_DATA_INVALID;
+	struct tty_struct *tty;
+	struct sps_event_notify *notify;
+	struct msm_hs_rx *rx;
+	struct sps_pipe *sps_pipe_handle;
+	struct platform_device *pdev;
+	const struct msm_serial_hs_platform_data *pdata;
+
+	msm_uport = container_of((struct kthread_work *) work,
+				 struct msm_hs_port, rx.kwork);
+	msm_hs_resource_vote(msm_uport);
+	uport = &msm_uport->uport;
+	tty = uport->state->port.tty;
+	notify = &msm_uport->notify;
+	rx = &msm_uport->rx;
+	pdev = to_platform_device(uport->dev);
+	pdata = pdev->dev.platform_data;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	if (!tty || rx->flush == FLUSH_SHUTDOWN) {
+		dev_err(uport->dev, "%s:Invalid driver state flush %d\n",
+				__func__, rx->flush);
+		MSM_HS_ERR("%s:Invalid driver state flush %d\n",
+				__func__, rx->flush);
+		spin_unlock_irqrestore(&uport->lock, flags);
+		msm_hs_resource_unvote(msm_uport);
+		return;
+	}
+
+	/*
+	 * Process all pending descriptors; also run once when nothing is
+	 * queued, which is the case when this is kicked off from termios.
+	 */
+	while (!rx->buffer_pending &&
+		(rx->pending_flag || !rx->queued_flag)) {
+		MSM_HS_DBG("%s(): Loop P 0x%lx Q 0x%lx", __func__,
+			rx->pending_flag, rx->queued_flag);
+
+		status = msm_hs_read(uport, UART_DM_SR);
+
+		MSM_HS_DBG("In %s\n", __func__);
+
+		/* Overrun is not connected to data in the FIFO */
+		if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
+			     (uport->read_status_mask & CREAD))) {
+			retval = tty_insert_flip_char(tty->port,
+							0, TTY_OVERRUN);
+			MSM_HS_WARN("%s(): RX Buffer Overrun Detected\n",
+				__func__);
+			if (!retval)
+				msm_uport->rx.buffer_pending |= TTY_OVERRUN;
+			uport->icount.buf_overrun++;
+			error_f = 1;
+		}
+
+		if (!(uport->ignore_status_mask & INPCK))
+			status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);
+
+		if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
+			/* Cannot distinguish parity from framing errors */
+			MSM_HS_WARN("msm_serial_hs: parity error\n");
+			uport->icount.parity++;
+			error_f = 1;
+			if (!(uport->ignore_status_mask & IGNPAR)) {
+				retval = tty_insert_flip_char(tty->port,
+							0, TTY_PARITY);
+				if (!retval)
+					msm_uport->rx.buffer_pending
+								|= TTY_PARITY;
+			}
+		}
+
+		if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
+			MSM_HS_DBG("msm_serial_hs: Rx break\n");
+			uport->icount.brk++;
+			error_f = 1;
+			if (!(uport->ignore_status_mask & IGNBRK)) {
+				retval = tty_insert_flip_char(tty->port,
+								0, TTY_BREAK);
+				if (!retval)
+					msm_uport->rx.buffer_pending
+								|= TTY_BREAK;
+			}
+		}
+
+		if (error_f)
+			msm_hs_write(uport, UART_DM_CR,	RESET_ERROR_STATUS);
+		flush = msm_uport->rx.flush;
+		if (flush == FLUSH_IGNORE)
+			if (!msm_uport->rx.buffer_pending) {
+				MSM_HS_DBG("%s: calling start_rx_locked\n",
+					__func__);
+				msm_hs_start_rx_locked(uport);
+			}
+		if (flush >= FLUSH_DATA_INVALID)
+			goto out;
+
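+		/*
+		 * The completed transfer size for this descriptor was saved
+		 * by the SPS rx callback into rx->iovec[].
+		 */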
+		rx_count = msm_uport->rx.iovec[msm_uport->rx.rx_inx].size;
+		hex_dump_ipc(msm_uport, rx->ipc_rx_ctxt, "Rx",
+			(msm_uport->rx.buffer +
+			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)),
+			msm_uport->rx.iovec[msm_uport->rx.rx_inx].addr,
+			rx_count);
+
+		/*
+		 * We are in a spin-locked context; the same lock is taken
+		 * everywhere these flags are updated
+		 */
+		if (0 != (uport->read_status_mask & CREAD)) {
+			if (!test_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.pending_flag) &&
+			    !test_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.queued_flag))
+				MSM_HS_ERR("%s: RX INX not set", __func__);
+			else if (test_bit(msm_uport->rx.rx_inx,
+					&msm_uport->rx.pending_flag) &&
+				!test_bit(msm_uport->rx.rx_inx,
+					&msm_uport->rx.queued_flag)) {
+				MSM_HS_DBG("%s(): Clear Pending Bit %d",
+					__func__, msm_uport->rx.rx_inx);
+
+				retval = tty_insert_flip_string(tty->port,
+					msm_uport->rx.buffer +
+					(msm_uport->rx.rx_inx *
+					UARTDM_RX_BUF_SIZE),
+					rx_count);
+
+				if (retval != rx_count) {
+					MSM_HS_INFO("%s(): ret %d rx_count %d",
+						__func__, retval, rx_count);
+					msm_uport->rx.buffer_pending |=
+					CHARS_NORMAL | retval << 5 |
+					(rx_count - retval) << 16;
+				}
+			} else
+				MSM_HS_ERR("%s: Error in inx %d", __func__,
+					msm_uport->rx.rx_inx);
+		}
+
+		if (!msm_uport->rx.buffer_pending) {
+			msm_uport->rx.flush = FLUSH_NONE;
+			msm_uport->rx_bam_inprogress = true;
+			sps_pipe_handle = rx->prod.pipe_handle;
+			MSM_HS_DBG("Queueing bam descriptor\n");
+			/* Queue transfer request to SPS */
+			clear_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.pending_flag);
+			msm_hs_queue_rx_desc(msm_uport);
+			msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
+			msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+			msm_uport->rx_bam_inprogress = false;
+			wake_up(&msm_uport->rx.wait);
+		} else
+			break;
+
+	}
+out:
+	if (msm_uport->rx.buffer_pending) {
+		MSM_HS_WARN("%s: tty buffer exhausted. Stalling\n", __func__);
+		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
+				      msecs_to_jiffies(RETRY_TIMEOUT));
+	}
+	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
+	spin_unlock_irqrestore(&uport->lock, flags);
+	if (flush < FLUSH_DATA_INVALID)
+		tty_flip_buffer_push(tty->port);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static void msm_hs_start_tx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	/* Bail if transfer in progress */
+	if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
+		MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
+			__func__, tx->flush, tx->dma_in_flight);
+		return;
+	}
+
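+	/*
+	 * Hand the actual submission off to the tx kworker, which runs
+	 * msm_serial_hs_tx_work outside of this spinlocked context.
+	 */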
+	if (!tx->dma_in_flight) {
+		tx->dma_in_flight = true;
+		kthread_queue_work(&msm_uport->tx.kworker,
+			&msm_uport->tx.kwork);
+	}
+}
+
+/**
+ * Callback notification from SPS driver
+ *
+ * This callback function gets called from the SPS driver when the
+ * requested SPS data transfer is completed.
+ *
+ */
+
+static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
+{
+	struct msm_hs_port *msm_uport =
+		(struct msm_hs_port *)
+		((struct sps_event_notify *)notify)->user;
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+
+	msm_uport->notify = *notify;
+	MSM_HS_INFO("tx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+		&addr, notify->data.transfer.iovec.size,
+		notify->data.transfer.iovec.flags);
+
+	del_timer(&msm_uport->tx.tx_timeout_timer);
+	MSM_HS_DBG("%s(): Queue kthread work", __func__);
+	kthread_queue_work(&msm_uport->tx.kworker, &msm_uport->tx.kwork);
+}
+
+static void msm_serial_hs_tx_work(struct kthread_work *work)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport =
+			container_of((struct kthread_work *)work,
+			struct msm_hs_port, tx.kwork);
+	struct uart_port *uport = &msm_uport->uport;
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	/*
+	 * Handle the transmit-buffer bookkeeping in BAM mode,
+	 * equivalent to what legacy mode used to do
+	 */
+	msm_hs_resource_vote(msm_uport);
+	if (tx->flush >= FLUSH_STOP) {
+		spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+		tx->flush = FLUSH_NONE;
+		MSM_HS_DBG("%s(): calling submit_tx", __func__);
+		msm_hs_submit_tx_locked(uport);
+		spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+		msm_hs_resource_unvote(msm_uport);
+		return;
+	}
+
+	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+	if (!uart_circ_empty(tx_buf))
+		tx_buf->tail = (tx_buf->tail +
+		tx->tx_count) & ~UART_XMIT_SIZE;
+	else
+		MSM_HS_DBG("%s:circ buffer is empty\n", __func__);
+
+	wake_up(&msm_uport->tx.wait);
+
+	uport->icount.tx += tx->tx_count;
+
+	/*
+	 * Send the next chunk of data. If the circular buffer is
+	 * empty, transmission stops; if a clock-off was requested,
+	 * the clock-off sequence is kicked off
+	 */
+	MSM_HS_DBG("%s(): calling submit_tx", __func__);
+	msm_hs_submit_tx_locked(uport);
+
+	if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
+		uart_write_wakeup(uport);
+
+	spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static void
+msm_hs_mark_proc_rx_desc(struct msm_hs_port *msm_uport,
+			struct sps_event_notify *notify)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+	/* divide by UARTDM_RX_BUF_SIZE */
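+	/* (the shift by 9 assumes UARTDM_RX_BUF_SIZE is 512 bytes) */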
+	int inx = (addr - rx->rbuffer) >> 9;
+
+	set_bit(inx, &rx->pending_flag);
+	clear_bit(inx, &rx->queued_flag);
+	rx->iovec[inx] = notify->data.transfer.iovec;
+	MSM_HS_DBG("Clear Q, Set P Bit %d, Q 0x%lx P 0x%lx",
+		inx, rx->queued_flag, rx->pending_flag);
+}
+
+/**
+ * Callback notification from SPS driver
+ *
+ * This callback function gets called from the SPS driver when the
+ * requested SPS data transfer is completed.
+ *
+ */
+
+static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
+{
+
+	struct msm_hs_port *msm_uport =
+		(struct msm_hs_port *)
+		((struct sps_event_notify *)notify)->user;
+	struct uart_port *uport;
+	unsigned long flags;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+	/* divide by UARTDM_RX_BUF_SIZE */
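+	/* (the shift by 9 assumes UARTDM_RX_BUF_SIZE is 512 bytes) */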
+	int inx = (addr - rx->rbuffer) >> 9;
+
+	uport = &(msm_uport->uport);
+	msm_uport->notify = *notify;
+	MSM_HS_INFO("rx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+		&addr, notify->data.transfer.iovec.size,
+		notify->data.transfer.iovec.flags);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_mark_proc_rx_desc(msm_uport, notify);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	if (msm_uport->rx.flush == FLUSH_NONE) {
+		/* Test if others are queued */
+		if (msm_uport->rx.pending_flag & ~(1 << inx)) {
+			MSM_HS_DBG("%s(): inx 0x%x, 0x%lx not processed",
+			__func__, inx,
+			msm_uport->rx.pending_flag & ~(1<<inx));
+		}
+		kthread_queue_work(&msm_uport->rx.kworker,
+				&msm_uport->rx.kwork);
+		MSM_HS_DBG("%s(): Scheduled rx_tlet", __func__);
+	}
+}
+
+/*
+ *  Standard API, Current states of modem control inputs
+ *
+ * Since CTS can be handled entirely by HARDWARE we always
+ * indicate clear to send and count on the TX FIFO to block when
+ * it fills up.
+ *
+ * - TIOCM_DCD
+ * - TIOCM_CTS
+ * - TIOCM_DSR
+ * - TIOCM_RI
+ *  DCD and DSR are unsupported and always reported high; RI is always
+ *  reported low.
+ */
+static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
+{
+	return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
+/*
+ *  Standard API, Set or clear RFR_signal
+ *
+ * To set RFR high (indicating we are not ready for data), disable auto
+ * ready-for-receiving and then drive RFR_N high. To set RFR low, simply
+ * re-enable auto ready-for-receiving; the hardware lowers the RFR signal
+ * when it is ready.
+ */
+void msm_hs_set_mctrl_locked(struct uart_port *uport,
+				    unsigned int mctrl)
+{
+	unsigned int set_rts;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+	/* RTS is active low */
+	set_rts = TIOCM_RTS & mctrl ? 0 : 1;
+	MSM_HS_INFO("%s: set_rts %d\n", __func__, set_rts);
+
+	if (set_rts)
+		msm_hs_disable_flow_control(uport, false);
+	else
+		msm_hs_enable_flow_control(uport, false);
+}
+
+void msm_hs_set_mctrl(struct uart_port *uport,
+				    unsigned int mctrl)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_set_mctrl_locked(uport, mctrl);
+	spin_unlock_irqrestore(&uport->lock, flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+EXPORT_SYMBOL(msm_hs_set_mctrl);
+
+/* Standard API, Enable modem status (CTS) interrupt  */
+static void msm_hs_enable_ms_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+
+	/* Enable DELTA_CTS Interrupt */
+	msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/* Ensure register IO completion */
+	mb();
+
+}
+
+/*
+ *  Standard API, Break Signal
+ *
+ * Control the transmission of a break signal: ctl == 0 terminates the
+ * break signal, ctl != 0 starts it.
+ */
+static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_write(uport, UART_DM_CR, ctl ? START_BREAK : STOP_BREAK);
+	/* Ensure register IO completion */
+	mb();
+	spin_unlock_irqrestore(&uport->lock, flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
+{
+	if (cfg_flags & UART_CONFIG_TYPE)
+		uport->type = PORT_MSM;
+
+}
+
+/*  Handle CTS changes (Called from interrupt handler) */
+static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	/* clear interrupt */
+	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
+	/* Calling CLOCK API. Hence mb() is required here. */
+	mb();
+	uport->icount.cts++;
+
+	/* clear the IOCTL TIOCMIWAIT if called */
+	wake_up_interruptible(&uport->state->port.delta_msr_wait);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static irqreturn_t msm_hs_isr(int irq, void *dev)
+{
+	unsigned long flags;
+	unsigned int isr_status;
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
+	struct uart_port *uport = &msm_uport->uport;
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	isr_status = msm_hs_read(uport, UART_DM_MISR);
+	MSM_HS_INFO("%s: DM_ISR: 0x%x\n", __func__, isr_status);
+	dump_uart_hs_registers(msm_uport);
+
+	/* Uart RX starting */
+	if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
+		MSM_HS_DBG("%s:UARTDM_ISR_RXLEV_BMSK\n", __func__);
+		msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
+		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+		/* Complete device write for IMR. Hence mb() is required. */
+		mb();
+	}
+	/* Stale rx interrupt */
+	if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
+		msm_hs_write(uport, UART_DM_CR, STALE_EVENT_DISABLE);
+		msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+		/*
+		 * Complete device write before calling DMOV API. Hence
+		 * mb() is required here.
+		 */
+		mb();
+		MSM_HS_DBG("%s:Stale Interrupt\n", __func__);
+	}
+	/* tx ready interrupt */
+	if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
+		MSM_HS_DBG("%s: ISR_TX_READY Interrupt\n", __func__);
+		/* Clear  TX Ready */
+		msm_hs_write(uport, UART_DM_CR, CLEAR_TX_READY);
+
+		/*
+		 * Complete both writes before starting new TX.
+		 * Hence mb() is required here.
+		 */
+		mb();
+		/* Complete DMA TX transactions and submit new transactions */
+
+		/* Do not update tx_buf.tail if uart_flush_buffer was
+		 * already called by the serial core
+		 */
+		if (!uart_circ_empty(tx_buf))
+			tx_buf->tail = (tx_buf->tail +
+					tx->tx_count) & ~UART_XMIT_SIZE;
+
+		tx->dma_in_flight = false;
+
+		uport->icount.tx += tx->tx_count;
+
+		if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
+			uart_write_wakeup(uport);
+	}
+	if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
+		/* TX FIFO is empty */
+		msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
+		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+		MSM_HS_DBG("%s: TXLEV Interrupt\n", __func__);
+		/*
+		 * Complete device write before starting clock_off request.
+		 * Hence mb() is required here.
+		 */
+		mb();
+		queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
+	}
+
+	/* Change in CTS interrupt */
+	if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
+		msm_hs_handle_delta_cts_locked(uport);
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* The following two functions provide interfaces to get the underlying
+ * port structure (struct uart_port or struct msm_hs_port) given
+ * the port index. msm_hs_get_uart_port is called by clients.
+ * The function msm_hs_get_hs_port is for internal use.
+ */
+
+struct uart_port *msm_hs_get_uart_port(int port_index)
+{
+	struct uart_state *state = msm_hs_driver.state + port_index;
+
+	/* The uart_driver structure stores the states in an array.
+	 * Thus the corresponding offset from the drv->state returns
+	 * the state for the uart_port that is requested
+	 */
+	if (port_index == state->uart_port->line)
+		return state->uart_port;
+
+	return NULL;
+}
+EXPORT_SYMBOL(msm_hs_get_uart_port);
+
+static struct msm_hs_port *msm_hs_get_hs_port(int port_index)
+{
+	struct uart_port *uport = msm_hs_get_uart_port(port_index);
+
+	if (uport)
+		return UARTDM_TO_MSM(uport);
+	return NULL;
+}
+
+void enable_wakeup_interrupt(struct msm_hs_port *msm_uport)
+{
+	unsigned long flags;
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (!is_use_low_power_wakeup(msm_uport))
+		return;
+	if (msm_uport->wakeup.freed)
+		return;
+
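+	/*
+	 * Switch from the UART core IRQ to the GPIO wakeup IRQ so that rx
+	 * activity can wake the port while its clocks are off.
+	 */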
+	if (!(msm_uport->wakeup.enabled)) {
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_uport->wakeup.ignore = 1;
+		msm_uport->wakeup.enabled = true;
+		spin_unlock_irqrestore(&uport->lock, flags);
+		disable_irq(uport->irq);
+		enable_irq(msm_uport->wakeup.irq);
+	} else {
+		MSM_HS_WARN("%s:Wake up IRQ already enabled", __func__);
+	}
+}
+
+void disable_wakeup_interrupt(struct msm_hs_port *msm_uport)
+{
+	unsigned long flags;
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (!is_use_low_power_wakeup(msm_uport))
+		return;
+	if (msm_uport->wakeup.freed)
+		return;
+
+	if (msm_uport->wakeup.enabled) {
+		disable_irq_nosync(msm_uport->wakeup.irq);
+		enable_irq(uport->irq);
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_uport->wakeup.enabled = false;
+		spin_unlock_irqrestore(&uport->lock, flags);
+	} else {
+		MSM_HS_WARN("%s:Wake up IRQ already disabled", __func__);
+	}
+}
+
+void msm_hs_resource_off(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned int data;
+
+	MSM_HS_DBG("%s(): begin", __func__);
+	msm_hs_disable_flow_control(uport, false);
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_hs_disconnect_rx(uport);
+
+	/* disable dlink */
+	if (msm_uport->tx.flush == FLUSH_NONE)
+		wait_event_timeout(msm_uport->tx.wait,
+			msm_uport->tx.flush == FLUSH_STOP, 500);
+
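+	/* Tear down the TX BAM pipe only if it is not already shut down */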
+	if (msm_uport->tx.flush != FLUSH_SHUTDOWN) {
+		data = msm_hs_read(uport, UART_DM_DMEN);
+		data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
+		msm_hs_write(uport, UART_DM_DMEN, data);
+		sps_tx_disconnect(msm_uport);
+	}
+	if (!atomic_read(&msm_uport->client_req_state))
+		msm_hs_enable_flow_control(uport, false);
+}
+
+void msm_hs_resource_on(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned int data;
+	unsigned long flags;
+
+	if (msm_uport->rx.flush == FLUSH_SHUTDOWN ||
+	msm_uport->rx.flush == FLUSH_STOP) {
+		msm_hs_write(uport, UART_DM_CR, RESET_RX);
+		data = msm_hs_read(uport, UART_DM_DMEN);
+		data |= UARTDM_RX_BAM_ENABLE_BMSK;
+		msm_hs_write(uport, UART_DM_DMEN, data);
+	}
+
+	msm_hs_spsconnect_tx(msm_uport);
+	if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+		msm_hs_spsconnect_rx(uport);
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_hs_start_rx_locked(uport);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	}
+}
+
+/* Request to turn off uart clock once pending TX is flushed */
+int msm_hs_request_clock_off(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	int ret = 0;
+	int client_count = 0;
+
+	mutex_lock(&msm_uport->mtx);
+	/*
+	 * If we're in the middle of a system suspend, don't process these
+	 * userspace/kernel API commands.
+	 */
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
+		MSM_HS_WARN("%s:Can't process clk request during suspend",
+			__func__);
+		ret = -EIO;
+	}
+	mutex_unlock(&msm_uport->mtx);
+	if (ret)
+		goto exit_request_clock_off;
+
+	if (atomic_read(&msm_uport->client_count) <= 0) {
+		MSM_HS_WARN("%s(): ioctl client count is not positive, check voting",
+			__func__);
+		ret = -EPERM;
+		goto exit_request_clock_off;
+	}
+	/* Set the flag to disable flow control and wakeup irq */
+	if (msm_uport->obs)
+		atomic_set(&msm_uport->client_req_state, 1);
+	msm_hs_resource_unvote(msm_uport);
+	atomic_dec(&msm_uport->client_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s: Client_Count %d\n", __func__,
+			client_count);
+exit_request_clock_off:
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_request_clock_off);
+
+int msm_hs_request_clock_on(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	int client_count;
+	int ret = 0;
+
+	mutex_lock(&msm_uport->mtx);
+	/*
+	 * If we're in the middle of a system suspend, don't process these
+	 * userspace/kernel API commands.
+	 */
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
+		MSM_HS_WARN("%s:Can't process clk request during suspend",
+			__func__);
+		ret = -EIO;
+	}
+	mutex_unlock(&msm_uport->mtx);
+	if (ret)
+		goto exit_request_clock_on;
+
+	msm_hs_resource_vote(UARTDM_TO_MSM(uport));
+	atomic_inc(&msm_uport->client_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s: Client_Count %d\n", __func__,
+			client_count);
+
+	/* Clear the flag */
+	if (msm_uport->obs)
+		atomic_set(&msm_uport->client_req_state, 0);
+exit_request_clock_on:
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_request_clock_on);
+
+static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
+{
+	unsigned int wakeup = 0;
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
+	struct uart_port *uport = &msm_uport->uport;
+	struct tty_struct *tty = NULL;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	if (msm_uport->wakeup.ignore)
+		msm_uport->wakeup.ignore = 0;
+	else
+		wakeup = 1;
+
+	if (wakeup) {
+		/*
+		 * Port was clocked off during rx, wake up and
+		 * optionally inject char into tty rx
+		 */
+		if (msm_uport->wakeup.inject_rx) {
+			tty = uport->state->port.tty;
+			tty_insert_flip_char(tty->port,
+					     msm_uport->wakeup.rx_to_inject,
+					     TTY_NORMAL);
+			hex_dump_ipc(msm_uport, msm_uport->rx.ipc_rx_ctxt,
+				"Rx Inject",
+				&msm_uport->wakeup.rx_to_inject, 0, 1);
+			MSM_HS_INFO("Wakeup ISR. Ignore %d\n",
+						msm_uport->wakeup.ignore);
+		}
+	}
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	if (wakeup && msm_uport->wakeup.inject_rx)
+		tty_flip_buffer_push(tty->port);
+	return IRQ_HANDLED;
+}
+
+static const char *msm_hs_type(struct uart_port *port)
+{
+	return "MSM HS UART";
+}
+
+/**
+ * msm_hs_unconfig_uart_gpios: Unconfigures UART GPIOs
+ * @uport: uart port
+ */
+static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
+{
+	struct platform_device *pdev = to_platform_device(uport->dev);
+	const struct msm_serial_hs_platform_data *pdata =
+					pdev->dev.platform_data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	int ret;
+
+	if (msm_uport->use_pinctrl) {
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_suspend);
+		if (ret)
+			MSM_HS_ERR("%s():Failed to pinctrl set_state",
+				__func__);
+	} else if (pdata) {
+		if (gpio_is_valid(pdata->uart_tx_gpio))
+			gpio_free(pdata->uart_tx_gpio);
+		if (gpio_is_valid(pdata->uart_rx_gpio))
+			gpio_free(pdata->uart_rx_gpio);
+		if (gpio_is_valid(pdata->uart_cts_gpio))
+			gpio_free(pdata->uart_cts_gpio);
+		if (gpio_is_valid(pdata->uart_rfr_gpio))
+			gpio_free(pdata->uart_rfr_gpio);
+	} else
+		MSM_HS_ERR("Error:Pdata is NULL.\n");
+}
+
+/**
+ * msm_hs_config_uart_gpios - Configures UART GPIOs
+ * @uport: uart port
+ */
+static int msm_hs_config_uart_gpios(struct uart_port *uport)
+{
+	struct platform_device *pdev = to_platform_device(uport->dev);
+	const struct msm_serial_hs_platform_data *pdata =
+					pdev->dev.platform_data;
+	int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (!IS_ERR_OR_NULL(msm_uport->pinctrl)) {
+		MSM_HS_DBG("%s(): Using Pinctrl", __func__);
+		msm_uport->use_pinctrl = true;
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_active);
+		if (ret)
+			MSM_HS_ERR("%s(): Failed to pinctrl set_state",
+				__func__);
+		return ret;
+	} else if (pdata) {
+		/* Fall back to using gpio lib */
+		if (gpio_is_valid(pdata->uart_tx_gpio)) {
+			ret = gpio_request(pdata->uart_tx_gpio,
+							"UART_TX_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_tx_gpio);
+				goto exit_uart_config;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_rx_gpio)) {
+			ret = gpio_request(pdata->uart_rx_gpio,
+							"UART_RX_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_rx_gpio);
+				goto uart_tx_unconfig;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_cts_gpio)) {
+			ret = gpio_request(pdata->uart_cts_gpio,
+							"UART_CTS_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_cts_gpio);
+				goto uart_rx_unconfig;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_rfr_gpio)) {
+			ret = gpio_request(pdata->uart_rfr_gpio,
+							"UART_RFR_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_rfr_gpio);
+				goto uart_cts_unconfig;
+			}
+		}
+	} else {
+		MSM_HS_ERR("Pdata is NULL.\n");
+		ret = -EINVAL;
+	}
+	return ret;
+
+uart_cts_unconfig:
+	if (gpio_is_valid(pdata->uart_cts_gpio))
+		gpio_free(pdata->uart_cts_gpio);
+uart_rx_unconfig:
+	if (gpio_is_valid(pdata->uart_rx_gpio))
+		gpio_free(pdata->uart_rx_gpio);
+uart_tx_unconfig:
+	if (gpio_is_valid(pdata->uart_tx_gpio))
+		gpio_free(pdata->uart_tx_gpio);
+exit_uart_config:
+	return ret;
+}
+
+
+static void msm_hs_get_pinctrl_configs(struct uart_port *uport)
+{
+	struct pinctrl_state *set_state;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_uport->pinctrl = devm_pinctrl_get(uport->dev);
+	if (IS_ERR_OR_NULL(msm_uport->pinctrl)) {
+		MSM_HS_DBG("%s(): Pinctrl not defined", __func__);
+	} else {
+		MSM_HS_DBG("%s(): Using Pinctrl", __func__);
+		msm_uport->use_pinctrl = true;
+
+		set_state = pinctrl_lookup_state(msm_uport->pinctrl,
+						PINCTRL_STATE_DEFAULT);
+		if (IS_ERR_OR_NULL(set_state)) {
+			dev_err(uport->dev,
+				"pinctrl lookup failed for default state");
+			goto pinctrl_fail;
+		}
+
+		MSM_HS_DBG("%s(): Pinctrl state active %p\n", __func__,
+			set_state);
+		msm_uport->gpio_state_active = set_state;
+
+		set_state = pinctrl_lookup_state(msm_uport->pinctrl,
+						PINCTRL_STATE_SLEEP);
+		if (IS_ERR_OR_NULL(set_state)) {
+			dev_err(uport->dev,
+				"pinctrl lookup failed for sleep state");
+			goto pinctrl_fail;
+		}
+
+		MSM_HS_DBG("%s(): Pinctrl state sleep %p\n", __func__,
+			set_state);
+		msm_uport->gpio_state_suspend = set_state;
+		return;
+	}
+pinctrl_fail:
+	msm_uport->pinctrl = NULL;
+}
+
+/* Called when port is opened */
+static int msm_hs_startup(struct uart_port *uport)
+{
+	int ret;
+	int rfr_level;
+	unsigned long flags;
+	unsigned int data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
+	struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
+
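+	/*
+	 * Program the auto-RFR level 16 entries below the FIFO size (or at
+	 * the full size for small FIFOs) so RFR is asserted before the RX
+	 * FIFO overflows.
+	 */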
+	rfr_level = uport->fifosize;
+	if (rfr_level > 16)
+		rfr_level -= 16;
+
+	tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
+				      DMA_TO_DEVICE);
+
+	/* turn on uart clk */
+	msm_hs_resource_vote(msm_uport);
+
+	if (is_use_low_power_wakeup(msm_uport)) {
+		ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
+					msm_hs_wakeup_isr,
+					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					"msm_hs_wakeup", msm_uport);
+		if (unlikely(ret)) {
+			MSM_HS_ERR("%s():Err getting uart wakeup_irq %d\n",
+				  __func__, ret);
+			goto unvote_exit;
+		}
+
+		msm_uport->wakeup.freed = false;
+		disable_irq(msm_uport->wakeup.irq);
+		msm_uport->wakeup.enabled = false;
+
+		ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
+		if (unlikely(ret)) {
+			MSM_HS_ERR("%s():Err setting wakeup irq\n", __func__);
+			goto free_uart_irq;
+		}
+	}
+
+	ret = msm_hs_config_uart_gpios(uport);
+	if (ret) {
+		MSM_HS_ERR("Uart GPIO request failed\n");
+		goto free_uart_irq;
+	}
+
+	msm_hs_write(uport, UART_DM_DMEN, 0);
+
+	/* Connect TX */
+	sps_tx_disconnect(msm_uport);
+	ret = msm_hs_spsconnect_tx(msm_uport);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: SPS connect failed for TX");
+		goto unconfig_uart_gpios;
+	}
+
+	/* Connect RX */
+	kthread_flush_worker(&msm_uport->rx.kworker);
+	if (rx->flush != FLUSH_SHUTDOWN)
+		disconnect_rx_endpoint(msm_uport);
+	ret = msm_hs_spsconnect_rx(uport);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: SPS connect failed for RX");
+		goto sps_disconnect_tx;
+	}
+
+	data = (UARTDM_BCR_TX_BREAK_DISABLE | UARTDM_BCR_STALE_IRQ_EMPTY |
+		UARTDM_BCR_RX_DMRX_LOW_EN | UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL |
+		UARTDM_BCR_RX_DMRX_1BYTE_RES_EN);
+	msm_hs_write(uport, UART_DM_BCR, data);
+
+	/* Set auto RFR Level */
+	data = msm_hs_read(uport, UART_DM_MR1);
+	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
+	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
+	data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
+	data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
+	msm_hs_write(uport, UART_DM_MR1, data);
+
+	/* Make sure RXSTALE count is non-zero */
+	data = msm_hs_read(uport, UART_DM_IPR);
+	if (!data) {
+		data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
+		msm_hs_write(uport, UART_DM_IPR, data);
+	}
+
+	/* Assume no flow control, unless termios sets it */
+	msm_uport->flow_control = false;
+	msm_hs_disable_flow_control(uport, true);
+
+
+	/* Reset TX */
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+	msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
+	msm_hs_write(uport, UART_DM_CR, RESET_BREAK_INT);
+	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
+	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+	/* Turn on Uart Receiver */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
+
+	/* Turn on Uart Transmitter */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
+
+	tx->dma_in_flight = false;
+	MSM_HS_DBG("%s():desc usage flag 0x%lx", __func__, rx->queued_flag);
+	setup_timer(&(tx->tx_timeout_timer),
+			tx_timeout_handler,
+			(unsigned long) msm_uport);
+
+	/* Enable reading the current CTS, no harm even if CTS is ignored */
+	msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
+
+	/* TXLEV on empty TX fifo */
+	msm_hs_write(uport, UART_DM_TFWR, 4);
+	/*
+	 * Complete all device write related configuration before
+	 * queuing the RX request. Hence mb() is required here.
+	 */
+	mb();
+
+	ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
+			  "msm_hs_uart", msm_uport);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("%s():Error %d getting uart irq\n", __func__, ret);
+		goto sps_disconnect_rx;
+	}
+
+
+	spin_lock_irqsave(&uport->lock, flags);
+	atomic_set(&msm_uport->client_count, 0);
+	atomic_set(&msm_uport->client_req_state, 0);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s: Client_Count 0\n", __func__);
+	msm_hs_start_rx_locked(uport);
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	msm_hs_resource_unvote(msm_uport);
+	return 0;
+
+sps_disconnect_rx:
+	sps_disconnect(sps_pipe_handle_rx);
+sps_disconnect_tx:
+	sps_disconnect(sps_pipe_handle_tx);
+unconfig_uart_gpios:
+	msm_hs_unconfig_uart_gpios(uport);
+free_uart_irq:
+	free_irq(uport->irq, msm_uport);
+unvote_exit:
+	msm_hs_resource_unvote(msm_uport);
+	MSM_HS_ERR("%s(): Error return\n", __func__);
+	return ret;
+}
+
+/* Initialize tx and rx data structures */
+static int uartdm_init_port(struct uart_port *uport)
+{
+	int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+
+	init_waitqueue_head(&rx->wait);
+	init_waitqueue_head(&tx->wait);
+	init_waitqueue_head(&msm_uport->bam_disconnect_wait);
+
+	/* Init kernel threads for tx and rx */
+
+	kthread_init_worker(&rx->kworker);
+	rx->task = kthread_run(kthread_worker_fn,
+			&rx->kworker, "msm_serial_hs_%d_rx_work", uport->line);
+	if (IS_ERR(rx->task)) {
+		MSM_HS_ERR("%s(): error creating rx task", __func__);
+		ret = PTR_ERR(rx->task);
+		goto exit_lh_init;
+	}
+	kthread_init_work(&rx->kwork, msm_serial_hs_rx_work);
+
+	kthread_init_worker(&tx->kworker);
+	tx->task = kthread_run(kthread_worker_fn,
+			&tx->kworker, "msm_serial_hs_%d_tx_work", uport->line);
+	if (IS_ERR(tx->task)) {
+		MSM_HS_ERR("%s(): error creating tx task", __func__);
+		ret = PTR_ERR(tx->task);
+		goto exit_lh_init;
+	}
+
+	kthread_init_work(&tx->kwork, msm_serial_hs_tx_work);
+
+	rx->buffer = dma_alloc_coherent(uport->dev,
+				UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
+				 &rx->rbuffer, GFP_KERNEL);
+	if (!rx->buffer) {
+		MSM_HS_ERR("%s(): cannot allocate rx->buffer", __func__);
+		ret = -ENOMEM;
+		goto exit_lh_init;
+	}
+
+	/* Set up Uart Receive */
+	msm_hs_write(uport, UART_DM_RFWR, 32);
+	/* Write to BADR explicitly to set up FIFO sizes */
+	msm_hs_write(uport, UARTDM_BADR_ADDR, 64);
+
+	INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
+
+	return ret;
+exit_lh_init:
+	if (!IS_ERR_OR_NULL(rx->task))
+		kthread_stop(rx->task);
+	rx->task = NULL;
+	if (!IS_ERR_OR_NULL(tx->task))
+		kthread_stop(tx->task);
+	tx->task = NULL;
+	return ret;
+}
+
+struct msm_serial_hs_platform_data
+	*msm_hs_dt_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct msm_serial_hs_platform_data *pdata;
+	u32 rx_to_inject;
+	int ret;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+	/* UART TX GPIO */
+	pdata->uart_tx_gpio = of_get_named_gpio(node,
+					"qcom,tx-gpio", 0);
+	if (pdata->uart_tx_gpio < 0)
+		pr_err("uart_tx_gpio is not available\n");
+
+	/* UART RX GPIO */
+	pdata->uart_rx_gpio = of_get_named_gpio(node,
+					"qcom,rx-gpio", 0);
+	if (pdata->uart_rx_gpio < 0)
+		pr_err("uart_rx_gpio is not available\n");
+
+	/* UART CTS GPIO */
+	pdata->uart_cts_gpio = of_get_named_gpio(node,
+					"qcom,cts-gpio", 0);
+	if (pdata->uart_cts_gpio < 0)
+		pr_err("uart_cts_gpio is not available\n");
+
+	/* UART RFR GPIO */
+	pdata->uart_rfr_gpio = of_get_named_gpio(node,
+					"qcom,rfr-gpio", 0);
+	if (pdata->uart_rfr_gpio < 0)
+		pr_err("uart_rfr_gpio is not available\n");
+
+	pdata->no_suspend_delay = of_property_read_bool(node,
+				"qcom,no-suspend-delay");
+
+	pdata->obs = of_property_read_bool(node,
+				"qcom,msm-obs");
+	if (pdata->obs)
+		pr_err("%s:Out of Band sleep flag is set\n", __func__);
+
+	pdata->inject_rx_on_wakeup = of_property_read_bool(node,
+				"qcom,inject-rx-on-wakeup");
+
+	if (pdata->inject_rx_on_wakeup) {
+		ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
+						&rx_to_inject);
+		if (ret < 0) {
+			pr_err("Error: Rx_char_to_inject not specified.\n");
+			return ERR_PTR(ret);
+		}
+		pdata->rx_to_inject = (u8)rx_to_inject;
+	}
+
+	ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
+				&pdata->bam_tx_ep_pipe_index);
+	if (ret < 0) {
+		pr_err("Error: Getting UART BAM TX EP Pipe Index.\n");
+		return ERR_PTR(ret);
+	}
+
+	if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
+		pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
+		pr_err("Error: Invalid UART BAM TX EP Pipe Index.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
+					&pdata->bam_rx_ep_pipe_index);
+	if (ret < 0) {
+		pr_err("Error: Getting UART BAM RX EP Pipe Index.\n");
+		return ERR_PTR(ret);
+	}
+
+	if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
+		pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
+		pr_err("Error: Invalid UART BAM RX EP Pipe Index.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	pr_debug("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
+		"tx_gpio:%d rx_gpio:%d cts_gpio:%d rfr_gpio:%d",
+		pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
+		pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
+		pdata->uart_rfr_gpio);
+
+	return pdata;
+}
+
+
+/**
+ * Deallocate UART peripheral's SPS endpoint
+ * @msm_uport - Pointer to msm_hs_port structure
+ * @ep - Pointer to sps endpoint data structure
+ */
+
+static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
+				struct msm_hs_sps_ep_conn_data *ep)
+{
+	struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
+	struct sps_connect *sps_config = &ep->config;
+
+	dma_free_coherent(msm_uport->uport.dev,
+			sps_config->desc.size,
+			&sps_config->desc.phys_base,
+			GFP_KERNEL);
+	sps_free_endpoint(sps_pipe_handle);
+}
+
+
+/**
+ * Allocate UART peripheral's SPS endpoint
+ *
+ * This function allocates endpoint context
+ * by calling appropriate SPS driver APIs.
+ *
+ * @msm_uport - Pointer to msm_hs_port structure
+ * @ep - Pointer to sps endpoint data structure
+ * @is_produce - 1 means Producer endpoint
+ *             - 0 means Consumer endpoint
+ *
+ * @return - 0 if successful else negative value
+ */
+
+static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
+				struct msm_hs_sps_ep_conn_data *ep,
+				bool is_producer)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_handle;
+	struct sps_connect *sps_config = &ep->config;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Allocate endpoint context */
+	sps_pipe_handle = sps_alloc_endpoint();
+	if (!sps_pipe_handle) {
+		MSM_HS_ERR("%s(): sps_alloc_endpoint() failed!!\n"
+			"is_producer=%d", __func__, is_producer);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Get default connection configuration for an endpoint */
+	rc = sps_get_config(sps_pipe_handle, sps_config);
+	if (rc) {
+		MSM_HS_ERR("%s(): failed! pipe_handle=0x%p rc=%d",
+			__func__, sps_pipe_handle, rc);
+		goto get_config_err;
+	}
+
+	/* Modify the default connection configuration */
+	if (is_producer) {
+		/* For UART producer transfer, the source is the UART
+		 * peripheral whereas the destination is system memory
+		 */
+		sps_config->source = msm_uport->bam_handle;
+		sps_config->destination = SPS_DEV_HANDLE_MEM;
+		sps_config->mode = SPS_MODE_SRC;
+		sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
+		sps_config->dest_pipe_index = 0;
+		sps_event->callback = msm_hs_sps_rx_callback;
+	} else {
+		/* For UART consumer transfer, the source is system memory
+		 * whereas the destination is the UART peripheral
+		 */
+		sps_config->source = SPS_DEV_HANDLE_MEM;
+		sps_config->destination = msm_uport->bam_handle;
+		sps_config->mode = SPS_MODE_DEST;
+		sps_config->src_pipe_index = 0;
+		sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
+		sps_event->callback = msm_hs_sps_tx_callback;
+	}
+
+	sps_config->options = SPS_O_EOT | SPS_O_DESC_DONE | SPS_O_AUTO_ENABLE;
+	sps_config->event_thresh = 0x10;
+
+	/* Allocate maximum descriptor fifo size */
+	sps_config->desc.size =
+		(1 + UART_DMA_DESC_NR) * sizeof(struct sps_iovec);
+	sps_config->desc.base = dma_alloc_coherent(msm_uport->uport.dev,
+						sps_config->desc.size,
+						&sps_config->desc.phys_base,
+						GFP_KERNEL);
+	if (!sps_config->desc.base) {
+		rc = -ENOMEM;
+		MSM_HS_ERR("msm_serial_hs: dma_alloc_coherent() failed!!\n");
+		goto get_config_err;
+	}
+	memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+	sps_event->mode = SPS_TRIGGER_CALLBACK;
+
+	sps_event->options = SPS_O_DESC_DONE | SPS_O_EOT;
+	sps_event->user = (void *)msm_uport;
+
+	/* Now save the sps pipe handle */
+	ep->pipe_handle = sps_pipe_handle;
+	MSM_HS_DBG("msm_serial_hs: success !! %s: pipe_handle=0x%p\n"
+		"desc_fifo.phys_base=0x%pa\n",
+		is_producer ? "READ" : "WRITE",
+		sps_pipe_handle, &sps_config->desc.phys_base);
+	return 0;
+
+get_config_err:
+	sps_free_endpoint(sps_pipe_handle);
+out:
+	return rc;
+}
+
+/**
+ * Initialize SPS HW connected with UART core
+ *
+ * This function register BAM HW resources with
+ * SPS driver and then initialize 2 SPS endpoints
+ *
+ * msm_uport - Pointer to msm_hs_port structure
+ *
+ * @return - 0 if successful else negative value
+ */
+
+static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
+{
+	int rc = 0;
+	struct sps_bam_props bam = {0};
+	unsigned long bam_handle;
+
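+	/*
+	 * Reuse the BAM if it is already registered with the SPS driver;
+	 * otherwise register it here and keep the returned handle.
+	 */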
+	rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
+	if (rc || !bam_handle) {
+		bam.phys_addr = msm_uport->bam_mem;
+		bam.virt_addr = msm_uport->bam_base;
+		/*
+		 * This event threshold value is only significant for BAM-to-BAM
+		 * transfer. It's ignored for BAM-to-System mode transfer.
+		 */
+		bam.event_threshold = 0x10;	/* Pipe event threshold */
+		bam.summing_threshold = 1;	/* BAM event threshold */
+
+		/* The SPS driver will handle the UART BAM IRQ */
+		bam.irq = (u32)msm_uport->bam_irq;
+		bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
+
+		MSM_HS_DBG("msm_serial_hs: bam physical base=0x%pa\n",
+							&bam.phys_addr);
+		MSM_HS_DBG("msm_serial_hs: bam virtual base=0x%p\n",
+							bam.virt_addr);
+
+		/* Register UART Peripheral BAM device to SPS driver */
+		rc = sps_register_bam_device(&bam, &bam_handle);
+		if (rc) {
+			MSM_HS_ERR("%s: BAM device register failed\n",
+				  __func__);
+			return rc;
+		}
+		MSM_HS_DBG("%s:BAM device registered. bam_handle=0x%lx",
+			   __func__, bam_handle);
+	}
+	msm_uport->bam_handle = bam_handle;
+
+	rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
+				UART_SPS_PROD_PERIPHERAL);
+	if (rc) {
+		MSM_HS_ERR("%s: Failed to Init Producer BAM-pipe", __func__);
+		goto deregister_bam;
+	}
+
+	rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
+				UART_SPS_CONS_PERIPHERAL);
+	if (rc) {
+		MSM_HS_ERR("%s: Failed to Init Consumer BAM-pipe", __func__);
+		goto deinit_ep_conn_prod;
+	}
+	return 0;
+
+deinit_ep_conn_prod:
+	msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
+deregister_bam:
+	sps_deregister_bam_device(msm_uport->bam_handle);
+	return rc;
+}
+
+
+static bool deviceid[UARTDM_NR] = {0};
+/*
+ * The mutex synchronizes grabbing the next free device number
+ * whether or not an alias is used. When an alias is used,
+ * msm_hs_dt_to_pdata gets it and the boolean array
+ * is accordingly updated with device_id_set_used. If no alias
+ * is used, then device_id_grab_next_free sets that array.
+ */
+static DEFINE_MUTEX(mutex_next_device_id);
+
+static int device_id_grab_next_free(void)
+{
+	int i;
+	int ret = -ENODEV;
+
+	mutex_lock(&mutex_next_device_id);
+	for (i = 0; i < UARTDM_NR; i++)
+		if (!deviceid[i]) {
+			ret = i;
+			deviceid[i] = true;
+			break;
+		}
+	mutex_unlock(&mutex_next_device_id);
+	return ret;
+}
+
+static int device_id_set_used(int index)
+{
+	int ret = 0;
+
+	mutex_lock(&mutex_next_device_id);
+	if (deviceid[index])
+		ret = -ENODEV;
+	else
+		deviceid[index] = true;
+	mutex_unlock(&mutex_next_device_id);
+	return ret;
+}
+
+static void obs_manage_irq(struct msm_hs_port *msm_uport, bool en)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (msm_uport->obs) {
+		if (en)
+			enable_irq(uport->irq);
+		else
+			disable_irq(uport->irq);
+	}
+}
+
+static void msm_hs_pm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	int ret;
+	int client_count = 0;
+
+	if (!msm_uport)
+		goto err_suspend;
+	mutex_lock(&msm_uport->mtx);
+
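+	/*
+	 * Runtime suspend: mark the port suspended, release UART/BAM
+	 * resources and clock/bus votes, then arm the wakeup interrupt
+	 * unless the client explicitly requested the clocks off.
+	 */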
+	client_count = atomic_read(&msm_uport->client_count);
+	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	msm_hs_resource_off(msm_uport);
+	obs_manage_irq(msm_uport, false);
+	msm_hs_clk_bus_unvote(msm_uport);
+
+	/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
+	if (msm_uport->obs) {
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+			msm_uport->gpio_state_suspend);
+		if (ret)
+			MSM_HS_ERR("%s():Error selecting pinctrl suspend state",
+				__func__);
+	}
+
+	if (!atomic_read(&msm_uport->client_req_state))
+		enable_wakeup_interrupt(msm_uport);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s: PM State Suspended client_count %d\n", __func__,
+								client_count);
+	mutex_unlock(&msm_uport->mtx);
+	return;
+err_suspend:
+	pr_err("%s(): invalid uport", __func__);
+}
+
+static int msm_hs_pm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	int ret = 0;
+	int client_count = 0;
+
+	if (!msm_uport) {
+		dev_err(dev, "%s:Invalid uport\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&msm_uport->mtx);
+	client_count = atomic_read(&msm_uport->client_count);
+	if (msm_uport->pm_state == MSM_HS_PM_ACTIVE)
+		goto exit_pm_resume;
+	if (!atomic_read(&msm_uport->client_req_state))
+		disable_wakeup_interrupt(msm_uport);
+
+	/* For OBS, don't use wakeup interrupt, set gpio to active state */
+	if (msm_uport->obs) {
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_active);
+		if (ret)
+			MSM_HS_ERR("%s():Error selecting active state",
+				 __func__);
+	}
+
+	ret = msm_hs_clk_bus_vote(msm_uport);
+	if (ret) {
+		MSM_HS_ERR("%s:Failed clock vote %d\n", __func__, ret);
+		dev_err(dev, "%s:Failed clock vote %d\n", __func__, ret);
+		goto exit_pm_resume;
+	}
+	obs_manage_irq(msm_uport, true);
+	msm_uport->pm_state = MSM_HS_PM_ACTIVE;
+	msm_hs_resource_on(msm_uport);
+
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s:PM State:Active client_count %d\n", __func__, client_count);
+exit_pm_resume:
+	mutex_unlock(&msm_uport->mtx);
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int msm_hs_pm_sys_suspend_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	enum msm_hs_pm_state prev_pwr_state;
+	int clk_cnt, client_count, ret = 0;
+
+	if (IS_ERR_OR_NULL(msm_uport))
+		return -ENODEV;
+
+	mutex_lock(&msm_uport->mtx);
+
+	/*
+	 * If there is an active clk request or an impending userspace request
+	 * fail the suspend callback.
+	 */
+	clk_cnt = atomic_read(&msm_uport->resource_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d\n",
+				 __func__, clk_cnt, client_count);
+		ret = -EBUSY;
+		goto exit_suspend_noirq;
+	}
+
+	prev_pwr_state = msm_uport->pm_state;
+	msm_uport->pm_state = MSM_HS_PM_SYS_SUSPENDED;
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s:PM State:Sys-Suspended client_count %d\n", __func__,
+								client_count);
+exit_suspend_noirq:
+	mutex_unlock(&msm_uport->mtx);
+	return ret;
+};
+
+static int msm_hs_pm_sys_resume_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	if (IS_ERR_OR_NULL(msm_uport))
+		return -ENODEV;
+	/*
+	 * Note system-pm resume and update the state
+	 * variable. Resource activation will be done
+	 * when transfer is requested.
+	 */
+
+	mutex_lock(&msm_uport->mtx);
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED)
+		msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s:PM State: Suspended\n", __func__);
+	mutex_unlock(&msm_uport->mtx);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static void  msm_serial_hs_rt_init(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	MSM_HS_INFO("%s(): Enabling runtime pm", __func__);
+	pm_runtime_set_suspended(uport->dev);
+	pm_runtime_set_autosuspend_delay(uport->dev, 100);
+	pm_runtime_use_autosuspend(uport->dev);
+	mutex_lock(&msm_uport->mtx);
+	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	mutex_unlock(&msm_uport->mtx);
+	pm_runtime_enable(uport->dev);
+}
+
+static int msm_hs_runtime_suspend(struct device *dev)
+{
+	msm_hs_pm_suspend(dev);
+	return 0;
+}
+
+static int msm_hs_runtime_resume(struct device *dev)
+{
+	return msm_hs_pm_resume(dev);
+}
+#else
+static void msm_serial_hs_rt_init(struct uart_port *uport) {}
+static int msm_hs_runtime_suspend(struct device *dev) { return 0; }
+static int msm_hs_runtime_resume(struct device *dev) { return 0; }
+#endif
+
+
+static int msm_hs_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct uart_port *uport;
+	struct msm_hs_port *msm_uport;
+	struct resource *core_resource;
+	struct resource *bam_resource;
+	int core_irqres, bam_irqres, wakeup_irqres;
+	struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
+	unsigned long data;
+	char name[30];
+
+	if (pdev->dev.of_node) {
+		dev_dbg(&pdev->dev, "device tree enabled\n");
+		pdata = msm_hs_dt_to_pdata(pdev);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+
+		if (pdev->id < 0) {
+			pdev->id = device_id_grab_next_free();
+			if (pdev->id < 0) {
+				dev_err(&pdev->dev,
+					"Error grabbing next free device id");
+				return pdev->id;
+			}
+		} else {
+			ret = device_id_set_used(pdev->id);
+			if (ret < 0) {
+				dev_err(&pdev->dev, "%d alias taken",
+					pdev->id);
+				return ret;
+			}
+		}
+		pdev->dev.platform_data = pdata;
+	}
+
+	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
+		dev_err(&pdev->dev, "Invalid platform device ID = %d\n",
+								pdev->id);
+		return -EINVAL;
+	}
+
+	msm_uport = devm_kzalloc(&pdev->dev, sizeof(struct msm_hs_port),
+			GFP_KERNEL);
+	if (!msm_uport)
+		return -ENOMEM;
+
+	msm_uport->uport.type = PORT_UNKNOWN;
+	uport = &msm_uport->uport;
+	uport->dev = &pdev->dev;
+
+	if (pdev->dev.of_node)
+		msm_uport->uart_type = BLSP_HSUART;
+
+	msm_hs_get_pinctrl_configs(uport);
+	/* Get required resources for BAM HSUART */
+	core_resource = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "core_mem");
+	if (!core_resource) {
+		dev_err(&pdev->dev, "Invalid core HSUART Resources.\n");
+		return -ENXIO;
+	}
+	bam_resource = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "bam_mem");
+	if (!bam_resource) {
+		dev_err(&pdev->dev, "Invalid BAM HSUART Resources.\n");
+		return -ENXIO;
+	}
+	core_irqres = platform_get_irq_byname(pdev, "core_irq");
+	if (core_irqres < 0) {
+		dev_err(&pdev->dev, "Error %d, invalid core irq resources.\n",
+			core_irqres);
+		return -ENXIO;
+	}
+	bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
+	if (bam_irqres < 0) {
+		dev_err(&pdev->dev, "Error %d, invalid bam irq resources.\n",
+			bam_irqres);
+		return -ENXIO;
+	}
+	wakeup_irqres = platform_get_irq_byname(pdev, "wakeup_irq");
+	if (wakeup_irqres < 0) {
+		wakeup_irqres = -1;
+		pr_info("Wakeup irq not specified.\n");
+	}
+
+	uport->mapbase = core_resource->start;
+
+	uport->membase = ioremap(uport->mapbase,
+				resource_size(core_resource));
+	if (unlikely(!uport->membase)) {
+		dev_err(&pdev->dev, "UART Resource ioremap Failed.\n");
+		return -ENOMEM;
+	}
+	msm_uport->bam_mem = bam_resource->start;
+	msm_uport->bam_base = ioremap(msm_uport->bam_mem,
+				resource_size(bam_resource));
+	if (unlikely(!msm_uport->bam_base)) {
+		dev_err(&pdev->dev, "UART BAM Resource ioremap Failed.\n");
+		iounmap(uport->membase);
+		return -ENOMEM;
+	}
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_state");
+	msm_uport->ipc_msm_hs_log_ctxt =
+			ipc_log_context_create(IPC_MSM_HS_LOG_STATE_PAGES,
+								name, 0);
+	if (!msm_uport->ipc_msm_hs_log_ctxt) {
+		dev_err(&pdev->dev, "%s: error creating logging context",
+								__func__);
+	} else {
+		msm_uport->ipc_debug_mask = INFO_LEV;
+		ret = sysfs_create_file(&pdev->dev.kobj,
+				&dev_attr_debug_mask.attr);
+		if (unlikely(ret))
+			MSM_HS_WARN("%s: Failed to create dev. attr", __func__);
+	}
+
+	uport->irq = core_irqres;
+	msm_uport->bam_irq = bam_irqres;
+	pdata->wakeup_irq = wakeup_irqres;
+
+	msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (!msm_uport->bus_scale_table) {
+		MSM_HS_ERR("BLSP UART: Bus scaling is disabled.\n");
+	} else {
+		msm_uport->bus_perf_client =
+			msm_bus_scale_register_client
+				(msm_uport->bus_scale_table);
+		if (!msm_uport->bus_perf_client) {
+			MSM_HS_ERR("%s():Bus client register failed\n",
+				   __func__);
+			ret = -EINVAL;
+			goto unmap_memory;
+		}
+	}
+
+	msm_uport->wakeup.irq = pdata->wakeup_irq;
+	msm_uport->wakeup.ignore = 1;
+	msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
+	msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
+	msm_uport->obs = pdata->obs;
+
+	msm_uport->bam_tx_ep_pipe_index =
+			pdata->bam_tx_ep_pipe_index;
+	msm_uport->bam_rx_ep_pipe_index =
+			pdata->bam_rx_ep_pipe_index;
+	msm_uport->wakeup.enabled = true;
+
+	uport->iotype = UPIO_MEM;
+	uport->fifosize = 64;
+	uport->ops = &msm_hs_ops;
+	uport->flags = UPF_BOOT_AUTOCONF;
+	uport->uartclk = 7372800;
+	msm_uport->imr_reg = 0x0;
+
+	msm_uport->clk = clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(msm_uport->clk)) {
+		ret = PTR_ERR(msm_uport->clk);
+		goto deregister_bus_client;
+	}
+
+	msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
+	/*
+	 * Some configurations do not require explicit pclk control so
+	 * do not flag error on pclk get failure.
+	 */
+	if (IS_ERR(msm_uport->pclk))
+		msm_uport->pclk = NULL;
+
+	msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
+					WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!msm_uport->hsuart_wq) {
+		MSM_HS_ERR("%s(): Unable to create workqueue hsuart_wq\n",
+								__func__);
+		ret =  -ENOMEM;
+		goto put_clk;
+	}
+
+	mutex_init(&msm_uport->mtx);
+
+	/* Initialize SPS HW connected with UART core */
+	ret = msm_hs_sps_init(msm_uport);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("SPS Initialization failed ! err=%d", ret);
+		goto destroy_mutex;
+	}
+
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	msm_uport->rx.flush = FLUSH_SHUTDOWN;
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_tx");
+	msm_uport->tx.ipc_tx_ctxt =
+		ipc_log_context_create(IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+	if (!msm_uport->tx.ipc_tx_ctxt)
+		dev_err(&pdev->dev, "%s: error creating tx logging context",
+								__func__);
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_rx");
+	msm_uport->rx.ipc_rx_ctxt = ipc_log_context_create(
+					IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+	if (!msm_uport->rx.ipc_rx_ctxt)
+		dev_err(&pdev->dev, "%s: error creating rx logging context",
+								__func__);
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_pwr");
+	msm_uport->ipc_msm_hs_pwr_ctxt = ipc_log_context_create(
+					IPC_MSM_HS_LOG_USER_PAGES, name, 0);
+	if (!msm_uport->ipc_msm_hs_pwr_ctxt)
+		dev_err(&pdev->dev, "%s: error creating usr logging context",
+								__func__);
+
+	uport->irq = core_irqres;
+	msm_uport->bam_irq = bam_irqres;
+
+	clk_set_rate(msm_uport->clk, msm_uport->uport.uartclk);
+	msm_hs_clk_bus_vote(msm_uport);
+	ret = uartdm_init_port(uport);
+	if (unlikely(ret))
+		goto err_clock;
+
+	/* configure the CR Protection to Enable */
+	msm_hs_write(uport, UART_DM_CR, CR_PROTECTION_EN);
+
+	/*
+	 * Enable Command Register protection before going ahead, as this hw
+	 * configuration makes sure a command issued to the CR register
+	 * completes before the next command starts. Hence an mb() is
+	 * required here.
+	 */
+	mb();
+
+	/*
+	 * Set RX_BREAK_ZERO_CHAR_OFF and RX_ERROR_CHAR_OFF
+	 * so that rx_break characters and characters with a parity or
+	 * framing error do not enter the UART RX FIFO.
+	 */
+	data = msm_hs_read(uport, UART_DM_MR2);
+	data |= (UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF |
+			UARTDM_MR2_RX_ERROR_CHAR_OFF);
+	msm_hs_write(uport, UART_DM_MR2, data);
+	/* Ensure register IO completion */
+	mb();
+
+	ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("Probe Failed as sysfs failed\n");
+		goto err_clock;
+	}
+
+	msm_serial_debugfs_init(msm_uport, pdev->id);
+	msm_hs_unconfig_uart_gpios(uport);
+
+	uport->line = pdev->id;
+	if (pdata->userid && pdata->userid <= UARTDM_NR)
+		uport->line = pdata->userid;
+	ret = uart_add_one_port(&msm_hs_driver, uport);
+	if (!ret) {
+		msm_hs_clk_bus_unvote(msm_uport);
+		msm_serial_hs_rt_init(uport);
+		return ret;
+	}
+
+err_clock:
+	msm_hs_clk_bus_unvote(msm_uport);
+
+destroy_mutex:
+	mutex_destroy(&msm_uport->mtx);
+	destroy_workqueue(msm_uport->hsuart_wq);
+
+put_clk:
+	if (msm_uport->pclk)
+		clk_put(msm_uport->pclk);
+
+	if (msm_uport->clk)
+		clk_put(msm_uport->clk);
+
+deregister_bus_client:
+	msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
+unmap_memory:
+	iounmap(uport->membase);
+	iounmap(msm_uport->bam_base);
+
+	return ret;
+}
+
+static int __init msm_serial_hs_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&msm_hs_driver);
+	if (unlikely(ret)) {
+		pr_err("%s failed to load\n", __func__);
+		return ret;
+	}
+	debug_base = debugfs_create_dir("msm_serial_hs", NULL);
+	if (IS_ERR_OR_NULL(debug_base))
+		pr_err("msm_serial_hs: Cannot create debugfs dir\n");
+
+	ret = platform_driver_register(&msm_serial_hs_platform_driver);
+	if (ret) {
+		pr_err("%s failed to load\n", __func__);
+		debugfs_remove_recursive(debug_base);
+		uart_unregister_driver(&msm_hs_driver);
+		return ret;
+	}
+
+	pr_info("msm_serial_hs module loaded\n");
+	return ret;
+}
+
+/*
+ *  Called by the upper layer when port is closed.
+ *     - Disables the port
+ *     - Unhook the ISR
+ */
+static void msm_hs_shutdown(struct uart_port *uport)
+{
+	int ret, rc;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	int data;
+	unsigned long flags;
+
+	if (is_use_low_power_wakeup(msm_uport))
+		irq_set_irq_wake(msm_uport->wakeup.irq, 0);
+
+	if (msm_uport->wakeup.enabled)
+		disable_irq(msm_uport->wakeup.irq);
+	else
+		disable_irq(uport->irq);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_uport->wakeup.enabled = false;
+	msm_uport->wakeup.ignore = 1;
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	/* Free the interrupt */
+	free_irq(uport->irq, msm_uport);
+	if (is_use_low_power_wakeup(msm_uport)) {
+		free_irq(msm_uport->wakeup.irq, msm_uport);
+		MSM_HS_DBG("%s(): wakeup irq freed", __func__);
+	}
+	msm_uport->wakeup.freed = true;
+
+	/* make sure tx lh finishes */
+	kthread_flush_worker(&msm_uport->tx.kworker);
+	ret = wait_event_timeout(msm_uport->tx.wait,
+			uart_circ_empty(tx_buf), 500);
+	if (!ret)
+		MSM_HS_WARN("Shutdown called when tx buff not empty");
+
+	msm_hs_resource_vote(msm_uport);
+	/* Stop remote side from sending data */
+	msm_hs_disable_flow_control(uport, false);
+	/* make sure rx lh finishes */
+	kthread_flush_worker(&msm_uport->rx.kworker);
+
+	if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
+		/* disable and disconnect rx */
+		ret = wait_event_timeout(msm_uport->rx.wait,
+				!msm_uport->rx.pending_flag, 500);
+		if (!ret)
+			MSM_HS_WARN("%s(): rx disconnect not complete",
+				__func__);
+		msm_hs_disconnect_rx(uport);
+	}
+
+	cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
+	flush_workqueue(msm_uport->hsuart_wq);
+
+	/* BAM Disconnect for TX */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	ret = sps_tx_disconnect(msm_uport);
+	if (ret)
+		MSM_HS_ERR("%s(): sps_disconnect failed\n",
+					__func__);
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	/* Disable the transmitter */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_DISABLE_BMSK);
+	/* Disable the receiver */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
+
+	msm_uport->imr_reg = 0;
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/*
+	 * Complete all device writes before actually disabling uartclk.
+	 * Hence an mb() is required here.
+	 */
+	mb();
+
+	msm_uport->rx.buffer_pending = NONE_PENDING;
+	MSM_HS_DBG("%s(): tx, rx events complete", __func__);
+
+	dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
+			 UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+	msm_hs_resource_unvote(msm_uport);
+	rc = atomic_read(&msm_uport->resource_count);
+	if (rc) {
+		atomic_set(&msm_uport->resource_count, 1);
+		MSM_HS_WARN("%s(): removing extra vote\n", __func__);
+		msm_hs_resource_unvote(msm_uport);
+	}
+	if (atomic_read(&msm_uport->client_req_state)) {
+		MSM_HS_WARN("%s: Client clock vote imbalance\n", __func__);
+		atomic_set(&msm_uport->client_req_state, 0);
+	}
+	if (atomic_read(&msm_uport->client_count)) {
+		MSM_HS_WARN("%s: Client vote on, forcing to 0\n", __func__);
+		atomic_set(&msm_uport->client_count, 0);
+		LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s: Client_Count 0\n", __func__);
+	}
+	msm_hs_unconfig_uart_gpios(uport);
+	MSM_HS_INFO("%s:UART port closed successfully\n", __func__);
+}
+
+static void __exit msm_serial_hs_exit(void)
+{
+	pr_info("msm_serial_hs module removed\n");
+	debugfs_remove_recursive(debug_base);
+	platform_driver_unregister(&msm_serial_hs_platform_driver);
+	uart_unregister_driver(&msm_hs_driver);
+}
+
+static const struct dev_pm_ops msm_hs_dev_pm_ops = {
+	.runtime_suspend = msm_hs_runtime_suspend,
+	.runtime_resume = msm_hs_runtime_resume,
+	.runtime_idle = NULL,
+	.suspend_noirq = msm_hs_pm_sys_suspend_noirq,
+	.resume_noirq = msm_hs_pm_sys_resume_noirq,
+};
+
+static struct platform_driver msm_serial_hs_platform_driver = {
+	.probe	= msm_hs_probe,
+	.remove = msm_hs_remove,
+	.driver = {
+		.name = "msm_serial_hs",
+		.pm   = &msm_hs_dev_pm_ops,
+		.of_match_table = msm_hs_match_table,
+	},
+};
+
+static struct uart_driver msm_hs_driver = {
+	.owner = THIS_MODULE,
+	.driver_name = "msm_serial_hs",
+	.dev_name = "ttyHS",
+	.nr = UARTDM_NR,
+	.cons = 0,
+};
+
+static const struct uart_ops msm_hs_ops = {
+	.tx_empty = msm_hs_tx_empty,
+	.set_mctrl = msm_hs_set_mctrl_locked,
+	.get_mctrl = msm_hs_get_mctrl_locked,
+	.stop_tx = msm_hs_stop_tx_locked,
+	.start_tx = msm_hs_start_tx_locked,
+	.stop_rx = msm_hs_stop_rx_locked,
+	.enable_ms = msm_hs_enable_ms_locked,
+	.break_ctl = msm_hs_break_ctl,
+	.startup = msm_hs_startup,
+	.shutdown = msm_hs_shutdown,
+	.set_termios = msm_hs_set_termios,
+	.type = msm_hs_type,
+	.config_port = msm_hs_config_port,
+	.flush_buffer = NULL,
+	.ioctl = msm_hs_ioctl,
+};
+
+module_init(msm_serial_hs_init);
+module_exit(msm_serial_hs_exit);
+MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/msm_serial_hs_hwreg.h b/drivers/tty/serial/msm_serial_hs_hwreg.h
new file mode 100644
index 0000000..d5ce41f
--- /dev/null
+++ b/drivers/tty/serial/msm_serial_hs_hwreg.h
@@ -0,0 +1,283 @@
+/* drivers/tty/serial/msm_serial_hs_hwreg.h
+ *
+ * Copyright (c) 2007-2009, 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * All source code in this file is licensed under the following license
+ * except where indicated.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#ifndef MSM_SERIAL_HS_HWREG_H
+#define MSM_SERIAL_HS_HWREG_H
+
+#define GSBI_CONTROL_ADDR              0x0
+#define GSBI_PROTOCOL_CODE_MASK        0x30
+#define GSBI_PROTOCOL_I2C_UART         0x60
+#define GSBI_PROTOCOL_UART             0x40
+#define GSBI_PROTOCOL_IDLE             0x0
+
+#define TCSR_ADM_1_A_CRCI_MUX_SEL      0x78
+#define TCSR_ADM_1_B_CRCI_MUX_SEL      0x7C
+#define ADM1_CRCI_GSBI6_RX_SEL         0x800
+#define ADM1_CRCI_GSBI6_TX_SEL         0x400
+
+#define MSM_ENABLE_UART_CLOCK TIOCPMGET
+#define MSM_DISABLE_UART_CLOCK TIOCPMPUT
+#define MSM_GET_UART_CLOCK_STATUS TIOCPMACT
+
+enum msm_hsl_regs {
+	UARTDM_MR1,
+	UARTDM_MR2,
+	UARTDM_IMR,
+	UARTDM_SR,
+	UARTDM_CR,
+	UARTDM_CSR,
+	UARTDM_IPR,
+	UARTDM_ISR,
+	UARTDM_RX_TOTAL_SNAP,
+	UARTDM_RFWR,
+	UARTDM_TFWR,
+	UARTDM_RF,
+	UARTDM_TF,
+	UARTDM_MISR,
+	UARTDM_DMRX,
+	UARTDM_NCF_TX,
+	UARTDM_DMEN,
+	UARTDM_BCR,
+	UARTDM_TXFS,
+	UARTDM_RXFS,
+	UARTDM_LAST,
+};
+
+enum msm_hs_regs {
+	UART_DM_MR1 = 0x0,
+	UART_DM_MR2 = 0x4,
+	UART_DM_IMR = 0xb0,
+	UART_DM_SR = 0xa4,
+	UART_DM_CR = 0xa8,
+	UART_DM_CSR = 0xa0,
+	UART_DM_IPR = 0x18,
+	UART_DM_ISR = 0xb4,
+	UART_DM_RX_TOTAL_SNAP = 0xbc,
+	UART_DM_TFWR = 0x1c,
+	UART_DM_RFWR = 0x20,
+	UART_DM_RF = 0x140,
+	UART_DM_TF = 0x100,
+	UART_DM_MISR = 0xac,
+	UART_DM_DMRX = 0x34,
+	UART_DM_NCF_TX = 0x40,
+	UART_DM_DMEN = 0x3c,
+	UART_DM_TXFS = 0x4c,
+	UART_DM_RXFS = 0x50,
+	UART_DM_RX_TRANS_CTRL = 0xcc,
+	UART_DM_BCR = 0xc8,
+};
+
+#define UARTDM_MR1_ADDR 0x0
+#define UARTDM_MR2_ADDR 0x4
+
+/* Backward Compatibility Register for UARTDM Core v1.4 */
+#define UARTDM_BCR_ADDR	0xc8
+
+/*
+ * UARTDM Core v1.4 STALE_IRQ_EMPTY bit definition
+ * Stale interrupt will fire if bit is set when RX-FIFO is empty
+ */
+#define UARTDM_BCR_TX_BREAK_DISABLE	0x1
+#define UARTDM_BCR_STALE_IRQ_EMPTY	0x2
+#define UARTDM_BCR_RX_DMRX_LOW_EN	0x4
+#define UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL	0x10
+#define UARTDM_BCR_RX_DMRX_1BYTE_RES_EN	0x20
+
+/* TRANSFER_CONTROL Register for UARTDM Core v1.4 */
+#define UARTDM_RX_TRANS_CTRL_ADDR      0xcc
+
+/* TRANSFER_CONTROL Register bits */
+#define RX_STALE_AUTO_RE_EN		0x1
+#define RX_TRANS_AUTO_RE_ACTIVATE	0x2
+#define RX_DMRX_CYCLIC_EN		0x4
+
+/* write only register */
+#define UARTDM_CSR_115200 0xFF
+#define UARTDM_CSR_57600  0xEE
+#define UARTDM_CSR_38400  0xDD
+#define UARTDM_CSR_28800  0xCC
+#define UARTDM_CSR_19200  0xBB
+#define UARTDM_CSR_14400  0xAA
+#define UARTDM_CSR_9600   0x99
+#define UARTDM_CSR_7200   0x88
+#define UARTDM_CSR_4800   0x77
+#define UARTDM_CSR_3600   0x66
+#define UARTDM_CSR_2400   0x55
+#define UARTDM_CSR_1200   0x44
+#define UARTDM_CSR_600    0x33
+#define UARTDM_CSR_300    0x22
+#define UARTDM_CSR_150    0x11
+#define UARTDM_CSR_75     0x00
+
+/* write only register */
+#define UARTDM_IPR_ADDR 0x18
+#define UARTDM_TFWR_ADDR 0x1c
+#define UARTDM_RFWR_ADDR 0x20
+#define UARTDM_HCR_ADDR 0x24
+#define UARTDM_DMRX_ADDR 0x34
+#define UARTDM_DMEN_ADDR 0x3c
+
+/* UART_DM_NO_CHARS_FOR_TX */
+#define UARTDM_NCF_TX_ADDR 0x40
+
+#define UARTDM_BADR_ADDR 0x44
+
+#define UARTDM_SIM_CFG_ADDR 0x80
+
+/* Read Only register */
+#define UARTDM_TXFS_ADDR 0x4C
+#define UARTDM_RXFS_ADDR 0x50
+
+/* Register field Mask Mapping */
+#define UARTDM_SR_RX_BREAK_BMSK	        BIT(6)
+#define UARTDM_SR_PAR_FRAME_BMSK	BIT(5)
+#define UARTDM_SR_OVERRUN_BMSK		BIT(4)
+#define UARTDM_SR_TXEMT_BMSK		BIT(3)
+#define UARTDM_SR_TXRDY_BMSK		BIT(2)
+#define UARTDM_SR_RXRDY_BMSK		BIT(0)
+
+#define UARTDM_CR_TX_DISABLE_BMSK	BIT(3)
+#define UARTDM_CR_RX_DISABLE_BMSK	BIT(1)
+#define UARTDM_CR_TX_EN_BMSK		BIT(2)
+#define UARTDM_CR_RX_EN_BMSK		BIT(0)
+
+/* UARTDM_CR channel_command bit values (register field is bits 8:4) */
+#define RESET_RX		0x10
+#define RESET_TX		0x20
+#define RESET_ERROR_STATUS	0x30
+#define RESET_BREAK_INT		0x40
+#define START_BREAK		0x50
+#define STOP_BREAK		0x60
+#define RESET_CTS		0x70
+#define RESET_STALE_INT		0x80
+#define RFR_LOW			0xD0
+#define RFR_HIGH		0xE0
+#define CR_PROTECTION_EN	0x100
+#define STALE_EVENT_ENABLE	0x500
+#define STALE_EVENT_DISABLE	0x600
+#define FORCE_STALE_EVENT	0x400
+#define CLEAR_TX_READY		0x300
+#define RESET_TX_ERROR		0x800
+#define RESET_TX_DONE		0x810
+
+/*
+ * UARTDM_CR BAM IFC command bit values
+ * for UARTDM Core v1.4
+ */
+#define START_RX_BAM_IFC	0x850
+#define START_TX_BAM_IFC	0x860
+
+#define UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00
+#define UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f
+#define UARTDM_MR1_CTS_CTL_BMSK 0x40
+#define UARTDM_MR1_RX_RDY_CTL_BMSK 0x80
+
+/*
+ * UARTDM Core v1.4 MR2_RFR_CTS_LOOP bitmask
+ * Enables internal loopback between RFR_N of
+ * RX channel and CTS_N of TX channel.
+ */
+#define UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK	0x400
+
+#define UARTDM_MR2_LOOP_MODE_BMSK		0x80
+#define UARTDM_MR2_ERROR_MODE_BMSK		0x40
+#define UARTDM_MR2_BITS_PER_CHAR_BMSK		0x30
+#define UARTDM_MR2_RX_ZERO_CHAR_OFF		0x100
+#define UARTDM_MR2_RX_ERROR_CHAR_OFF		0x200
+#define UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF	0x100
+
+#define UARTDM_MR2_BITS_PER_CHAR_8	(0x3 << 4)
+
+/* bits per character configuration */
+#define FIVE_BPC  (0 << 4)
+#define SIX_BPC   (1 << 4)
+#define SEVEN_BPC (2 << 4)
+#define EIGHT_BPC (3 << 4)
+
+#define UARTDM_MR2_STOP_BIT_LEN_BMSK 0xc
+#define STOP_BIT_ONE (1 << 2)
+#define STOP_BIT_TWO (3 << 2)
+
+#define UARTDM_MR2_PARITY_MODE_BMSK 0x3
+
+/* Parity configuration */
+#define NO_PARITY 0x0
+#define EVEN_PARITY 0x2
+#define ODD_PARITY 0x1
+#define SPACE_PARITY 0x3
+
+#define UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80
+#define UARTDM_IPR_STALE_LSB_BMSK 0x1f
+
+/* These can be used for both ISR and IMR register */
+#define UARTDM_ISR_TX_READY_BMSK	BIT(7)
+#define UARTDM_ISR_CURRENT_CTS_BMSK	BIT(6)
+#define UARTDM_ISR_DELTA_CTS_BMSK	BIT(5)
+#define UARTDM_ISR_RXLEV_BMSK		BIT(4)
+#define UARTDM_ISR_RXSTALE_BMSK		BIT(3)
+#define UARTDM_ISR_RXBREAK_BMSK		BIT(2)
+#define UARTDM_ISR_RXHUNT_BMSK		BIT(1)
+#define UARTDM_ISR_TXLEV_BMSK		BIT(0)
+
+/* Field definitions for UART_DM_DMEN*/
+#define UARTDM_TX_DM_EN_BMSK 0x1
+#define UARTDM_RX_DM_EN_BMSK 0x2
+
+/*
+ * UARTDM Core v1.4 bitmask
+ * Bitmasks for enabling Rx and Tx BAM Interface
+ */
+#define UARTDM_TX_BAM_ENABLE_BMSK 0x4
+#define UARTDM_RX_BAM_ENABLE_BMSK 0x8
+
+/* Register offsets for UART Core v13 */
+
+/* write only register */
+#define UARTDM_CSR_ADDR    0x8
+
+/* write only register */
+#define UARTDM_TF_ADDR   0x70
+#define UARTDM_TF2_ADDR  0x74
+#define UARTDM_TF3_ADDR  0x78
+#define UARTDM_TF4_ADDR  0x7c
+
+/* write only register */
+#define UARTDM_CR_ADDR 0x10
+/* write only register */
+#define UARTDM_IMR_ADDR 0x14
+#define UARTDM_IRDA_ADDR 0x38
+
+/* Read Only register */
+#define UARTDM_SR_ADDR 0x8
+
+/* Read Only register */
+#define UARTDM_RF_ADDR   0x70
+#define UARTDM_RF2_ADDR  0x74
+#define UARTDM_RF3_ADDR  0x78
+#define UARTDM_RF4_ADDR  0x7c
+
+/* Read Only register */
+#define UARTDM_MISR_ADDR 0x10
+
+/* Read Only register */
+#define UARTDM_ISR_ADDR 0x14
+#define UARTDM_RX_TOTAL_SNAP_ADDR 0x38
+
+#endif /* MSM_SERIAL_HS_HWREG_H */
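
The MR2 and CSR definitions above are plain bitmasks and write-only values; as an
illustrative sketch only (not the driver's actual termios path), given a
struct uart_port *uport, an 8N1 line at 115200 baud could be composed with them
using the msm_hs_read()/msm_hs_write() accessors seen earlier in this series:

	/* Sketch: select 8 data bits, 1 stop bit, no parity via UART_DM_MR2 */
	unsigned long data = msm_hs_read(uport, UART_DM_MR2);

	data &= ~(UARTDM_MR2_BITS_PER_CHAR_BMSK | UARTDM_MR2_STOP_BIT_LEN_BMSK |
		  UARTDM_MR2_PARITY_MODE_BMSK);
	data |= EIGHT_BPC | STOP_BIT_ONE | NO_PARITY;
	msm_hs_write(uport, UART_DM_MR2, data);
	/* UART_DM_CSR is write-only; 0xFF selects 115200 for both RX and TX */
	msm_hs_write(uport, UART_DM_CSR, UARTDM_CSR_115200);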
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index c0f1e46..7b8ca7d 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2256,39 +2256,38 @@
 
 /*-------------------------------------------------------------------------*/
 
-dma_addr_t
-usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
-	unsigned int intr_num)
+phys_addr_t
+usb_hcd_get_sec_event_ring_phys_addr(struct usb_device *udev,
+	unsigned int intr_num, dma_addr_t *dma)
 {
 	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
 
 	if (!HCD_RH_RUNNING(hcd))
 		return 0;
 
-	return hcd->driver->get_sec_event_ring_dma_addr(hcd, intr_num);
+	return hcd->driver->get_sec_event_ring_phys_addr(hcd, intr_num, dma);
 }
 
-dma_addr_t
-usb_hcd_get_dcba_dma_addr(struct usb_device *udev)
+phys_addr_t
+usb_hcd_get_xfer_ring_phys_addr(struct usb_device *udev,
+		struct usb_host_endpoint *ep, dma_addr_t *dma)
 {
 	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
 
 	if (!HCD_RH_RUNNING(hcd))
 		return 0;
 
-	return hcd->driver->get_dcba_dma_addr(hcd, udev);
+	return hcd->driver->get_xfer_ring_phys_addr(hcd, udev, ep, dma);
 }
 
-dma_addr_t
-usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
-		struct usb_host_endpoint *ep)
+int usb_hcd_get_controller_id(struct usb_device *udev)
 {
 	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
 
 	if (!HCD_RH_RUNNING(hcd))
-		return 0;
+		return -EINVAL;
 
-	return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
+	return hcd->driver->get_core_id(hcd);
 }
 
 #ifdef	CONFIG_PM
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 92e5d13..d745733 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -704,36 +704,35 @@
 }
 EXPORT_SYMBOL(usb_sec_event_ring_cleanup);
 
-dma_addr_t
-usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
-	unsigned int intr_num)
+phys_addr_t
+usb_get_sec_event_ring_phys_addr(struct usb_device *dev,
+	unsigned int intr_num, dma_addr_t *dma)
 {
 	if (dev->state == USB_STATE_NOTATTACHED)
 		return 0;
 
-	return usb_hcd_get_sec_event_ring_dma_addr(dev, intr_num);
+	return usb_hcd_get_sec_event_ring_phys_addr(dev, intr_num, dma);
 }
-EXPORT_SYMBOL(usb_get_sec_event_ring_dma_addr);
+EXPORT_SYMBOL(usb_get_sec_event_ring_phys_addr);
 
-dma_addr_t
-usb_get_dcba_dma_addr(struct usb_device *dev)
+phys_addr_t usb_get_xfer_ring_phys_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep, dma_addr_t *dma)
 {
 	if (dev->state == USB_STATE_NOTATTACHED)
 		return 0;
 
-	return usb_hcd_get_dcba_dma_addr(dev);
+	return usb_hcd_get_xfer_ring_phys_addr(dev, ep, dma);
 }
-EXPORT_SYMBOL(usb_get_dcba_dma_addr);
+EXPORT_SYMBOL(usb_get_xfer_ring_phys_addr);
 
-dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
-	struct usb_host_endpoint *ep)
+int usb_get_controller_id(struct usb_device *dev)
 {
 	if (dev->state == USB_STATE_NOTATTACHED)
-		return 0;
+		return -EINVAL;
 
-	return usb_hcd_get_xfer_ring_dma_addr(dev, ep);
+	return usb_hcd_get_controller_id(dev);
 }
-EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);
+EXPORT_SYMBOL(usb_get_controller_id);
 
 /*-------------------------------------------------------------------*/
 /*
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 18241f4..c11629d 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1565,12 +1565,22 @@
 		struct usb_request *req)
 {
 	struct f_gsi *gsi = req->context;
+	rndis_init_msg_type *buf;
 	int status;
 
 	status = rndis_msg_parser(gsi->params, (u8 *) req->buf);
 	if (status < 0)
 		log_event_err("RNDIS command error %d, %d/%d",
 			status, req->actual, req->length);
+
+	buf = (rndis_init_msg_type *)req->buf;
+	if (buf->MessageType == RNDIS_MSG_INIT) {
+		gsi->d_port.in_aggr_size = min_t(u32, gsi->d_port.in_aggr_size,
+						gsi->params->dl_max_xfer_size);
+		log_event_dbg("RNDIS host dl_aggr_size:%d in_aggr_size:%d\n",
+				gsi->params->dl_max_xfer_size,
+				gsi->d_port.in_aggr_size);
+	}
 }
 
 static void
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index a560083..d6bf0f4 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -1393,7 +1393,7 @@
 
 /* string descriptors: */
 static struct usb_string qdss_gsi_string_defs[] = {
-	[0].s = "Qualcomm DPL Data",
+	[0].s = "DPL Data",
 	{}, /* end of list */
 };
 
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 40a7acf..a0fecb2 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -338,7 +338,7 @@
 	struct usb_gadget *gadget = c->cdev->gadget;
 	struct f_qdss *qdss = func_to_qdss(f);
 	struct usb_ep *ep;
-	int iface;
+	int iface, id;
 
 	pr_debug("qdss_bind\n");
 
@@ -356,6 +356,12 @@
 	qdss_data_intf_desc.bInterfaceNumber = iface;
 	qdss->data_iface_id = iface;
 
+	id = usb_string_id(c->cdev);
+	if (id < 0)
+		return id;
+	qdss_string_defs[QDSS_DATA_IDX].id = id;
+	qdss_data_intf_desc.iInterface = id;
+
 	if (qdss->debug_inface_enabled) {
 		/* Allocate ctrl I/F */
 		iface = usb_interface_id(c, f);
@@ -365,6 +371,11 @@
 		}
 		qdss_ctrl_intf_desc.bInterfaceNumber = iface;
 		qdss->ctrl_iface_id = iface;
+		id = usb_string_id(c->cdev);
+		if (id < 0)
+			return id;
+		qdss_string_defs[QDSS_CTRL_IDX].id = id;
+		qdss_ctrl_intf_desc.iInterface = id;
 	}
 
 	ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_data_desc,
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 38d58f3..ac2231a 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -813,8 +813,17 @@
 	/* For USB: responses may take up to 10 seconds */
 	switch (MsgType) {
 	case RNDIS_MSG_INIT:
-		pr_debug("%s: RNDIS_MSG_INIT\n",
-			__func__);
+		pr_debug("%s: RNDIS_MSG_INIT\n", __func__);
+		tmp++; /* to get RequestID */
+		params->host_rndis_major_ver = get_unaligned_le32(tmp++);
+		params->host_rndis_minor_ver = get_unaligned_le32(tmp++);
+		params->dl_max_xfer_size = get_unaligned_le32(tmp++);
+
+		pr_debug("%s(): RNDIS Host Major:%d Minor:%d version\n",
+					__func__, params->host_rndis_major_ver,
+					params->host_rndis_minor_ver);
+		pr_debug("%s(): DL Max Transfer size:%x\n",
+				__func__, params->dl_max_xfer_size);
 		params->state = RNDIS_INITIALIZED;
 		return rndis_init_response(params, (rndis_init_msg_type *)buf);
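
The extra reads added above assume the standard REMOTE_NDIS_INITIALIZE_MSG layout
of consecutive little-endian 32-bit words, with tmp already advanced past
MessageType and MessageLength earlier in rndis_msg_parser(); roughly (illustrative
layout only, the driver's real type is rndis_init_msg_type in rndis.h):

	struct rndis_init_msg_words {	/* illustrative, not added by this patch */
		__le32 MessageType;	/* RNDIS_MSG_INIT */
		__le32 MessageLength;
		__le32 RequestID;	/* skipped by the lone tmp++ */
		__le32 MajorVersion;	/* -> params->host_rndis_major_ver */
		__le32 MinorVersion;	/* -> params->host_rndis_minor_ver */
		__le32 MaxTransferSize;	/* -> params->dl_max_xfer_size */
	};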
 
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index 939c3be..4ffc282 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -191,6 +191,9 @@
 
 	u32			vendorID;
 	u8			max_pkt_per_xfer;
+	u32			host_rndis_major_ver;
+	u32			host_rndis_minor_ver;
+	u32			dl_max_xfer_size;
 	const char		*vendorDescr;
 	u8			pkt_alignment_factor;
 	void			(*resp_avail)(void *v);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6cb5ab3..89de903 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -316,6 +316,9 @@
 	if (device_property_read_u32(sysdev, "snps,xhci-imod-value", &imod))
 		imod = 0;
 
+	if (device_property_read_u32(sysdev, "usb-core-id", &xhci->core_id))
+		xhci->core_id = -EINVAL;
+
 	hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
 	if (IS_ERR(hcd->usb_phy)) {
 		ret = PTR_ERR(hcd->usb_phy);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 15bf308..1660c7c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4964,10 +4964,13 @@
 }
 EXPORT_SYMBOL_GPL(xhci_gen_setup);
 
-dma_addr_t xhci_get_sec_event_ring_dma_addr(struct usb_hcd *hcd,
-	unsigned int intr_num)
+static phys_addr_t xhci_get_sec_event_ring_phys_addr(struct usb_hcd *hcd,
+	unsigned int intr_num, dma_addr_t *dma)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct device *dev = hcd->self.sysdev;
+	struct sg_table sgt;
+	phys_addr_t pa;
 
 	if (intr_num >= xhci->max_interrupters) {
 		xhci_err(xhci, "intr num %d >= max intrs %d\n", intr_num,
@@ -4977,31 +4980,34 @@
 
 	if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
 		xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
-		&& xhci->sec_event_ring[intr_num]->first_seg)
-		return xhci->sec_event_ring[intr_num]->first_seg->dma;
+		&& xhci->sec_event_ring[intr_num]->first_seg) {
+
+		dma_get_sgtable(dev, &sgt,
+			xhci->sec_event_ring[intr_num]->first_seg->trbs,
+			xhci->sec_event_ring[intr_num]->first_seg->dma,
+			TRB_SEGMENT_SIZE);
+
+		*dma = xhci->sec_event_ring[intr_num]->first_seg->dma;
+
+		pa = page_to_phys(sg_page(sgt.sgl));
+		sg_free_table(&sgt);
+
+		return pa;
+	}
 
 	return 0;
 }
 
-dma_addr_t xhci_get_dcba_dma_addr(struct usb_hcd *hcd,
-	struct usb_device *udev)
-{
-	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
-	if (!(xhci->xhc_state & XHCI_STATE_HALTED) && xhci->dcbaa)
-		return xhci->dcbaa->dev_context_ptrs[udev->slot_id];
-
-	return 0;
-}
-
-dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
-	struct usb_device *udev, struct usb_host_endpoint *ep)
+static phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_hcd *hcd,
+	struct usb_device *udev, struct usb_host_endpoint *ep, dma_addr_t *dma)
 {
 	int ret;
 	unsigned int ep_index;
 	struct xhci_virt_device *virt_dev;
-
+	struct device *dev = hcd->self.sysdev;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct sg_table sgt;
+	phys_addr_t pa;
 
 	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
 	if (ret <= 0) {
@@ -5013,12 +5019,31 @@
 	ep_index = xhci_get_endpoint_index(&ep->desc);
 
 	if (virt_dev->eps[ep_index].ring &&
-		virt_dev->eps[ep_index].ring->first_seg)
-		return virt_dev->eps[ep_index].ring->first_seg->dma;
+		virt_dev->eps[ep_index].ring->first_seg) {
+
+		dma_get_sgtable(dev, &sgt,
+			virt_dev->eps[ep_index].ring->first_seg->trbs,
+			virt_dev->eps[ep_index].ring->first_seg->dma,
+			TRB_SEGMENT_SIZE);
+
+		*dma = virt_dev->eps[ep_index].ring->first_seg->dma;
+
+		pa = page_to_phys(sg_page(sgt.sgl));
+		sg_free_table(&sgt);
+
+		return pa;
+	}
 
 	return 0;
 }
 
+int xhci_get_core_id(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	return xhci->core_id;
+}
+
 static const struct hc_driver xhci_hc_driver = {
 	.description =		"xhci-hcd",
 	.product_desc =		"xHCI Host Controller",
@@ -5080,9 +5105,9 @@
 	.find_raw_port_number =	xhci_find_raw_port_number,
 	.sec_event_ring_setup =		xhci_sec_event_ring_setup,
 	.sec_event_ring_cleanup =	xhci_sec_event_ring_cleanup,
-	.get_sec_event_ring_dma_addr =	xhci_get_sec_event_ring_dma_addr,
-	.get_xfer_ring_dma_addr =	xhci_get_xfer_ring_dma_addr,
-	.get_dcba_dma_addr =		xhci_get_dcba_dma_addr,
+	.get_sec_event_ring_phys_addr =	xhci_get_sec_event_ring_phys_addr,
+	.get_xfer_ring_phys_addr =	xhci_get_xfer_ring_phys_addr,
+	.get_core_id =			xhci_get_core_id,
 };
 
 void xhci_init_driver(struct hc_driver *drv,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 757d045..db46db4 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1538,6 +1538,8 @@
 	/* secondary interrupter */
 	struct	xhci_intr_reg __iomem **sec_ir_set;
 
+	int		core_id;
+
 	/* Cached register copies of read-only HC data */
 	__u32		hcs_params1;
 	__u32		hcs_params2;
@@ -1977,6 +1979,7 @@
 		char *buf, u16 wLength);
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
+int xhci_get_core_id(struct usb_hcd *hcd);
 
 #ifdef CONFIG_PM
 int xhci_bus_suspend(struct usb_hcd *hcd);
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 6103172..68bd576 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -63,6 +63,17 @@
 #define LINESTATE_DP			BIT(0)
 #define LINESTATE_DM			BIT(1)
 
+/* eud related registers */
+#define EUD_SW_ATTACH_DET	0x1018
+#define EUD_INT1_EN_MASK	0x0024
+
+/* EUD interrupt mask bits */
+#define EUD_INT_RX		BIT(0)
+#define EUD_INT_TX		BIT(1)
+#define EUD_INT_VBUS		BIT(2)
+#define EUD_INT_CHGR		BIT(3)
+#define EUD_INT_SAFE_MODE	BIT(4)
+
 unsigned int phy_tune1;
 module_param(phy_tune1, uint, 0644);
 MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
@@ -81,6 +92,7 @@
 	struct usb_phy		phy;
 	struct mutex		lock;
 	void __iomem		*base;
+	void __iomem		*eud_base;
 	void __iomem		*efuse_reg;
 
 	struct clk		*ref_clk_src;
@@ -664,6 +676,22 @@
 			return ret;
 		}
 		qphy->dpdm_enable = true;
+
+		if (qphy->eud_base) {
+			if (qphy->cfg_ahb_clk)
+				clk_prepare_enable(qphy->cfg_ahb_clk);
+			writel_relaxed(BIT(0),
+					qphy->eud_base + EUD_SW_ATTACH_DET);
+			/* to flush above write before next write */
+			wmb();
+
+			writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR,
+					qphy->eud_base + EUD_INT1_EN_MASK);
+			/* to flush above write before turning off clk */
+			wmb();
+			if (qphy->cfg_ahb_clk)
+				clk_disable_unprepare(qphy->cfg_ahb_clk);
+		}
 	}
 
 	return ret;
@@ -678,6 +706,16 @@
 				__func__, qphy->dpdm_enable);
 
 	if (qphy->dpdm_enable) {
+		if (qphy->eud_base) {
+			if (qphy->cfg_ahb_clk)
+				clk_prepare_enable(qphy->cfg_ahb_clk);
+			writel_relaxed(0, qphy->eud_base + EUD_SW_ATTACH_DET);
+			/* to flush above write before turning off clk */
+			wmb();
+			if (qphy->cfg_ahb_clk)
+				clk_disable_unprepare(qphy->cfg_ahb_clk);
+		}
+
 		ret = qusb_phy_enable_power(qphy, false);
 		if (ret < 0) {
 			dev_dbg(qphy->phy.dev,
@@ -784,6 +822,17 @@
 		}
 	}
 
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"eud_base");
+	if (res) {
+		qphy->eud_base = devm_ioremap(dev, res->start,
+					resource_size(res));
+		if (!qphy->eud_base)
+			dev_dbg(dev, "couldn't ioremap eud_base\n");
+	}
+
 	/* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */
 	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
 	if (IS_ERR(qphy->ref_clk_src)) {
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 59f5379..7e7c76c 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -623,10 +623,7 @@
 	}
 
 	if (suspend) {
-		if (!phy->cable_connected)
-			writel_relaxed(0x00,
-			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
-		else
+		if (phy->cable_connected)
 			msm_ssusb_qmp_enable_autonomous(phy, 1);
 
 		/* Make sure above write completed with PHY */
@@ -674,6 +671,10 @@
 	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
 					phy);
 
+	writel_relaxed(0x00,
+		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+	readl_relaxed(phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+
 	dev_dbg(uphy->dev, "QMP phy disconnect notification\n");
 	dev_dbg(uphy->dev, " cable_connected=%d\n", phy->cable_connected);
 	phy->cable_connected = false;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index b7e3431..c122409 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -450,6 +450,8 @@
 	return !!(val & ICC_SRE_EL1_SRE);
 }
 
+void gic_show_pending_irqs(void);
+unsigned int get_gic_highpri_irq(void);
 #endif
 
 #endif
diff --git a/include/linux/platform_data/msm_serial_hs.h b/include/linux/platform_data/msm_serial_hs.h
new file mode 100644
index 0000000..72c76e5
--- /dev/null
+++ b/include/linux/platform_data/msm_serial_hs.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2010-2014, The Linux Foundation. All rights reserved.
+ * Author: Nick Pelly <npelly@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_SERIAL_HS_H
+#define __ASM_ARCH_MSM_SERIAL_HS_H
+
+#include <linux/serial_core.h>
+
+/**
+ * struct msm_serial_hs_platform_data - platform device data
+ *					for msm hsuart device
+ * @wakeup_irq : IRQ line to be configured as Wakeup source.
+ * @inject_rx_on_wakeup : Set 1 if specific character to be inserted on wakeup
+ * @rx_to_inject : Character to be inserted on wakeup
+ * @gpio_config : Configure gpios that are used for uart communication
+ * @userid : User-defined number to be used to enumerate device as tty<userid>
+ * @uart_tx_gpio: GPIO number for UART Tx Line.
+ * @uart_rx_gpio: GPIO number for UART Rx Line.
+ * @uart_cts_gpio: GPIO number for UART CTS Line.
+ * @uart_rfr_gpio: GPIO number for UART RFR Line.
+ * @bam_tx_ep_pipe_index : BAM TX Endpoint Pipe Index for HSUART
+ * @bam_rx_ep_pipe_index : BAM RX Endpoint Pipe Index for HSUART
+ * @no_suspend_delay : Flag indicating whether the system should go to
+ * suspend immediately, without delay
+ * @obs: Flag to enable out of band sleep feature support
+ */
+struct msm_serial_hs_platform_data {
+	int wakeup_irq;  /* wakeup irq */
+	bool inject_rx_on_wakeup;
+	u8 rx_to_inject;
+	int (*gpio_config)(int);
+	int userid;
+
+	int uart_tx_gpio;
+	int uart_rx_gpio;
+	int uart_cts_gpio;
+	int uart_rfr_gpio;
+	unsigned int bam_tx_ep_pipe_index;
+	unsigned int bam_rx_ep_pipe_index;
+	bool no_suspend_delay;
+	bool obs;
+};
+
+/* return true when tx is empty */
+unsigned int msm_hs_tx_empty(struct uart_port *uport);
+int msm_hs_request_clock_off(struct uart_port *uport);
+int msm_hs_request_clock_on(struct uart_port *uport);
+struct uart_port *msm_hs_get_uart_port(int port_index);
+void msm_hs_set_mctrl(struct uart_port *uport,
+				    unsigned int mctrl);
+#endif
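
As a hedged usage sketch (client code is not part of this patch, and the port
index below is a placeholder), a driver owning a high-speed UART line could gate
the UART clocks around its traffic with the helpers declared above:

	struct uart_port *uport = msm_hs_get_uart_port(0); /* hypothetical index */
	int ret;

	if (!uport)
		return -ENODEV;
	ret = msm_hs_request_clock_on(uport);	/* vote clocks on */
	if (ret)
		return ret;
	/* ... push TX data through the tty/serial core ... */
	if (msm_hs_tx_empty(uport))
		msm_hs_request_clock_off(uport);	/* allow low power again */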
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e5541af..67860f3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3960,6 +3960,7 @@
 #define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
 #define SCHED_CPUFREQ_WALT (1U << 4)
 #define SCHED_CPUFREQ_PL	(1U << 5)
+#define SCHED_CPUFREQ_EARLY_DET	(1U << 6)
 
 #define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi b/include/linux/spi/spi-geni-qcom.h
similarity index 63%
copy from arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi
copy to include/linux/spi/spi-geni-qcom.h
index bc431f2..8aee88a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-int-codec-audio-overlay.dtsi
+++ b/include/linux/spi/spi-geni-qcom.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -8,5 +9,15 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
+ *
  */
-#include "sdm670-audio-overlay.dtsi"
+
+#ifndef __SPI_GENI_QCOM_HEADER___
+#define __SPI_GENI_QCOM_HEADER___
+
+struct spi_geni_qcom_ctrl_data {
+	u32 spi_cs_clk_delay;
+	u32 spi_inter_words_delay;
+};
+
+#endif /*__SPI_GENI_QCOM_HEADER___*/
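
A minimal sketch of how a slave device might supply these delays, assuming the
GENI SPI master reads them from the standard spi_device->controller_data pointer
(that wiring is an assumption here, not shown in this patch):

	static struct spi_geni_qcom_ctrl_data my_delays = {
		.spi_cs_clk_delay = 2,		/* hypothetical values */
		.spi_inter_words_delay = 0,
	};

	static int my_slave_probe(struct spi_device *spi)
	{
		spi->controller_data = &my_delays;	/* hand delays to the master */
		return spi_setup(spi);
	}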
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index aa17ccf..c97fac8 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -43,9 +43,10 @@
 
 	struct fence		*fence;
 	struct fence_cb cb;
+	unsigned long flags;
 };
 
-#define POLL_ENABLED FENCE_FLAG_USER_BITS
+#define POLL_ENABLED 0
 
 struct sync_file *sync_file_create(struct fence *fence);
 struct fence *sync_file_get_fence(int fd);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index ef20e16..232c3e0 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -751,11 +751,11 @@
 extern int usb_sec_event_ring_cleanup(struct usb_device *dev,
 	unsigned int intr_num);
 
-extern dma_addr_t usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
-	unsigned int intr_num);
-extern dma_addr_t usb_get_dcba_dma_addr(struct usb_device *dev);
-extern dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
-	struct usb_host_endpoint *ep);
+extern phys_addr_t usb_get_sec_event_ring_phys_addr(
+	struct usb_device *dev, unsigned int intr_num, dma_addr_t *dma);
+extern phys_addr_t usb_get_xfer_ring_phys_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep, dma_addr_t *dma);
+extern int usb_get_controller_id(struct usb_device *dev);
 
 /* Sets up a group of bulk endpoints to support multiple stream IDs. */
 extern int usb_alloc_streams(struct usb_interface *interface,
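
A hedged sketch of how a client of the reworked exports might consume both
addresses and the controller id; udev and the interrupter number are
placeholders, not taken from this patch:

	dma_addr_t dma;
	phys_addr_t pa;
	int core_id;

	pa = usb_get_sec_event_ring_phys_addr(udev, 1, &dma); /* hypothetical intr_num */
	if (!pa)
		return -ENODEV;		/* ring not set up or HC halted */
	core_id = usb_get_controller_id(udev);
	if (core_id < 0)
		return core_id;		/* no usb-core-id provided */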
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index b305b0e..1699d2b 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -401,12 +401,12 @@
 	int (*sec_event_ring_setup)(struct usb_hcd *hcd, unsigned int intr_num);
 	int (*sec_event_ring_cleanup)(struct usb_hcd *hcd,
 			unsigned int intr_num);
-	dma_addr_t (*get_sec_event_ring_dma_addr)(struct usb_hcd *hcd,
-			unsigned int intr_num);
-	dma_addr_t (*get_xfer_ring_dma_addr)(struct usb_hcd *hcd,
-			struct usb_device *udev, struct usb_host_endpoint *ep);
-	dma_addr_t (*get_dcba_dma_addr)(struct usb_hcd *hcd,
-			struct usb_device *udev);
+	phys_addr_t (*get_sec_event_ring_phys_addr)(struct usb_hcd *hcd,
+			unsigned int intr_num, dma_addr_t *dma);
+	phys_addr_t (*get_xfer_ring_phys_addr)(struct usb_hcd *hcd,
+			struct usb_device *udev, struct usb_host_endpoint *ep,
+			dma_addr_t *dma);
+	int (*get_core_id)(struct usb_hcd *hcd);
 };
 
 static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -449,11 +449,11 @@
 	unsigned int intr_num);
 extern int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
 	unsigned int intr_num);
-extern dma_addr_t usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
-		unsigned int intr_num);
-extern dma_addr_t usb_hcd_get_dcba_dma_addr(struct usb_device *udev);
-extern dma_addr_t usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
-	struct usb_host_endpoint *ep);
+extern phys_addr_t usb_hcd_get_sec_event_ring_phys_addr(
+	struct usb_device *udev, unsigned int intr_num, dma_addr_t *dma);
+extern phys_addr_t usb_hcd_get_xfer_ring_phys_addr(
+	struct usb_device *udev, struct usb_host_endpoint *ep, dma_addr_t *dma);
+extern int usb_hcd_get_controller_id(struct usb_device *udev);
 
 struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
 		struct device *sysdev, struct device *dev, const char *bus_name,
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 3527c35..e58a522 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -127,16 +127,7 @@
 extern int icnss_smmu_map(struct device *dev, phys_addr_t paddr,
 			  uint32_t *iova_addr, size_t size);
 extern unsigned int icnss_socinfo_get_serial_number(struct device *dev);
-extern int icnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count);
-extern int icnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count,
-					 u16 buf_len);
-extern int icnss_wlan_set_dfs_nol(const void *info, u16 info_len);
-extern int icnss_wlan_get_dfs_nol(void *info, u16 info_len);
 extern bool icnss_is_qmi_disable(void);
 extern bool icnss_is_fw_ready(void);
-extern int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len);
-extern u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num);
 extern int icnss_trigger_recovery(struct device *dev);
-extern int icnss_get_driver_load_cnt(void);
-extern void icnss_increment_driver_load_cnt(void);
 #endif /* _ICNSS_WLAN_H_ */
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 6ff08de..d5438d3 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -324,6 +324,7 @@
 #define DRM_EVENT_SYS_BACKLIGHT 0x80000003
 #define DRM_EVENT_SDE_POWER 0x80000004
 #define DRM_EVENT_IDLE_NOTIFY 0x80000005
+#define DRM_EVENT_PANEL_DEAD 0x80000006 /* ESD event */
 
 #define DRM_IOCTL_MSM_GET_PARAM        DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW          DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index 5f70a57..0765527 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -174,6 +174,17 @@
 	__u32 strength;
 };
 
+#define HIST_V_SIZE 256
+/**
+ * struct drm_msm_hist - histogram feature structure
+ * @flags: for customizing operations
+ * @data: histogram data
+ */
+struct drm_msm_hist {
+	__u64 flags;
+	__u32 data[HIST_V_SIZE];
+};
+
 #define AD4_LUT_GRP0_SIZE 33
 #define AD4_LUT_GRP1_SIZE 32
 /*
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index fee26a9..9b7d055 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -216,7 +216,8 @@
 /* Maximum allowed buffers in existence */
 #define CAM_MEM_BUFQ_MAX                        1024
 
-#define CAM_MEM_MGR_HDL_IDX_SIZE                16
+#define CAM_MEM_MGR_SECURE_BIT_POS              15
+#define CAM_MEM_MGR_HDL_IDX_SIZE                15
 #define CAM_MEM_MGR_HDL_FD_SIZE                 16
 #define CAM_MEM_MGR_HDL_IDX_END_POS             16
 #define CAM_MEM_MGR_HDL_FD_END_POS              32
@@ -224,11 +225,19 @@
 #define CAM_MEM_MGR_HDL_IDX_MASK      ((1 << CAM_MEM_MGR_HDL_IDX_SIZE) - 1)
 
 #define GET_MEM_HANDLE(idx, fd) \
-	((idx << (CAM_MEM_MGR_HDL_IDX_END_POS - CAM_MEM_MGR_HDL_IDX_SIZE)) | \
+	((idx & CAM_MEM_MGR_HDL_IDX_MASK) | \
 	(fd << (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE))) \
 
 #define CAM_MEM_MGR_GET_HDL_IDX(hdl) (hdl & CAM_MEM_MGR_HDL_IDX_MASK)
 
+#define CAM_MEM_MGR_SET_SECURE_HDL(hdl, flag) \
+	((flag) ? (hdl |= (1 << CAM_MEM_MGR_SECURE_BIT_POS)) : \
+	((hdl) &= ~(1 << CAM_MEM_MGR_SECURE_BIT_POS)))
+
+#define CAM_MEM_MGR_IS_SECURE_HDL(hdl) \
+	(((hdl) & \
+	(1<<CAM_MEM_MGR_SECURE_BIT_POS)) >> CAM_MEM_MGR_SECURE_BIT_POS)
+
 /**
  * memory allocation type
  */
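
A worked illustration of the reworked handle layout above (values are
hypothetical): the index now occupies bits 0-14, bit 15 carries the secure flag,
and the fd stays in bits 16-31:

	int32_t hdl = GET_MEM_HANDLE(5, 10); /* (5 & 0x7FFF) | (10 << 16) = 0x000A0005 */

	CAM_MEM_MGR_SET_SECURE_HDL(hdl, 1);	/* sets bit 15: hdl == 0x000A8005 */
	/* CAM_MEM_MGR_GET_HDL_IDX(hdl) == 5, CAM_MEM_MGR_IS_SECURE_HDL(hdl) == 1 */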
diff --git a/include/uapi/media/cam_sensor.h b/include/uapi/media/cam_sensor.h
index 2fe7f2b..87f25b0 100644
--- a/include/uapi/media/cam_sensor.h
+++ b/include/uapi/media/cam_sensor.h
@@ -8,6 +8,7 @@
 #define CAM_SENSOR_PROBE_CMD   (CAM_COMMON_OPCODE_MAX + 1)
 #define CAM_FLASH_MAX_LED_TRIGGERS 3
 #define MAX_OIS_NAME_SIZE 32
+#define CAM_CSIPHY_SECURE_MODE_ENABLED 1
 /**
  * struct cam_sensor_query_cap - capabilities info for sensor
  *
@@ -316,15 +317,15 @@
 
 /**
  * cam_csiphy_info: Provides cmdbuffer structre
- * @lane_mask     :  Lane mask details
- * @lane_assign   :  Lane sensor will be using
- * @csiphy_3phase :  Total number of lanes
- * @combo_mode    :  Info regarding combo_mode is enable / disable
- * @lane_cnt      :  Total number of lanes
- * @reserved
- * @3phase        :  Details whether 3Phase / 2Phase operation
- * @settle_time   :  Settling time in ms
- * @data_rate     :  Data rate
+ * @lane_mask     : Lane mask details
+ * @lane_assign   : Lane sensor will be using
+ * @csiphy_3phase : Total number of lanes
+ * @combo_mode    : Info regarding combo_mode is enable / disable
+ * @lane_cnt      : Total number of lanes
+ * @secure_mode   : Secure mode flag to enable / disable
+ * @3phase        : Details whether 3Phase / 2Phase operation
+ * @settle_time   : Settling time in ms
+ * @data_rate     : Data rate
  *
  */
 struct cam_csiphy_info {
@@ -333,7 +334,7 @@
 	uint8_t     csiphy_3phase;
 	uint8_t     combo_mode;
 	uint8_t     lane_cnt;
-	uint8_t     reserved;
+	uint8_t     secure_mode;
 	uint64_t    settle_time;
 	uint64_t    data_rate;
 } __attribute__((packed));
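
As an illustration only, user space requesting the new secure mode would populate
the repurposed byte with the constant added above (the surrounding lane values
are hypothetical):

	struct cam_csiphy_info info = {0};

	info.lane_cnt = 4;		/* hypothetical lane configuration */
	info.secure_mode = CAM_CSIPHY_SECURE_MODE_ENABLED;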
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 352cfca..08fd4be 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3305,6 +3305,7 @@
 	bool early_notif;
 	u32 old_load;
 	struct related_thread_group *grp;
+	unsigned int flag = 0;
 
 	sched_clock_tick();
 
@@ -3321,10 +3322,12 @@
 	cpu_load_update_active(rq);
 	calc_global_load_tick(rq);
 	sched_freq_tick(cpu);
-	cpufreq_update_util(rq, 0);
 
 	early_notif = early_detection_notify(rq, wallclock);
+	if (early_notif)
+		flag = SCHED_CPUFREQ_WALT | SCHED_CPUFREQ_EARLY_DET;
 
+	cpufreq_update_util(rq, flag);
 	raw_spin_unlock(&rq->lock);
 
 	if (early_notif)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index c2372f8..2eb966c 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -28,6 +28,7 @@
 struct sugov_tunables {
 	struct gov_attr_set attr_set;
 	unsigned int rate_limit_us;
+	unsigned int hispeed_load;
 	unsigned int hispeed_freq;
 	bool pl;
 };
@@ -266,7 +267,7 @@
 }
 
 #define NL_RATIO 75
-#define HISPEED_LOAD 90
+#define DEFAULT_HISPEED_LOAD 90
 static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
 			      unsigned long *max)
 {
@@ -280,7 +281,7 @@
 		return;
 
 	is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
-					   HISPEED_LOAD,
+					   sg_policy->tunables->hispeed_load,
 					   100));
 
 	if (is_hiload && !is_migration)
@@ -508,6 +509,26 @@
 	return count;
 }
 
+static ssize_t hispeed_load_show(struct gov_attr_set *attr_set, char *buf)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	return sprintf(buf, "%u\n", tunables->hispeed_load);
+}
+
+static ssize_t hispeed_load_store(struct gov_attr_set *attr_set,
+				  const char *buf, size_t count)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	if (kstrtouint(buf, 10, &tunables->hispeed_load))
+		return -EINVAL;
+
+	tunables->hispeed_load = min(100U, tunables->hispeed_load);
+
+	return count;
+}
+
 static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
 {
 	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
@@ -559,11 +580,13 @@
 }
 
 static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
+static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
 static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
 static struct governor_attr pl = __ATTR_RW(pl);
 
 static struct attribute *sugov_attributes[] = {
 	&rate_limit_us.attr,
+	&hispeed_load.attr,
 	&hispeed_freq.attr,
 	&pl.attr,
 	NULL
@@ -710,6 +733,7 @@
 	}
 
 	tunables->rate_limit_us = LATENCY_MULTIPLIER;
+	tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
 	tunables->hispeed_freq = 0;
 	lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
 	if (lat)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 80bf3b0..d61c570 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5658,10 +5658,10 @@
 
 	for (idx = 0; idx < sge->nr_cap_states; idx++) {
 		if (sge->cap_states[idx].cap >= util)
-			break;
+			return idx;
 	}
 
-	return idx;
+	return (sge->nr_cap_states - 1);
 }
 
 static int find_new_capacity(struct energy_env *eenv,
@@ -6161,15 +6161,15 @@
 	return __task_fits(p, cpu, cpu_util(cpu));
 }
 
-static bool __cpu_overutilized(int cpu, int delta)
+bool __cpu_overutilized(int cpu, unsigned long util)
 {
-	return (capacity_orig_of(cpu) * 1024) <
-	       ((cpu_util(cpu) + delta) * sysctl_sched_capacity_margin);
+	return (capacity_orig_of(cpu) * 1024 <
+		util * sysctl_sched_capacity_margin);
 }
 
 bool cpu_overutilized(int cpu)
 {
-	return __cpu_overutilized(cpu, 0);
+	return __cpu_overutilized(cpu, cpu_util(cpu));
 }
 
 #ifdef CONFIG_SCHED_TUNE
@@ -7188,7 +7188,9 @@
 		task_util_boosted = 0;
 #endif
 		/* Not enough spare capacity on previous cpu */
-		if (__cpu_overutilized(task_cpu(p), task_util_boosted)) {
+		if (__cpu_overutilized(task_cpu(p),
+				       cpu_util(task_cpu(p)) +
+						task_util_boosted)) {
 			trace_sched_task_util_overutilzed(p, task_cpu(p),
 						task_util(p), target_cpu,
 						target_cpu, 0, need_idle);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 1bc4828..35382df 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1724,11 +1724,11 @@
 	unsigned long cpu_capacity;
 	unsigned long best_capacity;
 	unsigned long util, best_cpu_util = ULONG_MAX;
+	unsigned long best_cpu_util_cum = ULONG_MAX;
+	unsigned long util_cum;
+	unsigned long tutil = task_util(task);
 	int best_cpu_idle_idx = INT_MAX;
 	int cpu_idle_idx = -1;
-	long new_util_cum;
-	int max_spare_cap_cpu = -1;
-	long max_spare_cap = -LONG_MAX;
 	bool placement_boost;
 
 	/* Make sure the mask is initialized first */
@@ -1791,55 +1791,55 @@
 			 * double count rt task load.
 			 */
 			util = cpu_util(cpu);
-			if (!cpu_overutilized(cpu)) {
-				if (cpu_isolated(cpu))
+
+			if (__cpu_overutilized(cpu, util + tutil))
+				continue;
+
+			if (cpu_isolated(cpu))
+				continue;
+
+			if (sched_cpu_high_irqload(cpu))
+				continue;
+
+			/* Find the least loaded CPU */
+			if (util > best_cpu_util)
+				continue;
+
+			/*
+			 * If the previous CPU has same load, keep it as
+			 * best_cpu.
+			 */
+			if (best_cpu_util == util && best_cpu == task_cpu(task))
+				continue;
+
+			/*
+			 * If candidate CPU is the previous CPU, select it.
+			 * Otherwise, if its load is same with best_cpu and in
+			 * a shallower C-state, select it.  If all above
+			 * conditions are same, select the least cumulative
+			 * window demand CPU.
+			 */
+			if (sysctl_sched_cstate_aware)
+				cpu_idle_idx = idle_get_state_idx(cpu_rq(cpu));
+
+			util_cum = cpu_util_cum(cpu, 0);
+			if (cpu != task_cpu(task) && best_cpu_util == util) {
+				if (best_cpu_idle_idx < cpu_idle_idx)
 					continue;
 
-				if (sched_cpu_high_irqload(cpu))
+				if (best_cpu_idle_idx == cpu_idle_idx &&
+				    best_cpu_util_cum < util_cum)
 					continue;
-
-				new_util_cum = cpu_util_cum(cpu, 0);
-
-				if (!task_in_cum_window_demand(cpu_rq(cpu),
-							       task))
-					new_util_cum += task_util(task);
-
-				trace_sched_cpu_util(task, cpu, task_util(task),
-						     0, new_util_cum, 0);
-
-				if (sysctl_sched_cstate_aware)
-					cpu_idle_idx =
-					     idle_get_state_idx(cpu_rq(cpu));
-
-				if (add_capacity_margin(new_util_cum, cpu) <
-				    capacity_curr_of(cpu)) {
-					if (cpu_idle_idx < best_cpu_idle_idx ||
-					    (best_cpu != task_cpu(task) &&
-					     (best_cpu_idle_idx ==
-					      cpu_idle_idx &&
-					      best_cpu_util > util))) {
-						best_cpu_util = util;
-						best_cpu = cpu;
-						best_cpu_idle_idx =
-						    cpu_idle_idx;
-					}
-				} else {
-					long spare_cap = capacity_of(cpu) -
-							 util;
-
-					if (spare_cap > 0 &&
-					    max_spare_cap < spare_cap) {
-						max_spare_cap_cpu = cpu;
-						max_spare_cap = spare_cap;
-					}
-				}
 			}
+
+			best_cpu_idle_idx = cpu_idle_idx;
+			best_cpu_util_cum = util_cum;
+			best_cpu_util = util;
+			best_cpu = cpu;
 		}
 
 		if (best_cpu != -1) {
 			return best_cpu;
-		} else if (max_spare_cap_cpu != -1) {
-			return max_spare_cap_cpu;
 		} else if (!cpumask_empty(&backup_search_cpu)) {
 			cpumask_copy(&search_cpu, &backup_search_cpu);
 			cpumask_clear(&backup_search_cpu);
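
The rewritten RT placement loop above drops the spare-capacity fallback and instead rejects CPUs up front if they would become overutilized with the task added (util + tutil), are isolated, or carry a high IRQ load, then ranks the remaining candidates. A distilled restatement of that ranking, using a hypothetical helper name (candidate_beats_best() does not exist in the kernel; it only condenses the comparisons in the hunk):

	/* Preference order: least loaded; on a utilization tie stick with the
	 * task's previous CPU; otherwise prefer the shallower C-state, then
	 * the smaller cumulative window demand. */
	static bool candidate_beats_best(bool cand_is_prev, bool best_is_prev,
					 unsigned long util, int idle_idx,
					 unsigned long util_cum,
					 unsigned long best_util,
					 int best_idle_idx,
					 unsigned long best_util_cum)
	{
		if (util != best_util)
			return util < best_util;
		if (cand_is_prev || best_is_prev)
			return cand_is_prev;
		if (idle_idx != best_idle_idx)
			return idle_idx < best_idle_idx;
		return util_cum < best_util_cum;
	}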
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0a1e62f..a5b1377 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1457,6 +1457,7 @@
 
 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
 
+bool __cpu_overutilized(int cpu, unsigned long util);
 bool cpu_overutilized(int cpu);
 
 #endif
@@ -2291,7 +2292,7 @@
 
 #ifdef CONFIG_SCHED_WALT
 	unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG |
-						SCHED_CPUFREQ_PL;
+				SCHED_CPUFREQ_PL | SCHED_CPUFREQ_EARLY_DET;
 
 	/*
 	 * Skip if we've already reported, but not if this is an inter-cluster
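
Adding SCHED_CPUFREQ_EARLY_DET to exception_flags makes early-detection updates behave like inter-cluster-migration and PL updates: they are never dropped by the "already reported" short-circuit the comment above describes. A hypothetical sketch of that shape (already_reported stands in for the real window check, which is not shown in this hunk):

	unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG |
				SCHED_CPUFREQ_PL | SCHED_CPUFREQ_EARLY_DET;

	/* Hypothetical: an update carrying any exception flag bypasses the
	 * duplicate-report check and always reaches the cpufreq governor. */
	if (already_reported && !(flags & exception_flags))
		return;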
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 65e24fb..fe850b9 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -143,7 +143,7 @@
 		.nr_entries = 0,
 		.entries = entries,
 		.max_entries = PAGE_OWNER_STACK_DEPTH,
-		.skip = 0
+		.skip = 2
 	};
 	depot_stack_handle_t handle;
 
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index a52fbaf..ec87467 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -244,7 +244,7 @@
 			transparent = xt_socket_sk_is_transparent(sk);
 
 		if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-		    transparent)
+		    transparent && sk_fullsock(sk))
 			pskb->mark = sk->sk_mark;
 
 		sock_gen_put(sk);
@@ -433,7 +433,7 @@
 			transparent = xt_socket_sk_is_transparent(sk);
 
 		if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-		    transparent)
+		    transparent && sk_fullsock(sk))
 			pskb->mark = sk->sk_mark;
 
 		if (sk != skb->sk)
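
Both hunks add the same guard because socket lookup can return request-sock or time-wait mini-sockets, which are not full struct sock objects and do not carry a valid sk_mark; restoring the mark from one would read a bogus field. Roughly, the guard looks like this (see include/net/sock.h for the real definition):

	static inline bool sk_fullsock(const struct sock *sk)
	{
		/* mini-sockets (TIME_WAIT, NEW_SYN_RECV) are excluded */
		return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
	}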
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index 35be79e..57646ef 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -537,6 +537,7 @@
 {
 	int required_headroom, additional_header_length, ckresult;
 	struct rmnet_map_header_s *map_header;
+	int non_linear_skb;
 
 	additional_header_length = 0;
 
@@ -565,9 +566,11 @@
 		rmnet_stats_ul_checksum(ckresult);
 	}
 
+	non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
+			 skb_is_nonlinear(skb);
+
 	if ((!(config->egress_data_format &
-	    RMNET_EGRESS_FORMAT_AGGREGATION)) ||
-	    ((orig_dev->features & NETIF_F_GSO) && skb_is_nonlinear(skb)))
+	    RMNET_EGRESS_FORMAT_AGGREGATION)) || non_linear_skb)
 		map_header = rmnet_map_add_map_header
 		(skb, additional_header_length, RMNET_MAP_NO_PAD_BYTES);
 	else
@@ -589,7 +592,8 @@
 
 	skb->protocol = htons(ETH_P_MAP);
 
-	if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
+	if ((config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) &&
+	    !non_linear_skb) {
 		rmnet_map_aggregate(skb, config);
 		return RMNET_MAP_CONSUMED;
 	}
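
Caching the GSO/non-linear test in non_linear_skb keeps the two decisions above consistent: such skbs take the un-padded MAP header path and are also excluded from aggregation, since aggregating them would first require linearizing the paged data. A sketch of the resulting egress policy, using the names from the hunk:

	int non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
			     skb_is_nonlinear(skb);
	int aggregate = (config->egress_data_format &
			 RMNET_EGRESS_FORMAT_AGGREGATION) && !non_linear_skb;

	if (aggregate) {
		rmnet_map_aggregate(skb, config);
		return RMNET_MAP_CONSUMED;
	}
	/* otherwise the skb already carries a MAP header with no pad bytes */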
diff --git a/scripts/Makefile.dtbo b/scripts/Makefile.dtbo
index db4a0f4..b298f4a 100644
--- a/scripts/Makefile.dtbo
+++ b/scripts/Makefile.dtbo
@@ -10,7 +10,7 @@
 ifneq ($(DTC_OVERLAY_TEST_EXT),)
 DTC_OVERLAY_TEST = $(DTC_OVERLAY_TEST_EXT)
 quiet_cmd_dtbo_verify	= VERIFY  $@
-cmd_dtbo_verify = $(DTC_OVERLAY_TEST) $(addprefix $(obj)/,$($(@F)-base)) $@ $(dot-target).dtb
+cmd_dtbo_verify = $(DTC_OVERLAY_TEST) $(addprefix $(obj)/,$($(@F)-base)) $@ $(dot-target).tmp
 else
 cmd_dtbo_verify = true
 endif
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 8279009..dd81574 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -39,6 +39,8 @@
 #define SUBSTREAM_FLAG_DATA_EP_STARTED	0
 #define SUBSTREAM_FLAG_SYNC_EP_STARTED	1
 
+#define MAX_SETALT_TIMEOUT_MS 1000
+
 /* return the estimated delay based on USB frame counters */
 snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs,
 				    unsigned int rate)
@@ -508,7 +510,8 @@
 
 	/* close the old interface */
 	if (subs->interface >= 0 && subs->interface != fmt->iface) {
-		err = usb_set_interface(subs->dev, subs->interface, 0);
+		err = usb_set_interface_timeout(subs->dev, subs->interface, 0,
+			MAX_SETALT_TIMEOUT_MS);
 		if (err < 0) {
 			dev_err(&dev->dev,
 				"%d:%d: return to setting 0 failed (%d)\n",
@@ -527,7 +530,8 @@
 		if (err < 0)
 			return -EIO;
 
-		err = usb_set_interface(dev, fmt->iface, fmt->altsetting);
+		err = usb_set_interface_timeout(dev, fmt->iface,
+				fmt->altsetting, MAX_SETALT_TIMEOUT_MS);
 		if (err < 0) {
 			dev_err(&dev->dev,
 				"%d:%d: usb_set_interface failed (%d)\n",
@@ -574,7 +578,8 @@
 
 	if (!enable) {
 		if (subs->interface >= 0) {
-			usb_set_interface(subs->dev, subs->interface, 0);
+			usb_set_interface_timeout(subs->dev, subs->interface, 0,
+				MAX_SETALT_TIMEOUT_MS);
 			subs->altset_idx = 0;
 			subs->interface = -1;
 			subs->cur_audiofmt = NULL;
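
All three call sites now use usb_set_interface_timeout(), a helper available in this tree, so a SET_INTERFACE control transfer that a device never completes fails after MAX_SETALT_TIMEOUT_MS instead of blocking stream setup or teardown indefinitely. A minimal usage sketch, mirroring the first call site above:

	#define MAX_SETALT_TIMEOUT_MS 1000

	/* Bound the altsetting change to 1000 ms. */
	err = usb_set_interface_timeout(subs->dev, subs->interface, 0,
			MAX_SETALT_TIMEOUT_MS);
	if (err < 0)
		return err;	/* caller handles the failure */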
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 801508c..ebc081f 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -45,8 +45,7 @@
 /*  event ring iova base address */
 #define IOVA_BASE 0x1000
 
-#define IOVA_DCBA_BASE 0x2000
-#define IOVA_XFER_RING_BASE (IOVA_DCBA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1))
+#define IOVA_XFER_RING_BASE (IOVA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1))
 #define IOVA_XFER_BUF_BASE (IOVA_XFER_RING_BASE + PAGE_SIZE * SNDRV_CARDS * 32)
 #define IOVA_XFER_RING_MAX (IOVA_XFER_BUF_BASE - PAGE_SIZE)
 #define IOVA_XFER_BUF_MAX (0xfffff000 - PAGE_SIZE)
@@ -83,8 +82,6 @@
 	unsigned int card_num;
 	atomic_t in_use;
 	struct kref kref;
-	unsigned long dcba_iova;
-	size_t dcba_size;
 	wait_queue_head_t disconnect_wq;
 
 	/* interface specific */
@@ -101,9 +98,6 @@
 	struct iommu_domain *domain;
 
 	/* list to keep track of available iova */
-	struct list_head dcba_list;
-	size_t dcba_iova_size;
-	unsigned long curr_dcba_iova;
 	struct list_head xfer_ring_list;
 	size_t xfer_ring_iova_size;
 	unsigned long curr_xfer_ring_iova;
@@ -150,7 +144,6 @@
 
 enum mem_type {
 	MEM_EVENT_RING,
-	MEM_DCBA,
 	MEM_XFER_RING,
 	MEM_XFER_BUF,
 };
@@ -176,6 +169,24 @@
 	USB_QMI_PCM_FORMAT_U32_BE,
 };
 
+static enum usb_audio_device_speed_enum_v01
+get_speed_info(enum usb_device_speed udev_speed)
+{
+	switch (udev_speed) {
+	case USB_SPEED_LOW:
+		return USB_AUDIO_DEVICE_SPEED_LOW_V01;
+	case USB_SPEED_FULL:
+		return USB_AUDIO_DEVICE_SPEED_FULL_V01;
+	case USB_SPEED_HIGH:
+		return USB_AUDIO_DEVICE_SPEED_HIGH_V01;
+	case USB_SPEED_SUPER:
+		return USB_AUDIO_DEVICE_SPEED_SUPER_V01;
+	default:
+		pr_err("%s: unsupported udev speed %d\n", __func__, udev_speed);
+		return USB_AUDIO_DEVICE_SPEED_INVALID_V01;
+	}
+}
+
 static unsigned long uaudio_get_iova(unsigned long *curr_iova,
 	size_t *curr_iova_size, struct list_head *head, size_t size)
 {
@@ -275,10 +286,6 @@
 		if (uaudio_qdev->er_phys_addr == pa)
 			map = false;
 		break;
-	case MEM_DCBA:
-		va = uaudio_get_iova(&uaudio_qdev->curr_dcba_iova,
-		&uaudio_qdev->dcba_iova_size, &uaudio_qdev->dcba_list, size);
-		break;
 	case MEM_XFER_RING:
 		va = uaudio_get_iova(&uaudio_qdev->curr_xfer_ring_iova,
 		&uaudio_qdev->xfer_ring_iova_size, &uaudio_qdev->xfer_ring_list,
@@ -363,10 +370,7 @@
 		else
 			unmap = false;
 		break;
-	case MEM_DCBA:
-		uaudio_put_iova(va, size, &uaudio_qdev->dcba_list,
-		&uaudio_qdev->dcba_iova_size);
-		break;
+
 	case MEM_XFER_RING:
 		uaudio_put_iova(va, size, &uaudio_qdev->xfer_ring_list,
 		&uaudio_qdev->xfer_ring_iova_size);
@@ -407,10 +411,12 @@
 	int protocol, card_num, pcm_dev_num;
 	void *hdr_ptr;
 	u8 *xfer_buf;
-	u32 len, mult, remainder, xfer_buf_len;
-	unsigned long va, tr_data_va = 0, tr_sync_va = 0, dcba_va = 0,
-	xfer_buf_va = 0;
-	phys_addr_t xhci_pa, xfer_buf_pa;
+	u32 len, mult, remainder, xfer_buf_len, sg_len, i, total_len = 0;
+	unsigned long va, va_sg, tr_data_va = 0, tr_sync_va = 0;
+	phys_addr_t xhci_pa, xfer_buf_pa, tr_data_pa = 0, tr_sync_pa = 0;
+	dma_addr_t dma;
+	struct sg_table sgt;
+	struct scatterlist *sg;
 
 	iface = usb_ifnum_to_if(subs->dev, subs->interface);
 	if (!iface) {
@@ -524,13 +530,13 @@
 	memcpy(&resp->std_as_data_ep_desc, &ep->desc, sizeof(ep->desc));
 	resp->std_as_data_ep_desc_valid = 1;
 
-	xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
-	if (!xhci_pa) {
+	tr_data_pa = usb_get_xfer_ring_phys_addr(subs->dev, ep, &dma);
+	if (!tr_data_pa) {
 		pr_err("%s:failed to get data ep ring dma address\n", __func__);
 		goto err;
 	}
 
-	resp->xhci_mem_info.tr_data.pa = xhci_pa;
+	resp->xhci_mem_info.tr_data.pa = dma;
 
 	if (subs->sync_endpoint) {
 		ep = usb_pipe_endpoint(subs->dev, subs->sync_endpoint->pipe);
@@ -541,19 +547,26 @@
 		memcpy(&resp->std_as_sync_ep_desc, &ep->desc, sizeof(ep->desc));
 		resp->std_as_sync_ep_desc_valid = 1;
 
-		xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
-		if (!xhci_pa) {
+		tr_sync_pa = usb_get_xfer_ring_phys_addr(subs->dev, ep, &dma);
+		if (!tr_sync_pa) {
 			pr_err("%s:failed to get sync ep ring dma address\n",
 				__func__);
 			goto err;
 		}
-		resp->xhci_mem_info.tr_sync.pa = xhci_pa;
+		resp->xhci_mem_info.tr_sync.pa = dma;
 	}
 
 skip_sync_ep:
 	resp->interrupter_num = uaudio_qdev->intr_num;
 	resp->interrupter_num_valid = 1;
 
+	ret = usb_get_controller_id(subs->dev);
+	if (ret < 0)
+		goto err;
+
+	resp->controller_num = ret;
+	resp->controller_num_valid = 1;
+
 	/*  map xhci data structures PA memory to iova */
 
 	/* event ring */
@@ -563,8 +576,8 @@
 			ret);
 		goto err;
 	}
-	xhci_pa = usb_get_sec_event_ring_dma_addr(subs->dev,
-			resp->interrupter_num);
+	xhci_pa = usb_get_sec_event_ring_phys_addr(subs->dev,
+			resp->interrupter_num, &dma);
 	if (!xhci_pa) {
 		pr_err("%s: failed to get sec event ring dma address\n",
 		__func__);
@@ -577,37 +590,20 @@
 
 	resp->xhci_mem_info.evt_ring.va = PREPEND_SID_TO_IOVA(va,
 						uaudio_qdev->sid);
-	resp->xhci_mem_info.evt_ring.pa = xhci_pa;
+	resp->xhci_mem_info.evt_ring.pa = dma;
 	resp->xhci_mem_info.evt_ring.size = PAGE_SIZE;
 	uaudio_qdev->er_phys_addr = xhci_pa;
 
-	/* dcba */
-	xhci_pa = usb_get_dcba_dma_addr(subs->dev);
-	if (!xhci_pa) {
-		pr_err("%s:failed to get dcba dma address\n", __func__);
+	resp->speed_info = get_speed_info(subs->dev->speed);
+	if (resp->speed_info == USB_AUDIO_DEVICE_SPEED_INVALID_V01)
 		goto unmap_er;
-	}
 
-	if (!uadev[card_num].dcba_iova) { /* mappped per usb device */
-		va = uaudio_iommu_map(MEM_DCBA, xhci_pa, PAGE_SIZE);
-		if (!va)
-			goto unmap_er;
-
-		uadev[card_num].dcba_iova = va;
-		uadev[card_num].dcba_size = PAGE_SIZE;
-	}
-
-	dcba_va = uadev[card_num].dcba_iova;
-	resp->xhci_mem_info.dcba.va = PREPEND_SID_TO_IOVA(dcba_va,
-						uaudio_qdev->sid);
-	resp->xhci_mem_info.dcba.pa = xhci_pa;
-	resp->xhci_mem_info.dcba.size = PAGE_SIZE;
+	resp->speed_info_valid = 1;
 
 	/* data transfer ring */
-	xhci_pa = resp->xhci_mem_info.tr_data.pa;
-	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	va = uaudio_iommu_map(MEM_XFER_RING, tr_data_pa, PAGE_SIZE);
 	if (!va)
-		goto unmap_dcba;
+		goto unmap_er;
 
 	tr_data_va = va;
 	resp->xhci_mem_info.tr_data.va = PREPEND_SID_TO_IOVA(va,
@@ -619,7 +615,7 @@
 		goto skip_sync;
 
 	xhci_pa = resp->xhci_mem_info.tr_sync.pa;
-	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	va = uaudio_iommu_map(MEM_XFER_RING, tr_sync_pa, PAGE_SIZE);
 	if (!va)
 		goto unmap_data;
 
@@ -648,19 +644,33 @@
 	if (!xfer_buf)
 		goto unmap_sync;
 
+	dma_get_sgtable(subs->dev->bus->sysdev, &sgt, xfer_buf, xfer_buf_pa,
+			len);
+
+	va = 0;
+	for_each_sg(sgt.sgl, sg, sgt.nents, i) {
+		sg_len = PAGE_ALIGN(sg->offset + sg->length);
+		va_sg = uaudio_iommu_map(MEM_XFER_BUF,
+			page_to_phys(sg_page(sg)), sg_len);
+		if (!va_sg)
+			goto unmap_xfer_buf;
+
+		if (!va)
+			va = va_sg;
+
+		total_len += sg_len;
+	}
+
 	resp->xhci_mem_info.xfer_buff.pa = xfer_buf_pa;
 	resp->xhci_mem_info.xfer_buff.size = len;
 
-	va = uaudio_iommu_map(MEM_XFER_BUF, xfer_buf_pa, len);
-	if (!va)
-		goto unmap_sync;
-
-	xfer_buf_va = va;
 	resp->xhci_mem_info.xfer_buff.va = PREPEND_SID_TO_IOVA(va,
 						uaudio_qdev->sid);
 
 	resp->xhci_mem_info_valid = 1;
 
+	sg_free_table(&sgt);
+
 	if (!atomic_read(&uadev[card_num].in_use)) {
 		kref_init(&uadev[card_num].kref);
 		init_waitqueue_head(&uadev[card_num].disconnect_wq);
@@ -686,7 +696,7 @@
 	uadev[card_num].info[info_idx].data_xfer_ring_size = PAGE_SIZE;
 	uadev[card_num].info[info_idx].sync_xfer_ring_va = tr_sync_va;
 	uadev[card_num].info[info_idx].sync_xfer_ring_size = PAGE_SIZE;
-	uadev[card_num].info[info_idx].xfer_buf_va = xfer_buf_va;
+	uadev[card_num].info[info_idx].xfer_buf_va = va;
 	uadev[card_num].info[info_idx].xfer_buf_pa = xfer_buf_pa;
 	uadev[card_num].info[info_idx].xfer_buf_size = len;
 	uadev[card_num].info[info_idx].xfer_buf = xfer_buf;
@@ -701,14 +711,13 @@
 	return 0;
 
 unmap_xfer_buf:
-	uaudio_iommu_unmap(MEM_XFER_BUF, xfer_buf_va, len);
+	if (va)
+		uaudio_iommu_unmap(MEM_XFER_BUF, va, total_len);
 unmap_sync:
 	usb_free_coherent(subs->dev, len, xfer_buf, xfer_buf_pa);
 	uaudio_iommu_unmap(MEM_XFER_RING, tr_sync_va, PAGE_SIZE);
 unmap_data:
 	uaudio_iommu_unmap(MEM_XFER_RING, tr_data_va, PAGE_SIZE);
-unmap_dcba:
-	uaudio_iommu_unmap(MEM_DCBA, dcba_va, PAGE_SIZE);
 unmap_er:
 	uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
 err:
@@ -754,11 +763,6 @@
 			dev->info[if_idx].intf_num, dev->card_num);
 	}
 
-	/* iommu_unmap dcba iova for a usb device */
-	uaudio_iommu_unmap(MEM_DCBA, dev->dcba_iova, dev->dcba_size);
-
-	dev->dcba_iova = 0;
-	dev->dcba_size = 0;
 	dev->num_intf = 0;
 
 	/* free interface info */
@@ -1228,11 +1232,7 @@
 		goto free_domain;
 	}
 
-	/* initialize dcba, xfer ring and xfer buf iova list */
-	INIT_LIST_HEAD(&uaudio_qdev->dcba_list);
-	uaudio_qdev->curr_dcba_iova = IOVA_DCBA_BASE;
-	uaudio_qdev->dcba_iova_size = SNDRV_CARDS * PAGE_SIZE;
-
+	/* initialize xfer ring and xfer buf iova list */
 	INIT_LIST_HEAD(&uaudio_qdev->xfer_ring_list);
 	uaudio_qdev->curr_xfer_ring_iova = IOVA_XFER_RING_BASE;
 	uaudio_qdev->xfer_ring_iova_size =
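
The transfer-buffer hunk above stops assuming the usb_alloc_coherent() buffer is physically contiguous: dma_get_sgtable() recovers the pages backing it and each scatterlist segment is IOMMU-mapped on its own, with the first returned IOVA handed to the client and total_len accumulated so the error path can unwind exactly what was mapped. A condensed sketch of that pattern, using the same helpers as the hunk:

	struct sg_table sgt;
	struct scatterlist *sg;
	unsigned long va = 0, va_sg;
	u32 i, sg_len, total_len = 0;

	dma_get_sgtable(subs->dev->bus->sysdev, &sgt, xfer_buf, xfer_buf_pa,
			len);
	for_each_sg(sgt.sgl, sg, sgt.nents, i) {
		sg_len = PAGE_ALIGN(sg->offset + sg->length);
		va_sg = uaudio_iommu_map(MEM_XFER_BUF,
				page_to_phys(sg_page(sg)), sg_len);
		if (!va_sg)
			goto unmap_xfer_buf;	/* unwinds total_len bytes */
		if (!va)
			va = va_sg;		/* IOVA reported to the client */
		total_len += sg_len;
	}
	sg_free_table(&sgt);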
diff --git a/sound/usb/usb_audio_qmi_v01.c b/sound/usb/usb_audio_qmi_v01.c
index fef7505..a93665c 100644
--- a/sound/usb/usb_audio_qmi_v01.c
+++ b/sound/usb/usb_audio_qmi_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -633,6 +633,46 @@
 					interrupter_num),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1C,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					speed_info_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum usb_audio_device_speed_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1C,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					speed_info),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1D,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					controller_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1D,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					controller_num),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
 		.is_array       = QMI_COMMON_TLV_TYPE,
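
Each optional field added above is described by a pair of elem_info entries sharing a tlv_type: the QMI_OPT_FLAG entry maps the *_valid byte and the typed entry maps the value, so the TLV is only encoded when the flag is set. The producer side in usb_audio_qmi_svc.c fills them as in the earlier hunks:

	resp->speed_info = get_speed_info(subs->dev->speed);
	resp->speed_info_valid = 1;

	resp->controller_num = ret;	/* from usb_get_controller_id() */
	resp->controller_num_valid = 1;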
diff --git a/sound/usb/usb_audio_qmi_v01.h b/sound/usb/usb_audio_qmi_v01.h
index 83a966c..9900764 100644
--- a/sound/usb/usb_audio_qmi_v01.h
+++ b/sound/usb/usb_audio_qmi_v01.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -77,6 +77,16 @@
 	USB_AUDIO_DEVICE_INDICATION_ENUM_MAX_VAL_V01 = INT_MAX,
 };
 
+enum usb_audio_device_speed_enum_v01 {
+	USB_AUDIO_DEVICE_SPEED_ENUM_MIN_VAL_V01 = INT_MIN,
+	USB_AUDIO_DEVICE_SPEED_INVALID_V01 = 0,
+	USB_AUDIO_DEVICE_SPEED_LOW_V01 = 1,
+	USB_AUDIO_DEVICE_SPEED_FULL_V01 = 2,
+	USB_AUDIO_DEVICE_SPEED_HIGH_V01 = 3,
+	USB_AUDIO_DEVICE_SPEED_SUPER_V01 = 4,
+	USB_AUDIO_DEVICE_SPEED_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
 struct qmi_uaudio_stream_req_msg_v01 {
 	uint8_t enable;
 	uint32_t usb_token;
@@ -118,8 +128,12 @@
 	struct apps_mem_info_v01 xhci_mem_info;
 	uint8_t interrupter_num_valid;
 	uint8_t interrupter_num;
+	uint8_t speed_info_valid;
+	enum usb_audio_device_speed_enum_v01 speed_info;
+	uint8_t controller_num_valid;
+	uint8_t controller_num;
 };
-#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 191
+#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 202
 extern struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[];
 
 struct qmi_uaudio_stream_ind_msg_v01 {
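
The bump from 191 to 202 bytes is accounted for by the two new optional TLVs, assuming the standard 3-byte QMI TLV header (one byte of type plus two bytes of length):

	/* speed_info:     3-byte TLV header + 4-byte enum  = 7 bytes
	 * controller_num: 3-byte TLV header + 1-byte value = 4 bytes
	 * 191 + 7 + 4 = 202
	 */
	#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 202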